You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 
 
 

744 lines
19 KiB

  1. /***************************************************************************
  2. * Copyright (C) 2007 by Dominic Rath *
  3. * Dominic.Rath@gmx.de *
  4. * *
  5. * This program is free software; you can redistribute it and/or modify *
  6. * it under the terms of the GNU General Public License as published by *
  7. * the Free Software Foundation; either version 2 of the License, or *
  8. * (at your option) any later version. *
  9. * *
  10. * This program is distributed in the hope that it will be useful, *
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  13. * GNU General Public License for more details. *
  14. * *
  15. * You should have received a copy of the GNU General Public License *
  16. * along with this program; if not, write to the *
  17. * Free Software Foundation, Inc., *
  18. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
  19. ***************************************************************************/
  20. #ifdef HAVE_CONFIG_H
  21. #include "config.h"
  22. #endif
  23. #include "arm.h"
  24. #include "etm.h"
  25. #include "etb.h"
  26. #include "register.h"
/* Names for the nine ETB registers.  The array index of each entry
 * equals the ETB register address programmed into the scan chain
 * (see arch_info[i].addr in etb_build_reg_cache()). */
static char* etb_reg_list[] =
{
	"ETB_identification",
	"ETB_ram_depth",
	"ETB_ram_width",
	"ETB_status",
	"ETB_ram_data",
	"ETB_ram_read_pointer",
	"ETB_ram_write_pointer",
	"ETB_trigger_counter",
	"ETB_control",
};
  39. static int etb_get_reg(struct reg *reg);
  40. static int etb_set_instr(struct etb *etb, uint32_t new_instr)
  41. {
  42. struct jtag_tap *tap;
  43. tap = etb->tap;
  44. if (tap == NULL)
  45. return ERROR_FAIL;
  46. if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
  47. {
  48. struct scan_field field;
  49. field.num_bits = tap->ir_length;
  50. void * t = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
  51. field.out_value = t;
  52. buf_set_u32(t, 0, field.num_bits, new_instr);
  53. field.in_value = NULL;
  54. jtag_add_ir_scan(tap, &field, TAP_IDLE);
  55. free(t);
  56. }
  57. return ERROR_OK;
  58. }
  59. static int etb_scann(struct etb *etb, uint32_t new_scan_chain)
  60. {
  61. if (etb->cur_scan_chain != new_scan_chain)
  62. {
  63. struct scan_field field;
  64. field.num_bits = 5;
  65. void * t = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
  66. field.out_value = t;
  67. buf_set_u32(t, 0, field.num_bits, new_scan_chain);
  68. field.in_value = NULL;
  69. /* select INTEST instruction */
  70. etb_set_instr(etb, 0x2);
  71. jtag_add_dr_scan(etb->tap, 1, &field, TAP_IDLE);
  72. etb->cur_scan_chain = new_scan_chain;
  73. free(t);
  74. }
  75. return ERROR_OK;
  76. }
  77. static int etb_read_reg_w_check(struct reg *, uint8_t *, uint8_t *);
  78. static int etb_set_reg_w_exec(struct reg *, uint8_t *);
/* Queue a read of one ETB register without value checking.
 * Only schedules the scan — the caller must run jtag_execute_queue(). */
static int etb_read_reg(struct reg *reg)
{
	return etb_read_reg_w_check(reg, NULL, NULL);
}
  83. static int etb_get_reg(struct reg *reg)
  84. {
  85. int retval;
  86. if ((retval = etb_read_reg(reg)) != ERROR_OK)
  87. {
  88. LOG_ERROR("BUG: error scheduling ETB register read");
  89. return retval;
  90. }
  91. if ((retval = jtag_execute_queue()) != ERROR_OK)
  92. {
  93. LOG_ERROR("ETB register read failed");
  94. return retval;
  95. }
  96. return ERROR_OK;
  97. }
/* Register accessors used by the generic register framework:
 * synchronous read and write-then-execute for ETB registers. */
static const struct reg_arch_type etb_reg_type = {
	.get = etb_get_reg,
	.set = etb_set_reg_w_exec,
};
  102. struct reg_cache* etb_build_reg_cache(struct etb *etb)
  103. {
  104. struct reg_cache *reg_cache = malloc(sizeof(struct reg_cache));
  105. struct reg *reg_list = NULL;
  106. struct etb_reg *arch_info = NULL;
  107. int num_regs = 9;
  108. int i;
  109. /* the actual registers are kept in two arrays */
  110. reg_list = calloc(num_regs, sizeof(struct reg));
  111. arch_info = calloc(num_regs, sizeof(struct etb_reg));
  112. /* fill in values for the reg cache */
  113. reg_cache->name = "etb registers";
  114. reg_cache->next = NULL;
  115. reg_cache->reg_list = reg_list;
  116. reg_cache->num_regs = num_regs;
  117. /* set up registers */
  118. for (i = 0; i < num_regs; i++)
  119. {
  120. reg_list[i].name = etb_reg_list[i];
  121. reg_list[i].size = 32;
  122. reg_list[i].dirty = 0;
  123. reg_list[i].valid = 0;
  124. reg_list[i].value = calloc(1, 4);
  125. reg_list[i].arch_info = &arch_info[i];
  126. reg_list[i].type = &etb_reg_type;
  127. reg_list[i].size = 32;
  128. arch_info[i].addr = i;
  129. arch_info[i].etb = etb;
  130. }
  131. return reg_cache;
  132. }
/* JTAG callback: convert the four raw scan bytes captured at 'arg'
 * into a host-order uint32_t, in place.  The same storage serves as
 * both the captured byte buffer and the resulting word.
 * NOTE(review): the uint8_t* -> uint32_t* store assumes the caller
 * passed suitably aligned uint32_t storage (etb_read_ram does). */
static void etb_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;

	*((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}
  138. static int etb_read_ram(struct etb *etb, uint32_t *data, int num_frames)
  139. {
  140. struct scan_field fields[3];
  141. int i;
  142. etb_scann(etb, 0x0);
  143. etb_set_instr(etb, 0xc);
  144. fields[0].num_bits = 32;
  145. fields[0].out_value = NULL;
  146. fields[0].in_value = NULL;
  147. fields[1].num_bits = 7;
  148. uint8_t temp1;
  149. fields[1].out_value = &temp1;
  150. buf_set_u32(&temp1, 0, 7, 4);
  151. fields[1].in_value = NULL;
  152. fields[2].num_bits = 1;
  153. uint8_t temp2;
  154. fields[2].out_value = &temp2;
  155. buf_set_u32(&temp2, 0, 1, 0);
  156. fields[2].in_value = NULL;
  157. jtag_add_dr_scan(etb->tap, 3, fields, TAP_IDLE);
  158. for (i = 0; i < num_frames; i++)
  159. {
  160. /* ensure nR/W reamins set to read */
  161. buf_set_u32(&temp2, 0, 1, 0);
  162. /* address remains set to 0x4 (RAM data) until we read the last frame */
  163. if (i < num_frames - 1)
  164. buf_set_u32(&temp1, 0, 7, 4);
  165. else
  166. buf_set_u32(&temp1, 0, 7, 0);
  167. fields[0].in_value = (uint8_t *)(data + i);
  168. jtag_add_dr_scan(etb->tap, 3, fields, TAP_IDLE);
  169. jtag_add_callback(etb_getbuf, (jtag_callback_data_t)(data + i));
  170. }
  171. jtag_execute_queue();
  172. return ERROR_OK;
  173. }
/* Queue a two-pass read of one ETB register.
 *
 * The first DR scan selects the target register (read, nR/W = 0); the
 * second scan shifts its value out while addressing register 0
 * (identification), so the RAM-data register is never clocked twice —
 * doing so would advance the read pointer and skip every second entry.
 *
 * check_value/check_mask, when non-NULL, are handed to the JTAG layer
 * so it can verify the captured value; etb_read_reg() passes NULL for
 * plain reads.  Only schedules scans — the caller must run
 * jtag_execute_queue().
 *
 * NOTE(review): temp1/temp2 are stack locals used as out_value of
 * queued scans; this assumes jtag_add_dr_scan() copies out_value at
 * queue time — confirm against the JTAG core.
 */
static int etb_read_reg_w_check(struct reg *reg,
		uint8_t* check_value, uint8_t* check_mask)
{
	struct etb_reg *etb_reg = reg->arch_info;
	uint8_t reg_addr = etb_reg->addr & 0x7f;
	struct scan_field fields[3];

	LOG_DEBUG("%i", (int)(etb_reg->addr));

	etb_scann(etb_reg->etb, 0x0);
	etb_set_instr(etb_reg->etb, 0xc);

	/* field 0: 32-bit register value */
	fields[0].num_bits = 32;
	fields[0].out_value = reg->value;
	fields[0].in_value = NULL;
	fields[0].check_value = NULL;
	fields[0].check_mask = NULL;

	/* field 1: 7-bit register address */
	fields[1].num_bits = 7;
	uint8_t temp1;
	fields[1].out_value = &temp1;
	buf_set_u32(&temp1, 0, 7, reg_addr);
	fields[1].in_value = NULL;
	fields[1].check_value = NULL;
	fields[1].check_mask = NULL;

	/* field 2: nR/W flag, 0 = read */
	fields[2].num_bits = 1;
	uint8_t temp2;
	fields[2].out_value = &temp2;
	buf_set_u32(&temp2, 0, 1, 0);
	fields[2].in_value = NULL;
	fields[2].check_value = NULL;
	fields[2].check_mask = NULL;

	jtag_add_dr_scan(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	/* read the identification register in the second run, to make sure we
	 * don't read the ETB data register twice, skipping every second entry
	 */
	buf_set_u32(&temp1, 0, 7, 0x0);

	fields[0].in_value = reg->value;
	fields[0].check_value = check_value;
	fields[0].check_mask = check_mask;

	jtag_add_dr_scan_check(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	return ERROR_OK;
}
  213. static int etb_write_reg(struct reg *, uint32_t);
  214. static int etb_set_reg(struct reg *reg, uint32_t value)
  215. {
  216. int retval;
  217. if ((retval = etb_write_reg(reg, value)) != ERROR_OK)
  218. {
  219. LOG_ERROR("BUG: error scheduling ETB register write");
  220. return retval;
  221. }
  222. buf_set_u32(reg->value, 0, reg->size, value);
  223. reg->valid = 1;
  224. reg->dirty = 0;
  225. return ERROR_OK;
  226. }
  227. static int etb_set_reg_w_exec(struct reg *reg, uint8_t *buf)
  228. {
  229. int retval;
  230. etb_set_reg(reg, buf_get_u32(buf, 0, reg->size));
  231. if ((retval = jtag_execute_queue()) != ERROR_OK)
  232. {
  233. LOG_ERROR("ETB: register write failed");
  234. return retval;
  235. }
  236. return ERROR_OK;
  237. }
  238. static int etb_write_reg(struct reg *reg, uint32_t value)
  239. {
  240. struct etb_reg *etb_reg = reg->arch_info;
  241. uint8_t reg_addr = etb_reg->addr & 0x7f;
  242. struct scan_field fields[3];
  243. LOG_DEBUG("%i: 0x%8.8" PRIx32 "", (int)(etb_reg->addr), value);
  244. etb_scann(etb_reg->etb, 0x0);
  245. etb_set_instr(etb_reg->etb, 0xc);
  246. fields[0].num_bits = 32;
  247. uint8_t temp0[4];
  248. fields[0].out_value = temp0;
  249. buf_set_u32(&temp0, 0, 32, value);
  250. fields[0].in_value = NULL;
  251. fields[1].num_bits = 7;
  252. uint8_t temp1;
  253. fields[1].out_value = &temp1;
  254. buf_set_u32(&temp1, 0, 7, reg_addr);
  255. fields[1].in_value = NULL;
  256. fields[2].num_bits = 1;
  257. uint8_t temp2;
  258. fields[2].out_value = &temp2;
  259. buf_set_u32(&temp2, 0, 1, 1);
  260. fields[2].in_value = NULL;
  261. jtag_add_dr_scan(etb_reg->etb->tap, 3, fields, TAP_IDLE);
  262. return ERROR_OK;
  263. }
  264. COMMAND_HANDLER(handle_etb_config_command)
  265. {
  266. struct target *target;
  267. struct jtag_tap *tap;
  268. struct arm *arm;
  269. if (CMD_ARGC != 2)
  270. {
  271. return ERROR_COMMAND_SYNTAX_ERROR;
  272. }
  273. target = get_target(CMD_ARGV[0]);
  274. if (!target)
  275. {
  276. LOG_ERROR("ETB: target '%s' not defined", CMD_ARGV[0]);
  277. return ERROR_FAIL;
  278. }
  279. arm = target_to_arm(target);
  280. if (!is_arm(arm))
  281. {
  282. command_print(CMD_CTX, "ETB: '%s' isn't an ARM", CMD_ARGV[0]);
  283. return ERROR_FAIL;
  284. }
  285. tap = jtag_tap_by_string(CMD_ARGV[1]);
  286. if (tap == NULL)
  287. {
  288. command_print(CMD_CTX, "ETB: TAP %s does not exist", CMD_ARGV[1]);
  289. return ERROR_FAIL;
  290. }
  291. if (arm->etm)
  292. {
  293. struct etb *etb = malloc(sizeof(struct etb));
  294. arm->etm->capture_driver_priv = etb;
  295. etb->tap = tap;
  296. etb->cur_scan_chain = 0xffffffff;
  297. etb->reg_cache = NULL;
  298. etb->ram_width = 0;
  299. etb->ram_depth = 0;
  300. }
  301. else
  302. {
  303. LOG_ERROR("ETM: target has no ETM defined, ETB left unconfigured");
  304. return ERROR_FAIL;
  305. }
  306. return ERROR_OK;
  307. }
  308. COMMAND_HANDLER(handle_etb_trigger_percent_command)
  309. {
  310. struct target *target;
  311. struct arm *arm;
  312. struct etm_context *etm;
  313. struct etb *etb;
  314. target = get_current_target(CMD_CTX);
  315. arm = target_to_arm(target);
  316. if (!is_arm(arm))
  317. {
  318. command_print(CMD_CTX, "ETB: current target isn't an ARM");
  319. return ERROR_FAIL;
  320. }
  321. etm = arm->etm;
  322. if (!etm) {
  323. command_print(CMD_CTX, "ETB: target has no ETM configured");
  324. return ERROR_FAIL;
  325. }
  326. if (etm->capture_driver != &etb_capture_driver) {
  327. command_print(CMD_CTX, "ETB: target not using ETB");
  328. return ERROR_FAIL;
  329. }
  330. etb = arm->etm->capture_driver_priv;
  331. if (CMD_ARGC > 0) {
  332. uint32_t new_value;
  333. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], new_value);
  334. if ((new_value < 2) || (new_value > 100))
  335. command_print(CMD_CTX,
  336. "valid percentages are 2%% to 100%%");
  337. else
  338. etb->trigger_percent = (unsigned) new_value;
  339. }
  340. command_print(CMD_CTX, "%d percent of tracebuffer fills after trigger",
  341. etb->trigger_percent);
  342. return ERROR_OK;
  343. }
/* Subcommands registered under the "etb" command group. */
static const struct command_registration etb_config_command_handlers[] = {
	{
		/* NOTE: with ADIv5, ETBs are accessed using DAP operations,
		 * possibly over SWD, not through separate TAPs...
		 */
		.name = "config",
		.handler = handle_etb_config_command,
		.mode = COMMAND_CONFIG,
		.help = "Associate ETB with target and JTAG TAP.",
		.usage = "target tap",
	},
	{
		.name = "trigger_percent",
		.handler = handle_etb_trigger_percent_command,
		.mode = COMMAND_EXEC,
		.help = "Set percent of trace buffer to be filled "
			"after the trigger occurs (2..100).",
		.usage = "[percent]",
	},
	COMMAND_REGISTRATION_DONE
};
  365. static const struct command_registration etb_command_handlers[] = {
  366. {
  367. .name = "etb",
  368. .mode = COMMAND_ANY,
  369. .help = "Emebdded Trace Buffer command group",
  370. .chain = etb_config_command_handlers,
  371. },
  372. COMMAND_REGISTRATION_DONE
  373. };
  374. static int etb_init(struct etm_context *etm_ctx)
  375. {
  376. struct etb *etb = etm_ctx->capture_driver_priv;
  377. etb->etm_ctx = etm_ctx;
  378. /* identify ETB RAM depth and width */
  379. etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_DEPTH]);
  380. etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WIDTH]);
  381. jtag_execute_queue();
  382. etb->ram_depth = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_DEPTH].value, 0, 32);
  383. etb->ram_width = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WIDTH].value, 0, 32);
  384. etb->trigger_percent = 50;
  385. return ERROR_OK;
  386. }
  387. static trace_status_t etb_status(struct etm_context *etm_ctx)
  388. {
  389. struct etb *etb = etm_ctx->capture_driver_priv;
  390. struct reg *control = &etb->reg_cache->reg_list[ETB_CTRL];
  391. struct reg *status = &etb->reg_cache->reg_list[ETB_STATUS];
  392. trace_status_t retval = 0;
  393. int etb_timeout = 100;
  394. etb->etm_ctx = etm_ctx;
  395. /* read control and status registers */
  396. etb_read_reg(control);
  397. etb_read_reg(status);
  398. jtag_execute_queue();
  399. /* See if it's (still) active */
  400. retval = buf_get_u32(control->value, 0, 1) ? TRACE_RUNNING : TRACE_IDLE;
  401. /* check Full bit to identify wraparound/overflow */
  402. if (buf_get_u32(status->value, 0, 1) == 1)
  403. retval |= TRACE_OVERFLOWED;
  404. /* check Triggered bit to identify trigger condition */
  405. if (buf_get_u32(status->value, 1, 1) == 1)
  406. retval |= TRACE_TRIGGERED;
  407. /* check AcqComp to see if trigger counter dropped to zero */
  408. if (buf_get_u32(status->value, 2, 1) == 1) {
  409. /* wait for DFEmpty */
  410. while (etb_timeout-- && buf_get_u32(status->value, 3, 1) == 0)
  411. etb_get_reg(status);
  412. if (etb_timeout == 0)
  413. LOG_ERROR("ETB: DFEmpty won't go high, status 0x%02x",
  414. (unsigned) buf_get_u32(status->value, 0, 4));
  415. if (!(etm_ctx->capture_status & TRACE_TRIGGERED))
  416. LOG_WARNING("ETB: trace complete without triggering?");
  417. retval |= TRACE_COMPLETED;
  418. }
  419. /* NOTE: using a trigger is optional; and at least ETB11 has a mode
  420. * where it can ignore the trigger counter.
  421. */
  422. /* update recorded state */
  423. etm_ctx->capture_status = retval;
  424. return retval;
  425. }
  426. static int etb_read_trace(struct etm_context *etm_ctx)
  427. {
  428. struct etb *etb = etm_ctx->capture_driver_priv;
  429. int first_frame = 0;
  430. int num_frames = etb->ram_depth;
  431. uint32_t *trace_data = NULL;
  432. int i, j;
  433. etb_read_reg(&etb->reg_cache->reg_list[ETB_STATUS]);
  434. etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER]);
  435. jtag_execute_queue();
  436. /* check if we overflowed, and adjust first frame of the trace accordingly
  437. * if we didn't overflow, read only up to the frame that would be written next,
  438. * i.e. don't read invalid entries
  439. */
  440. if (buf_get_u32(etb->reg_cache->reg_list[ETB_STATUS].value, 0, 1))
  441. {
  442. first_frame = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value, 0, 32);
  443. }
  444. else
  445. {
  446. num_frames = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value, 0, 32);
  447. }
  448. etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_READ_POINTER], first_frame);
  449. /* read data into temporary array for unpacking */
  450. trace_data = malloc(sizeof(uint32_t) * num_frames);
  451. etb_read_ram(etb, trace_data, num_frames);
  452. if (etm_ctx->trace_depth > 0)
  453. {
  454. free(etm_ctx->trace_data);
  455. }
  456. if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
  457. etm_ctx->trace_depth = num_frames * 3;
  458. else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
  459. etm_ctx->trace_depth = num_frames * 2;
  460. else
  461. etm_ctx->trace_depth = num_frames;
  462. etm_ctx->trace_data = malloc(sizeof(struct etmv1_trace_data) * etm_ctx->trace_depth);
  463. for (i = 0, j = 0; i < num_frames; i++)
  464. {
  465. if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
  466. {
  467. /* trace word j */
  468. etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
  469. etm_ctx->trace_data[j].packet = (trace_data[i] & 0x78) >> 3;
  470. etm_ctx->trace_data[j].flags = 0;
  471. if ((trace_data[i] & 0x80) >> 7)
  472. {
  473. etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
  474. }
  475. if (etm_ctx->trace_data[j].pipestat == STAT_TR)
  476. {
  477. etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
  478. etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
  479. }
  480. /* trace word j + 1 */
  481. etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x100) >> 8;
  482. etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7800) >> 11;
  483. etm_ctx->trace_data[j + 1].flags = 0;
  484. if ((trace_data[i] & 0x8000) >> 15)
  485. {
  486. etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
  487. }
  488. if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR)
  489. {
  490. etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
  491. etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
  492. }
  493. /* trace word j + 2 */
  494. etm_ctx->trace_data[j + 2].pipestat = (trace_data[i] & 0x10000) >> 16;
  495. etm_ctx->trace_data[j + 2].packet = (trace_data[i] & 0x780000) >> 19;
  496. etm_ctx->trace_data[j + 2].flags = 0;
  497. if ((trace_data[i] & 0x800000) >> 23)
  498. {
  499. etm_ctx->trace_data[j + 2].flags |= ETMV1_TRACESYNC_CYCLE;
  500. }
  501. if (etm_ctx->trace_data[j + 2].pipestat == STAT_TR)
  502. {
  503. etm_ctx->trace_data[j + 2].pipestat = etm_ctx->trace_data[j + 2].packet & 0x7;
  504. etm_ctx->trace_data[j + 2].flags |= ETMV1_TRIGGER_CYCLE;
  505. }
  506. j += 3;
  507. }
  508. else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
  509. {
  510. /* trace word j */
  511. etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
  512. etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7f8) >> 3;
  513. etm_ctx->trace_data[j].flags = 0;
  514. if ((trace_data[i] & 0x800) >> 11)
  515. {
  516. etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
  517. }
  518. if (etm_ctx->trace_data[j].pipestat == STAT_TR)
  519. {
  520. etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
  521. etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
  522. }
  523. /* trace word j + 1 */
  524. etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x7000) >> 12;
  525. etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7f8000) >> 15;
  526. etm_ctx->trace_data[j + 1].flags = 0;
  527. if ((trace_data[i] & 0x800000) >> 23)
  528. {
  529. etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
  530. }
  531. if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR)
  532. {
  533. etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
  534. etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
  535. }
  536. j += 2;
  537. }
  538. else
  539. {
  540. /* trace word j */
  541. etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
  542. etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7fff8) >> 3;
  543. etm_ctx->trace_data[j].flags = 0;
  544. if ((trace_data[i] & 0x80000) >> 19)
  545. {
  546. etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
  547. }
  548. if (etm_ctx->trace_data[j].pipestat == STAT_TR)
  549. {
  550. etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
  551. etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
  552. }
  553. j += 1;
  554. }
  555. }
  556. free(trace_data);
  557. return ERROR_OK;
  558. }
  559. static int etb_start_capture(struct etm_context *etm_ctx)
  560. {
  561. struct etb *etb = etm_ctx->capture_driver_priv;
  562. uint32_t etb_ctrl_value = 0x1;
  563. uint32_t trigger_count;
  564. if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_DEMUXED)
  565. {
  566. if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) != ETM_PORT_8BIT)
  567. {
  568. LOG_ERROR("ETB can't run in demultiplexed mode with a 4 or 16 bit port");
  569. return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
  570. }
  571. etb_ctrl_value |= 0x2;
  572. }
  573. if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_MUXED) {
  574. LOG_ERROR("ETB: can't run in multiplexed mode");
  575. return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
  576. }
  577. trigger_count = (etb->ram_depth * etb->trigger_percent) / 100;
  578. etb_write_reg(&etb->reg_cache->reg_list[ETB_TRIGGER_COUNTER], trigger_count);
  579. etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER], 0x0);
  580. etb_write_reg(&etb->reg_cache->reg_list[ETB_CTRL], etb_ctrl_value);
  581. jtag_execute_queue();
  582. /* we're starting a new trace, initialize capture status */
  583. etm_ctx->capture_status = TRACE_RUNNING;
  584. return ERROR_OK;
  585. }
  586. static int etb_stop_capture(struct etm_context *etm_ctx)
  587. {
  588. struct etb *etb = etm_ctx->capture_driver_priv;
  589. struct reg *etb_ctrl_reg = &etb->reg_cache->reg_list[ETB_CTRL];
  590. etb_write_reg(etb_ctrl_reg, 0x0);
  591. jtag_execute_queue();
  592. /* trace stopped, just clear running flag, but preserve others */
  593. etm_ctx->capture_status &= ~TRACE_RUNNING;
  594. return ERROR_OK;
  595. }
/* ETM capture-driver vtable exposing the ETB back end to the generic
 * ETM layer (registered via the "etb" command group above). */
struct etm_capture_driver etb_capture_driver =
{
	.name = "etb",
	.commands = etb_command_handlers,
	.init = etb_init,
	.status = etb_status,
	.start_capture = etb_start_capture,
	.stop_capture = etb_stop_capture,
	.read_trace = etb_read_trace,
};