/***************************************************************************
 *   Copyright (C) 2007 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.              *
 ***************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "arm.h"
#include "etm.h"
#include "etb.h"
#include "register.h"
static char* etb_reg_list[] =
{
    "ETB_identification",
    "ETB_ram_depth",
    "ETB_ram_width",
    "ETB_status",
    "ETB_ram_data",
    "ETB_ram_read_pointer",
    "ETB_ram_write_pointer",
    "ETB_trigger_counter",
    "ETB_control",
};

static int etb_get_reg(struct reg *reg);
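
/* Load a new instruction into the ETB TAP's instruction register,
 * skipping the IR scan if that instruction is already current.
 */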
static int etb_set_instr(struct etb *etb, uint32_t new_instr)
{
    struct jtag_tap *tap;

    tap = etb->tap;
    if (tap == NULL)
        return ERROR_FAIL;

    if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
    {
        struct scan_field field;

        field.tap = tap;
        field.num_bits = tap->ir_length;
        field.out_value = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
        buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
        field.in_value = NULL;

        jtag_add_ir_scan(1, &field, jtag_get_end_state());

        free(field.out_value);
    }

    return ERROR_OK;
}
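
/* Switch the ETB TAP to a different scan chain if it is not already
 * selected; the current selection is cached in etb->cur_scan_chain.
 */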
static int etb_scann(struct etb *etb, uint32_t new_scan_chain)
{
    if (etb->cur_scan_chain != new_scan_chain)
    {
        struct scan_field field;

        field.tap = etb->tap;
        field.num_bits = 5;
        field.out_value = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
        buf_set_u32(field.out_value, 0, field.num_bits, new_scan_chain);
        field.in_value = NULL;

        /* select INTEST instruction */
        etb_set_instr(etb, 0x2);
        jtag_add_dr_scan(1, &field, jtag_get_end_state());

        etb->cur_scan_chain = new_scan_chain;

        free(field.out_value);
    }

    return ERROR_OK;
}
static int etb_read_reg_w_check(struct reg *, uint8_t *, uint8_t *);
static int etb_set_reg_w_exec(struct reg *, uint8_t *);

static int etb_read_reg(struct reg *reg)
{
    return etb_read_reg_w_check(reg, NULL, NULL);
}

static int etb_get_reg(struct reg *reg)
{
    int retval;

    if ((retval = etb_read_reg(reg)) != ERROR_OK)
    {
        LOG_ERROR("BUG: error scheduling ETB register read");
        return retval;
    }

    if ((retval = jtag_execute_queue()) != ERROR_OK)
    {
        LOG_ERROR("ETB register read failed");
        return retval;
    }

    return ERROR_OK;
}
static const struct reg_arch_type etb_reg_type = {
    .get = etb_get_reg,
    .set = etb_set_reg_w_exec,
};
struct reg_cache* etb_build_reg_cache(struct etb *etb)
{
    struct reg_cache *reg_cache = malloc(sizeof(struct reg_cache));
    struct reg *reg_list = NULL;
    struct etb_reg *arch_info = NULL;
    int num_regs = 9;
    int i;

    /* the actual registers are kept in two arrays */
    reg_list = calloc(num_regs, sizeof(struct reg));
    arch_info = calloc(num_regs, sizeof(struct etb_reg));

    /* fill in values for the reg cache */
    reg_cache->name = "etb registers";
    reg_cache->next = NULL;
    reg_cache->reg_list = reg_list;
    reg_cache->num_regs = num_regs;

    /* set up registers */
    for (i = 0; i < num_regs; i++)
    {
        reg_list[i].name = etb_reg_list[i];
        reg_list[i].size = 32;
        reg_list[i].dirty = 0;
        reg_list[i].valid = 0;
        reg_list[i].value = calloc(1, 4);
        reg_list[i].arch_info = &arch_info[i];
        reg_list[i].type = &etb_reg_type;
        arch_info[i].addr = i;
        arch_info[i].etb = etb;
    }

    return reg_cache;
}
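
/* JTAG callback: convert the 32 bits captured into the buffer by a DR scan
 * into a host-order uint32_t, in place.
 */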
static void etb_getbuf(jtag_callback_data_t arg)
{
    uint8_t *in = (uint8_t *)arg;

    *((uint32_t *)in) = buf_get_u32(in, 0, 32);
}
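
/* Read num_frames words from the ETB trace RAM. Accesses are pipelined:
 * each DR scan returns the data for the address set up by the previous scan,
 * so one extra scan is issued up front and the address is switched away from
 * the RAM data register (0x4) on the final frame.
 */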
static int etb_read_ram(struct etb *etb, uint32_t *data, int num_frames)
{
    struct scan_field fields[3];
    int i;

    jtag_set_end_state(TAP_IDLE);
    etb_scann(etb, 0x0);
    etb_set_instr(etb, 0xc);

    fields[0].tap = etb->tap;
    fields[0].num_bits = 32;
    fields[0].out_value = NULL;
    fields[0].in_value = NULL;

    fields[1].tap = etb->tap;
    fields[1].num_bits = 7;
    fields[1].out_value = malloc(1);
    buf_set_u32(fields[1].out_value, 0, 7, 4);
    fields[1].in_value = NULL;

    fields[2].tap = etb->tap;
    fields[2].num_bits = 1;
    fields[2].out_value = malloc(1);
    buf_set_u32(fields[2].out_value, 0, 1, 0);
    fields[2].in_value = NULL;

    jtag_add_dr_scan(3, fields, jtag_get_end_state());

    for (i = 0; i < num_frames; i++)
    {
        /* ensure nR/W remains set to read */
        buf_set_u32(fields[2].out_value, 0, 1, 0);

        /* address remains set to 0x4 (RAM data) until we read the last frame */
        if (i < num_frames - 1)
            buf_set_u32(fields[1].out_value, 0, 7, 4);
        else
            buf_set_u32(fields[1].out_value, 0, 7, 0);

        fields[0].in_value = (uint8_t *)(data + i);
        jtag_add_dr_scan(3, fields, jtag_get_end_state());

        jtag_add_callback(etb_getbuf, (jtag_callback_data_t)(data + i));
    }

    jtag_execute_queue();

    free(fields[1].out_value);
    free(fields[2].out_value);

    return ERROR_OK;
}
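
/* Queue a read of one ETB register, optionally checking the result against
 * check_value/check_mask once the JTAG queue is executed.
 */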
static int etb_read_reg_w_check(struct reg *reg,
        uint8_t* check_value, uint8_t* check_mask)
{
    struct etb_reg *etb_reg = reg->arch_info;
    uint8_t reg_addr = etb_reg->addr & 0x7f;
    struct scan_field fields[3];

    LOG_DEBUG("%i", (int)(etb_reg->addr));

    jtag_set_end_state(TAP_IDLE);
    etb_scann(etb_reg->etb, 0x0);
    etb_set_instr(etb_reg->etb, 0xc);

    fields[0].tap = etb_reg->etb->tap;
    fields[0].num_bits = 32;
    fields[0].out_value = reg->value;
    fields[0].in_value = NULL;
    fields[0].check_value = NULL;
    fields[0].check_mask = NULL;

    fields[1].tap = etb_reg->etb->tap;
    fields[1].num_bits = 7;
    fields[1].out_value = malloc(1);
    buf_set_u32(fields[1].out_value, 0, 7, reg_addr);
    fields[1].in_value = NULL;
    fields[1].check_value = NULL;
    fields[1].check_mask = NULL;

    fields[2].tap = etb_reg->etb->tap;
    fields[2].num_bits = 1;
    fields[2].out_value = malloc(1);
    buf_set_u32(fields[2].out_value, 0, 1, 0);
    fields[2].in_value = NULL;
    fields[2].check_value = NULL;
    fields[2].check_mask = NULL;

    jtag_add_dr_scan(3, fields, jtag_get_end_state());

    /* read the identification register in the second run, to make sure we
     * don't read the ETB data register twice, skipping every second entry
     */
    buf_set_u32(fields[1].out_value, 0, 7, 0x0);
    fields[0].in_value = reg->value;
    fields[0].check_value = check_value;
    fields[0].check_mask = check_mask;

    jtag_add_dr_scan_check(3, fields, jtag_get_end_state());

    free(fields[1].out_value);
    free(fields[2].out_value);

    return ERROR_OK;
}
static int etb_write_reg(struct reg *, uint32_t);

static int etb_set_reg(struct reg *reg, uint32_t value)
{
    int retval;

    if ((retval = etb_write_reg(reg, value)) != ERROR_OK)
    {
        LOG_ERROR("BUG: error scheduling ETB register write");
        return retval;
    }

    buf_set_u32(reg->value, 0, reg->size, value);
    reg->valid = 1;
    reg->dirty = 0;

    return ERROR_OK;
}

static int etb_set_reg_w_exec(struct reg *reg, uint8_t *buf)
{
    int retval;

    etb_set_reg(reg, buf_get_u32(buf, 0, reg->size));

    if ((retval = jtag_execute_queue()) != ERROR_OK)
    {
        LOG_ERROR("ETB: register write failed");
        return retval;
    }

    return ERROR_OK;
}
static int etb_write_reg(struct reg *reg, uint32_t value)
{
    struct etb_reg *etb_reg = reg->arch_info;
    uint8_t reg_addr = etb_reg->addr & 0x7f;
    struct scan_field fields[3];

    LOG_DEBUG("%i: 0x%8.8" PRIx32 "", (int)(etb_reg->addr), value);

    jtag_set_end_state(TAP_IDLE);
    etb_scann(etb_reg->etb, 0x0);
    etb_set_instr(etb_reg->etb, 0xc);

    fields[0].tap = etb_reg->etb->tap;
    fields[0].num_bits = 32;
    fields[0].out_value = malloc(4);
    buf_set_u32(fields[0].out_value, 0, 32, value);
    fields[0].in_value = NULL;

    fields[1].tap = etb_reg->etb->tap;
    fields[1].num_bits = 7;
    fields[1].out_value = malloc(1);
    buf_set_u32(fields[1].out_value, 0, 7, reg_addr);
    fields[1].in_value = NULL;

    fields[2].tap = etb_reg->etb->tap;
    fields[2].num_bits = 1;
    fields[2].out_value = malloc(1);
    buf_set_u32(fields[2].out_value, 0, 1, 1);
    fields[2].in_value = NULL;

    /* queue the register write before releasing the scratch buffers */
    jtag_add_dr_scan(3, fields, jtag_get_end_state());

    free(fields[0].out_value);
    free(fields[1].out_value);
    free(fields[2].out_value);

    return ERROR_OK;
}
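
/* "etb config <target> <tap>": attach an ETB on the given TAP to the ETM of
 * the given target.
 */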
COMMAND_HANDLER(handle_etb_config_command)
{
    struct target *target;
    struct jtag_tap *tap;
    struct arm *arm;

    if (CMD_ARGC != 2)
    {
        return ERROR_COMMAND_SYNTAX_ERROR;
    }

    target = get_target(CMD_ARGV[0]);
    if (!target)
    {
        LOG_ERROR("ETB: target '%s' not defined", CMD_ARGV[0]);
        return ERROR_FAIL;
    }

    arm = target_to_arm(target);
    if (!is_arm(arm))
    {
        command_print(CMD_CTX, "ETB: '%s' isn't an ARM", CMD_ARGV[0]);
        return ERROR_FAIL;
    }

    tap = jtag_tap_by_string(CMD_ARGV[1]);
    if (tap == NULL)
    {
        command_print(CMD_CTX, "ETB: TAP %s does not exist", CMD_ARGV[1]);
        return ERROR_FAIL;
    }

    if (arm->etm)
    {
        struct etb *etb = malloc(sizeof(struct etb));

        arm->etm->capture_driver_priv = etb;

        etb->tap = tap;
        etb->cur_scan_chain = 0xffffffff;
        etb->reg_cache = NULL;
        etb->ram_width = 0;
        etb->ram_depth = 0;
    }
    else
    {
        LOG_ERROR("ETM: target has no ETM defined, ETB left unconfigured");
        return ERROR_FAIL;
    }

    return ERROR_OK;
}
COMMAND_HANDLER(handle_etb_trigger_percent_command)
{
    struct target *target;
    struct arm *arm;
    struct etm_context *etm;
    struct etb *etb;

    target = get_current_target(CMD_CTX);
    arm = target_to_arm(target);
    if (!is_arm(arm))
    {
        command_print(CMD_CTX, "ETB: current target isn't an ARM");
        return ERROR_FAIL;
    }

    etm = arm->etm;
    if (!etm) {
        command_print(CMD_CTX, "ETB: target has no ETM configured");
        return ERROR_FAIL;
    }
    if (etm->capture_driver != &etb_capture_driver) {
        command_print(CMD_CTX, "ETB: target not using ETB");
        return ERROR_FAIL;
    }
    etb = arm->etm->capture_driver_priv;

    if (CMD_ARGC > 0) {
        uint32_t new_value;

        COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], new_value);
        if ((new_value < 2) || (new_value > 100))
            command_print(CMD_CTX,
                    "valid percentages are 2%% to 100%%");
        else
            etb->trigger_percent = (unsigned) new_value;
    }

    command_print(CMD_CTX, "%d percent of tracebuffer fills after trigger",
            etb->trigger_percent);

    return ERROR_OK;
}
static const struct command_registration etb_config_command_handlers[] = {
    {
        /* NOTE: with ADIv5, ETBs are accessed using DAP operations,
         * possibly over SWD, not through separate TAPs...
         */
        .name = "config",
        .handler = handle_etb_config_command,
        .mode = COMMAND_CONFIG,
        .help = "Associate ETB with target and JTAG TAP.",
        .usage = "target tap",
    },
    {
        .name = "trigger_percent",
        .handler = handle_etb_trigger_percent_command,
        .mode = COMMAND_EXEC,
        .help = "Set percent of trace buffer to be filled "
            "after the trigger occurs (2..100).",
        .usage = "[percent]",
    },
    COMMAND_REGISTRATION_DONE
};

static const struct command_registration etb_command_handlers[] = {
    {
        .name = "etb",
        .mode = COMMAND_ANY,
        .help = "Embedded Trace Buffer command group",
        .chain = etb_config_command_handlers,
    },
    COMMAND_REGISTRATION_DONE
};
static int etb_init(struct etm_context *etm_ctx)
{
    struct etb *etb = etm_ctx->capture_driver_priv;

    etb->etm_ctx = etm_ctx;

    /* identify ETB RAM depth and width */
    etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_DEPTH]);
    etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WIDTH]);
    jtag_execute_queue();

    etb->ram_depth = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_DEPTH].value, 0, 32);
    etb->ram_width = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WIDTH].value, 0, 32);

    etb->trigger_percent = 50;

    return ERROR_OK;
}
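
/* Translate the ETB control and status registers into a trace_status_t:
 * running/idle, plus overflow, trigger, and completion flags.
 */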
static trace_status_t etb_status(struct etm_context *etm_ctx)
{
    struct etb *etb = etm_ctx->capture_driver_priv;
    struct reg *control = &etb->reg_cache->reg_list[ETB_CTRL];
    struct reg *status = &etb->reg_cache->reg_list[ETB_STATUS];
    trace_status_t retval = 0;
    int etb_timeout = 100;

    etb->etm_ctx = etm_ctx;

    /* read control and status registers */
    etb_read_reg(control);
    etb_read_reg(status);
    jtag_execute_queue();

    /* See if it's (still) active */
    retval = buf_get_u32(control->value, 0, 1) ? TRACE_RUNNING : TRACE_IDLE;

    /* check Full bit to identify wraparound/overflow */
    if (buf_get_u32(status->value, 0, 1) == 1)
        retval |= TRACE_OVERFLOWED;

    /* check Triggered bit to identify trigger condition */
    if (buf_get_u32(status->value, 1, 1) == 1)
        retval |= TRACE_TRIGGERED;

    /* check AcqComp to see if trigger counter dropped to zero */
    if (buf_get_u32(status->value, 2, 1) == 1) {
        /* wait for DFEmpty */
        while (etb_timeout-- && buf_get_u32(status->value, 3, 1) == 0)
            etb_get_reg(status);

        if (etb_timeout <= 0)
            LOG_ERROR("ETB: DFEmpty won't go high, status 0x%02x",
                    (unsigned) buf_get_u32(status->value, 0, 4));

        if (!(etm_ctx->capture_status & TRACE_TRIGGERED))
            LOG_WARNING("ETB: trace complete without triggering?");

        retval |= TRACE_COMPLETED;
    }

    /* NOTE: using a trigger is optional; and at least ETB11 has a mode
     * where it can ignore the trigger counter.
     */

    /* update recorded state */
    etm_ctx->capture_status = retval;

    return retval;
}
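
/* Read the captured trace out of the ETB RAM and unpack it into ETMv1
 * trace records; each 32-bit frame holds three 4-bit-port cycles, two
 * 8-bit-port cycles, or one 16-bit-port cycle.
 */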
static int etb_read_trace(struct etm_context *etm_ctx)
{
    struct etb *etb = etm_ctx->capture_driver_priv;
    int first_frame = 0;
    int num_frames = etb->ram_depth;
    uint32_t *trace_data = NULL;
    int i, j;

    etb_read_reg(&etb->reg_cache->reg_list[ETB_STATUS]);
    etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER]);
    jtag_execute_queue();

    /* check if we overflowed, and adjust first frame of the trace accordingly
     * if we didn't overflow, read only up to the frame that would be written next,
     * i.e. don't read invalid entries
     */
    if (buf_get_u32(etb->reg_cache->reg_list[ETB_STATUS].value, 0, 1))
    {
        first_frame = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value, 0, 32);
    }
    else
    {
        num_frames = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value, 0, 32);
    }

    etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_READ_POINTER], first_frame);

    /* read data into temporary array for unpacking */
    trace_data = malloc(sizeof(uint32_t) * num_frames);
    etb_read_ram(etb, trace_data, num_frames);

    if (etm_ctx->trace_depth > 0)
    {
        free(etm_ctx->trace_data);
    }

    if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
        etm_ctx->trace_depth = num_frames * 3;
    else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
        etm_ctx->trace_depth = num_frames * 2;
    else
        etm_ctx->trace_depth = num_frames;

    etm_ctx->trace_data = malloc(sizeof(struct etmv1_trace_data) * etm_ctx->trace_depth);
    for (i = 0, j = 0; i < num_frames; i++)
    {
        if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
        {
            /* trace word j */
            etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
            etm_ctx->trace_data[j].packet = (trace_data[i] & 0x78) >> 3;
            etm_ctx->trace_data[j].flags = 0;
            if ((trace_data[i] & 0x80) >> 7)
            {
                etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
            }
            if (etm_ctx->trace_data[j].pipestat == STAT_TR)
            {
                etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
                etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
            }

            /* trace word j + 1 */
            etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x100) >> 8;
            etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7800) >> 11;
            etm_ctx->trace_data[j + 1].flags = 0;
            if ((trace_data[i] & 0x8000) >> 15)
            {
                etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
            }
            if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR)
            {
                etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
                etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
            }

            /* trace word j + 2 */
            etm_ctx->trace_data[j + 2].pipestat = (trace_data[i] & 0x10000) >> 16;
            etm_ctx->trace_data[j + 2].packet = (trace_data[i] & 0x780000) >> 19;
            etm_ctx->trace_data[j + 2].flags = 0;
            if ((trace_data[i] & 0x800000) >> 23)
            {
                etm_ctx->trace_data[j + 2].flags |= ETMV1_TRACESYNC_CYCLE;
            }
            if (etm_ctx->trace_data[j + 2].pipestat == STAT_TR)
            {
                etm_ctx->trace_data[j + 2].pipestat = etm_ctx->trace_data[j + 2].packet & 0x7;
                etm_ctx->trace_data[j + 2].flags |= ETMV1_TRIGGER_CYCLE;
            }

            j += 3;
        }
        else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
        {
            /* trace word j */
            etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
            etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7f8) >> 3;
            etm_ctx->trace_data[j].flags = 0;
            if ((trace_data[i] & 0x800) >> 11)
            {
                etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
            }
            if (etm_ctx->trace_data[j].pipestat == STAT_TR)
            {
                etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
                etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
            }

            /* trace word j + 1 */
            etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x7000) >> 12;
            etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7f8000) >> 15;
            etm_ctx->trace_data[j + 1].flags = 0;
            if ((trace_data[i] & 0x800000) >> 23)
            {
                etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
            }
            if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR)
            {
                etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
                etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
            }

            j += 2;
        }
        else
        {
            /* trace word j */
            etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
            etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7fff8) >> 3;
            etm_ctx->trace_data[j].flags = 0;
            if ((trace_data[i] & 0x80000) >> 19)
            {
                etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
            }
            if (etm_ctx->trace_data[j].pipestat == STAT_TR)
            {
                etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
                etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
            }

            j += 1;
        }
    }

    free(trace_data);

    return ERROR_OK;
}
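
/* Program the trigger counter from trigger_percent, reset the write pointer,
 * and enable capture. Demultiplexed mode requires an 8-bit port; multiplexed
 * mode is not supported.
 */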
static int etb_start_capture(struct etm_context *etm_ctx)
{
    struct etb *etb = etm_ctx->capture_driver_priv;
    uint32_t etb_ctrl_value = 0x1;
    uint32_t trigger_count;

    if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_DEMUXED)
    {
        if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) != ETM_PORT_8BIT)
        {
            LOG_ERROR("ETB can't run in demultiplexed mode with a 4 or 16 bit port");
            return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
        }
        etb_ctrl_value |= 0x2;
    }

    if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_MUXED) {
        LOG_ERROR("ETB: can't run in multiplexed mode");
        return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
    }

    trigger_count = (etb->ram_depth * etb->trigger_percent) / 100;

    etb_write_reg(&etb->reg_cache->reg_list[ETB_TRIGGER_COUNTER], trigger_count);
    etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER], 0x0);
    etb_write_reg(&etb->reg_cache->reg_list[ETB_CTRL], etb_ctrl_value);
    jtag_execute_queue();

    /* we're starting a new trace, initialize capture status */
    etm_ctx->capture_status = TRACE_RUNNING;

    return ERROR_OK;
}
static int etb_stop_capture(struct etm_context *etm_ctx)
{
    struct etb *etb = etm_ctx->capture_driver_priv;
    struct reg *etb_ctrl_reg = &etb->reg_cache->reg_list[ETB_CTRL];

    etb_write_reg(etb_ctrl_reg, 0x0);
    jtag_execute_queue();

    /* trace stopped, just clear running flag, but preserve others */
    etm_ctx->capture_status &= ~TRACE_RUNNING;

    return ERROR_OK;
}
struct etm_capture_driver etb_capture_driver =
{
    .name = "etb",
    .commands = etb_command_handlers,
    .init = etb_init,
    .status = etb_status,
    .start_capture = etb_start_capture,
    .stop_capture = etb_stop_capture,
    .read_trace = etb_read_trace,
};