You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

732 lines
19 KiB

  1. /***************************************************************************
  2. * Copyright (C) 2007 by Dominic Rath *
  3. * Dominic.Rath@gmx.de *
  4. * *
  5. * This program is free software; you can redistribute it and/or modify *
  6. * it under the terms of the GNU General Public License as published by *
  7. * the Free Software Foundation; either version 2 of the License, or *
  8. * (at your option) any later version. *
  9. * *
  10. * This program is distributed in the hope that it will be useful, *
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  13. * GNU General Public License for more details. *
  14. * *
  15. * You should have received a copy of the GNU General Public License *
  16. * along with this program; if not, write to the *
  17. * Free Software Foundation, Inc., *
  18. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
  19. ***************************************************************************/
  20. #ifdef HAVE_CONFIG_H
  21. #include "config.h"
  22. #endif
  23. #include "arm.h"
  24. #include "etm.h"
  25. #include "etb.h"
  26. #include "register.h"
/* Human-readable names for the ETB registers, indexed by ETB register
 * address 0..8; consumed by etb_build_reg_cache() below. */
static char* etb_reg_list[] =
{
	"ETB_identification",
	"ETB_ram_depth",
	"ETB_ram_width",
	"ETB_status",
	"ETB_ram_data",
	"ETB_ram_read_pointer",
	"ETB_ram_write_pointer",
	"ETB_trigger_counter",
	"ETB_control",
};
  39. static int etb_get_reg(struct reg *reg);
  40. static int etb_set_instr(struct etb *etb, uint32_t new_instr)
  41. {
  42. struct jtag_tap *tap;
  43. tap = etb->tap;
  44. if (tap == NULL)
  45. return ERROR_FAIL;
  46. if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
  47. {
  48. struct scan_field field;
  49. field.num_bits = tap->ir_length;
  50. void * t = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
  51. field.out_value = t;
  52. buf_set_u32(t, 0, field.num_bits, new_instr);
  53. field.in_value = NULL;
  54. jtag_add_ir_scan(tap, &field, TAP_IDLE);
  55. free(t);
  56. }
  57. return ERROR_OK;
  58. }
  59. static int etb_scann(struct etb *etb, uint32_t new_scan_chain)
  60. {
  61. if (etb->cur_scan_chain != new_scan_chain)
  62. {
  63. struct scan_field field;
  64. field.num_bits = 5;
  65. void * t = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
  66. field.out_value = t;
  67. buf_set_u32(t, 0, field.num_bits, new_scan_chain);
  68. field.in_value = NULL;
  69. /* select INTEST instruction */
  70. etb_set_instr(etb, 0x2);
  71. jtag_add_dr_scan(etb->tap, 1, &field, TAP_IDLE);
  72. etb->cur_scan_chain = new_scan_chain;
  73. free(t);
  74. }
  75. return ERROR_OK;
  76. }
  77. static int etb_read_reg_w_check(struct reg *, uint8_t *, uint8_t *);
  78. static int etb_set_reg_w_exec(struct reg *, uint8_t *);
/* Queue a read of an ETB register without value/mask checking.
 * The scan is only queued; the caller must run jtag_execute_queue(). */
static int etb_read_reg(struct reg *reg)
{
	return etb_read_reg_w_check(reg, NULL, NULL);
}
  83. static int etb_get_reg(struct reg *reg)
  84. {
  85. int retval;
  86. if ((retval = etb_read_reg(reg)) != ERROR_OK)
  87. {
  88. LOG_ERROR("BUG: error scheduling ETB register read");
  89. return retval;
  90. }
  91. if ((retval = jtag_execute_queue()) != ERROR_OK)
  92. {
  93. LOG_ERROR("ETB register read failed");
  94. return retval;
  95. }
  96. return ERROR_OK;
  97. }
/* Accessor callbacks wired into each cached ETB register by
 * etb_build_reg_cache(). */
static const struct reg_arch_type etb_reg_type = {
	.get = etb_get_reg,
	.set = etb_set_reg_w_exec,
};
  102. struct reg_cache* etb_build_reg_cache(struct etb *etb)
  103. {
  104. struct reg_cache *reg_cache = malloc(sizeof(struct reg_cache));
  105. struct reg *reg_list = NULL;
  106. struct etb_reg *arch_info = NULL;
  107. int num_regs = 9;
  108. int i;
  109. /* the actual registers are kept in two arrays */
  110. reg_list = calloc(num_regs, sizeof(struct reg));
  111. arch_info = calloc(num_regs, sizeof(struct etb_reg));
  112. /* fill in values for the reg cache */
  113. reg_cache->name = "etb registers";
  114. reg_cache->next = NULL;
  115. reg_cache->reg_list = reg_list;
  116. reg_cache->num_regs = num_regs;
  117. /* set up registers */
  118. for (i = 0; i < num_regs; i++)
  119. {
  120. reg_list[i].name = etb_reg_list[i];
  121. reg_list[i].size = 32;
  122. reg_list[i].dirty = 0;
  123. reg_list[i].valid = 0;
  124. reg_list[i].value = calloc(1, 4);
  125. reg_list[i].arch_info = &arch_info[i];
  126. reg_list[i].type = &etb_reg_type;
  127. reg_list[i].size = 32;
  128. arch_info[i].addr = i;
  129. arch_info[i].etb = etb;
  130. }
  131. return reg_cache;
  132. }
/* JTAG callback: convert the 32 raw scan-capture bits stored at @arg
 * into a host-order uint32_t, in place.  @arg points into the caller's
 * uint32_t array (see etb_read_ram), so the in-place store is aligned. */
static void etb_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;

	*((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}
  138. static int etb_read_ram(struct etb *etb, uint32_t *data, int num_frames)
  139. {
  140. struct scan_field fields[3];
  141. int i;
  142. etb_scann(etb, 0x0);
  143. etb_set_instr(etb, 0xc);
  144. fields[0].num_bits = 32;
  145. fields[0].out_value = NULL;
  146. fields[0].in_value = NULL;
  147. fields[1].num_bits = 7;
  148. uint8_t temp1;
  149. fields[1].out_value = &temp1;
  150. buf_set_u32(&temp1, 0, 7, 4);
  151. fields[1].in_value = NULL;
  152. fields[2].num_bits = 1;
  153. uint8_t temp2;
  154. fields[2].out_value = &temp2;
  155. buf_set_u32(&temp2, 0, 1, 0);
  156. fields[2].in_value = NULL;
  157. jtag_add_dr_scan(etb->tap, 3, fields, TAP_IDLE);
  158. for (i = 0; i < num_frames; i++)
  159. {
  160. /* ensure nR/W reamins set to read */
  161. buf_set_u32(&temp2, 0, 1, 0);
  162. /* address remains set to 0x4 (RAM data) until we read the last frame */
  163. if (i < num_frames - 1)
  164. buf_set_u32(&temp1, 0, 7, 4);
  165. else
  166. buf_set_u32(&temp1, 0, 7, 0);
  167. fields[0].in_value = (uint8_t *)(data + i);
  168. jtag_add_dr_scan(etb->tap, 3, fields, TAP_IDLE);
  169. jtag_add_callback(etb_getbuf, (jtag_callback_data_t)(data + i));
  170. }
  171. jtag_execute_queue();
  172. return ERROR_OK;
  173. }
/*
 * Queue a read of one ETB register, optionally verifying the result.
 *
 * The ETB DR is 40 bits: 32 data bits, a 7-bit register address, and
 * one nR/W bit (0 = read).  Two scans are queued: the first latches
 * the register address, the second captures the data into reg->value.
 * When check_value/check_mask are non-NULL, the second scan verifies
 * the captured value instead of only storing it.
 *
 * Scans are only queued; the caller must run jtag_execute_queue().
 */
static int etb_read_reg_w_check(struct reg *reg,
		uint8_t* check_value, uint8_t* check_mask)
{
	struct etb_reg *etb_reg = reg->arch_info;
	uint8_t reg_addr = etb_reg->addr & 0x7f;
	struct scan_field fields[3];

	LOG_DEBUG("%i", (int)(etb_reg->addr));

	etb_scann(etb_reg->etb, 0x0);
	etb_set_instr(etb_reg->etb, 0xc);

	/* field 0: 32 data bits */
	fields[0].num_bits = 32;
	fields[0].out_value = reg->value;
	fields[0].in_value = NULL;
	fields[0].check_value = NULL;
	fields[0].check_mask = NULL;

	/* field 1: 7-bit register address */
	fields[1].num_bits = 7;
	uint8_t temp1;
	fields[1].out_value = &temp1;
	buf_set_u32(&temp1, 0, 7, reg_addr);
	fields[1].in_value = NULL;
	fields[1].check_value = NULL;
	fields[1].check_mask = NULL;

	/* field 2: nR/W bit, 0 selects a read */
	fields[2].num_bits = 1;
	uint8_t temp2;
	fields[2].out_value = &temp2;
	buf_set_u32(&temp2, 0, 1, 0);
	fields[2].in_value = NULL;
	fields[2].check_value = NULL;
	fields[2].check_mask = NULL;

	/* first scan: latch the register address */
	jtag_add_dr_scan(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	/* read the identification register in the second run, to make sure we
	 * don't read the ETB data register twice, skipping every second entry
	 */
	buf_set_u32(&temp1, 0, 7, 0x0);
	fields[0].in_value = reg->value;
	fields[0].check_value = check_value;
	fields[0].check_mask = check_mask;

	/* second scan: capture (and optionally verify) the data */
	jtag_add_dr_scan_check(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	return ERROR_OK;
}
  213. static int etb_write_reg(struct reg *, uint32_t);
  214. static int etb_set_reg(struct reg *reg, uint32_t value)
  215. {
  216. int retval;
  217. if ((retval = etb_write_reg(reg, value)) != ERROR_OK)
  218. {
  219. LOG_ERROR("BUG: error scheduling ETB register write");
  220. return retval;
  221. }
  222. buf_set_u32(reg->value, 0, reg->size, value);
  223. reg->valid = 1;
  224. reg->dirty = 0;
  225. return ERROR_OK;
  226. }
  227. static int etb_set_reg_w_exec(struct reg *reg, uint8_t *buf)
  228. {
  229. int retval;
  230. etb_set_reg(reg, buf_get_u32(buf, 0, reg->size));
  231. if ((retval = jtag_execute_queue()) != ERROR_OK)
  232. {
  233. LOG_ERROR("ETB: register write failed");
  234. return retval;
  235. }
  236. return ERROR_OK;
  237. }
  238. static int etb_write_reg(struct reg *reg, uint32_t value)
  239. {
  240. struct etb_reg *etb_reg = reg->arch_info;
  241. uint8_t reg_addr = etb_reg->addr & 0x7f;
  242. LOG_DEBUG("%i: 0x%8.8" PRIx32 "", (int)(etb_reg->addr), value);
  243. etb_scann(etb_reg->etb, 0x0);
  244. etb_set_instr(etb_reg->etb, 0xc);
  245. uint8_t temp0[4];
  246. buf_set_u32(&temp0, 0, 32, value);
  247. uint8_t temp1;
  248. buf_set_u32(&temp1, 0, 7, reg_addr);
  249. uint8_t temp2;
  250. buf_set_u32(&temp2, 0, 1, 1);
  251. return ERROR_OK;
  252. }
  253. COMMAND_HANDLER(handle_etb_config_command)
  254. {
  255. struct target *target;
  256. struct jtag_tap *tap;
  257. struct arm *arm;
  258. if (CMD_ARGC != 2)
  259. {
  260. return ERROR_COMMAND_SYNTAX_ERROR;
  261. }
  262. target = get_target(CMD_ARGV[0]);
  263. if (!target)
  264. {
  265. LOG_ERROR("ETB: target '%s' not defined", CMD_ARGV[0]);
  266. return ERROR_FAIL;
  267. }
  268. arm = target_to_arm(target);
  269. if (!is_arm(arm))
  270. {
  271. command_print(CMD_CTX, "ETB: '%s' isn't an ARM", CMD_ARGV[0]);
  272. return ERROR_FAIL;
  273. }
  274. tap = jtag_tap_by_string(CMD_ARGV[1]);
  275. if (tap == NULL)
  276. {
  277. command_print(CMD_CTX, "ETB: TAP %s does not exist", CMD_ARGV[1]);
  278. return ERROR_FAIL;
  279. }
  280. if (arm->etm)
  281. {
  282. struct etb *etb = malloc(sizeof(struct etb));
  283. arm->etm->capture_driver_priv = etb;
  284. etb->tap = tap;
  285. etb->cur_scan_chain = 0xffffffff;
  286. etb->reg_cache = NULL;
  287. etb->ram_width = 0;
  288. etb->ram_depth = 0;
  289. }
  290. else
  291. {
  292. LOG_ERROR("ETM: target has no ETM defined, ETB left unconfigured");
  293. return ERROR_FAIL;
  294. }
  295. return ERROR_OK;
  296. }
  297. COMMAND_HANDLER(handle_etb_trigger_percent_command)
  298. {
  299. struct target *target;
  300. struct arm *arm;
  301. struct etm_context *etm;
  302. struct etb *etb;
  303. target = get_current_target(CMD_CTX);
  304. arm = target_to_arm(target);
  305. if (!is_arm(arm))
  306. {
  307. command_print(CMD_CTX, "ETB: current target isn't an ARM");
  308. return ERROR_FAIL;
  309. }
  310. etm = arm->etm;
  311. if (!etm) {
  312. command_print(CMD_CTX, "ETB: target has no ETM configured");
  313. return ERROR_FAIL;
  314. }
  315. if (etm->capture_driver != &etb_capture_driver) {
  316. command_print(CMD_CTX, "ETB: target not using ETB");
  317. return ERROR_FAIL;
  318. }
  319. etb = arm->etm->capture_driver_priv;
  320. if (CMD_ARGC > 0) {
  321. uint32_t new_value;
  322. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], new_value);
  323. if ((new_value < 2) || (new_value > 100))
  324. command_print(CMD_CTX,
  325. "valid percentages are 2%% to 100%%");
  326. else
  327. etb->trigger_percent = (unsigned) new_value;
  328. }
  329. command_print(CMD_CTX, "%d percent of tracebuffer fills after trigger",
  330. etb->trigger_percent);
  331. return ERROR_OK;
  332. }
/* Subcommands of the "etb" command group: "config" (setup-time) and
 * "trigger_percent" (runtime). */
static const struct command_registration etb_config_command_handlers[] = {
	{
		/* NOTE: with ADIv5, ETBs are accessed using DAP operations,
		 * possibly over SWD, not through separate TAPs...
		 */
		.name = "config",
		.handler = handle_etb_config_command,
		.mode = COMMAND_CONFIG,
		.help = "Associate ETB with target and JTAG TAP.",
		.usage = "target tap",
	},
	{
		.name = "trigger_percent",
		.handler = handle_etb_trigger_percent_command,
		.mode = COMMAND_EXEC,
		.help = "Set percent of trace buffer to be filled "
			"after the trigger occurs (2..100).",
		.usage = "[percent]",
	},
	COMMAND_REGISTRATION_DONE
};
  354. static const struct command_registration etb_command_handlers[] = {
  355. {
  356. .name = "etb",
  357. .mode = COMMAND_ANY,
  358. .help = "Emebdded Trace Buffer command group",
  359. .chain = etb_config_command_handlers,
  360. },
  361. COMMAND_REGISTRATION_DONE
  362. };
  363. static int etb_init(struct etm_context *etm_ctx)
  364. {
  365. struct etb *etb = etm_ctx->capture_driver_priv;
  366. etb->etm_ctx = etm_ctx;
  367. /* identify ETB RAM depth and width */
  368. etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_DEPTH]);
  369. etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WIDTH]);
  370. jtag_execute_queue();
  371. etb->ram_depth = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_DEPTH].value, 0, 32);
  372. etb->ram_width = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WIDTH].value, 0, 32);
  373. etb->trigger_percent = 50;
  374. return ERROR_OK;
  375. }
  376. static trace_status_t etb_status(struct etm_context *etm_ctx)
  377. {
  378. struct etb *etb = etm_ctx->capture_driver_priv;
  379. struct reg *control = &etb->reg_cache->reg_list[ETB_CTRL];
  380. struct reg *status = &etb->reg_cache->reg_list[ETB_STATUS];
  381. trace_status_t retval = 0;
  382. int etb_timeout = 100;
  383. etb->etm_ctx = etm_ctx;
  384. /* read control and status registers */
  385. etb_read_reg(control);
  386. etb_read_reg(status);
  387. jtag_execute_queue();
  388. /* See if it's (still) active */
  389. retval = buf_get_u32(control->value, 0, 1) ? TRACE_RUNNING : TRACE_IDLE;
  390. /* check Full bit to identify wraparound/overflow */
  391. if (buf_get_u32(status->value, 0, 1) == 1)
  392. retval |= TRACE_OVERFLOWED;
  393. /* check Triggered bit to identify trigger condition */
  394. if (buf_get_u32(status->value, 1, 1) == 1)
  395. retval |= TRACE_TRIGGERED;
  396. /* check AcqComp to see if trigger counter dropped to zero */
  397. if (buf_get_u32(status->value, 2, 1) == 1) {
  398. /* wait for DFEmpty */
  399. while (etb_timeout-- && buf_get_u32(status->value, 3, 1) == 0)
  400. etb_get_reg(status);
  401. if (etb_timeout == 0)
  402. LOG_ERROR("ETB: DFEmpty won't go high, status 0x%02x",
  403. (unsigned) buf_get_u32(status->value, 0, 4));
  404. if (!(etm_ctx->capture_status & TRACE_TRIGGERED))
  405. LOG_WARNING("ETB: trace complete without triggering?");
  406. retval |= TRACE_COMPLETED;
  407. }
  408. /* NOTE: using a trigger is optional; and at least ETB11 has a mode
  409. * where it can ignore the trigger counter.
  410. */
  411. /* update recorded state */
  412. etm_ctx->capture_status = retval;
  413. return retval;
  414. }
/*
 * Capture-driver read: pull the raw frames out of ETB RAM and unpack
 * them into etm_ctx->trace_data.
 *
 * Each 32-bit ETB frame packs several ETM trace words, depending on
 * the configured ETM port width: three 8-bit groups for a 4-bit port,
 * two 12-bit groups for an 8-bit port, one 20-bit group for a 16-bit
 * port.  Each group carries pipestat (3 bits), packet (4/8/16 bits)
 * and a tracesync flag bit, as decoded below.
 *
 * NOTE(review): the malloc() results and the etb_read_ram()/
 * jtag_execute_queue() return codes are not checked here.
 */
static int etb_read_trace(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	int first_frame = 0;
	int num_frames = etb->ram_depth;
	uint32_t *trace_data = NULL;
	int i, j;

	/* fetch overflow flag and write pointer to size the read */
	etb_read_reg(&etb->reg_cache->reg_list[ETB_STATUS]);
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER]);
	jtag_execute_queue();

	/* check if we overflowed, and adjust first frame of the trace accordingly
	 * if we didn't overflow, read only up to the frame that would be written next,
	 * i.e. don't read invalid entries
	 */
	if (buf_get_u32(etb->reg_cache->reg_list[ETB_STATUS].value, 0, 1))
	{
		first_frame = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value, 0, 32);
	}
	else
	{
		num_frames = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value, 0, 32);
	}

	etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_READ_POINTER], first_frame);

	/* read data into temporary array for unpacking */
	trace_data = malloc(sizeof(uint32_t) * num_frames);
	etb_read_ram(etb, trace_data, num_frames);

	/* drop any previously captured trace */
	if (etm_ctx->trace_depth > 0)
	{
		free(etm_ctx->trace_data);
	}

	/* one frame expands to 3, 2 or 1 trace words by port width */
	if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
		etm_ctx->trace_depth = num_frames * 3;
	else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
		etm_ctx->trace_depth = num_frames * 2;
	else
		etm_ctx->trace_depth = num_frames;

	etm_ctx->trace_data = malloc(sizeof(struct etmv1_trace_data) * etm_ctx->trace_depth);

	for (i = 0, j = 0; i < num_frames; i++)
	{
		if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
		{
			/* trace word j: bits 2:0 pipestat, 6:3 packet, 7 tracesync */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x78) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80) >> 7)
			{
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			/* STAT_TR: real pipestat is in the packet's low bits */
			if (etm_ctx->trace_data[j].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1: same layout, bits 15:8 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x100) >> 8;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7800) >> 11;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x8000) >> 15)
			{
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 2: same layout, bits 23:16 */
			etm_ctx->trace_data[j + 2].pipestat = (trace_data[i] & 0x10000) >> 16;
			etm_ctx->trace_data[j + 2].packet = (trace_data[i] & 0x780000) >> 19;
			etm_ctx->trace_data[j + 2].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
			{
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j + 2].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j + 2].pipestat = etm_ctx->trace_data[j + 2].packet & 0x7;
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 3;
		}
		else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
		{
			/* trace word j: bits 2:0 pipestat, 10:3 packet, 11 tracesync */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7f8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x800) >> 11)
			{
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1: same layout, bits 23:12 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x7000) >> 12;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7f8000) >> 15;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
			{
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 2;
		}
		else
		{
			/* trace word j: bits 2:0 pipestat, 18:3 packet, 19 tracesync */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7fff8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80000) >> 19)
			{
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 1;
		}
	}

	free(trace_data);

	return ERROR_OK;
}
  548. static int etb_start_capture(struct etm_context *etm_ctx)
  549. {
  550. struct etb *etb = etm_ctx->capture_driver_priv;
  551. uint32_t etb_ctrl_value = 0x1;
  552. uint32_t trigger_count;
  553. if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_DEMUXED)
  554. {
  555. if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) != ETM_PORT_8BIT)
  556. {
  557. LOG_ERROR("ETB can't run in demultiplexed mode with a 4 or 16 bit port");
  558. return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
  559. }
  560. etb_ctrl_value |= 0x2;
  561. }
  562. if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_MUXED) {
  563. LOG_ERROR("ETB: can't run in multiplexed mode");
  564. return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
  565. }
  566. trigger_count = (etb->ram_depth * etb->trigger_percent) / 100;
  567. etb_write_reg(&etb->reg_cache->reg_list[ETB_TRIGGER_COUNTER], trigger_count);
  568. etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER], 0x0);
  569. etb_write_reg(&etb->reg_cache->reg_list[ETB_CTRL], etb_ctrl_value);
  570. jtag_execute_queue();
  571. /* we're starting a new trace, initialize capture status */
  572. etm_ctx->capture_status = TRACE_RUNNING;
  573. return ERROR_OK;
  574. }
  575. static int etb_stop_capture(struct etm_context *etm_ctx)
  576. {
  577. struct etb *etb = etm_ctx->capture_driver_priv;
  578. struct reg *etb_ctrl_reg = &etb->reg_cache->reg_list[ETB_CTRL];
  579. etb_write_reg(etb_ctrl_reg, 0x0);
  580. jtag_execute_queue();
  581. /* trace stopped, just clear running flag, but preserve others */
  582. etm_ctx->capture_status &= ~TRACE_RUNNING;
  583. return ERROR_OK;
  584. }
/* ETM capture driver backed by an Embedded Trace Buffer; registered
 * with the ETM layer via this exported table of callbacks. */
struct etm_capture_driver etb_capture_driver =
{
	.name = "etb",
	.commands = etb_command_handlers,
	.init = etb_init,
	.status = etb_status,
	.start_capture = etb_start_capture,
	.stop_capture = etb_stop_capture,
	.read_trace = etb_read_trace,
};