/***************************************************************************
 * Copyright (C) 2007 by Dominic Rath
 * Dominic.Rath@gmx.de
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 ***************************************************************************/
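
/*
 * ETM trace capture driver for the ARM Embedded Trace Buffer (ETB).
 *
 * The ETB stores trace frames in on-chip RAM and is reached here through
 * its own JTAG TAP: registers are accessed over a 40-bit scan chain
 * (32 data bits, a 7-bit register address and a read/write bit), and
 * captured frames are unpacked according to the configured ETM port width.
 */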
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "arm.h"
#include "etm.h"
#include "etb.h"
#include "register.h"
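
/* Names for the cached ETB registers; the list index doubles as the
 * register address programmed into arch_info[i].addr below.
 */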
static char *etb_reg_list[] = {
	"ETB_identification",
	"ETB_ram_depth",
	"ETB_ram_width",
	"ETB_status",
	"ETB_ram_data",
	"ETB_ram_read_pointer",
	"ETB_ram_write_pointer",
	"ETB_trigger_counter",
	"ETB_control",
};

static int etb_get_reg(struct reg *reg);
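
/* Queue an IR scan to load 'new_instr' into the ETB TAP, skipping the
 * scan when that instruction is already current.
 */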
static int etb_set_instr(struct etb *etb, uint32_t new_instr)
{
	struct jtag_tap *tap;

	tap = etb->tap;
	if (tap == NULL)
		return ERROR_FAIL;

	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
		struct scan_field field;

		field.num_bits = tap->ir_length;
		void *t = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
		field.out_value = t;
		buf_set_u32(t, 0, field.num_bits, new_instr);

		field.in_value = NULL;

		jtag_add_ir_scan(tap, &field, TAP_IDLE);

		free(t);
	}

	return ERROR_OK;
}
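
/* Queue selection of one of the ETB TAP's scan chains, caching the
 * selection so repeated requests for the same chain are no-ops.
 */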
static int etb_scann(struct etb *etb, uint32_t new_scan_chain)
{
	if (etb->cur_scan_chain != new_scan_chain) {
		struct scan_field field;

		field.num_bits = 5;
		void *t = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
		field.out_value = t;
		buf_set_u32(t, 0, field.num_bits, new_scan_chain);
		field.in_value = NULL;

		/* select SCAN_N instruction */
		etb_set_instr(etb, 0x2);
		jtag_add_dr_scan(etb->tap, 1, &field, TAP_IDLE);

		etb->cur_scan_chain = new_scan_chain;

		free(t);
	}

	return ERROR_OK;
}

static int etb_read_reg_w_check(struct reg *, uint8_t *, uint8_t *);
static int etb_set_reg_w_exec(struct reg *, uint8_t *);

static int etb_read_reg(struct reg *reg)
{
	return etb_read_reg_w_check(reg, NULL, NULL);
}

static int etb_get_reg(struct reg *reg)
{
	int retval;

	retval = etb_read_reg(reg);
	if (retval != ERROR_OK) {
		LOG_ERROR("BUG: error scheduling ETB register read");
		return retval;
	}

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("ETB register read failed");
		return retval;
	}

	return ERROR_OK;
}

static const struct reg_arch_type etb_reg_type = {
	.get = etb_get_reg,
	.set = etb_set_reg_w_exec,
};
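
/* Allocate and populate the register cache holding the nine 32-bit ETB
 * registers listed above; values are read lazily through etb_reg_type.
 */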
struct reg_cache *etb_build_reg_cache(struct etb *etb)
{
	struct reg_cache *reg_cache = malloc(sizeof(struct reg_cache));
	struct reg *reg_list = NULL;
	struct etb_reg *arch_info = NULL;
	int num_regs = 9;
	int i;

	/* the actual registers are kept in two arrays */
	reg_list = calloc(num_regs, sizeof(struct reg));
	arch_info = calloc(num_regs, sizeof(struct etb_reg));

	/* fill in values for the reg cache */
	reg_cache->name = "etb registers";
	reg_cache->next = NULL;
	reg_cache->reg_list = reg_list;
	reg_cache->num_regs = num_regs;

	/* set up registers */
	for (i = 0; i < num_regs; i++) {
		reg_list[i].name = etb_reg_list[i];
		reg_list[i].size = 32;
		reg_list[i].dirty = 0;
		reg_list[i].valid = 0;
		reg_list[i].value = calloc(1, 4);
		reg_list[i].arch_info = &arch_info[i];
		reg_list[i].type = &etb_reg_type;
		arch_info[i].addr = i;
		arch_info[i].etb = etb;
	}

	return reg_cache;
}
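
/* JTAG callback: convert one captured 32-bit frame from JTAG scan-buffer
 * layout to a host-order uint32_t, in place.
 */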
static void etb_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;

	*((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}
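
/* Stream 'num_frames' words out of the ETB RAM data register (address 0x4).
 * The scans are pipelined: the initial scan latches the address, and the
 * address field is switched back to 0x0 for the final frame so no extra
 * RAM-data access is queued.
 */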
static int etb_read_ram(struct etb *etb, uint32_t *data, int num_frames)
{
	struct scan_field fields[3];
	int i;

	etb_scann(etb, 0x0);
	etb_set_instr(etb, 0xc);

	fields[0].num_bits = 32;
	fields[0].out_value = NULL;
	fields[0].in_value = NULL;

	fields[1].num_bits = 7;
	uint8_t temp1;
	fields[1].out_value = &temp1;
	buf_set_u32(&temp1, 0, 7, 4);
	fields[1].in_value = NULL;

	fields[2].num_bits = 1;
	uint8_t temp2;
	fields[2].out_value = &temp2;
	buf_set_u32(&temp2, 0, 1, 0);
	fields[2].in_value = NULL;

	jtag_add_dr_scan(etb->tap, 3, fields, TAP_IDLE);

	for (i = 0; i < num_frames; i++) {
		/* ensure nR/W remains set to read */
		buf_set_u32(&temp2, 0, 1, 0);

		/* address remains set to 0x4 (RAM data) until we read the last frame */
		if (i < num_frames - 1)
			buf_set_u32(&temp1, 0, 7, 4);
		else
			buf_set_u32(&temp1, 0, 7, 0);

		fields[0].in_value = (uint8_t *)(data + i);
		jtag_add_dr_scan(etb->tap, 3, fields, TAP_IDLE);

		jtag_add_callback(etb_getbuf, (jtag_callback_data_t)(data + i));
	}

	jtag_execute_queue();

	return ERROR_OK;
}
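
/* Queue a read of one ETB register, optionally checking the result against
 * check_value/check_mask.  Two scans are queued: the first latches the
 * register address, the second shifts the value out while addressing the
 * identification register, so the RAM data register is not read twice.
 */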
static int etb_read_reg_w_check(struct reg *reg,
	uint8_t *check_value, uint8_t *check_mask)
{
	struct etb_reg *etb_reg = reg->arch_info;
	uint8_t reg_addr = etb_reg->addr & 0x7f;
	struct scan_field fields[3];

	LOG_DEBUG("%i", (int)(etb_reg->addr));

	etb_scann(etb_reg->etb, 0x0);
	etb_set_instr(etb_reg->etb, 0xc);

	fields[0].num_bits = 32;
	fields[0].out_value = reg->value;
	fields[0].in_value = NULL;
	fields[0].check_value = NULL;
	fields[0].check_mask = NULL;

	fields[1].num_bits = 7;
	uint8_t temp1;
	fields[1].out_value = &temp1;
	buf_set_u32(&temp1, 0, 7, reg_addr);
	fields[1].in_value = NULL;
	fields[1].check_value = NULL;
	fields[1].check_mask = NULL;

	fields[2].num_bits = 1;
	uint8_t temp2;
	fields[2].out_value = &temp2;
	buf_set_u32(&temp2, 0, 1, 0);
	fields[2].in_value = NULL;
	fields[2].check_value = NULL;
	fields[2].check_mask = NULL;

	jtag_add_dr_scan(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	/* read the identification register in the second run, to make sure we
	 * don't read the ETB data register twice, skipping every second entry
	 */
	buf_set_u32(&temp1, 0, 7, 0x0);
	fields[0].in_value = reg->value;
	fields[0].check_value = check_value;
	fields[0].check_mask = check_mask;

	jtag_add_dr_scan_check(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	return ERROR_OK;
}

static int etb_write_reg(struct reg *, uint32_t);

static int etb_set_reg(struct reg *reg, uint32_t value)
{
	int retval;

	retval = etb_write_reg(reg, value);
	if (retval != ERROR_OK) {
		LOG_ERROR("BUG: error scheduling ETB register write");
		return retval;
	}

	buf_set_u32(reg->value, 0, reg->size, value);
	reg->valid = 1;
	reg->dirty = 0;

	return ERROR_OK;
}

static int etb_set_reg_w_exec(struct reg *reg, uint8_t *buf)
{
	int retval;

	etb_set_reg(reg, buf_get_u32(buf, 0, reg->size));

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("ETB: register write failed");
		return retval;
	}

	return ERROR_OK;
}
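
/* Queue a write of 'value' to an ETB register (nR/W bit set to 1); the
 * caller is responsible for flushing the JTAG queue.
 */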
static int etb_write_reg(struct reg *reg, uint32_t value)
{
	struct etb_reg *etb_reg = reg->arch_info;
	uint8_t reg_addr = etb_reg->addr & 0x7f;
	struct scan_field fields[3];

	LOG_DEBUG("%i: 0x%8.8" PRIx32 "", (int)(etb_reg->addr), value);

	etb_scann(etb_reg->etb, 0x0);
	etb_set_instr(etb_reg->etb, 0xc);

	fields[0].num_bits = 32;
	uint8_t temp0[4];
	fields[0].out_value = temp0;
	buf_set_u32(temp0, 0, 32, value);
	fields[0].in_value = NULL;

	fields[1].num_bits = 7;
	uint8_t temp1;
	fields[1].out_value = &temp1;
	buf_set_u32(&temp1, 0, 7, reg_addr);
	fields[1].in_value = NULL;

	fields[2].num_bits = 1;
	uint8_t temp2;
	fields[2].out_value = &temp2;
	buf_set_u32(&temp2, 0, 1, 1);
	fields[2].in_value = NULL;

	jtag_add_dr_scan(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	return ERROR_OK;
}
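
/* "etb config <target> <tap>": attach an ETB on the given TAP to the
 * target's ETM; the RAM geometry is probed later, in etb_init().
 */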
COMMAND_HANDLER(handle_etb_config_command)
{
	struct target *target;
	struct jtag_tap *tap;
	struct arm *arm;

	if (CMD_ARGC != 2)
		return ERROR_COMMAND_SYNTAX_ERROR;

	target = get_target(CMD_ARGV[0]);
	if (!target) {
		LOG_ERROR("ETB: target '%s' not defined", CMD_ARGV[0]);
		return ERROR_FAIL;
	}

	arm = target_to_arm(target);
	if (!is_arm(arm)) {
		command_print(CMD_CTX, "ETB: '%s' isn't an ARM", CMD_ARGV[0]);
		return ERROR_FAIL;
	}

	tap = jtag_tap_by_string(CMD_ARGV[1]);
	if (tap == NULL) {
		command_print(CMD_CTX, "ETB: TAP %s does not exist", CMD_ARGV[1]);
		return ERROR_FAIL;
	}

	if (arm->etm) {
		struct etb *etb = malloc(sizeof(struct etb));

		arm->etm->capture_driver_priv = etb;

		etb->tap = tap;
		etb->cur_scan_chain = 0xffffffff;
		etb->reg_cache = NULL;
		etb->ram_width = 0;
		etb->ram_depth = 0;
	} else {
		LOG_ERROR("ETM: target has no ETM defined, ETB left unconfigured");
		return ERROR_FAIL;
	}

	return ERROR_OK;
}
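
/* "etb trigger_percent [percent]": set or show how much of the trace
 * buffer is filled after the trigger fires; the value is used to derive
 * the ETB trigger counter in etb_start_capture().
 */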
COMMAND_HANDLER(handle_etb_trigger_percent_command)
{
	struct target *target;
	struct arm *arm;
	struct etm_context *etm;
	struct etb *etb;

	target = get_current_target(CMD_CTX);
	arm = target_to_arm(target);
	if (!is_arm(arm)) {
		command_print(CMD_CTX, "ETB: current target isn't an ARM");
		return ERROR_FAIL;
	}

	etm = arm->etm;
	if (!etm) {
		command_print(CMD_CTX, "ETB: target has no ETM configured");
		return ERROR_FAIL;
	}
	if (etm->capture_driver != &etb_capture_driver) {
		command_print(CMD_CTX, "ETB: target not using ETB");
		return ERROR_FAIL;
	}
	etb = arm->etm->capture_driver_priv;

	if (CMD_ARGC > 0) {
		uint32_t new_value;

		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], new_value);
		if ((new_value < 2) || (new_value > 100))
			command_print(CMD_CTX,
				"valid percentages are 2%% to 100%%");
		else
			etb->trigger_percent = (unsigned) new_value;
	}

	command_print(CMD_CTX, "%d percent of tracebuffer fills after trigger",
		etb->trigger_percent);

	return ERROR_OK;
}

static const struct command_registration etb_config_command_handlers[] = {
	{
		/* NOTE: with ADIv5, ETBs are accessed using DAP operations,
		 * possibly over SWD, not through separate TAPs...
		 */
		.name = "config",
		.handler = handle_etb_config_command,
		.mode = COMMAND_CONFIG,
		.help = "Associate ETB with target and JTAG TAP.",
		.usage = "target tap",
	},
	{
		.name = "trigger_percent",
		.handler = handle_etb_trigger_percent_command,
		.mode = COMMAND_EXEC,
		.help = "Set percent of trace buffer to be filled "
			"after the trigger occurs (2..100).",
		.usage = "[percent]",
	},
	COMMAND_REGISTRATION_DONE
};

static const struct command_registration etb_command_handlers[] = {
	{
		.name = "etb",
		.mode = COMMAND_ANY,
		.help = "Embedded Trace Buffer command group",
		.chain = etb_config_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
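
/* Capture driver init: probe the ETB RAM depth and width and pick a
 * default trigger position of 50%.
 */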
static int etb_init(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;

	etb->etm_ctx = etm_ctx;

	/* identify ETB RAM depth and width */
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_DEPTH]);
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WIDTH]);
	jtag_execute_queue();

	etb->ram_depth = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_DEPTH].value, 0, 32);
	etb->ram_width = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WIDTH].value, 0, 32);

	etb->trigger_percent = 50;

	return ERROR_OK;
}
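
/* Capture driver status poll: translate the ETB control and status
 * registers (Full, Triggered, AcqComp and DFEmpty bits) into the generic
 * trace_status_t flags.
 */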
static trace_status_t etb_status(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	struct reg *control = &etb->reg_cache->reg_list[ETB_CTRL];
	struct reg *status = &etb->reg_cache->reg_list[ETB_STATUS];
	trace_status_t retval = 0;
	int etb_timeout = 100;

	etb->etm_ctx = etm_ctx;

	/* read control and status registers */
	etb_read_reg(control);
	etb_read_reg(status);
	jtag_execute_queue();

	/* See if it's (still) active */
	retval = buf_get_u32(control->value, 0, 1) ? TRACE_RUNNING : TRACE_IDLE;

	/* check Full bit to identify wraparound/overflow */
	if (buf_get_u32(status->value, 0, 1) == 1)
		retval |= TRACE_OVERFLOWED;

	/* check Triggered bit to identify trigger condition */
	if (buf_get_u32(status->value, 1, 1) == 1)
		retval |= TRACE_TRIGGERED;

	/* check AcqComp to see if trigger counter dropped to zero */
	if (buf_get_u32(status->value, 2, 1) == 1) {
		/* wait for DFEmpty */
		while (etb_timeout-- && buf_get_u32(status->value, 3, 1) == 0)
			etb_get_reg(status);

		if (etb_timeout <= 0)
			LOG_ERROR("ETB: DFEmpty won't go high, status 0x%02x",
				(unsigned) buf_get_u32(status->value, 0, 4));

		if (!(etm_ctx->capture_status & TRACE_TRIGGERED))
			LOG_WARNING("ETB: trace complete without triggering?");

		retval |= TRACE_COMPLETED;
	}

	/* NOTE: using a trigger is optional; and at least ETB11 has a mode
	 * where it can ignore the trigger counter.
	 */

	/* update recorded state */
	etm_ctx->capture_status = retval;

	return retval;
}
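
/* Capture driver trace download: determine how much valid data the ETB
 * holds (handling wraparound via the Full bit and write pointer), read it
 * out of the trace RAM and unpack each 32-bit frame into one, two or three
 * ETMv1 trace words depending on the ETM port width (16, 8 or 4 bit).
 */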
static int etb_read_trace(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	int first_frame = 0;
	int num_frames = etb->ram_depth;
	uint32_t *trace_data = NULL;
	int i, j;

	etb_read_reg(&etb->reg_cache->reg_list[ETB_STATUS]);
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER]);
	jtag_execute_queue();

	/* check if we overflowed, and adjust first frame of the trace accordingly
	 * if we didn't overflow, read only up to the frame that would be written next,
	 * i.e. don't read invalid entries
	 */
	if (buf_get_u32(etb->reg_cache->reg_list[ETB_STATUS].value, 0, 1))
		first_frame = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value, 0, 32);
	else
		num_frames = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value, 0, 32);

	etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_READ_POINTER], first_frame);

	/* read data into temporary array for unpacking */
	trace_data = malloc(sizeof(uint32_t) * num_frames);
	etb_read_ram(etb, trace_data, num_frames);

	if (etm_ctx->trace_depth > 0)
		free(etm_ctx->trace_data);

	if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
		etm_ctx->trace_depth = num_frames * 3;
	else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
		etm_ctx->trace_depth = num_frames * 2;
	else
		etm_ctx->trace_depth = num_frames;

	etm_ctx->trace_data = malloc(sizeof(struct etmv1_trace_data) * etm_ctx->trace_depth);

	for (i = 0, j = 0; i < num_frames; i++) {
		if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT) {
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x78) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80) >> 7)
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j].pipestat == STAT_TR) {
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x100) >> 8;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7800) >> 11;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x8000) >> 15)
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR) {
				etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 2 */
			etm_ctx->trace_data[j + 2].pipestat = (trace_data[i] & 0x10000) >> 16;
			etm_ctx->trace_data[j + 2].packet = (trace_data[i] & 0x780000) >> 19;
			etm_ctx->trace_data[j + 2].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j + 2].pipestat == STAT_TR) {
				etm_ctx->trace_data[j + 2].pipestat = etm_ctx->trace_data[j + 2].packet & 0x7;
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 3;
		} else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT) {
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7f8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x800) >> 11)
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j].pipestat == STAT_TR) {
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x7000) >> 12;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7f8000) >> 15;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR) {
				etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 2;
		} else {
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7fff8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80000) >> 19)
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j].pipestat == STAT_TR) {
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 1;
		}
	}

	free(trace_data);

	return ERROR_OK;
}
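
/* Capture driver start: program the trigger counter from trigger_percent,
 * reset the write pointer and enable the ETB.  Demultiplexed mode is only
 * supported with an 8-bit port; multiplexed mode is not supported at all.
 */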
static int etb_start_capture(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	uint32_t etb_ctrl_value = 0x1;
	uint32_t trigger_count;

	if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_DEMUXED) {
		if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) != ETM_PORT_8BIT) {
			LOG_ERROR("ETB can't run in demultiplexed mode with a 4 or 16 bit port");
			return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
		}
		etb_ctrl_value |= 0x2;
	}

	if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_MUXED) {
		LOG_ERROR("ETB: can't run in multiplexed mode");
		return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
	}

	trigger_count = (etb->ram_depth * etb->trigger_percent) / 100;

	etb_write_reg(&etb->reg_cache->reg_list[ETB_TRIGGER_COUNTER], trigger_count);
	etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER], 0x0);
	etb_write_reg(&etb->reg_cache->reg_list[ETB_CTRL], etb_ctrl_value);
	jtag_execute_queue();

	/* we're starting a new trace, initialize capture status */
	etm_ctx->capture_status = TRACE_RUNNING;

	return ERROR_OK;
}
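
/* Capture driver stop: disable the ETB and clear the RUNNING flag while
 * preserving the other capture status bits.
 */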
static int etb_stop_capture(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	struct reg *etb_ctrl_reg = &etb->reg_cache->reg_list[ETB_CTRL];

	etb_write_reg(etb_ctrl_reg, 0x0);
	jtag_execute_queue();

	/* trace stopped, just clear running flag, but preserve others */
	etm_ctx->capture_status &= ~TRACE_RUNNING;

	return ERROR_OK;
}

struct etm_capture_driver etb_capture_driver = {
	.name = "etb",
	.commands = etb_command_handlers,
	.init = etb_init,
	.status = etb_status,
	.start_capture = etb_start_capture,
	.stop_capture = etb_stop_capture,
	.read_trace = etb_read_trace,
};