/***************************************************************************
 *   Copyright (C) 2006, 2007 by Dominic Rath                              *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2007,2008 Øyvind Harboe                                 *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   Copyright (C) 2009 Michael Schwingen                                  *
 *   michael@schwingen.org                                                 *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>. *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "xscale.h"
#include "target_type.h"
#include "arm_jtag.h"
#include "arm_simulator.h"
#include "arm_disassembler.h"
#include <helper/time_support.h>
#include "register.h"
#include "image.h"
#include "arm_opcodes.h"
#include "armv4_5.h"

/*
 * Important XScale documents available as of October 2009 include:
 *
 * Intel XScale® Core Developer's Manual, January 2004
 *		Order Number: 273473-002
 *	This has a chapter detailing debug facilities, and punts some
 *	details to chip-specific microarchitecture documents.
 *
 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
 *		Document Number: 273539-005
 *	Less detailed than the developer's manual, but summarizes those
 *	missing details (for most XScales) and gives LOTS of notes about
 *	debugger/handler interaction issues.  Presents a simpler reset
 *	and load-handler sequence than the arch doc.  (Note, OpenOCD
 *	doesn't currently support "Hot-Debug" as defined there.)
 *
 * Chip-specific microarchitecture documents may also be useful.
 */

/* forward declarations */
static int xscale_resume(struct target *, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution);
static int xscale_debug_entry(struct target *);
static int xscale_restore_banked(struct target *);
static int xscale_get_reg(struct reg *reg);
static int xscale_set_reg(struct reg *reg, uint8_t *buf);
static int xscale_set_breakpoint(struct target *, struct breakpoint *);
static int xscale_set_watchpoint(struct target *, struct watchpoint *);
static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
static int xscale_read_trace(struct target *);

/* This XScale "debug handler" is loaded into the processor's
 * mini-ICache, which is 2K of code writable only via JTAG.
 */
static const uint8_t xscale_debug_handler[] = {
#include "../../contrib/loaders/debug/xscale/debug_handler.inc"
};

static const char *const xscale_reg_list[] = {
	"XSCALE_MAINID",	/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",	/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",	/* 20 */
	"XSCALE_TXRXCTRL",
};

static const struct xscale_reg xscale_reg_arch_info[] = {
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL},	/* DCSR accessed via JTAG or SW */
	{-1, NULL},	/* TX accessed via JTAG */
	{-1, NULL},	/* RX accessed via JTAG */
	{-1, NULL},	/* TXRXCTRL implicit access via JTAG */
};

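/* The two tables above are index-coupled: entry N of xscale_reg_list names
 * the register described by entry N of xscale_reg_arch_info, and both must
 * stay in step with the XSCALE_* indices used throughout this file (e.g.
 * reg_list[XSCALE_DCSR]).  The -1 entries mark registers reached only
 * through JTAG, as the inline comments note.
 */
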
/* convenience wrapper to access XScale specific registers */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t buf[4];

	buf_set_u32(buf, 0, 32, value);

	return xscale_set_reg(reg, buf);
}

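/* Typical use of this wrapper, as in xscale_enable_single_step() below:
 * arm the IBCR0 hardware breakpoint register on the next PC by writing
 * the address with its enable bit (bit 0) set:
 *
 *	xscale_set_reg_u32(ibcr0, next_pc | 0x1);
 */
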
static const char xscale_not[] = "target is not an XScale";

static int xscale_verify_pointer(struct command_context *cmd_ctx,
	struct xscale_common *xscale)
{
	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		command_print(cmd_ctx, xscale_not);
		return ERROR_TARGET_INVALID;
	}
	return ERROR_OK;
}

static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
{
	assert(tap != NULL);

	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
		struct scan_field field;
		uint8_t scratch[4];

		memset(&field, 0, sizeof field);
		field.num_bits = tap->ir_length;
		field.out_value = scratch;
		buf_set_u32(scratch, 0, field.num_bits, new_instr);

		jtag_add_ir_scan(tap, &field, end_state);
	}

	return ERROR_OK;
}

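/* Because of the cur_instr comparison above, selecting an instruction that
 * is already in the IR queues no scan at all and leaves the TAP state
 * untouched.  Callers that need to end up in a specific state therefore
 * follow this call with an explicit jtag_add_runtest() or pathmove (see
 * xscale_receive() below).
 */
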
static int xscale_read_dcsr(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_DRPAUSE);

	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * (on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

	/* DANGER!!! this must be here.  It makes sure that the arguments
	 * to jtag_set_check_value() do not go out of scope! */
	return jtag_execute_queue();
}

static void xscale_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;
	*((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}

static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
{
	if (num_words == 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	struct xscale_common *xscale = target_to_xscale(target);
	int retval = ERROR_OK;
	tap_state_t path[3];
	struct scan_field fields[3];
	uint8_t *field0 = malloc(num_words * 1);
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint32_t *field1 = malloc(num_words * 4);
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;
	int words_done = 0;
	int words_scheduled = 0;
	int i;

	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	uint8_t tmp;
	fields[0].in_value = &tmp;
	fields[0].check_value = &field0_check_value;
	fields[0].check_mask = &field0_check_mask;

	fields[1].num_bits = 32;

	fields[2].num_bits = 1;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;
	fields[2].check_value = &field2_check_value;
	fields[2].check_mask = &field2_check_mask;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGTX << xscale->xscale_variant,
		TAP_IDLE);
	/* ensures that we're in TAP_IDLE, as the above could be a no-op */
	jtag_add_runtest(1, TAP_IDLE);

	/* repeat until all words have been collected */
	int attempts = 0;
	while (words_done < num_words) {
		/* schedule reads */
		words_scheduled = 0;
		for (i = words_done; i < num_words; i++) {
			fields[0].in_value = &field0[i];

			jtag_add_pathmove(3, path);

			fields[1].in_value = (uint8_t *)(field1 + i);

			jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);

			jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));

			words_scheduled++;
		}

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while receiving data from debug handler");
			break;
		}

		/* examine results */
		for (i = words_done; i < num_words; i++) {
			if (!(field0[i] & 1)) {
				/* move backwards if necessary */
				int j;
				for (j = i; j < num_words - 1; j++) {
					field0[j] = field0[j + 1];
					field1[j] = field1[j + 1];
				}
				words_scheduled--;
			}
		}

		if (words_scheduled == 0) {
			if (attempts++ == 1000) {
				LOG_ERROR(
					"Failed to receive data from debug handler after 1000 attempts");
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}
		}

		words_done += words_scheduled;
	}

	for (i = 0; i < num_words; i++)
		*(buffer++) = buf_get_u32((uint8_t *)&field1[i], 0, 32);

	free(field0);
	free(field1);

	return retval;
}

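/* Handshake note for the DBGTX scans above: bit 0 of the 3-bit status field
 * flags whether TX held fresh data when it was captured.  Words scanned with
 * that bit clear arrived before the debug handler had refilled TX, so they
 * are squeezed out of the result arrays and simply read again on the next
 * pass of the outer loop.
 */
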
static int xscale_read_tx(struct target *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGTX << xscale->xscale_variant,
		TAP_IDLE);

	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;) {
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR;
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
			jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);

		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}

		if (!((!(field0_in & 1)) && consume))
			goto done;

		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}

static int xscale_write_rx(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;) {
		jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) ||
			((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}

		if (!(field0_in & 1))
			goto done;

		if (debug_level >= 3) {
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100);	/* avoid flooding the logs */
		} else
			keep_alive();
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}

/* send count elements of size "size" (1, 2 or 4 bytes) to the debug handler */
static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	int done_count = 0;

	xscale_jtag_set_instr(target->tap,
		XSCALE_DBGRX << xscale->xscale_variant,
		TAP_IDLE);

	static const uint8_t t0;
	uint8_t t1[4];
	static const uint8_t t2 = 1;
	struct scan_field fields[3] = {
		{ .num_bits = 3, .out_value = &t0 },
		{ .num_bits = 32, .out_value = t1 },
		{ .num_bits = 1, .out_value = &t2 },
	};

	int endianness = target->endianness;
	while (done_count++ < count) {
		uint32_t t;

		switch (size) {
			case 4:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t = le_to_h_u32(buffer);
				else
					t = be_to_h_u32(buffer);
				break;
			case 2:
				if (endianness == TARGET_LITTLE_ENDIAN)
					t = le_to_h_u16(buffer);
				else
					t = be_to_h_u16(buffer);
				break;
			case 1:
				t = buffer[0];
				break;
			default:
				LOG_ERROR("BUG: size neither 4, 2 nor 1");
				return ERROR_COMMAND_SYNTAX_ERROR;
		}

		buf_set_u32(t1, 0, 32, t);

		jtag_add_dr_scan(target->tap,
			3,
			fields,
			TAP_IDLE);

		buffer += size;
	}

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}

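/* Each DBGRX scan queued above uses the same framing as xscale_write_rx():
 * a 3-bit flag field, the 32-bit data word, and a trailing 1-bit field held
 * at 1 to mark the word valid (rx_valid).  Note this bulk path does not poll
 * the handshake bit between words the way xscale_write_rx() does; it queues
 * all the words and assumes the debug handler drains RX fast enough.
 */
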
static int xscale_send_u32(struct target *target, uint32_t value)
{
	struct xscale_common *xscale = target_to_xscale(target);

	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
	return xscale_write_rx(target);
}

static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}

/* parity of the number of set bits: 0 if even, 1 if odd; for 32-bit words */
static unsigned int parity(unsigned int v)
{
	/* unsigned int ov = v; */
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v &= 0xf;
	/* LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1); */
	return (0x6996 >> v) & 1;
}

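/* Worked example of the folding trick above: v = 0x80000001 (two set bits)
 * folds to 0x80008001, then 0x80808081, then 0x88888889; the low nibble is
 * 9, and bit 9 of the constant 0x6996 (a 16-entry parity lookup table packed
 * into one word) is 0, i.e. even parity, as expected.
 */
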
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
		TAP_IDLE);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++) {
		buf_set_u32(packet, 0, 32, buffer[word]);

		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
	}

	return jtag_execute_queue();
}

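/* LDIC packet layout, as scanned out above: one 6+27 bit header (the b011
 * "load mini-ICache" command plus the cache-line address, VA >> 5 because
 * lines are 32 bytes), followed by eight 32+1 bit payload scans, each an
 * instruction word with its parity bit computed by parity().
 */
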
static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint8_t packet[4];
	uint8_t cmd;
	struct scan_field fields[2];

	xscale_jtag_set_instr(target->tap,
		XSCALE_LDIC << xscale->xscale_variant,
		TAP_IDLE);

	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

	return ERROR_OK;
}

static int xscale_update_vectors(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int i;
	int retval;

	uint32_t low_reset_branch, high_reset_branch;

	for (i = 1; i < 8; i++) {
		/* if there's a static vector specified for this exception, override */
		if (xscale->static_high_vectors_set & (1 << i))
			xscale->high_vectors[i] = xscale->static_high_vectors[i];
		else {
			retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK) {
				/* Some of these reads will fail as part of normal execution */
				xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	for (i = 1; i < 8; i++) {
		if (xscale->static_low_vectors_set & (1 << i))
			xscale->low_vectors[i] = xscale->static_low_vectors[i];
		else {
			retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK) {
				/* Some of these reads will fail as part of normal execution */
				xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* invalidate and load exception vectors in mini i-cache */
	xscale_invalidate_ic_line(target, 0x0);
	xscale_invalidate_ic_line(target, 0xffff0000);

	xscale_load_ic(target, 0x0, xscale->low_vectors);
	xscale_load_ic(target, 0xffff0000, xscale->high_vectors);

	return ERROR_OK;
}

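/* The reset-vector branches computed above follow the ARM B-encoding rule:
 * the 24-bit offset is (destination - vector_address - 8) >> 2, where "- 8"
 * accounts for the PC reading two instructions ahead on ARMv4/v5 and ">> 2"
 * for word-aligned instructions.  ARMV4_5_B() then wraps that offset into a
 * branch opcode.
 */
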
static int xscale_arch_state(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;

	static const char *state[] = {
		"disabled", "enabled"
	};

	static const char *arch_dbg_reason[] = {
		"", "\n(processor reset)", "\n(trace buffer full)"
	};

	if (arm->common_magic != ARM_COMMON_MAGIC) {
		LOG_ERROR("BUG: called for a non-ARMv4/5 target");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	arm_arch_state(target);
	LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
		state[xscale->armv4_5_mmu.mmu_enabled],
		state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
		state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
		arch_dbg_reason[xscale->arch_debug_reason]);

	return ERROR_OK;
}

static int xscale_poll(struct target *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING)) {
		enum target_state previous_state = target->state;

		retval = xscale_read_tx(target, 0);
		if (retval == ERROR_OK) {
			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		} else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE) {
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be performed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume);
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted;
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}

static int xscale_debug_entry(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t pc;
	uint32_t buffer[10];
	unsigned i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	retval = xscale_read_dcsr(target);
	if (retval != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	retval = xscale_receive(target, buffer, 10);
	if (retval != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	arm->core_cache->reg_list[0].dirty = 1;
	arm->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(arm->pc->value, 0, 32, buffer[1]);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++) {
		buf_set_u32(arm->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		arm->core_cache->reg_list[i].dirty = 1;
		arm->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	arm_set_cpsr(arm, buffer[9]);
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	if (!is_arm_mode(arm->core_mode)) {
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode",
		arm_mode_name(arm->core_mode));

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if (arm->spsr) {
		xscale_receive(target, buffer, 8);
		buf_set_u32(arm->spsr->value, 0, 32, buffer[7]);
		arm->spsr->dirty = false;
		arm->spsr->valid = true;
	} else {
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to right banked register in cache */
	for (i = 8; i <= 14; i++) {
		struct reg *r = arm_reg_current(arm, i);

		buf_set_u32(r->value, 0, 32, buffer[i - 8]);
		r->dirty = false;
		r->valid = true;
	}

	/* mark xscale regs invalid to ensure they are retrieved from the
	 * debug handler if requested */
	for (i = 0; i < xscale->reg_cache->num_regs; i++)
		xscale->reg_cache->reg_list[i].valid = 0;

	/* examine debug reason */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(arm->pc->value, 0, 32);

	switch (moe) {
		case 0x0:	/* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1:	/* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2:	/* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3:	/* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4:	/* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5:	/* Vector trap occurred */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6:	/* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7:	/* Reserved (may flag Hot-Debug support) */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(arm->pc->value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1) {
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value,
				0,
				32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings
	 * read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg =
		buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
		(xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
		(xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		xscale_read_trace(target);

		/* Resume if entered debug due to buffer fill and we're still collecting
		 * trace data.  Note that a debug exception due to trace buffer full
		 * can only happen in fill mode. */
		if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL) {
			if (--xscale->trace.fill_counter > 0)
				xscale_resume(target, 1, 0x0, 1, 0);
		} else	/* entered debug for other reason; reset counter */
			xscale->trace.fill_counter = 0;
	}

	return ERROR_OK;
}

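/* Debug-entry wire protocol, as consumed above: on entry the handler first
 * streams ten words (r0, pc, r1..r7, cpsr), then the banked set for the
 * interrupted mode, r8..r14 plus the SPSR where one exists (USR/SYS have
 * none).  The "method of entry" bits read from DCSR[4:2] then tell us why
 * we stopped; in every decoded case the reported PC overshoots the
 * triggering instruction by 4 bytes, hence the single fixup afterwards.
 */
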
static int xscale_halt(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	if (target->state == TARGET_HALTED) {
		LOG_DEBUG("target was already halted");
		return ERROR_OK;
	} else if (target->state == TARGET_UNKNOWN) {
		/* this must not happen for an XScale target */
		LOG_ERROR("target was in unknown state when halt was requested");
		return ERROR_TARGET_INVALID;
	} else if (target->state == TARGET_RESET)
		LOG_DEBUG("target->state == TARGET_RESET");
	else {
		/* assert external dbg break */
		xscale->external_debug_break = 1;
		xscale_read_dcsr(target);

		target->debug_reason = DBG_REASON_DBGRQ;
	}

	return ERROR_OK;
}

static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
	int retval;

	if (xscale->ibcr0_used) {
		struct breakpoint *ibcr0_bp =
			breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);

		if (ibcr0_bp)
			xscale_unset_breakpoint(target, ibcr0_bp);
		else {
			LOG_ERROR(
				"BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
			exit(-1);
		}
	}

	retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static int xscale_disable_single_step(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
	int retval;

	retval = xscale_set_reg_u32(ibcr0, 0x0);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static void xscale_enable_watchpoints(struct target *target)
{
	struct watchpoint *watchpoint = target->watchpoints;

	while (watchpoint) {
		if (watchpoint->set == 0)
			xscale_set_watchpoint(target, watchpoint);
		watchpoint = watchpoint->next;
	}
}

static void xscale_enable_breakpoints(struct target *target)
{
	struct breakpoint *breakpoint = target->breakpoints;

	/* set any pending breakpoints */
	while (breakpoint) {
		if (breakpoint->set == 0)
			xscale_set_breakpoint(target, breakpoint);
		breakpoint = breakpoint->next;
	}
}

static void xscale_free_trace_data(struct xscale_common *xscale)
{
	struct xscale_trace_data *td = xscale->trace.data;
	while (td) {
		struct xscale_trace_data *next_td = td->next;
		if (td->entries)
			free(td->entries);
		free(td);
		td = next_td;
	}
	xscale->trace.data = NULL;
}

static int xscale_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* update vector tables */
	retval = xscale_update_vectors(target);
	if (retval != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(arm->pc->value, 0, 32, address);

	current_pc = buf_get_u32(arm->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0) {
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(arm->pc->value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		struct breakpoint *breakpoint;
		breakpoint = breakpoint_find(target,
				buf_get_u32(arm->pc->value, 0, 32));
		if (breakpoint != NULL) {
			uint32_t next_pc;
			enum trace_mode saved_trace_mode;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at " TARGET_ADDR_FMT "",
				breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			retval = arm_simulate_step(target, &next_pc);
			if (retval != ERROR_OK) {
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR(
					"BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
					current_opcode);
			}

			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			retval = xscale_restore_banked(target);
			if (retval != ERROR_OK)
				return retval;

			/* send resume request */
			xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target,
				buf_get_u32(arm->cpsr->value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
				buf_get_u32(arm->cpsr->value, 0, 32));

			for (i = 7; i >= 0; i--) {
				/* send register */
				xscale_send_u32(target,
					buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
					i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target,
				buf_get_u32(arm->pc->value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
				buf_get_u32(arm->pc->value, 0, 32));

			/* disable trace data collection in xscale_debug_entry() */
			saved_trace_mode = xscale->trace.mode;
			xscale->trace.mode = XSCALE_TRACE_DISABLED;

			/* wait for and process debug entry */
			xscale_debug_entry(target);

			/* re-enable trace buffer, if enabled previously */
			xscale->trace.mode = saved_trace_mode;

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			LOG_DEBUG("set breakpoint at " TARGET_ADDR_FMT "",
				breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	retval = xscale_restore_banked(target);
	if (retval != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31);
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		if (xscale->trace.mode == XSCALE_TRACE_FILL) {
			/* If trace enabled in fill mode and starting collection of new set
			 * of buffers, initialize buffer counter and free previous buffers */
			if (xscale->trace.fill_counter == 0) {
				xscale->trace.fill_counter = xscale->trace.buffer_fill;
				xscale_free_trace_data(xscale);
			}
		} else	/* wrap mode; free previous buffer */
			xscale_free_trace_data(xscale);

		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	} else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(arm->cpsr->value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(arm->cpsr->value, 0, 32));

	for (i = 7; i >= 0; i--) {
		/* send register */
		xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
			i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(arm->pc->value, 0, 32));
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
		buf_get_u32(arm->pc->value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		/* registers are now invalid */
		register_cache_invalidate(arm->core_cache);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}

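/* Resume wire protocol, mirroring xscale_debug_entry() in reverse: an
 * optional 0x62 ("clean trace buffer") when tracing, then 0x30 (resume) or
 * 0x31 (resume with trace enabled), followed by the CPSR, r7 down to r0,
 * and finally the PC.  The same sequence is used twice above: once to
 * single-step over a breakpoint at the current PC, once for the real resume.
 */
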
static int xscale_step_inner(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *arm = &xscale->arm;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	retval = arm_simulate_step(target, &next_pc);
	if (retval != ERROR_OK) {
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(arm->pc->value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR(
			"BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
			current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	retval = xscale_enable_single_step(target, next_pc);
	if (retval != ERROR_OK)
		return retval;

	/* restore banked registers */
	retval = xscale_restore_banked(target);
	if (retval != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31);
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
		retval = xscale_send_u32(target, 0x62);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_send_u32(target, 0x31);
		if (retval != ERROR_OK)
			return retval;
	} else {
		retval = xscale_send_u32(target, 0x30);
		if (retval != ERROR_OK)
			return retval;
	}

	/* send CPSR */
	retval = xscale_send_u32(target,
			buf_get_u32(arm->cpsr->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
		buf_get_u32(arm->cpsr->value, 0, 32));

	for (i = 7; i >= 0; i--) {
		/* send register */
		retval = xscale_send_u32(target,
				buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i,
			buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	retval = xscale_send_u32(target,
			buf_get_u32(arm->pc->value, 0, 32));
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
		buf_get_u32(arm->pc->value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	/* wait for and process debug entry */
	retval = xscale_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	retval = xscale_disable_single_step(target);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}

static int xscale_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct arm *arm = target_to_arm(target);
	struct breakpoint *breakpoint = NULL;

	uint32_t current_pc;
	int retval;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(arm->pc->value, 0, 32, address);

	current_pc = buf_get_u32(arm->pc->value, 0, 32);

	/* if we're at the reset vector, we have to simulate the step */
	if (current_pc == 0x0) {
		retval = arm_simulate_step(target, NULL);
		if (retval != ERROR_OK)
			return retval;
		current_pc = buf_get_u32(arm->pc->value, 0, 32);
		LOG_DEBUG("current pc %" PRIx32, current_pc);

		target->debug_reason = DBG_REASON_SINGLESTEP;
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);

		return ERROR_OK;
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
		breakpoint = breakpoint_find(target,
				buf_get_u32(arm->pc->value, 0, 32));
	if (breakpoint != NULL) {
		retval = xscale_unset_breakpoint(target, breakpoint);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = xscale_step_inner(target, current, address, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;

	if (breakpoint)
		xscale_set_breakpoint(target, breakpoint);

	LOG_DEBUG("target stepped");

	return ERROR_OK;
}

static int xscale_assert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	/* TODO: apply hw reset signal in the not-examined state */
	if (!(target_was_examined(target))) {
		LOG_WARNING("Reset is not asserted because the target is not examined.");
		LOG_WARNING("Use a reset button or power cycle the target.");
		return ERROR_TARGET_NOT_EXAMINED;
	}

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG)
	 */
	xscale_jtag_set_instr(target->tap,
		XSCALE_SELDCSR << xscale->xscale_variant,
		TAP_IDLE);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt) {
		int retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}

static int xscale_deassert_reset(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint) {
		if (breakpoint->type == BKPT_HARD)
			breakpoint->set = 0;
		breakpoint = breakpoint->next;
	}

	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale_free_trace_data(xscale);

	register_cache_invalidate(xscale->arm.core_cache);

	/* FIXME: mark hardware watchpoints as unset too.  Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated.  Safer to force that, so writing new
	 * contents can't ever fail...
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, TAP_IDLE);
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache.  Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt) {
			uint32_t cache_line[8];
			unsigned i;

			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4) {
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			for (; i < 32; i += 4)
				cache_line[i / 4] = 0xe1a08008;

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0) {
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		}

		retval = xscale_load_ic(target, 0x0,
				xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
				xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, TAP_IDLE);

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt) {
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}

static int xscale_read_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode)
{
	/** \todo add debug handler support for core register reads */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}

static int xscale_write_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode, uint8_t *value)
{
	/** \todo add debug handler support for core register writes */
	LOG_ERROR("not implemented");
	return ERROR_OK;
}

static int xscale_full_context(struct target *target)
{
	struct arm *arm = target_to_arm(target);

	uint32_t *buffer;

	int i, j;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	buffer = malloc(4 * 8);

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS);
	 * we can't enter User mode on an XScale (unpredictable),
	 * but User shares registers with SYS
	 */
	for (i = 1; i < 7; i++) {
		enum arm_mode mode = armv4_5_number_to_mode(i);
		bool valid = true;
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are invalid registers in the current mode */
		for (j = 0; valid && j <= 16; j++) {
			if (!ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j).valid)
				valid = false;
		}
		if (valid)
			continue;

		/* request banked registers */
		xscale_send_u32(target, 0x0);

		/* send CPSR for desired bank mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* get banked registers:  r8 to r14, and SPSR
		 * except in USR/SYS mode
		 */
		if (mode != ARM_MODE_SYS) {
			/* SPSR */
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, 16);

			xscale_receive(target, buffer, 8);

			buf_set_u32(r->value, 0, 32, buffer[7]);
			r->dirty = false;
			r->valid = true;
		} else
			xscale_receive(target, buffer, 7);

		/* move data from buffer to register cache */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j);

			buf_set_u32(r->value, 0, 32, buffer[j - 8]);
			r->dirty = false;
			r->valid = true;
		}
	}

	free(buffer);

	return ERROR_OK;
}

static int xscale_restore_banked(struct target *target)
{
	struct arm *arm = target_to_arm(target);

	int i, j;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
	 * and check if any banked registers need to be written.  Ignore
	 * USR mode (number 0) in favor of SYS; we can't enter User mode on
	 * an XScale (unpredictable), but they share all registers.
	 */
	for (i = 1; i < 7; i++) {
		enum arm_mode mode = armv4_5_number_to_mode(i);
		struct reg *r;

		if (mode == ARM_MODE_USR)
			continue;

		/* check if there are dirty registers in this mode */
		for (j = 8; j <= 14; j++) {
			if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j).dirty)
				goto dirty;
		}

		/* if not USR/SYS, check if the SPSR needs to be written */
		if (mode != ARM_MODE_SYS) {
			if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, 16).dirty)
				goto dirty;
		}

		/* there's nothing to flush for this mode */
		continue;
dirty:
		/* command 0x1: "send banked registers" */
		xscale_send_u32(target, 0x1);

		/* send CPSR for desired mode */
		xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

		/* send r8 to r14/lr ... only FIQ needs more than r13..r14,
		 * but this protocol doesn't understand that nuance.
		 */
		for (j = 8; j <= 14; j++) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, j);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}

		/* send spsr if not in USR/SYS mode */
		if (mode != ARM_MODE_SYS) {
			r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
					mode, 16);
			xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
			r->dirty = false;
		}
	}

	return ERROR_OK;
}

  1451. static int xscale_read_memory(struct target *target, target_addr_t address,
  1452. uint32_t size, uint32_t count, uint8_t *buffer)
  1453. {
  1454. struct xscale_common *xscale = target_to_xscale(target);
  1455. uint32_t *buf32;
  1456. uint32_t i;
  1457. int retval;
  1458. LOG_DEBUG("address: " TARGET_ADDR_FMT ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
  1459. address,
  1460. size,
  1461. count);
  1462. if (target->state != TARGET_HALTED) {
  1463. LOG_WARNING("target not halted");
  1464. return ERROR_TARGET_NOT_HALTED;
  1465. }
  1466. /* sanitize arguments */
  1467. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1468. return ERROR_COMMAND_SYNTAX_ERROR;
  1469. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1470. return ERROR_TARGET_UNALIGNED_ACCESS;
  1471. /* send memory read request (command 0x1n, n: access size) */
  1472. retval = xscale_send_u32(target, 0x10 | size);
  1473. if (retval != ERROR_OK)
  1474. return retval;
  1475. /* send base address for read request */
  1476. retval = xscale_send_u32(target, address);
  1477. if (retval != ERROR_OK)
  1478. return retval;
  1479. /* send number of requested data words */
  1480. retval = xscale_send_u32(target, count);
  1481. if (retval != ERROR_OK)
  1482. return retval;
  1483. /* receive data from target (count times 32-bit words in host endianness) */
  1484. buf32 = malloc(4 * count);
  1485. retval = xscale_receive(target, buf32, count);
  1486. if (retval != ERROR_OK) {
  1487. free(buf32);
  1488. return retval;
  1489. }
  1490. /* extract data from host-endian buffer into byte stream */
  1491. for (i = 0; i < count; i++) {
  1492. switch (size) {
  1493. case 4:
  1494. target_buffer_set_u32(target, buffer, buf32[i]);
  1495. buffer += 4;
  1496. break;
  1497. case 2:
  1498. target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
  1499. buffer += 2;
  1500. break;
  1501. case 1:
  1502. *buffer++ = buf32[i] & 0xff;
  1503. break;
  1504. default:
  1505. LOG_ERROR("invalid read size");
  1506. free(buf32); /* don't leak the receive buffer on this error path */
  1507. return ERROR_COMMAND_SYNTAX_ERROR;
  1507. }
  1508. }
  1509. free(buf32);
  1510. /* examine DCSR, to see if Sticky Abort (SA) got set */
  1511. retval = xscale_read_dcsr(target);
  1512. if (retval != ERROR_OK)
  1513. return retval;
  1514. if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
  1515. /* clear SA bit */
  1516. retval = xscale_send_u32(target, 0x60);
  1517. if (retval != ERROR_OK)
  1518. return retval;
  1519. return ERROR_TARGET_DATA_ABORT;
  1520. }
  1521. return ERROR_OK;
  1522. }
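  /* Usage sketch: reading one aligned word from a halted target via the
   * routine above. demo_read_word() is hypothetical; a target-side fault
   * surfaces as ERROR_TARGET_DATA_ABORT after the sticky-abort check. */
  #if 0
  static int demo_read_word(struct target *target, target_addr_t addr, uint32_t *out)
  {
  	uint8_t buf[4];
  	int retval = xscale_read_memory(target, addr, 4, 1, buf);
  	if (retval != ERROR_OK)
  		return retval;
  	*out = target_buffer_get_u32(target, buf);	/* target to host endianness */
  	return ERROR_OK;
  }
  #endif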
  1523. static int xscale_read_phys_memory(struct target *target, target_addr_t address,
  1524. uint32_t size, uint32_t count, uint8_t *buffer)
  1525. {
  1526. struct xscale_common *xscale = target_to_xscale(target);
  1527. /* with MMU inactive, there are only physical addresses */
  1528. if (!xscale->armv4_5_mmu.mmu_enabled)
  1529. return xscale_read_memory(target, address, size, count, buffer);
  1530. /** \todo: provide a non-stub implementation of this routine. */
  1531. LOG_ERROR("%s: %s is not implemented. Disable MMU?",
  1532. target_name(target), __func__);
  1533. return ERROR_FAIL;
  1534. }
  1535. static int xscale_write_memory(struct target *target, target_addr_t address,
  1536. uint32_t size, uint32_t count, const uint8_t *buffer)
  1537. {
  1538. struct xscale_common *xscale = target_to_xscale(target);
  1539. int retval;
  1540. LOG_DEBUG("address: " TARGET_ADDR_FMT ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
  1541. address,
  1542. size,
  1543. count);
  1544. if (target->state != TARGET_HALTED) {
  1545. LOG_WARNING("target not halted");
  1546. return ERROR_TARGET_NOT_HALTED;
  1547. }
  1548. /* sanitize arguments */
  1549. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1550. return ERROR_COMMAND_SYNTAX_ERROR;
  1551. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1552. return ERROR_TARGET_UNALIGNED_ACCESS;
  1553. /* send memory write request (command 0x2n, n: access size) */
  1554. retval = xscale_send_u32(target, 0x20 | size);
  1555. if (retval != ERROR_OK)
  1556. return retval;
  1557. /* send base address for write request */
  1558. retval = xscale_send_u32(target, address);
  1559. if (retval != ERROR_OK)
  1560. return retval;
  1561. /* send number of data words to be written */
  1562. retval = xscale_send_u32(target, count);
  1563. if (retval != ERROR_OK)
  1564. return retval;
  1565. /* stream the raw byte buffer; a disabled word-at-a-time variant follows for reference */
  1566. #if 0
  1567. for (i = 0; i < count; i++) {
  1568. switch (size) {
  1569. case 4:
  1570. value = target_buffer_get_u32(target, buffer);
  1571. xscale_send_u32(target, value);
  1572. buffer += 4;
  1573. break;
  1574. case 2:
  1575. value = target_buffer_get_u16(target, buffer);
  1576. xscale_send_u32(target, value);
  1577. buffer += 2;
  1578. break;
  1579. case 1:
  1580. value = *buffer;
  1581. xscale_send_u32(target, value);
  1582. buffer += 1;
  1583. break;
  1584. default:
  1585. LOG_ERROR("should never get here");
  1586. exit(-1);
  1587. }
  1588. }
  1589. #endif
  1590. retval = xscale_send(target, buffer, count, size);
  1591. if (retval != ERROR_OK)
  1592. return retval;
  1593. /* examine DCSR, to see if Sticky Abort (SA) got set */
  1594. retval = xscale_read_dcsr(target);
  1595. if (retval != ERROR_OK)
  1596. return retval;
  1597. if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
  1598. /* clear SA bit */
  1599. retval = xscale_send_u32(target, 0x60);
  1600. if (retval != ERROR_OK)
  1601. return retval;
  1602. LOG_ERROR("data abort writing memory");
  1603. return ERROR_TARGET_DATA_ABORT;
  1604. }
  1605. return ERROR_OK;
  1606. }
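  /* The mirror-image sketch for writes, under the same assumptions; the
   * name demo_write_word() is again hypothetical. */
  #if 0
  static int demo_write_word(struct target *target, target_addr_t addr, uint32_t value)
  {
  	uint8_t buf[4];
  	target_buffer_set_u32(target, buf, value);	/* host to target endianness */
  	return xscale_write_memory(target, addr, 4, 1, buf);
  }
  #endif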
  1607. static int xscale_write_phys_memory(struct target *target, target_addr_t address,
  1608. uint32_t size, uint32_t count, const uint8_t *buffer)
  1609. {
  1610. struct xscale_common *xscale = target_to_xscale(target);
  1611. /* with MMU inactive, there are only physical addresses */
  1612. if (!xscale->armv4_5_mmu.mmu_enabled)
  1613. return xscale_write_memory(target, address, size, count, buffer);
  1614. /** \todo: provide a non-stub implementation of this routine. */
  1615. LOG_ERROR("%s: %s is not implemented. Disable MMU?",
  1616. target_name(target), __func__);
  1617. return ERROR_FAIL;
  1618. }
  1619. static int xscale_get_ttb(struct target *target, uint32_t *result)
  1620. {
  1621. struct xscale_common *xscale = target_to_xscale(target);
  1622. uint32_t ttb;
  1623. int retval;
  1624. retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
  1625. if (retval != ERROR_OK)
  1626. return retval;
  1627. ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
  1628. *result = ttb;
  1629. return ERROR_OK;
  1630. }
  1631. static int xscale_disable_mmu_caches(struct target *target, int mmu,
  1632. int d_u_cache, int i_cache)
  1633. {
  1634. struct xscale_common *xscale = target_to_xscale(target);
  1635. uint32_t cp15_control;
  1636. int retval;
  1637. /* read cp15 control register */
  1638. retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  1639. if (retval != ERROR_OK)
  1640. return retval;
  1641. cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  1642. if (mmu)
  1643. cp15_control &= ~0x1U;
  1644. if (d_u_cache) {
  1645. /* clean DCache */
  1646. retval = xscale_send_u32(target, 0x50);
  1647. if (retval != ERROR_OK)
  1648. return retval;
  1649. retval = xscale_send_u32(target, xscale->cache_clean_address);
  1650. if (retval != ERROR_OK)
  1651. return retval;
  1652. /* invalidate DCache */
  1653. retval = xscale_send_u32(target, 0x51);
  1654. if (retval != ERROR_OK)
  1655. return retval;
  1656. cp15_control &= ~0x4U;
  1657. }
  1658. if (i_cache) {
  1659. /* invalidate ICache */
  1660. retval = xscale_send_u32(target, 0x52);
  1661. if (retval != ERROR_OK)
  1662. return retval;
  1663. cp15_control &= ~0x1000U;
  1664. }
  1665. /* write new cp15 control register */
  1666. retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
  1667. if (retval != ERROR_OK)
  1668. return retval;
  1669. /* execute cpwait to ensure outstanding operations complete */
  1670. retval = xscale_send_u32(target, 0x53);
  1671. return retval;
  1672. }
  1673. static int xscale_enable_mmu_caches(struct target *target, int mmu,
  1674. int d_u_cache, int i_cache)
  1675. {
  1676. struct xscale_common *xscale = target_to_xscale(target);
  1677. uint32_t cp15_control;
  1678. int retval;
  1679. /* read cp15 control register */
  1680. retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  1681. if (retval != ERROR_OK)
  1682. return retval;
  1683. cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  1684. if (mmu)
  1685. cp15_control |= 0x1U;
  1686. if (d_u_cache)
  1687. cp15_control |= 0x4U;
  1688. if (i_cache)
  1689. cp15_control |= 0x1000U;
  1690. /* write new cp15 control register */
  1691. retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
  1692. if (retval != ERROR_OK)
  1693. return retval;
  1694. /* execute cpwait to ensure outstanding operations complete */
  1695. retval = xscale_send_u32(target, 0x53);
  1696. return retval;
  1697. }
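  /* For reference, the CP15 control register bits toggled by the two
   * routines above (the same masks appear in both paths):
   *   bit 0  (0x0001)  M: MMU enable
   *   bit 2  (0x0004)  C: data/unified cache enable
   *   bit 12 (0x1000)  I: instruction cache enable
   * Command 0x53 makes the handler execute CPWAIT, so the new setting is
   * guaranteed to be in effect before the next debug operation. */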
  1698. static int xscale_set_breakpoint(struct target *target,
  1699. struct breakpoint *breakpoint)
  1700. {
  1701. int retval;
  1702. struct xscale_common *xscale = target_to_xscale(target);
  1703. if (target->state != TARGET_HALTED) {
  1704. LOG_WARNING("target not halted");
  1705. return ERROR_TARGET_NOT_HALTED;
  1706. }
  1707. if (breakpoint->set) {
  1708. LOG_WARNING("breakpoint already set");
  1709. return ERROR_OK;
  1710. }
  1711. if (breakpoint->type == BKPT_HARD) {
  1712. uint32_t value = breakpoint->address | 1;
  1713. if (!xscale->ibcr0_used) {
  1714. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
  1715. xscale->ibcr0_used = 1;
  1716. breakpoint->set = 1; /* breakpoint set on first breakpoint register */
  1717. } else if (!xscale->ibcr1_used) {
  1718. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
  1719. xscale->ibcr1_used = 1;
  1720. breakpoint->set = 2; /* breakpoint set on second breakpoint register */
  1721. } else { /* bug: availability previously verified in xscale_add_breakpoint() */
  1722. LOG_ERROR("BUG: no hardware comparator available");
  1723. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1724. }
  1725. } else if (breakpoint->type == BKPT_SOFT) {
  1726. if (breakpoint->length == 4) {
  1727. /* keep the original instruction in target endianness */
  1728. retval = target_read_memory(target, breakpoint->address, 4, 1,
  1729. breakpoint->orig_instr);
  1730. if (retval != ERROR_OK)
  1731. return retval;
  1732. /* write the bkpt instruction in target endianness
  1733. * (xscale->arm_bkpt is host endian) */
  1734. retval = target_write_u32(target, breakpoint->address,
  1735. xscale->arm_bkpt);
  1736. if (retval != ERROR_OK)
  1737. return retval;
  1738. } else {
  1739. /* keep the original instruction in target endianness */
  1740. retval = target_read_memory(target, breakpoint->address, 2, 1,
  1741. breakpoint->orig_instr);
  1742. if (retval != ERROR_OK)
  1743. return retval;
  1744. /* write the bkpt instruction in target endianness
  1745. * (xscale->thumb_bkpt is host endian) */
  1746. retval = target_write_u16(target, breakpoint->address,
  1747. xscale->thumb_bkpt);
  1748. if (retval != ERROR_OK)
  1749. return retval;
  1750. }
  1751. breakpoint->set = 1;
  1752. xscale_send_u32(target, 0x50); /* clean dcache */
  1753. xscale_send_u32(target, xscale->cache_clean_address);
  1754. xscale_send_u32(target, 0x51); /* invalidate dcache */
  1755. xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
  1756. }
  1757. return ERROR_OK;
  1758. }
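  /* The four commands issued after patching a soft breakpoint are the
   * usual coherence sequence for self-modified code on XScale; factored
   * out below purely for illustration (demo_flush_after_patch() is
   * hypothetical). */
  #if 0
  static void demo_flush_after_patch(struct target *target)
  {
  	struct xscale_common *xscale = target_to_xscale(target);
  	xscale_send_u32(target, 0x50);				/* clean DCache */
  	xscale_send_u32(target, xscale->cache_clean_address);	/* 64KB scratch region */
  	xscale_send_u32(target, 0x51);				/* invalidate DCache */
  	xscale_send_u32(target, 0x52);				/* invalidate ICache, flush fetch buffers */
  }
  #endif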
  1759. static int xscale_add_breakpoint(struct target *target,
  1760. struct breakpoint *breakpoint)
  1761. {
  1762. struct xscale_common *xscale = target_to_xscale(target);
  1763. if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1)) {
  1764. LOG_ERROR("no breakpoint unit available for hardware breakpoint");
  1765. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1766. }
  1767. if ((breakpoint->length != 2) && (breakpoint->length != 4)) {
  1768. LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
  1769. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1770. }
  1771. if (breakpoint->type == BKPT_HARD)
  1772. xscale->ibcr_available--;
  1773. return xscale_set_breakpoint(target, breakpoint);
  1774. }
  1775. static int xscale_unset_breakpoint(struct target *target,
  1776. struct breakpoint *breakpoint)
  1777. {
  1778. int retval;
  1779. struct xscale_common *xscale = target_to_xscale(target);
  1780. if (target->state != TARGET_HALTED) {
  1781. LOG_WARNING("target not halted");
  1782. return ERROR_TARGET_NOT_HALTED;
  1783. }
  1784. if (!breakpoint->set) {
  1785. LOG_WARNING("breakpoint not set");
  1786. return ERROR_OK;
  1787. }
  1788. if (breakpoint->type == BKPT_HARD) {
  1789. if (breakpoint->set == 1) {
  1790. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
  1791. xscale->ibcr0_used = 0;
  1792. } else if (breakpoint->set == 2) {
  1793. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
  1794. xscale->ibcr1_used = 0;
  1795. }
  1796. breakpoint->set = 0;
  1797. } else {
  1798. /* restore original instruction (kept in target endianness) */
  1799. if (breakpoint->length == 4) {
  1800. retval = target_write_memory(target, breakpoint->address, 4, 1,
  1801. breakpoint->orig_instr);
  1802. if (retval != ERROR_OK)
  1803. return retval;
  1804. } else {
  1805. retval = target_write_memory(target, breakpoint->address, 2, 1,
  1806. breakpoint->orig_instr);
  1807. if (retval != ERROR_OK)
  1808. return retval;
  1809. }
  1810. breakpoint->set = 0;
  1811. xscale_send_u32(target, 0x50); /* clean dcache */
  1812. xscale_send_u32(target, xscale->cache_clean_address);
  1813. xscale_send_u32(target, 0x51); /* invalidate dcache */
  1814. xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
  1815. }
  1816. return ERROR_OK;
  1817. }
  1818. static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
  1819. {
  1820. struct xscale_common *xscale = target_to_xscale(target);
  1821. if (target->state != TARGET_HALTED) {
  1822. LOG_ERROR("target not halted");
  1823. return ERROR_TARGET_NOT_HALTED;
  1824. }
  1825. if (breakpoint->set)
  1826. xscale_unset_breakpoint(target, breakpoint);
  1827. if (breakpoint->type == BKPT_HARD)
  1828. xscale->ibcr_available++;
  1829. return ERROR_OK;
  1830. }
  1831. static int xscale_set_watchpoint(struct target *target,
  1832. struct watchpoint *watchpoint)
  1833. {
  1834. struct xscale_common *xscale = target_to_xscale(target);
  1835. uint32_t enable = 0;
  1836. struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
  1837. uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
  1838. if (target->state != TARGET_HALTED) {
  1839. LOG_ERROR("target not halted");
  1840. return ERROR_TARGET_NOT_HALTED;
  1841. }
  1842. switch (watchpoint->rw) {
  1843. case WPT_READ:
  1844. enable = 0x3;
  1845. break;
  1846. case WPT_ACCESS:
  1847. enable = 0x2;
  1848. break;
  1849. case WPT_WRITE:
  1850. enable = 0x1;
  1851. break;
  1852. default:
  1853. LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
  1854. }
  1855. /* For a watchpoint spanning more than one word, both DBR registers
  1856. * must be used, with the second serving as an address mask. */
  1857. if (watchpoint->length > 4) {
  1858. if (xscale->dbr0_used || xscale->dbr1_used) {
  1859. LOG_ERROR("BUG: sufficient hardware comparators unavailable");
  1860. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1861. }
  1862. /* Write mask value to DBR1, based on the length argument.
  1863. * Address bits ignored by the comparator are those set in mask. */
  1864. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
  1865. watchpoint->length - 1);
  1866. xscale->dbr1_used = 1;
  1867. enable |= 0x100; /* DBCON[M] */
  1868. }
  1869. if (!xscale->dbr0_used) {
  1870. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
  1871. dbcon_value |= enable;
  1872. xscale_set_reg_u32(dbcon, dbcon_value);
  1873. watchpoint->set = 1;
  1874. xscale->dbr0_used = 1;
  1875. } else if (!xscale->dbr1_used) {
  1876. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
  1877. dbcon_value |= enable << 2;
  1878. xscale_set_reg_u32(dbcon, dbcon_value);
  1879. watchpoint->set = 2;
  1880. xscale->dbr1_used = 1;
  1881. } else {
  1882. LOG_ERROR("BUG: no hardware comparator available");
  1883. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1884. }
  1885. return ERROR_OK;
  1886. }
  1887. static int xscale_add_watchpoint(struct target *target,
  1888. struct watchpoint *watchpoint)
  1889. {
  1890. struct xscale_common *xscale = target_to_xscale(target);
  1891. if (xscale->dbr_available < 1) {
  1892. LOG_ERROR("no more watchpoint registers available");
  1893. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1894. }
  1895. if (watchpoint->value)
  1896. LOG_WARNING("xscale does not support value, mask arguments; ignoring");
  1897. /* check that length is a non-zero power of two */
  1898. for (uint32_t len = watchpoint->length; len != 1; len /= 2) {
  1899. if (len == 0 || len % 2) {
  1900. LOG_ERROR("xscale requires that watchpoint length is a power of two");
  1901. return ERROR_COMMAND_ARGUMENT_INVALID;
  1902. }
  1903. }
  1904. if (watchpoint->length == 4) { /* single word watchpoint */
  1905. xscale->dbr_available--;/* one DBR reg used */
  1906. return ERROR_OK;
  1907. }
  1908. /* watchpoints across multiple words require both DBR registers */
  1909. if (xscale->dbr_available < 2) {
  1910. LOG_ERROR("insufficient watchpoint registers available");
  1911. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1912. }
  1913. if (watchpoint->length > watchpoint->address) {
  1914. LOG_ERROR("xscale does not support watchpoints with length "
  1915. "greater than address");
  1916. return ERROR_COMMAND_ARGUMENT_INVALID;
  1917. }
  1918. xscale->dbr_available = 0;
  1919. return ERROR_OK;
  1920. }
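  /* The divide-loop above is the long-hand form of the usual bit test;
   * an equivalent check, shown only for clarity: */
  #if 0
  static bool is_power_of_two(uint32_t len)
  {
  	return len != 0 && (len & (len - 1)) == 0;
  }
  #endif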
  1921. static int xscale_unset_watchpoint(struct target *target,
  1922. struct watchpoint *watchpoint)
  1923. {
  1924. struct xscale_common *xscale = target_to_xscale(target);
  1925. struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
  1926. uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
  1927. if (target->state != TARGET_HALTED) {
  1928. LOG_WARNING("target not halted");
  1929. return ERROR_TARGET_NOT_HALTED;
  1930. }
  1931. if (!watchpoint->set) {
  1932. LOG_WARNING("breakpoint not set");
  1933. return ERROR_OK;
  1934. }
  1935. if (watchpoint->set == 1) {
  1936. if (watchpoint->length > 4) {
  1937. dbcon_value &= ~0x103; /* clear DBCON[M] as well */
  1938. xscale->dbr1_used = 0; /* DBR1 was used for mask */
  1939. } else
  1940. dbcon_value &= ~0x3;
  1941. xscale_set_reg_u32(dbcon, dbcon_value);
  1942. xscale->dbr0_used = 0;
  1943. } else if (watchpoint->set == 2) {
  1944. dbcon_value &= ~0xc;
  1945. xscale_set_reg_u32(dbcon, dbcon_value);
  1946. xscale->dbr1_used = 0;
  1947. }
  1948. watchpoint->set = 0;
  1949. return ERROR_OK;
  1950. }
  1951. static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
  1952. {
  1953. struct xscale_common *xscale = target_to_xscale(target);
  1954. if (target->state != TARGET_HALTED) {
  1955. LOG_ERROR("target not halted");
  1956. return ERROR_TARGET_NOT_HALTED;
  1957. }
  1958. if (watchpoint->set)
  1959. xscale_unset_watchpoint(target, watchpoint);
  1960. if (watchpoint->length > 4)
  1961. xscale->dbr_available++;/* both DBR regs now available */
  1962. xscale->dbr_available++;
  1963. return ERROR_OK;
  1964. }
  1965. static int xscale_get_reg(struct reg *reg)
  1966. {
  1967. struct xscale_reg *arch_info = reg->arch_info;
  1968. struct target *target = arch_info->target;
  1969. struct xscale_common *xscale = target_to_xscale(target);
  1970. /* DCSR, TX and RX are accessible via JTAG */
  1971. if (strcmp(reg->name, "XSCALE_DCSR") == 0)
  1972. return xscale_read_dcsr(arch_info->target);
  1973. else if (strcmp(reg->name, "XSCALE_TX") == 0) {
  1974. /* 1 = consume register content */
  1975. return xscale_read_tx(arch_info->target, 1);
  1976. } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
  1977. /* can't read from RX register (host -> debug handler) */
  1978. return ERROR_OK;
  1979. } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
  1980. /* can't (explicitly) read from TXRXCTRL register */
  1981. return ERROR_OK;
  1982. } else { /* Other DBG registers have to be transferred by the debug handler;
  1983. * send CP read request (command 0x40) */
  1984. xscale_send_u32(target, 0x40);
  1985. /* send CP register number */
  1986. xscale_send_u32(target, arch_info->dbg_handler_number);
  1987. /* read register value */
  1988. xscale_read_tx(target, 1);
  1989. buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
  1990. reg->dirty = 0;
  1991. reg->valid = 1;
  1992. }
  1993. return ERROR_OK;
  1994. }
  1995. static int xscale_set_reg(struct reg *reg, uint8_t *buf)
  1996. {
  1997. struct xscale_reg *arch_info = reg->arch_info;
  1998. struct target *target = arch_info->target;
  1999. struct xscale_common *xscale = target_to_xscale(target);
  2000. uint32_t value = buf_get_u32(buf, 0, 32);
  2001. /* DCSR, TX and RX are accessible via JTAG */
  2002. if (strcmp(reg->name, "XSCALE_DCSR") == 0) {
  2003. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
  2004. return xscale_write_dcsr(arch_info->target, -1, -1);
  2005. } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
  2006. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
  2007. return xscale_write_rx(arch_info->target);
  2008. } else if (strcmp(reg->name, "XSCALE_TX") == 0) {
  2009. /* can't write to TX register (debug-handler -> host) */
  2010. return ERROR_OK;
  2011. } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
  2012. /* can't (explicitly) write to TXRXCTRL register */
  2013. return ERROR_OK;
  2014. } else { /* Other DBG registers have to be transferred by the debug handler;
  2015. * send CP write request (command 0x41) */
  2016. xscale_send_u32(target, 0x41);
  2017. /* send CP register number */
  2018. xscale_send_u32(target, arch_info->dbg_handler_number);
  2019. /* send CP register value */
  2020. xscale_send_u32(target, value);
  2021. buf_set_u32(reg->value, 0, 32, value);
  2022. }
  2023. return ERROR_OK;
  2024. }
  2025. static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
  2026. {
  2027. struct xscale_common *xscale = target_to_xscale(target);
  2028. struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
  2029. struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
  2030. /* send CP write request (command 0x41) */
  2031. xscale_send_u32(target, 0x41);
  2032. /* send CP register number */
  2033. xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
  2034. /* send CP register value */
  2035. xscale_send_u32(target, value);
  2036. buf_set_u32(dcsr->value, 0, 32, value);
  2037. return ERROR_OK;
  2038. }
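  /* Reading a handler-mediated register is the inverse of the 0x41 write
   * sequence above: command 0x40, the register number, then one word back
   * through TX. A minimal sketch, assuming a halted target;
   * demo_read_dbg_reg() is hypothetical. */
  #if 0
  static int demo_read_dbg_reg(struct target *target,
  	uint32_t dbg_handler_number, uint32_t *value)
  {
  	struct xscale_common *xscale = target_to_xscale(target);
  	xscale_send_u32(target, 0x40);			/* command: CP read request */
  	xscale_send_u32(target, dbg_handler_number);	/* which register */
  	int retval = xscale_read_tx(target, 1);		/* consume reply from TX */
  	if (retval != ERROR_OK)
  		return retval;
  	*value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TX].value, 0, 32);
  	return ERROR_OK;
  }
  #endif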
  2039. static int xscale_read_trace(struct target *target)
  2040. {
  2041. struct xscale_common *xscale = target_to_xscale(target);
  2042. struct arm *arm = &xscale->arm;
  2043. struct xscale_trace_data **trace_data_p;
  2044. /* 258 words from debug handler
  2045. * 256 trace buffer entries
  2046. * 2 checkpoint addresses
  2047. */
  2048. uint32_t trace_buffer[258];
  2049. int is_address[256];
  2050. int i, j;
  2051. unsigned int num_checkpoints = 0;
  2052. if (target->state != TARGET_HALTED) {
  2053. LOG_WARNING("target must be stopped to read trace data");
  2054. return ERROR_TARGET_NOT_HALTED;
  2055. }
  2056. /* send read trace buffer command (command 0x61) */
  2057. xscale_send_u32(target, 0x61);
  2058. /* receive trace buffer content */
  2059. xscale_receive(target, trace_buffer, 258);
  2060. /* parse buffer backwards to identify address entries */
  2061. for (i = 255; i >= 0; i--) {
  2062. /* also count number of checkpointed entries */
  2063. if ((trace_buffer[i] & 0xe0) == 0xc0)
  2064. num_checkpoints++;
  2065. is_address[i] = 0;
  2066. if (((trace_buffer[i] & 0xf0) == 0x90) ||
  2067. ((trace_buffer[i] & 0xf0) == 0xd0)) {
  2068. if (i > 0)
  2069. is_address[--i] = 1;
  2070. if (i > 0)
  2071. is_address[--i] = 1;
  2072. if (i > 0)
  2073. is_address[--i] = 1;
  2074. if (i > 0)
  2075. is_address[--i] = 1;
  2076. }
  2077. }
  2078. /* search first non-zero entry that is not part of an address */
  2079. for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
  2080. ;
  2081. if (j == 256) {
  2082. LOG_DEBUG("no trace data collected");
  2083. return ERROR_XSCALE_NO_TRACE_DATA;
  2084. }
  2085. /* account for possible partial address at buffer start (wrap mode only) */
  2086. if (is_address[0]) { /* first entry is address; complete set of 4? */
  2087. i = 1;
  2088. while (i < 4)
  2089. if (!is_address[i++])
  2090. break;
  2091. if (i < 4)
  2092. j += i; /* partial address; can't use it */
  2093. }
  2094. /* if first valid entry is indirect branch, can't use that either (no address) */
  2095. if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
  2096. j++;
  2097. /* walk linked list to terminating entry */
  2098. for (trace_data_p = &xscale->trace.data; *trace_data_p;
  2099. trace_data_p = &(*trace_data_p)->next)
  2100. ;
  2101. *trace_data_p = malloc(sizeof(struct xscale_trace_data));
  2102. (*trace_data_p)->next = NULL;
  2103. (*trace_data_p)->chkpt0 = trace_buffer[256];
  2104. (*trace_data_p)->chkpt1 = trace_buffer[257];
  2105. (*trace_data_p)->last_instruction = buf_get_u32(arm->pc->value, 0, 32);
  2106. (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
  2107. (*trace_data_p)->depth = 256 - j;
  2108. (*trace_data_p)->num_checkpoints = num_checkpoints;
  2109. for (i = j; i < 256; i++) {
  2110. (*trace_data_p)->entries[i - j].data = trace_buffer[i];
  2111. if (is_address[i])
  2112. (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
  2113. else
  2114. (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
  2115. }
  2116. return ERROR_OK;
  2117. }
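  /* How xscale_analyze_trace() below interprets each entry collected
   * here: the upper nybble of a message byte selects the type, the lower
   * nybble counts instructions executed since the previous message. A
   * minimal decoding sketch: */
  #if 0
  static void demo_decode_trace_msg(uint8_t msg)
  {
  	int type = (msg & 0xf0) >> 4;	/* 0..7 exception, 8 direct branch,
  					 * 9 indirect branch, 12/13 their
  					 * checkpointed forms, 15 rollover */
  	int count = msg & 0x0f;		/* instructions before this event */
  	(void)type;
  	(void)count;
  }
  #endif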
  2118. static int xscale_read_instruction(struct target *target, uint32_t pc,
  2119. struct arm_instruction *instruction)
  2120. {
  2121. struct xscale_common *const xscale = target_to_xscale(target);
  2122. int i;
  2123. int section = -1;
  2124. size_t size_read;
  2125. uint32_t opcode;
  2126. int retval;
  2127. if (!xscale->trace.image)
  2128. return ERROR_TRACE_IMAGE_UNAVAILABLE;
  2129. /* search for the section the current instruction belongs to */
  2130. for (i = 0; i < xscale->trace.image->num_sections; i++) {
  2131. if ((xscale->trace.image->sections[i].base_address <= pc) &&
  2132. (xscale->trace.image->sections[i].base_address +
  2133. xscale->trace.image->sections[i].size > pc)) {
  2134. section = i;
  2135. break;
  2136. }
  2137. }
  2138. if (section == -1) {
  2139. /* current instruction couldn't be found in the image */
  2140. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2141. }
  2142. if (xscale->trace.core_state == ARM_STATE_ARM) {
  2143. uint8_t buf[4];
  2144. retval = image_read_section(xscale->trace.image, section,
  2145. pc - xscale->trace.image->sections[section].base_address,
  2146. 4, buf, &size_read);
  2147. if (retval != ERROR_OK) {
  2148. LOG_ERROR("error while reading instruction");
  2149. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2150. }
  2151. opcode = target_buffer_get_u32(target, buf);
  2152. arm_evaluate_opcode(opcode, pc, instruction);
  2153. } else if (xscale->trace.core_state == ARM_STATE_THUMB) {
  2154. uint8_t buf[2];
  2155. retval = image_read_section(xscale->trace.image, section,
  2156. pc - xscale->trace.image->sections[section].base_address,
  2157. 2, buf, &size_read);
  2158. if (retval != ERROR_OK) {
  2159. LOG_ERROR("error while reading instruction");
  2160. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2161. }
  2162. opcode = target_buffer_get_u16(target, buf);
  2163. thumb_evaluate_opcode(opcode, pc, instruction);
  2164. } else {
  2165. LOG_ERROR("BUG: unknown core state encountered");
  2166. exit(-1);
  2167. }
  2168. return ERROR_OK;
  2169. }
  2170. /* Extract address encoded into trace data.
  2171. * Write result to address referenced by argument 'target', or 0 if incomplete. */
  2172. static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
  2173. int i, uint32_t *target)
  2174. {
  2175. /* if there are fewer than four entries prior to the indirect branch message
  2176. * we can't extract the address */
  2177. if (i < 4)
  2178. *target = 0;
  2179. else {
  2180. *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
  2181. (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
  2182. }
  2183. }
  2184. static inline void xscale_display_instruction(struct target *target, uint32_t pc,
  2185. struct arm_instruction *instruction,
  2186. struct command_context *cmd_ctx)
  2187. {
  2188. int retval = xscale_read_instruction(target, pc, instruction);
  2189. if (retval == ERROR_OK)
  2190. command_print(cmd_ctx, "%s", instruction->text);
  2191. else
  2192. command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
  2193. }
  2194. static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
  2195. {
  2196. struct xscale_common *xscale = target_to_xscale(target);
  2197. struct xscale_trace_data *trace_data = xscale->trace.data;
  2198. int i, retval;
  2199. uint32_t breakpoint_pc = 0;
  2200. struct arm_instruction instruction;
  2201. uint32_t current_pc = 0;/* initialized when address determined */
  2202. if (!xscale->trace.image)
  2203. LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
  2204. /* loop for each trace buffer that was loaded from target */
  2205. while (trace_data) {
  2206. int chkpt = 0; /* incremented as checkpointed entries found */
  2207. int j;
  2208. /* FIXME: set this to correct mode when trace buffer is first enabled */
  2209. xscale->trace.core_state = ARM_STATE_ARM;
  2210. /* loop for each entry in this trace buffer */
  2211. for (i = 0; i < trace_data->depth; i++) {
  2212. int exception = 0;
  2213. uint32_t chkpt_reg = 0x0;
  2214. uint32_t branch_target = 0;
  2215. int count;
  2216. /* trace entry type is upper nybble of 'message byte' */
  2217. int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
  2218. /* Target addresses of indirect branches are written into buffer
  2219. * before the message byte representing the branch. Skip past it */
  2220. if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
  2221. continue;
  2222. switch (trace_msg_type) {
  2223. case 0: /* Exceptions */
  2224. case 1:
  2225. case 2:
  2226. case 3:
  2227. case 4:
  2228. case 5:
  2229. case 6:
  2230. case 7:
  2231. exception = (trace_data->entries[i].data & 0x70) >> 4;
  2232. /* FIXME: vector table may be at ffff0000 */
  2233. branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
  2234. break;
  2235. case 8: /* Direct Branch */
  2236. break;
  2237. case 9: /* Indirect Branch */
  2238. xscale_branch_address(trace_data, i, &branch_target);
  2239. break;
  2240. case 13: /* Checkpointed Indirect Branch */
  2241. xscale_branch_address(trace_data, i, &branch_target);
  2242. if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
  2243. chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is
  2244. *oldest */
  2245. else
  2246. chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and
  2247. *newest */
  2248. chkpt++;
  2249. break;
  2250. case 12: /* Checkpointed Direct Branch */
  2251. if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
  2252. chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is
  2253. *oldest */
  2254. else
  2255. chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and
  2256. *newest */
  2257. /* if no current_pc, checkpoint will be starting point */
  2258. if (current_pc == 0)
  2259. branch_target = chkpt_reg;
  2260. chkpt++;
  2261. break;
  2262. case 15:/* Roll-over */
  2263. break;
  2264. default:/* Reserved */
  2265. LOG_WARNING("trace is suspect: invalid trace message byte");
  2266. continue;
  2267. }
  2268. /* If we don't have the current_pc yet, but we did get the branch target
  2269. * (either from the trace buffer on indirect branch, or from a checkpoint reg),
  2270. * then we can start displaying instructions at the next iteration, with
  2271. * branch_target as the starting point.
  2272. */
  2273. if (current_pc == 0) {
  2274. current_pc = branch_target; /* remains 0 unless branch_target *obtained */
  2275. continue;
  2276. }
  2277. /* We have current_pc. Read and display the instructions from the image.
  2278. * First, display count instructions (lower nybble of message byte). */
  2279. count = trace_data->entries[i].data & 0x0f;
  2280. for (j = 0; j < count; j++) {
  2281. xscale_display_instruction(target, current_pc, &instruction,
  2282. cmd_ctx);
  2283. current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
  2284. }
  2285. /* An additional instruction is implicitly added to count for
  2286. * rollover and some exceptions: undef, swi, prefetch abort. */
  2287. if ((trace_msg_type == 15) || (exception > 0 && exception < 4)) {
  2288. xscale_display_instruction(target, current_pc, &instruction,
  2289. cmd_ctx);
  2290. current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
  2291. }
  2292. if (trace_msg_type == 15) /* rollover */
  2293. continue;
  2294. if (exception) {
  2295. command_print(cmd_ctx, "--- exception %i ---", exception);
  2296. continue;
  2297. }
  2298. /* not exception or rollover; next instruction is a branch and is
  2299. * not included in the count */
  2300. xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
  2301. /* for direct branches, extract branch destination from instruction */
  2302. if ((trace_msg_type == 8) || (trace_msg_type == 12)) {
  2303. retval = xscale_read_instruction(target, current_pc, &instruction);
  2304. if (retval == ERROR_OK)
  2305. current_pc = instruction.info.b_bl_bx_blx.target_address;
  2306. else
  2307. current_pc = 0; /* branch destination unknown */
  2308. /* direct branch w/ checkpoint; can also get from checkpoint reg */
  2309. if (trace_msg_type == 12) {
  2310. if (current_pc == 0)
  2311. current_pc = chkpt_reg;
  2312. else if (current_pc != chkpt_reg) /* sanity check */
  2313. LOG_WARNING("trace is suspect: checkpoint register "
  2314. "inconsistent with adddress from image");
  2315. }
  2316. if (current_pc == 0)
  2317. command_print(cmd_ctx, "address unknown");
  2318. continue;
  2319. }
  2320. /* indirect branch; the branch destination was read from trace buffer */
  2321. if ((trace_msg_type == 9) || (trace_msg_type == 13)) {
  2322. current_pc = branch_target;
  2323. /* sanity check (checkpoint reg is redundant) */
  2324. if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
  2325. LOG_WARNING("trace is suspect: checkpoint register "
  2326. "inconsistent with address from trace buffer");
  2327. }
  2328. } /* END: for (i = 0; i < trace_data->depth; i++) */
  2329. breakpoint_pc = trace_data->last_instruction; /* used below */
  2330. trace_data = trace_data->next;
  2331. } /* END: while (trace_data) */
  2332. /* Finally... display all instructions up to the value of the pc when the
  2333. * debug break occurred (saved when trace data was collected from target).
  2334. * This is necessary because the trace only records execution branches and 16
  2335. * consecutive instructions (rollovers), so the last few are typically missed.
  2336. */
  2337. if (current_pc == 0)
  2338. return ERROR_OK;/* current_pc was never found */
  2339. /* how many instructions remaining? */
  2340. int gap_count = (breakpoint_pc - current_pc) /
  2341. (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
  2342. /* should never be negative or over 16, but verify */
  2343. if (gap_count < 0 || gap_count > 16) {
  2344. LOG_WARNING("trace is suspect: excessive gap at end of trace");
  2345. return ERROR_OK;/* bail; large number or negative value no good */
  2346. }
  2347. /* display remaining instructions */
  2348. for (i = 0; i < gap_count; i++) {
  2349. xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
  2350. current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
  2351. }
  2352. return ERROR_OK;
  2353. }
  2354. static const struct reg_arch_type xscale_reg_type = {
  2355. .get = xscale_get_reg,
  2356. .set = xscale_set_reg,
  2357. };
  2358. static void xscale_build_reg_cache(struct target *target)
  2359. {
  2360. struct xscale_common *xscale = target_to_xscale(target);
  2361. struct arm *arm = &xscale->arm;
  2362. struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
  2363. struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
  2364. int i;
  2365. int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
  2366. (*cache_p) = arm_build_reg_cache(target, arm);
  2367. (*cache_p)->next = malloc(sizeof(struct reg_cache));
  2368. cache_p = &(*cache_p)->next;
  2369. /* fill in values for the xscale reg cache */
  2370. (*cache_p)->name = "XScale registers";
  2371. (*cache_p)->next = NULL;
  2372. (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
  2373. (*cache_p)->num_regs = num_regs;
  2374. for (i = 0; i < num_regs; i++) {
  2375. (*cache_p)->reg_list[i].name = xscale_reg_list[i];
  2376. (*cache_p)->reg_list[i].value = calloc(4, 1);
  2377. (*cache_p)->reg_list[i].dirty = 0;
  2378. (*cache_p)->reg_list[i].valid = 0;
  2379. (*cache_p)->reg_list[i].size = 32;
  2380. (*cache_p)->reg_list[i].arch_info = &arch_info[i];
  2381. (*cache_p)->reg_list[i].type = &xscale_reg_type;
  2382. arch_info[i] = xscale_reg_arch_info[i];
  2383. arch_info[i].target = target;
  2384. }
  2385. xscale->reg_cache = (*cache_p);
  2386. }
  2387. static int xscale_init_target(struct command_context *cmd_ctx,
  2388. struct target *target)
  2389. {
  2390. xscale_build_reg_cache(target);
  2391. return ERROR_OK;
  2392. }
  2393. static int xscale_init_arch_info(struct target *target,
  2394. struct xscale_common *xscale, struct jtag_tap *tap)
  2395. {
  2396. struct arm *arm;
  2397. uint32_t high_reset_branch, low_reset_branch;
  2398. int i;
  2399. arm = &xscale->arm;
  2400. /* store architecture-specific data */
  2401. xscale->common_magic = XSCALE_COMMON_MAGIC;
  2402. /* PXA3xx with 11 bit IR shifts the JTAG instructions */
  2403. if (tap->ir_length == 11)
  2404. xscale->xscale_variant = XSCALE_PXA3XX;
  2405. else
  2406. xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
  2407. /* the debug handler isn't installed (and thus not running) at this time */
  2408. xscale->handler_address = 0xfe000800;
  2409. /* clear the vectors we keep locally for reference */
  2410. memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
  2411. memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
  2412. /* no user-specified vectors have been configured yet */
  2413. xscale->static_low_vectors_set = 0x0;
  2414. xscale->static_high_vectors_set = 0x0;
  2415. /* calculate branches to debug handler */
  2416. low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
  2417. high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
  2418. xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
  2419. xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
  2420. for (i = 1; i <= 7; i++) {
  2421. xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  2422. xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  2423. }
  2424. /* 64kB aligned region used for DCache cleaning */
  2425. xscale->cache_clean_address = 0xfffe0000;
  2426. xscale->hold_rst = 0;
  2427. xscale->external_debug_break = 0;
  2428. xscale->ibcr_available = 2;
  2429. xscale->ibcr0_used = 0;
  2430. xscale->ibcr1_used = 0;
  2431. xscale->dbr_available = 2;
  2432. xscale->dbr0_used = 0;
  2433. xscale->dbr1_used = 0;
  2434. LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
  2435. target_name(target));
  2436. xscale->arm_bkpt = ARMV5_BKPT(0x0);
  2437. xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
  2438. xscale->vector_catch = 0x1;
  2439. xscale->trace.data = NULL;
  2440. xscale->trace.image = NULL;
  2441. xscale->trace.mode = XSCALE_TRACE_DISABLED;
  2442. xscale->trace.buffer_fill = 0;
  2443. xscale->trace.fill_counter = 0;
  2444. /* prepare ARMv4/5 specific information */
  2445. arm->arch_info = xscale;
  2446. arm->core_type = ARM_MODE_ANY;
  2447. arm->read_core_reg = xscale_read_core_reg;
  2448. arm->write_core_reg = xscale_write_core_reg;
  2449. arm->full_context = xscale_full_context;
  2450. arm_init_arch_info(target, arm);
  2451. xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
  2452. xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
  2453. xscale->armv4_5_mmu.read_memory = xscale_read_memory;
  2454. xscale->armv4_5_mmu.write_memory = xscale_write_memory;
  2455. xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
  2456. xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
  2457. xscale->armv4_5_mmu.has_tiny_pages = 1;
  2458. xscale->armv4_5_mmu.mmu_enabled = 0;
  2459. return ERROR_OK;
  2460. }
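  /* The vector setup above is ordinary ARM B-instruction arithmetic: the
   * word offset is (destination - vector - 8) >> 2, where the -8 accounts
   * for the pipelined PC. Worked example with the default handler at
   * 0xfe000800: the low reset vector targets 0xfe000820, so
   * (0xfe000820 - 0x0 - 0x8) >> 2 = 0x3f800206; masked to 24 bits that is
   * 0x800206, which the core sign-extends so the branch wraps around the
   * address space to reach the handler. */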
  2461. static int xscale_target_create(struct target *target, Jim_Interp *interp)
  2462. {
  2463. struct xscale_common *xscale;
  2464. if (sizeof xscale_debug_handler > 0x800) {
  2465. LOG_ERROR("debug_handler.bin: larger than 2kb");
  2466. return ERROR_FAIL;
  2467. }
  2468. xscale = calloc(1, sizeof(*xscale));
  2469. if (!xscale)
  2470. return ERROR_FAIL;
  2471. return xscale_init_arch_info(target, xscale, target->tap);
  2472. }
  2473. COMMAND_HANDLER(xscale_handle_debug_handler_command)
  2474. {
  2475. struct target *target = NULL;
  2476. struct xscale_common *xscale;
  2477. int retval;
  2478. uint32_t handler_address;
  2479. if (CMD_ARGC < 2)
  2480. return ERROR_COMMAND_SYNTAX_ERROR;
  2481. target = get_target(CMD_ARGV[0]);
  2482. if (target == NULL) {
  2483. LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
  2484. return ERROR_FAIL;
  2485. }
  2486. xscale = target_to_xscale(target);
  2487. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2488. if (retval != ERROR_OK)
  2489. return retval;
  2490. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
  2491. if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
  2492. ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
  2493. xscale->handler_address = handler_address;
  2494. else {
  2495. LOG_ERROR(
  2496. "xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
  2497. return ERROR_FAIL;
  2498. }
  2499. return ERROR_OK;
  2500. }
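  /* Typical usage from a configuration script (the target name 'xscale0'
   * is illustrative):
   *
   *	xscale debug_handler xscale0 0xfe000800
   *
   * The address must fall inside one of the two windows checked above. */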
  2501. COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
  2502. {
  2503. struct target *target = NULL;
  2504. struct xscale_common *xscale;
  2505. int retval;
  2506. uint32_t cache_clean_address;
  2507. if (CMD_ARGC < 2)
  2508. return ERROR_COMMAND_SYNTAX_ERROR;
  2509. target = get_target(CMD_ARGV[0]);
  2510. if (target == NULL) {
  2511. LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
  2512. return ERROR_FAIL;
  2513. }
  2514. xscale = target_to_xscale(target);
  2515. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2516. if (retval != ERROR_OK)
  2517. return retval;
  2518. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
  2519. if (cache_clean_address & 0xffff)
  2520. LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
  2521. else
  2522. xscale->cache_clean_address = cache_clean_address;
  2523. return ERROR_OK;
  2524. }
  2525. COMMAND_HANDLER(xscale_handle_cache_info_command)
  2526. {
  2527. struct target *target = get_current_target(CMD_CTX);
  2528. struct xscale_common *xscale = target_to_xscale(target);
  2529. int retval;
  2530. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2531. if (retval != ERROR_OK)
  2532. return retval;
  2533. return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
  2534. }
  2535. static int xscale_virt2phys(struct target *target,
  2536. target_addr_t virtual, target_addr_t *physical)
  2537. {
  2538. struct xscale_common *xscale = target_to_xscale(target);
  2539. uint32_t cb;
  2540. if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
  2541. LOG_ERROR(xscale_not);
  2542. return ERROR_TARGET_INVALID;
  2543. }
  2544. uint32_t ret;
  2545. int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
  2546. virtual, &cb, &ret);
  2547. if (retval != ERROR_OK)
  2548. return retval;
  2549. *physical = ret;
  2550. return ERROR_OK;
  2551. }
  2552. static int xscale_mmu(struct target *target, int *enabled)
  2553. {
  2554. struct xscale_common *xscale = target_to_xscale(target);
  2555. if (target->state != TARGET_HALTED) {
  2556. LOG_ERROR("Target not halted");
  2557. return ERROR_TARGET_INVALID;
  2558. }
  2559. *enabled = xscale->armv4_5_mmu.mmu_enabled;
  2560. return ERROR_OK;
  2561. }
  2562. COMMAND_HANDLER(xscale_handle_mmu_command)
  2563. {
  2564. struct target *target = get_current_target(CMD_CTX);
  2565. struct xscale_common *xscale = target_to_xscale(target);
  2566. int retval;
  2567. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2568. if (retval != ERROR_OK)
  2569. return retval;
  2570. if (target->state != TARGET_HALTED) {
  2571. command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
  2572. return ERROR_OK;
  2573. }
  2574. if (CMD_ARGC >= 1) {
  2575. bool enable;
  2576. COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
  2577. if (enable)
  2578. xscale_enable_mmu_caches(target, 1, 0, 0);
  2579. else
  2580. xscale_disable_mmu_caches(target, 1, 0, 0);
  2581. xscale->armv4_5_mmu.mmu_enabled = enable;
  2582. }
  2583. command_print(CMD_CTX, "mmu %s",
  2584. (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
  2585. return ERROR_OK;
  2586. }
  2587. COMMAND_HANDLER(xscale_handle_idcache_command)
  2588. {
  2589. struct target *target = get_current_target(CMD_CTX);
  2590. struct xscale_common *xscale = target_to_xscale(target);
  2591. int retval = xscale_verify_pointer(CMD_CTX, xscale);
  2592. if (retval != ERROR_OK)
  2593. return retval;
  2594. if (target->state != TARGET_HALTED) {
  2595. command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
  2596. return ERROR_OK;
  2597. }
  2598. bool icache = false;
  2599. if (strcmp(CMD_NAME, "icache") == 0)
  2600. icache = true;
  2601. if (CMD_ARGC >= 1) {
  2602. bool enable;
  2603. COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
  2604. if (icache) {
  2605. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
  2606. if (enable)
  2607. xscale_enable_mmu_caches(target, 0, 0, 1);
  2608. else
  2609. xscale_disable_mmu_caches(target, 0, 0, 1);
  2610. } else {
  2611. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
  2612. if (enable)
  2613. xscale_enable_mmu_caches(target, 0, 1, 0);
  2614. else
  2615. xscale_disable_mmu_caches(target, 0, 1, 0);
  2616. }
  2617. }
  2618. bool enabled = icache ?
  2619. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
  2620. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
  2621. const char *msg = enabled ? "enabled" : "disabled";
  2622. command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
  2623. return ERROR_OK;
  2624. }
  2625. static const struct {
  2626. char name[15];
  2627. unsigned mask;
  2628. } vec_ids[] = {
  2629. { "fiq", DCSR_TF, },
  2630. { "irq", DCSR_TI, },
  2631. { "dabt", DCSR_TD, },
  2632. { "pabt", DCSR_TA, },
  2633. { "swi", DCSR_TS, },
  2634. { "undef", DCSR_TU, },
  2635. { "reset", DCSR_TR, },
  2636. };
  2637. COMMAND_HANDLER(xscale_handle_vector_catch_command)
  2638. {
  2639. struct target *target = get_current_target(CMD_CTX);
  2640. struct xscale_common *xscale = target_to_xscale(target);
  2641. int retval;
  2642. uint32_t dcsr_value;
  2643. uint32_t catch = 0;
  2644. struct reg *dcsr_reg = &xscale->reg_cache->reg_list[XSCALE_DCSR];
  2645. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2646. if (retval != ERROR_OK)
  2647. return retval;
  2648. dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
  2649. if (CMD_ARGC > 0) {
  2650. if (CMD_ARGC == 1) {
  2651. if (strcmp(CMD_ARGV[0], "all") == 0) {
  2652. catch = DCSR_TRAP_MASK;
  2653. CMD_ARGC--;
  2654. } else if (strcmp(CMD_ARGV[0], "none") == 0) {
  2655. catch = 0;
  2656. CMD_ARGC--;
  2657. }
  2658. }
  2659. while (CMD_ARGC-- > 0) {
  2660. unsigned i;
  2661. for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
  2662. if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name))
  2663. continue;
  2664. catch |= vec_ids[i].mask;
  2665. break;
  2666. }
  2667. if (i == ARRAY_SIZE(vec_ids)) {
  2668. LOG_ERROR("No vector '%s'", CMD_ARGV[CMD_ARGC]);
  2669. return ERROR_COMMAND_SYNTAX_ERROR;
  2670. }
  2671. }
  2672. buf_set_u32(dcsr_reg->value, 0, 32,
  2673. (buf_get_u32(dcsr_reg->value, 0, 32) & ~DCSR_TRAP_MASK) | catch);
  2674. xscale_write_dcsr(target, -1, -1);
  2675. }
  2676. dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
  2677. for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
  2678. command_print(CMD_CTX, "%15s: %s", vec_ids[i].name,
  2679. (dcsr_value & vec_ids[i].mask) ? "catch" : "ignore");
  2680. }
  2681. return ERROR_OK;
  2682. }
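  /* Usage examples (the selections are illustrative):
   *
   *	xscale vector_catch all
   *	xscale vector_catch fiq irq
   *
   * Invoked with no arguments, the command just prints the catch/ignore
   * state of each vector, as coded above. */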
  2683. COMMAND_HANDLER(xscale_handle_vector_table_command)
  2684. {
  2685. struct target *target = get_current_target(CMD_CTX);
  2686. struct xscale_common *xscale = target_to_xscale(target);
  2687. int err = 0;
  2688. int retval;
  2689. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2690. if (retval != ERROR_OK)
  2691. return retval;
  2692. if (CMD_ARGC == 0) { /* print current settings */
  2693. int idx;
  2694. command_print(CMD_CTX, "active user-set static vectors:");
  2695. for (idx = 1; idx < 8; idx++)
  2696. if (xscale->static_low_vectors_set & (1 << idx))
  2697. command_print(CMD_CTX,
  2698. "low %d: 0x%" PRIx32,
  2699. idx,
  2700. xscale->static_low_vectors[idx]);
  2701. for (idx = 1; idx < 8; idx++)
  2702. if (xscale->static_high_vectors_set & (1 << idx))
  2703. command_print(CMD_CTX,
  2704. "high %d: 0x%" PRIx32,
  2705. idx,
  2706. xscale->static_high_vectors[idx]);
  2707. return ERROR_OK;
  2708. }
  2709. if (CMD_ARGC != 3)
  2710. err = 1;
  2711. else {
  2712. int idx;
  2713. COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
  2714. uint32_t vec;
  2715. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
  2716. if (idx < 1 || idx >= 8)
  2717. err = 1;
  2718. if (!err && strcmp(CMD_ARGV[0], "low") == 0) {
  2719. xscale->static_low_vectors_set |= (1<<idx);
  2720. xscale->static_low_vectors[idx] = vec;
  2721. } else if (!err && (strcmp(CMD_ARGV[0], "high") == 0)) {
  2722. xscale->static_high_vectors_set |= (1<<idx);
  2723. xscale->static_high_vectors[idx] = vec;
  2724. } else
  2725. err = 1;
  2726. }
  2727. if (err)
  2728. return ERROR_COMMAND_SYNTAX_ERROR;
  2729. return ERROR_OK;
  2730. }
  2731. COMMAND_HANDLER(xscale_handle_trace_buffer_command)
  2732. {
  2733. struct target *target = get_current_target(CMD_CTX);
  2734. struct xscale_common *xscale = target_to_xscale(target);
  2735. uint32_t dcsr_value;
  2736. int retval;
  2737. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2738. if (retval != ERROR_OK)
  2739. return retval;
  2740. if (target->state != TARGET_HALTED) {
  2741. command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
  2742. return ERROR_OK;
  2743. }
  2744. if (CMD_ARGC >= 1) {
  2745. if (strcmp("enable", CMD_ARGV[0]) == 0)
  2746. xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
  2747. else if (strcmp("disable", CMD_ARGV[0]) == 0)
  2748. xscale->trace.mode = XSCALE_TRACE_DISABLED;
  2749. else
  2750. return ERROR_COMMAND_SYNTAX_ERROR;
  2751. }
  2752. if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED) {
  2753. if (strcmp("fill", CMD_ARGV[1]) == 0) {
  2754. int buffcount = 1; /* default */
  2755. if (CMD_ARGC >= 3)
  2756. COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
  2757. if (buffcount < 1) { /* invalid */
  2758. command_print(CMD_CTX, "fill buffer count must be > 0");
  2759. xscale->trace.mode = XSCALE_TRACE_DISABLED;
  2760. return ERROR_COMMAND_SYNTAX_ERROR;
  2761. }
  2762. xscale->trace.buffer_fill = buffcount;
  2763. xscale->trace.mode = XSCALE_TRACE_FILL;
  2764. } else if (strcmp("wrap", CMD_ARGV[1]) == 0)
  2765. xscale->trace.mode = XSCALE_TRACE_WRAP;
  2766. else {
  2767. xscale->trace.mode = XSCALE_TRACE_DISABLED;
  2768. return ERROR_COMMAND_SYNTAX_ERROR;
  2769. }
  2770. }
  2771. if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
  2772. char fill_string[24];
  2773. snprintf(fill_string, sizeof(fill_string), "fill %d", xscale->trace.buffer_fill);
  2774. command_print(CMD_CTX, "trace buffer enabled (%s)",
  2775. (xscale->trace.mode == XSCALE_TRACE_FILL)
  2776. ? fill_string : "wrap");
  2777. } else
  2778. command_print(CMD_CTX, "trace buffer disabled");
  2779. dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
  2780. if (xscale->trace.mode == XSCALE_TRACE_FILL)
  2781. xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
  2782. else
  2783. xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
  2784. return ERROR_OK;
  2785. }
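  /* Usage examples, illustrative only:
   *
   *	xscale trace_buffer enable fill 2	(halt after two buffers fill)
   *	xscale trace_buffer enable wrap		(wrap until debug entry)
   *	xscale trace_buffer disable
   */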
  2786. COMMAND_HANDLER(xscale_handle_trace_image_command)
  2787. {
  2788. struct target *target = get_current_target(CMD_CTX);
  2789. struct xscale_common *xscale = target_to_xscale(target);
  2790. int retval;
  2791. if (CMD_ARGC < 1)
  2792. return ERROR_COMMAND_SYNTAX_ERROR;
  2793. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2794. if (retval != ERROR_OK)
  2795. return retval;
  2796. if (xscale->trace.image) {
  2797. image_close(xscale->trace.image);
  2798. free(xscale->trace.image);
  2799. command_print(CMD_CTX, "previously loaded image found and closed");
  2800. }
  2801. xscale->trace.image = malloc(sizeof(struct image));
  2802. xscale->trace.image->base_address_set = 0;
  2803. xscale->trace.image->start_address_set = 0;
  2804. /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
  2805. if (CMD_ARGC >= 2) {
  2806. xscale->trace.image->base_address_set = 1;
  2807. COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
  2808. } else
  2809. xscale->trace.image->base_address_set = 0;
  2810. if (image_open(xscale->trace.image, CMD_ARGV[0],
  2811. (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK) {
  2812. free(xscale->trace.image);
  2813. xscale->trace.image = NULL;
  2814. return ERROR_OK;
  2815. }
  2816. return ERROR_OK;
  2817. }
COMMAND_HANDLER(xscale_handle_dump_trace_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	struct xscale_trace_data *trace_data;
	struct fileio *file;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC < 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	trace_data = xscale->trace.data;

	if (!trace_data) {
		command_print(CMD_CTX, "no trace data collected");
		return ERROR_OK;
	}

	if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
		return ERROR_OK;

	while (trace_data) {
		int i;

		fileio_write_u32(file, trace_data->chkpt0);
		fileio_write_u32(file, trace_data->chkpt1);
		fileio_write_u32(file, trace_data->last_instruction);
		fileio_write_u32(file, trace_data->depth);

		for (i = 0; i < trace_data->depth; i++)
			fileio_write_u32(file, trace_data->entries[i].data |
				((trace_data->entries[i].type & 0xffff) << 16));

		trace_data = trace_data->next;
	}

	fileio_close(file);

	return ERROR_OK;
}
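/*
 * Layout of the dump file written above, one block per collected
 * xscale_trace_data record, each field a u32 as emitted by
 * fileio_write_u32():
 *   chkpt0, chkpt1, last_instruction, depth,
 *   then 'depth' packed entries:
 *     bits [31:16] = entry type, bits [15:0] = entry data
 * Any external decoder would need to read this same layout back.
 */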
COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	xscale_analyze_trace(target, CMD_CTX);

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_cp15)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	uint32_t reg_no = 0;
	struct reg *reg = NULL;
	if (CMD_ARGC > 0) {
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);

		/* translate from XScale CP15 register number to OpenOCD register enum */
		switch (reg_no) {
			case 0:
				reg_no = XSCALE_MAINID;
				break;
			case 1:
				reg_no = XSCALE_CTRL;
				break;
			case 2:
				reg_no = XSCALE_TTB;
				break;
			case 3:
				reg_no = XSCALE_DAC;
				break;
			case 5:
				reg_no = XSCALE_FSR;
				break;
			case 6:
				reg_no = XSCALE_FAR;
				break;
			case 13:
				reg_no = XSCALE_PID;
				break;
			case 15:
				reg_no = XSCALE_CPACCESS;
				break;
			default:
				command_print(CMD_CTX, "invalid register number");
				return ERROR_COMMAND_SYNTAX_ERROR;
		}
		reg = &xscale->reg_cache->reg_list[reg_no];
	}

	if (CMD_ARGC == 1) {
		uint32_t value;

		/* read the selected CP15 register */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size),
			value);
	} else if (CMD_ARGC == 2) {
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);

		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);
	} else
		return ERROR_COMMAND_SYNTAX_ERROR;

	return ERROR_OK;
}
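/*
 * Illustrative use of the handler above (sketch; the write value is an
 * arbitrary example, not from the source):
 *   xscale cp15 1              -- read CP15 register 1 (control register)
 *   xscale cp15 2 0xa0004000   -- write CP15 register 2 (translation table base)
 * Only the CP15 register numbers handled in the switch above are accepted.
 */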
static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display mask of vectors "
			"that should trigger debug entry",
		.usage = "['all'|'none'|'fiq'|'irq'|'dabt'|'pabt'|'swi'|'undef'|'reset']",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
		.usage = "",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};
static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "<target> <address>",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.usage = "",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
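/*
 * Net command tree produced by the registrations above (derived from the
 * tables, listed here only for orientation): every subcommand lives under
 * "xscale", e.g.
 *   xscale debug_handler, xscale cache_clean_address, xscale cache_info,
 *   xscale mmu, xscale icache, xscale dcache, xscale vector_catch,
 *   xscale vector_table, xscale trace_buffer, xscale dump_trace,
 *   xscale analyze_trace, xscale trace_image, xscale cp15
 * with the generic "arm" command group chained in alongside.
 */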
struct target_type xscale_target = {
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu,
};