  1. /***************************************************************************
  2. * Copyright (C) 2006, 2007 by Dominic Rath *
  3. * Dominic.Rath@gmx.de *
  4. * *
  5. * Copyright (C) 2007,2008 Øyvind Harboe *
  6. * oyvind.harboe@zylin.com *
  7. * *
  8. * Copyright (C) 2009 Michael Schwingen *
  9. * michael@schwingen.org *
  10. * *
  11. * This program is free software; you can redistribute it and/or modify *
  12. * it under the terms of the GNU General Public License as published by *
  13. * the Free Software Foundation; either version 2 of the License, or *
  14. * (at your option) any later version. *
  15. * *
  16. * This program is distributed in the hope that it will be useful, *
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  19. * GNU General Public License for more details. *
  20. * *
  21. * You should have received a copy of the GNU General Public License *
  22. * along with this program; if not, write to the *
  23. * Free Software Foundation, Inc., *
  24. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
  25. ***************************************************************************/
  26. #ifdef HAVE_CONFIG_H
  27. #include "config.h"
  28. #endif
  29. #include "breakpoints.h"
  30. #include "xscale.h"
  31. #include "target_type.h"
  32. #include "arm_jtag.h"
  33. #include "arm_simulator.h"
  34. #include "arm_disassembler.h"
  35. #include <helper/time_support.h>
  36. #include "register.h"
  37. #include "image.h"
  38. #include "arm_opcodes.h"
  39. #include "armv4_5.h"
  40. /*
  41. * Important XScale documents available as of October 2009 include:
  42. *
  43. * Intel XScale® Core Developer’s Manual, January 2004
  44. * Order Number: 273473-002
  45. * This has a chapter detailing debug facilities, and punts some
  46. * details to chip-specific microarchitecture documents.
  47. *
  48. * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
  49. * Document Number: 273539-005
  50. * Less detailed than the developer's manual, but summarizes those
  51. * missing details (for most XScales) and gives LOTS of notes about
  52. * debugger/handler interaction issues. Presents a simpler reset
  53. * and load-handler sequence than the arch doc. (Note, OpenOCD
  54. * doesn't currently support "Hot-Debug" as defined there.)
  55. *
  56. * Chip-specific microarchitecture documents may also be useful.
  57. */
  58. /* forward declarations */
  59. static int xscale_resume(struct target *, int current,
  60. uint32_t address, int handle_breakpoints, int debug_execution);
  61. static int xscale_debug_entry(struct target *);
  62. static int xscale_restore_banked(struct target *);
  63. static int xscale_get_reg(struct reg *reg);
  64. static int xscale_set_reg(struct reg *reg, uint8_t *buf);
  65. static int xscale_set_breakpoint(struct target *, struct breakpoint *);
  66. static int xscale_set_watchpoint(struct target *, struct watchpoint *);
  67. static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
  68. static int xscale_read_trace(struct target *);
  69. /* This XScale "debug handler" is loaded into the processor's
  70. * mini-ICache, which is 2K of code writable only via JTAG.
  71. *
  72. * FIXME the OpenOCD "bin2char" utility currently doesn't handle
  73. * binary files cleanly. It's string oriented, and terminates them
  74. * with a NUL character. Better would be to generate the constants
  75. * and let other code decide names, scoping, and other housekeeping.
  76. */
  77. static /* unsigned const char xscale_debug_handler[] = ... */
  78. #include "xscale_debug.h"
  79. static char *const xscale_reg_list[] =
  80. {
  81. "XSCALE_MAINID", /* 0 */
  82. "XSCALE_CACHETYPE",
  83. "XSCALE_CTRL",
  84. "XSCALE_AUXCTRL",
  85. "XSCALE_TTB",
  86. "XSCALE_DAC",
  87. "XSCALE_FSR",
  88. "XSCALE_FAR",
  89. "XSCALE_PID",
  90. "XSCALE_CPACCESS",
  91. "XSCALE_IBCR0", /* 10 */
  92. "XSCALE_IBCR1",
  93. "XSCALE_DBR0",
  94. "XSCALE_DBR1",
  95. "XSCALE_DBCON",
  96. "XSCALE_TBREG",
  97. "XSCALE_CHKPT0",
  98. "XSCALE_CHKPT1",
  99. "XSCALE_DCSR",
  100. "XSCALE_TX",
  101. "XSCALE_RX", /* 20 */
  102. "XSCALE_TXRXCTRL",
  103. };
  104. static const struct xscale_reg xscale_reg_arch_info[] =
  105. {
  106. {XSCALE_MAINID, NULL},
  107. {XSCALE_CACHETYPE, NULL},
  108. {XSCALE_CTRL, NULL},
  109. {XSCALE_AUXCTRL, NULL},
  110. {XSCALE_TTB, NULL},
  111. {XSCALE_DAC, NULL},
  112. {XSCALE_FSR, NULL},
  113. {XSCALE_FAR, NULL},
  114. {XSCALE_PID, NULL},
  115. {XSCALE_CPACCESS, NULL},
  116. {XSCALE_IBCR0, NULL},
  117. {XSCALE_IBCR1, NULL},
  118. {XSCALE_DBR0, NULL},
  119. {XSCALE_DBR1, NULL},
  120. {XSCALE_DBCON, NULL},
  121. {XSCALE_TBREG, NULL},
  122. {XSCALE_CHKPT0, NULL},
  123. {XSCALE_CHKPT1, NULL},
  124. {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
  125. {-1, NULL}, /* TX accessed via JTAG */
  126. {-1, NULL}, /* RX accessed via JTAG */
  127. {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
  128. };
  129. /* convenience wrapper to access XScale specific registers */
  130. static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
  131. {
  132. uint8_t buf[4];
  133. buf_set_u32(buf, 0, 32, value);
  134. return xscale_set_reg(reg, buf);
  135. }
  136. static const char xscale_not[] = "target is not an XScale";
  137. static int xscale_verify_pointer(struct command_context *cmd_ctx,
  138. struct xscale_common *xscale)
  139. {
  140. if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
  141. command_print(cmd_ctx, xscale_not);
  142. return ERROR_TARGET_INVALID;
  143. }
  144. return ERROR_OK;
  145. }
  146. static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
  147. {
  148. if (tap == NULL)
  149. return ERROR_FAIL;
  150. if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
  151. {
  152. struct scan_field field;
  153. uint8_t scratch[4];
  154. memset(&field, 0, sizeof field);
  155. field.tap = tap;
  156. field.num_bits = tap->ir_length;
  157. field.out_value = scratch;
  158. buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
  159. jtag_add_ir_scan(1, &field, jtag_get_end_state());
  160. }
  161. return ERROR_OK;
  162. }
  163. static int xscale_read_dcsr(struct target *target)
  164. {
  165. struct xscale_common *xscale = target_to_xscale(target);
  166. int retval;
  167. struct scan_field fields[3];
  168. uint8_t field0 = 0x0;
  169. uint8_t field0_check_value = 0x2;
  170. uint8_t field0_check_mask = 0x7;
  171. uint8_t field2 = 0x0;
  172. uint8_t field2_check_value = 0x0;
  173. uint8_t field2_check_mask = 0x1;
  174. jtag_set_end_state(TAP_DRPAUSE);
  175. xscale_jtag_set_instr(target->tap,
  176. XSCALE_SELDCSR << xscale->xscale_variant);
  177. buf_set_u32(&field0, 1, 1, xscale->hold_rst);
  178. buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
  179. memset(&fields, 0, sizeof fields);
  180. fields[0].tap = target->tap;
  181. fields[0].num_bits = 3;
  182. fields[0].out_value = &field0;
  183. uint8_t tmp;
  184. fields[0].in_value = &tmp;
  185. fields[1].tap = target->tap;
  186. fields[1].num_bits = 32;
  187. fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
  188. fields[2].tap = target->tap;
  189. fields[2].num_bits = 1;
  190. fields[2].out_value = &field2;
  191. uint8_t tmp2;
  192. fields[2].in_value = &tmp2;
  193. jtag_add_dr_scan(3, fields, jtag_get_end_state());
  194. jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
  195. jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
  196. if ((retval = jtag_execute_queue()) != ERROR_OK)
  197. {
  198. LOG_ERROR("JTAG error while reading DCSR");
  199. return retval;
  200. }
  201. xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
  202. xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
  203. /* write the register with the value we just read
  204. * on this second pass, only the first bit of field0 is guaranteed to be 0)
  205. */
  206. field0_check_mask = 0x1;
  207. fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
  208. fields[1].in_value = NULL;
  209. jtag_set_end_state(TAP_IDLE);
  210. jtag_add_dr_scan(3, fields, jtag_get_end_state());
  211. /* DANGER!!! this must be here. It will make sure that the arguments
  212. * to jtag_set_check_value() do not go out of scope! */
  213. return jtag_execute_queue();
  214. }
  215. static void xscale_getbuf(jtag_callback_data_t arg)
  216. {
  217. uint8_t *in = (uint8_t *)arg;
  218. *((uint32_t *)in) = buf_get_u32(in, 0, 32);
  219. }
  220. static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
  221. {
  222. if (num_words == 0)
  223. return ERROR_INVALID_ARGUMENTS;
  224. struct xscale_common *xscale = target_to_xscale(target);
  225. int retval = ERROR_OK;
  226. tap_state_t path[3];
  227. struct scan_field fields[3];
  228. uint8_t *field0 = malloc(num_words * 1);
  229. uint8_t field0_check_value = 0x2;
  230. uint8_t field0_check_mask = 0x6;
  231. uint32_t *field1 = malloc(num_words * 4);
  232. uint8_t field2_check_value = 0x0;
  233. uint8_t field2_check_mask = 0x1;
  234. int words_done = 0;
  235. int words_scheduled = 0;
  236. int i;
  237. path[0] = TAP_DRSELECT;
  238. path[1] = TAP_DRCAPTURE;
  239. path[2] = TAP_DRSHIFT;
  240. memset(&fields, 0, sizeof fields);
  241. fields[0].tap = target->tap;
  242. fields[0].num_bits = 3;
  243. fields[0].check_value = &field0_check_value;
  244. fields[0].check_mask = &field0_check_mask;
  245. fields[1].tap = target->tap;
  246. fields[1].num_bits = 32;
  247. fields[2].tap = target->tap;
  248. fields[2].num_bits = 1;
  249. fields[2].check_value = &field2_check_value;
  250. fields[2].check_mask = &field2_check_mask;
  251. jtag_set_end_state(TAP_IDLE);
  252. xscale_jtag_set_instr(target->tap,
  253. XSCALE_DBGTX << xscale->xscale_variant);
  254. jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
  255. /* repeat until all words have been collected */
  256. int attempts = 0;
  257. while (words_done < num_words)
  258. {
  259. /* schedule reads */
  260. words_scheduled = 0;
  261. for (i = words_done; i < num_words; i++)
  262. {
  263. fields[0].in_value = &field0[i];
  264. jtag_add_pathmove(3, path);
  265. fields[1].in_value = (uint8_t *)(field1 + i);
  266. jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
  267. jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
  268. words_scheduled++;
  269. }
  270. if ((retval = jtag_execute_queue()) != ERROR_OK)
  271. {
  272. LOG_ERROR("JTAG error while receiving data from debug handler");
  273. break;
  274. }
  275. /* examine results */
  276. for (i = words_done; i < num_words; i++)
  277. {
  278. if (!(field0[i] & 1))
  279. {
  280. /* move backwards if necessary */
  281. int j;
  282. for (j = i; j < num_words - 1; j++)
  283. {
  284. field0[j] = field0[j + 1];
  285. field1[j] = field1[j + 1];
  286. }
  287. words_scheduled--;
  288. }
  289. }
  290. if (words_scheduled == 0)
  291. {
  292. if (attempts++ == 1000)
  293. {
  294. LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
  295. retval = ERROR_TARGET_TIMEOUT;
  296. break;
  297. }
  298. }
  299. words_done += words_scheduled;
  300. }
  301. for (i = 0; i < num_words; i++)
  302. *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
  303. free(field0); free(field1);
  304. return retval;
  305. }
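/* Note on the receive loop above (summary added for clarity, not in the
 * original sources): each word's 3-bit status field has bit 0 set only
 * when the debug handler actually had data ready; words that came back
 * not-ready are compacted out of the result arrays and retried, and the
 * routine gives up after 1000 passes in which nothing arrived.
 */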
  306. static int xscale_read_tx(struct target *target, int consume)
  307. {
  308. struct xscale_common *xscale = target_to_xscale(target);
  309. tap_state_t path[3];
  310. tap_state_t noconsume_path[6];
  311. int retval;
  312. struct timeval timeout, now;
  313. struct scan_field fields[3];
  314. uint8_t field0_in = 0x0;
  315. uint8_t field0_check_value = 0x2;
  316. uint8_t field0_check_mask = 0x6;
  317. uint8_t field2_check_value = 0x0;
  318. uint8_t field2_check_mask = 0x1;
  319. jtag_set_end_state(TAP_IDLE);
  320. xscale_jtag_set_instr(target->tap,
  321. XSCALE_DBGTX << xscale->xscale_variant);
  322. path[0] = TAP_DRSELECT;
  323. path[1] = TAP_DRCAPTURE;
  324. path[2] = TAP_DRSHIFT;
  325. noconsume_path[0] = TAP_DRSELECT;
  326. noconsume_path[1] = TAP_DRCAPTURE;
  327. noconsume_path[2] = TAP_DREXIT1;
  328. noconsume_path[3] = TAP_DRPAUSE;
  329. noconsume_path[4] = TAP_DREXIT2;
  330. noconsume_path[5] = TAP_DRSHIFT;
  331. memset(&fields, 0, sizeof fields);
  332. fields[0].tap = target->tap;
  333. fields[0].num_bits = 3;
  334. fields[0].in_value = &field0_in;
  335. fields[1].tap = target->tap;
  336. fields[1].num_bits = 32;
  337. fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
  338. fields[2].tap = target->tap;
  339. fields[2].num_bits = 1;
  340. uint8_t tmp;
  341. fields[2].in_value = &tmp;
  342. gettimeofday(&timeout, NULL);
  343. timeval_add_time(&timeout, 1, 0);
  344. for (;;)
  345. {
  346. /* if we want to consume the register content (i.e. clear TX_READY),
  347. * we have to go straight from Capture-DR to Shift-DR
  348. * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
  349. */
  350. if (consume)
  351. jtag_add_pathmove(3, path);
  352. else
  353. {
  354. jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
  355. }
  356. jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
  357. jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
  358. jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
  359. if ((retval = jtag_execute_queue()) != ERROR_OK)
  360. {
  361. LOG_ERROR("JTAG error while reading TX");
  362. return ERROR_TARGET_TIMEOUT;
  363. }
  364. gettimeofday(&now, NULL);
  365. if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
  366. {
  367. LOG_ERROR("time out reading TX register");
  368. return ERROR_TARGET_TIMEOUT;
  369. }
  370. if (!((!(field0_in & 1)) && consume))
  371. {
  372. goto done;
  373. }
  374. if (debug_level >= 3)
  375. {
  376. LOG_DEBUG("waiting 100ms");
  377. alive_sleep(100); /* avoid flooding the logs */
  378. } else
  379. {
  380. keep_alive();
  381. }
  382. }
  383. done:
  384. if (!(field0_in & 1))
  385. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  386. return ERROR_OK;
  387. }
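/* TAP walk used by xscale_read_tx() above (sketch):
 *   consume:     Select-DR -> Capture-DR -> Shift-DR
 *   no-consume:  Select-DR -> Capture-DR -> Exit1-DR -> Pause-DR
 *                -> Exit2-DR -> Shift-DR
 * The detour through Pause-DR shifts the captured TX value out without
 * clearing TX_READY, so a later read can still consume it.
 */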
  388. static int xscale_write_rx(struct target *target)
  389. {
  390. struct xscale_common *xscale = target_to_xscale(target);
  391. int retval;
  392. struct timeval timeout, now;
  393. struct scan_field fields[3];
  394. uint8_t field0_out = 0x0;
  395. uint8_t field0_in = 0x0;
  396. uint8_t field0_check_value = 0x2;
  397. uint8_t field0_check_mask = 0x6;
  398. uint8_t field2 = 0x0;
  399. uint8_t field2_check_value = 0x0;
  400. uint8_t field2_check_mask = 0x1;
  401. jtag_set_end_state(TAP_IDLE);
  402. xscale_jtag_set_instr(target->tap,
  403. XSCALE_DBGRX << xscale->xscale_variant);
  404. memset(&fields, 0, sizeof fields);
  405. fields[0].tap = target->tap;
  406. fields[0].num_bits = 3;
  407. fields[0].out_value = &field0_out;
  408. fields[0].in_value = &field0_in;
  409. fields[1].tap = target->tap;
  410. fields[1].num_bits = 32;
  411. fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
  412. fields[2].tap = target->tap;
  413. fields[2].num_bits = 1;
  414. fields[2].out_value = &field2;
  415. uint8_t tmp;
  416. fields[2].in_value = &tmp;
  417. gettimeofday(&timeout, NULL);
  418. timeval_add_time(&timeout, 1, 0);
  419. /* poll until rx_read is low */
  420. LOG_DEBUG("polling RX");
  421. for (;;)
  422. {
  423. jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
  424. jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
  425. jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
  426. if ((retval = jtag_execute_queue()) != ERROR_OK)
  427. {
  428. LOG_ERROR("JTAG error while writing RX");
  429. return retval;
  430. }
  431. gettimeofday(&now, NULL);
  432. if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
  433. {
  434. LOG_ERROR("time out writing RX register");
  435. return ERROR_TARGET_TIMEOUT;
  436. }
  437. if (!(field0_in & 1))
  438. goto done;
  439. if (debug_level >= 3)
  440. {
  441. LOG_DEBUG("waiting 100ms");
  442. alive_sleep(100); /* avoid flooding the logs */
  443. } else
  444. {
  445. keep_alive();
  446. }
  447. }
  448. done:
  449. /* set rx_valid */
  450. field2 = 0x1;
  451. jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
  452. if ((retval = jtag_execute_queue()) != ERROR_OK)
  453. {
  454. LOG_ERROR("JTAG error while writing RX");
  455. return retval;
  456. }
  457. return ERROR_OK;
  458. }
  459. /* send count elements of <size> bytes each to the debug handler */
  460. static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
  461. {
  462. struct xscale_common *xscale = target_to_xscale(target);
  463. uint32_t t[3];
  464. int bits[3];
  465. int retval;
  466. int done_count = 0;
  467. jtag_set_end_state(TAP_IDLE);
  468. xscale_jtag_set_instr(target->tap,
  469. XSCALE_DBGRX << xscale->xscale_variant);
  470. bits[0] = 3;
  471. t[0] = 0;
  472. bits[1] = 32;
  473. t[2] = 1;
  474. bits[2] = 1;
  475. int endianness = target->endianness;
  476. while (done_count++ < count)
  477. {
  478. switch (size)
  479. {
  480. case 4:
  481. if (endianness == TARGET_LITTLE_ENDIAN)
  482. {
  483. t[1] = le_to_h_u32(buffer);
  484. } else
  485. {
  486. t[1] = be_to_h_u32(buffer);
  487. }
  488. break;
  489. case 2:
  490. if (endianness == TARGET_LITTLE_ENDIAN)
  491. {
  492. t[1] = le_to_h_u16(buffer);
  493. } else
  494. {
  495. t[1] = be_to_h_u16(buffer);
  496. }
  497. break;
  498. case 1:
  499. t[1] = buffer[0];
  500. break;
  501. default:
  502. LOG_ERROR("BUG: size neither 4, 2 nor 1");
  503. return ERROR_INVALID_ARGUMENTS;
  504. }
  505. jtag_add_dr_out(target->tap,
  506. 3,
  507. bits,
  508. t,
  509. jtag_set_end_state(TAP_IDLE));
  510. buffer += size;
  511. }
  512. if ((retval = jtag_execute_queue()) != ERROR_OK)
  513. {
  514. LOG_ERROR("JTAG error while sending data to debug handler");
  515. return retval;
  516. }
  517. return ERROR_OK;
  518. }
  519. static int xscale_send_u32(struct target *target, uint32_t value)
  520. {
  521. struct xscale_common *xscale = target_to_xscale(target);
  522. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
  523. return xscale_write_rx(target);
  524. }
  525. static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
  526. {
  527. struct xscale_common *xscale = target_to_xscale(target);
  528. int retval;
  529. struct scan_field fields[3];
  530. uint8_t field0 = 0x0;
  531. uint8_t field0_check_value = 0x2;
  532. uint8_t field0_check_mask = 0x7;
  533. uint8_t field2 = 0x0;
  534. uint8_t field2_check_value = 0x0;
  535. uint8_t field2_check_mask = 0x1;
  536. if (hold_rst != -1)
  537. xscale->hold_rst = hold_rst;
  538. if (ext_dbg_brk != -1)
  539. xscale->external_debug_break = ext_dbg_brk;
  540. jtag_set_end_state(TAP_IDLE);
  541. xscale_jtag_set_instr(target->tap,
  542. XSCALE_SELDCSR << xscale->xscale_variant);
  543. buf_set_u32(&field0, 1, 1, xscale->hold_rst);
  544. buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
  545. memset(&fields, 0, sizeof fields);
  546. fields[0].tap = target->tap;
  547. fields[0].num_bits = 3;
  548. fields[0].out_value = &field0;
  549. uint8_t tmp;
  550. fields[0].in_value = &tmp;
  551. fields[1].tap = target->tap;
  552. fields[1].num_bits = 32;
  553. fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
  554. fields[2].tap = target->tap;
  555. fields[2].num_bits = 1;
  556. fields[2].out_value = &field2;
  557. uint8_t tmp2;
  558. fields[2].in_value = &tmp2;
  559. jtag_add_dr_scan(3, fields, jtag_get_end_state());
  560. jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
  561. jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
  562. if ((retval = jtag_execute_queue()) != ERROR_OK)
  563. {
  564. LOG_ERROR("JTAG error while writing DCSR");
  565. return retval;
  566. }
  567. xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
  568. xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
  569. return ERROR_OK;
  570. }
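/* For reference, the 3-bit field0 written by the two DCSR routines above
 * encodes bit 1 = hold_rst and bit 2 = external_debug_break (bit 0 stays
 * zero). A minimal illustrative helper, equivalent to the buf_set_u32()
 * calls -- a sketch, not used by the driver:
 */
static inline uint8_t xscale_dcsr_field0(int hold_rst, int ext_dbg_brk)
{
	uint8_t field0 = 0;
	field0 |= (hold_rst ? 1 : 0) << 1;	/* bit 1: hold reset */
	field0 |= (ext_dbg_brk ? 1 : 0) << 2;	/* bit 2: ext. debug break */
	return field0;
}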
  571. /* parity of the number of set bits: 0 if even, 1 if odd; for 32-bit words */
  572. static unsigned int parity(unsigned int v)
  573. {
  574. // unsigned int ov = v;
  575. v ^= v >> 16;
  576. v ^= v >> 8;
  577. v ^= v >> 4;
  578. v &= 0xf;
  579. // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
  580. return (0x6996 >> v) & 1;
  581. }
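/* Sanity sketch for the fold above (illustrative, never called):
 * 0x6996 == 0b0110100110010110 acts as a 16-entry lookup table whose
 * bit n is the parity of the nibble n, so XOR-folding 32 bits down to
 * 4 and indexing into it yields the word's parity.
 */
static void parity_selftest(void)
{
	unsigned int v;
	for (v = 0; v < 0x10000; v++) {
		unsigned int bit, naive = 0;
		for (bit = 0; bit < 32; bit++)
			naive ^= (v >> bit) & 1;
		if (parity(v) != naive)
			LOG_ERROR("parity mismatch for 0x%x", v);
	}
}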
  582. static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
  583. {
  584. struct xscale_common *xscale = target_to_xscale(target);
  585. uint8_t packet[4];
  586. uint8_t cmd;
  587. int word;
  588. struct scan_field fields[2];
  589. LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
  590. /* LDIC into IR */
  591. jtag_set_end_state(TAP_IDLE);
  592. xscale_jtag_set_instr(target->tap,
  593. XSCALE_LDIC << xscale->xscale_variant);
  594. /* CMD is b011 to load a cacheline into the Mini ICache.
  595. * Loading into the main ICache is deprecated, and unused.
  596. * It's followed by three zero bits, and 27 address bits.
  597. */
  598. buf_set_u32(&cmd, 0, 6, 0x3);
  599. /* virtual address of desired cache line */
  600. buf_set_u32(packet, 0, 27, va >> 5);
  601. memset(&fields, 0, sizeof fields);
  602. fields[0].tap = target->tap;
  603. fields[0].num_bits = 6;
  604. fields[0].out_value = &cmd;
  605. fields[1].tap = target->tap;
  606. fields[1].num_bits = 27;
  607. fields[1].out_value = packet;
  608. jtag_add_dr_scan(2, fields, jtag_get_end_state());
  609. /* rest of packet is a cacheline: 8 instructions, with parity */
  610. fields[0].num_bits = 32;
  611. fields[0].out_value = packet;
  612. fields[1].num_bits = 1;
  613. fields[1].out_value = &cmd;
  614. for (word = 0; word < 8; word++)
  615. {
  616. buf_set_u32(packet, 0, 32, buffer[word]);
  617. uint32_t value;
  618. memcpy(&value, packet, sizeof(uint32_t));
  619. cmd = parity(value);
  620. jtag_add_dr_scan(2, fields, jtag_get_end_state());
  621. }
  622. return jtag_execute_queue();
  623. }
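/* Worked example for the LDIC packet layout above (illustrative only,
 * never called): loading the mini-ICache line that holds va = 0xffff0020
 * sends cmd = b011 in the 6-bit field and va >> 5 = 0x07fff801 in the
 * 27-bit address field, then eight 32-bit words each tailed by its
 * parity() bit.
 */
static void xscale_load_ic_example(void)
{
	uint32_t va = 0xffff0020;
	LOG_DEBUG("LDIC cmd=0x3 line=0x%7.7" PRIx32, va >> 5);
}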
  624. static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
  625. {
  626. struct xscale_common *xscale = target_to_xscale(target);
  627. uint8_t packet[4];
  628. uint8_t cmd;
  629. struct scan_field fields[2];
  630. jtag_set_end_state(TAP_IDLE);
  631. xscale_jtag_set_instr(target->tap,
  632. XSCALE_LDIC << xscale->xscale_variant);
  633. /* CMD for invalidate IC line b000, bits [6:4] b000 */
  634. buf_set_u32(&cmd, 0, 6, 0x0);
  635. /* virtual address of desired cache line */
  636. buf_set_u32(packet, 0, 27, va >> 5);
  637. memset(&fields, 0, sizeof fields);
  638. fields[0].tap = target->tap;
  639. fields[0].num_bits = 6;
  640. fields[0].out_value = &cmd;
  641. fields[1].tap = target->tap;
  642. fields[1].num_bits = 27;
  643. fields[1].out_value = packet;
  644. jtag_add_dr_scan(2, fields, jtag_get_end_state());
  645. return ERROR_OK;
  646. }
  647. static int xscale_update_vectors(struct target *target)
  648. {
  649. struct xscale_common *xscale = target_to_xscale(target);
  650. int i;
  651. int retval;
  652. uint32_t low_reset_branch, high_reset_branch;
  653. for (i = 1; i < 8; i++)
  654. {
  655. /* if there's a static vector specified for this exception, override */
  656. if (xscale->static_high_vectors_set & (1 << i))
  657. {
  658. xscale->high_vectors[i] = xscale->static_high_vectors[i];
  659. }
  660. else
  661. {
  662. retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
  663. if (retval == ERROR_TARGET_TIMEOUT)
  664. return retval;
  665. if (retval != ERROR_OK)
  666. {
  667. /* Some of these reads will fail as part of normal execution */
  668. xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  669. }
  670. }
  671. }
  672. for (i = 1; i < 8; i++)
  673. {
  674. if (xscale->static_low_vectors_set & (1 << i))
  675. {
  676. xscale->low_vectors[i] = xscale->static_low_vectors[i];
  677. }
  678. else
  679. {
  680. retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
  681. if (retval == ERROR_TARGET_TIMEOUT)
  682. return retval;
  683. if (retval != ERROR_OK)
  684. {
  685. /* Some of these reads will fail as part of normal execution */
  686. xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  687. }
  688. }
  689. }
  690. /* calculate branches to debug handler */
  691. low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
  692. high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
  693. xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
  694. xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
  695. /* invalidate and load exception vectors in mini i-cache */
  696. xscale_invalidate_ic_line(target, 0x0);
  697. xscale_invalidate_ic_line(target, 0xffff0000);
  698. xscale_load_ic(target, 0x0, xscale->low_vectors);
  699. xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
  700. return ERROR_OK;
  701. }
  702. static int xscale_arch_state(struct target *target)
  703. {
  704. struct xscale_common *xscale = target_to_xscale(target);
  705. struct arm *armv4_5 = &xscale->armv4_5_common;
  706. static const char *state[] =
  707. {
  708. "disabled", "enabled"
  709. };
  710. static const char *arch_dbg_reason[] =
  711. {
  712. "", "\n(processor reset)", "\n(trace buffer full)"
  713. };
  714. if (armv4_5->common_magic != ARM_COMMON_MAGIC)
  715. {
  716. LOG_ERROR("BUG: called for a non-ARMv4/5 target");
  717. return ERROR_INVALID_ARGUMENTS;
  718. }
  719. arm_arch_state(target);
  720. LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
  721. state[xscale->armv4_5_mmu.mmu_enabled],
  722. state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
  723. state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
  724. arch_dbg_reason[xscale->arch_debug_reason]);
  725. return ERROR_OK;
  726. }
  727. static int xscale_poll(struct target *target)
  728. {
  729. int retval = ERROR_OK;
  730. if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
  731. {
  732. enum target_state previous_state = target->state;
  733. if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
  734. {
  735. /* there's data to read from the tx register, we entered debug state */
  736. target->state = TARGET_HALTED;
  737. /* process debug entry, fetching current mode regs */
  738. retval = xscale_debug_entry(target);
  739. }
  740. else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
  741. {
  742. LOG_USER("error while polling TX register, reset CPU");
  743. /* here we "lie" so GDB won't get stuck and a reset can be performed */
  744. target->state = TARGET_HALTED;
  745. }
  746. /* debug_entry could have overwritten target state (i.e. immediate resume)
  747. * don't signal event handlers in that case
  748. */
  749. if (target->state != TARGET_HALTED)
  750. return ERROR_OK;
  751. /* if target was running, signal that we halted
  752. * otherwise we reentered from debug execution */
  753. if (previous_state == TARGET_RUNNING)
  754. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  755. else
  756. target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
  757. }
  758. return retval;
  759. }
  760. static int xscale_debug_entry(struct target *target)
  761. {
  762. struct xscale_common *xscale = target_to_xscale(target);
  763. struct arm *armv4_5 = &xscale->armv4_5_common;
  764. uint32_t pc;
  765. uint32_t buffer[10];
  766. int i;
  767. int retval;
  768. uint32_t moe;
  769. /* clear external dbg break (will be written on next DCSR read) */
  770. xscale->external_debug_break = 0;
  771. if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
  772. return retval;
  773. /* get r0, pc, r1 to r7 and cpsr */
  774. if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
  775. return retval;
  776. /* move r0 from buffer to register cache */
  777. buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
  778. armv4_5->core_cache->reg_list[0].dirty = 1;
  779. armv4_5->core_cache->reg_list[0].valid = 1;
  780. LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
  781. /* move pc from buffer to register cache */
  782. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
  783. armv4_5->core_cache->reg_list[15].dirty = 1;
  784. armv4_5->core_cache->reg_list[15].valid = 1;
  785. LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
  786. /* move data from buffer to register cache */
  787. for (i = 1; i <= 7; i++)
  788. {
  789. buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
  790. armv4_5->core_cache->reg_list[i].dirty = 1;
  791. armv4_5->core_cache->reg_list[i].valid = 1;
  792. LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
  793. }
  794. arm_set_cpsr(armv4_5, buffer[9]);
  795. LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
  796. if (!is_arm_mode(armv4_5->core_mode))
  797. {
  798. target->state = TARGET_UNKNOWN;
  799. LOG_ERROR("cpsr contains invalid mode value - communication failure");
  800. return ERROR_TARGET_FAILURE;
  801. }
  802. LOG_DEBUG("target entered debug state in %s mode",
  803. arm_mode_name(armv4_5->core_mode));
  804. /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
  805. if (armv4_5->spsr) {
  806. xscale_receive(target, buffer, 8);
  807. buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
  808. armv4_5->spsr->dirty = false;
  809. armv4_5->spsr->valid = true;
  810. }
  811. else
  812. {
  813. /* r8 to r14, but no spsr */
  814. xscale_receive(target, buffer, 7);
  815. }
  816. /* move data from buffer to right banked register in cache */
  817. for (i = 8; i <= 14; i++)
  818. {
  819. struct reg *r = arm_reg_current(armv4_5, i);
  820. buf_set_u32(r->value, 0, 32, buffer[i - 8]);
  821. r->dirty = false;
  822. r->valid = true;
  823. }
  824. /* examine debug reason */
  825. xscale_read_dcsr(target);
  826. moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
  827. /* stored PC (for calculating fixup) */
  828. pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  829. switch (moe)
  830. {
  831. case 0x0: /* Processor reset */
  832. target->debug_reason = DBG_REASON_DBGRQ;
  833. xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
  834. pc -= 4;
  835. break;
  836. case 0x1: /* Instruction breakpoint hit */
  837. target->debug_reason = DBG_REASON_BREAKPOINT;
  838. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  839. pc -= 4;
  840. break;
  841. case 0x2: /* Data breakpoint hit */
  842. target->debug_reason = DBG_REASON_WATCHPOINT;
  843. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  844. pc -= 4;
  845. break;
  846. case 0x3: /* BKPT instruction executed */
  847. target->debug_reason = DBG_REASON_BREAKPOINT;
  848. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  849. pc -= 4;
  850. break;
  851. case 0x4: /* Ext. debug event */
  852. target->debug_reason = DBG_REASON_DBGRQ;
  853. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  854. pc -= 4;
  855. break;
  856. case 0x5: /* Vector trap occurred */
  857. target->debug_reason = DBG_REASON_BREAKPOINT;
  858. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  859. pc -= 4;
  860. break;
  861. case 0x6: /* Trace buffer full break */
  862. target->debug_reason = DBG_REASON_DBGRQ;
  863. xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
  864. pc -= 4;
  865. break;
  866. case 0x7: /* Reserved (may flag Hot-Debug support) */
  867. default:
  868. LOG_ERROR("Method of Entry is 'Reserved'");
  869. exit(-1);
  870. break;
  871. }
  872. /* apply PC fixup */
  873. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
  874. /* on the first debug entry, identify cache type */
  875. if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
  876. {
  877. uint32_t cache_type_reg;
  878. /* read cp15 cache type register */
  879. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
  880. cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
  881. armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
  882. }
  883. /* examine MMU and Cache settings */
  884. /* read cp15 control register */
  885. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  886. xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  887. xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
  888. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
  889. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
  890. /* tracing enabled, read collected trace data */
  891. if (xscale->trace.buffer_enabled)
  892. {
  893. xscale_read_trace(target);
  894. xscale->trace.buffer_fill--;
  895. /* resume if we're still collecting trace data */
  896. if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
  897. && (xscale->trace.buffer_fill > 0))
  898. {
  899. xscale_resume(target, 1, 0x0, 1, 0);
  900. }
  901. else
  902. {
  903. xscale->trace.buffer_enabled = 0;
  904. }
  905. }
  906. return ERROR_OK;
  907. }
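/* Condensed view of the method-of-entry decoding above (DCSR[4:2]),
 * matching the switch cases -- an illustrative table, unused by the code:
 */
static const char *const xscale_moe_names[] = {
	"processor reset",		/* 0x0 */
	"instruction breakpoint",	/* 0x1 */
	"data breakpoint",		/* 0x2 */
	"BKPT instruction",		/* 0x3 */
	"external debug event",		/* 0x4 */
	"vector trap",			/* 0x5 */
	"trace buffer full",		/* 0x6 */
	"reserved (may flag Hot-Debug)",/* 0x7 */
};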
  908. static int xscale_halt(struct target *target)
  909. {
  910. struct xscale_common *xscale = target_to_xscale(target);
  911. LOG_DEBUG("target->state: %s",
  912. target_state_name(target));
  913. if (target->state == TARGET_HALTED)
  914. {
  915. LOG_DEBUG("target was already halted");
  916. return ERROR_OK;
  917. }
  918. else if (target->state == TARGET_UNKNOWN)
  919. {
  920. /* this must not happen for an XScale target */
  921. LOG_ERROR("target was in unknown state when halt was requested");
  922. return ERROR_TARGET_INVALID;
  923. }
  924. else if (target->state == TARGET_RESET)
  925. {
  926. LOG_DEBUG("target->state == TARGET_RESET");
  927. }
  928. else
  929. {
  930. /* assert external dbg break */
  931. xscale->external_debug_break = 1;
  932. xscale_read_dcsr(target);
  933. target->debug_reason = DBG_REASON_DBGRQ;
  934. }
  935. return ERROR_OK;
  936. }
  937. static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
  938. {
  939. struct xscale_common *xscale = target_to_xscale(target);
  940. struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
  941. int retval;
  942. if (xscale->ibcr0_used)
  943. {
  944. struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
  945. if (ibcr0_bp)
  946. {
  947. xscale_unset_breakpoint(target, ibcr0_bp);
  948. }
  949. else
  950. {
  951. LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
  952. exit(-1);
  953. }
  954. }
  955. if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
  956. return retval;
  957. return ERROR_OK;
  958. }
  959. static int xscale_disable_single_step(struct target *target)
  960. {
  961. struct xscale_common *xscale = target_to_xscale(target);
  962. struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
  963. int retval;
  964. if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
  965. return retval;
  966. return ERROR_OK;
  967. }
  968. static void xscale_enable_watchpoints(struct target *target)
  969. {
  970. struct watchpoint *watchpoint = target->watchpoints;
  971. while (watchpoint)
  972. {
  973. if (watchpoint->set == 0)
  974. xscale_set_watchpoint(target, watchpoint);
  975. watchpoint = watchpoint->next;
  976. }
  977. }
  978. static void xscale_enable_breakpoints(struct target *target)
  979. {
  980. struct breakpoint *breakpoint = target->breakpoints;
  981. /* set any pending breakpoints */
  982. while (breakpoint)
  983. {
  984. if (breakpoint->set == 0)
  985. xscale_set_breakpoint(target, breakpoint);
  986. breakpoint = breakpoint->next;
  987. }
  988. }
  989. static int xscale_resume(struct target *target, int current,
  990. uint32_t address, int handle_breakpoints, int debug_execution)
  991. {
  992. struct xscale_common *xscale = target_to_xscale(target);
  993. struct arm *armv4_5 = &xscale->armv4_5_common;
  994. struct breakpoint *breakpoint = target->breakpoints;
  995. uint32_t current_pc;
  996. int retval;
  997. int i;
  998. LOG_DEBUG("-");
  999. if (target->state != TARGET_HALTED)
  1000. {
  1001. LOG_WARNING("target not halted");
  1002. return ERROR_TARGET_NOT_HALTED;
  1003. }
  1004. if (!debug_execution)
  1005. {
  1006. target_free_all_working_areas(target);
  1007. }
  1008. /* update vector tables */
  1009. if ((retval = xscale_update_vectors(target)) != ERROR_OK)
  1010. return retval;
  1011. /* current = 1: continue on current pc, otherwise continue at <address> */
  1012. if (!current)
  1013. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
  1014. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1015. /* if we're at the reset vector, we have to simulate the branch */
  1016. if (current_pc == 0x0)
  1017. {
  1018. arm_simulate_step(target, NULL);
  1019. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1020. }
  1021. /* the front-end may request us not to handle breakpoints */
  1022. if (handle_breakpoints)
  1023. {
  1024. if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
  1025. {
  1026. uint32_t next_pc;
  1027. /* there's a breakpoint at the current PC, we have to step over it */
  1028. LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
  1029. xscale_unset_breakpoint(target, breakpoint);
  1030. /* calculate PC of next instruction */
  1031. if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
  1032. {
  1033. uint32_t current_opcode;
  1034. target_read_u32(target, current_pc, &current_opcode);
  1035. LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
  1036. }
  1037. LOG_DEBUG("enable single-step");
  1038. xscale_enable_single_step(target, next_pc);
  1039. /* restore banked registers */
  1040. retval = xscale_restore_banked(target);
  1041. /* send resume request (command 0x30 or 0x31)
  1042. * clean the trace buffer if it is to be enabled (0x62) */
  1043. if (xscale->trace.buffer_enabled)
  1044. {
  1045. xscale_send_u32(target, 0x62);
  1046. xscale_send_u32(target, 0x31);
  1047. }
  1048. else
  1049. xscale_send_u32(target, 0x30);
  1050. /* send CPSR */
  1051. xscale_send_u32(target,
  1052. buf_get_u32(armv4_5->cpsr->value, 0, 32));
  1053. LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
  1054. buf_get_u32(armv4_5->cpsr->value, 0, 32));
  1055. for (i = 7; i >= 0; i--)
  1056. {
  1057. /* send register */
  1058. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1059. LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1060. }
  1061. /* send PC */
  1062. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1063. LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1064. /* wait for and process debug entry */
  1065. xscale_debug_entry(target);
  1066. LOG_DEBUG("disable single-step");
  1067. xscale_disable_single_step(target);
  1068. LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
  1069. xscale_set_breakpoint(target, breakpoint);
  1070. }
  1071. }
  1072. /* enable any pending breakpoints and watchpoints */
  1073. xscale_enable_breakpoints(target);
  1074. xscale_enable_watchpoints(target);
  1075. /* restore banked registers */
  1076. retval = xscale_restore_banked(target);
  1077. /* send resume request (command 0x30 or 0x31)
  1078. * clean the trace buffer if it is to be enabled (0x62) */
  1079. if (xscale->trace.buffer_enabled)
  1080. {
  1081. xscale_send_u32(target, 0x62);
  1082. xscale_send_u32(target, 0x31);
  1083. }
  1084. else
  1085. xscale_send_u32(target, 0x30);
  1086. /* send CPSR */
  1087. xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
  1088. LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
  1089. buf_get_u32(armv4_5->cpsr->value, 0, 32));
  1090. for (i = 7; i >= 0; i--)
  1091. {
  1092. /* send register */
  1093. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1094. LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1095. }
  1096. /* send PC */
  1097. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1098. LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1099. target->debug_reason = DBG_REASON_NOTHALTED;
  1100. if (!debug_execution)
  1101. {
  1102. /* registers are now invalid */
  1103. register_cache_invalidate(armv4_5->core_cache);
  1104. target->state = TARGET_RUNNING;
  1105. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  1106. }
  1107. else
  1108. {
  1109. target->state = TARGET_DEBUG_RUNNING;
  1110. target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
  1111. }
  1112. LOG_DEBUG("target resumed");
  1113. return ERROR_OK;
  1114. }
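/* Handler resume protocol used above and in xscale_step_inner() below
 * (summary): optionally 0x62 ("clean trace buffer") followed by 0x31
 * ("resume with trace") when tracing, else 0x30 ("resume"); then the
 * CPSR, r7 down to r0, and finally the PC. The debug handler restores
 * that state and branches to the PC.
 */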
  1115. static int xscale_step_inner(struct target *target, int current,
  1116. uint32_t address, int handle_breakpoints)
  1117. {
  1118. struct xscale_common *xscale = target_to_xscale(target);
  1119. struct arm *armv4_5 = &xscale->armv4_5_common;
  1120. uint32_t next_pc;
  1121. int retval;
  1122. int i;
  1123. target->debug_reason = DBG_REASON_SINGLESTEP;
  1124. /* calculate PC of next instruction */
  1125. if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
  1126. {
  1127. uint32_t current_opcode, current_pc;
  1128. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1129. target_read_u32(target, current_pc, &current_opcode);
  1130. LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
  1131. return retval;
  1132. }
  1133. LOG_DEBUG("enable single-step");
  1134. if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
  1135. return retval;
  1136. /* restore banked registers */
  1137. if ((retval = xscale_restore_banked(target)) != ERROR_OK)
  1138. return retval;
  1139. /* send resume request (command 0x30 or 0x31)
  1140. * clean the trace buffer if it is to be enabled (0x62) */
  1141. if (xscale->trace.buffer_enabled)
  1142. {
  1143. if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
  1144. return retval;
  1145. if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
  1146. return retval;
  1147. }
  1148. else
  1149. if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
  1150. return retval;
  1151. /* send CPSR */
  1152. retval = xscale_send_u32(target,
  1153. buf_get_u32(armv4_5->cpsr->value, 0, 32));
  1154. if (retval != ERROR_OK)
  1155. return retval;
  1156. LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
  1157. buf_get_u32(armv4_5->cpsr->value, 0, 32));
  1158. for (i = 7; i >= 0; i--)
  1159. {
  1160. /* send register */
  1161. if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
  1162. return retval;
  1163. LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1164. }
  1165. /* send PC */
  1166. if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
  1167. return retval;
  1168. LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1169. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  1170. /* registers are now invalid */
  1171. register_cache_invalidate(armv4_5->core_cache);
  1172. /* wait for and process debug entry */
  1173. if ((retval = xscale_debug_entry(target)) != ERROR_OK)
  1174. return retval;
  1175. LOG_DEBUG("disable single-step");
  1176. if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
  1177. return retval;
  1178. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  1179. return ERROR_OK;
  1180. }
  1181. static int xscale_step(struct target *target, int current,
  1182. uint32_t address, int handle_breakpoints)
  1183. {
  1184. struct arm *armv4_5 = target_to_arm(target);
  1185. struct breakpoint *breakpoint = target->breakpoints;
  1186. uint32_t current_pc;
  1187. int retval;
  1188. if (target->state != TARGET_HALTED)
  1189. {
  1190. LOG_WARNING("target not halted");
  1191. return ERROR_TARGET_NOT_HALTED;
  1192. }
  1193. /* current = 1: continue on current pc, otherwise continue at <address> */
  1194. if (!current)
  1195. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
  1196. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1197. /* if we're at the reset vector, we have to simulate the step */
  1198. if (current_pc == 0x0)
  1199. {
  1200. if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
  1201. return retval;
  1202. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1203. target->debug_reason = DBG_REASON_SINGLESTEP;
  1204. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  1205. return ERROR_OK;
  1206. }
  1207. /* the front-end may request us not to handle breakpoints */
  1208. if (handle_breakpoints)
  1209. if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
  1210. {
  1211. if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
  1212. return retval;
  1213. }
  1214. retval = xscale_step_inner(target, current, address, handle_breakpoints);
  1215. if (breakpoint)
  1216. {
  1217. xscale_set_breakpoint(target, breakpoint);
  1218. }
  1219. LOG_DEBUG("target stepped");
  1220. return ERROR_OK;
  1221. }
  1222. static int xscale_assert_reset(struct target *target)
  1223. {
  1224. struct xscale_common *xscale = target_to_xscale(target);
  1225. LOG_DEBUG("target->state: %s",
  1226. target_state_name(target));
  1227. /* select DCSR instruction (set endstate to R-T-I to ensure we don't
  1228. * end up in T-L-R, which would reset JTAG)
  1229. */
  1230. jtag_set_end_state(TAP_IDLE);
  1231. xscale_jtag_set_instr(target->tap,
  1232. XSCALE_SELDCSR << xscale->xscale_variant);
  1233. /* set Hold reset, Halt mode and Trap Reset */
  1234. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
  1235. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
  1236. xscale_write_dcsr(target, 1, 0);
  1237. /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
  1238. xscale_jtag_set_instr(target->tap, ~0);
  1239. jtag_execute_queue();
  1240. /* assert reset */
  1241. jtag_add_reset(0, 1);
  1242. /* sleep 1ms, to be sure we fulfill any requirements */
  1243. jtag_add_sleep(1000);
  1244. jtag_execute_queue();
  1245. target->state = TARGET_RESET;
  1246. if (target->reset_halt)
  1247. {
  1248. int retval;
  1249. if ((retval = target_halt(target)) != ERROR_OK)
  1250. return retval;
  1251. }
  1252. return ERROR_OK;
  1253. }
  1254. static int xscale_deassert_reset(struct target *target)
  1255. {
  1256. struct xscale_common *xscale = target_to_xscale(target);
  1257. struct breakpoint *breakpoint = target->breakpoints;
  1258. LOG_DEBUG("-");
  1259. xscale->ibcr_available = 2;
  1260. xscale->ibcr0_used = 0;
  1261. xscale->ibcr1_used = 0;
  1262. xscale->dbr_available = 2;
  1263. xscale->dbr0_used = 0;
  1264. xscale->dbr1_used = 0;
  1265. /* mark all hardware breakpoints as unset */
  1266. while (breakpoint)
  1267. {
  1268. if (breakpoint->type == BKPT_HARD)
  1269. {
  1270. breakpoint->set = 0;
  1271. }
  1272. breakpoint = breakpoint->next;
  1273. }
  1274. register_cache_invalidate(xscale->armv4_5_common.core_cache);
  1275. /* FIXME: mark hardware watchpoints as unset too. Also,
  1276. * at least some of the XScale registers are invalid...
  1277. */
  1278. /*
  1279. * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
  1280. * contents got invalidated. Safer to force that, so writing new
  1281. * contents can't ever fail..
  1282. */
  1283. {
  1284. uint32_t address;
  1285. unsigned buf_cnt;
  1286. const uint8_t *buffer = xscale_debug_handler;
  1287. int retval;
  1288. /* release SRST */
  1289. jtag_add_reset(0, 0);
  1290. /* wait 300ms; 150 and 100ms were not enough */
  1291. jtag_add_sleep(300*1000);
  1292. jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
  1293. jtag_execute_queue();
  1294. /* set Hold reset, Halt mode and Trap Reset */
  1295. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
  1296. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
  1297. xscale_write_dcsr(target, 1, 0);
  1298. /* Load the debug handler into the mini-icache. Since
  1299. * it's using halt mode (not monitor mode), it runs in
  1300. * "Special Debug State" for access to registers, memory,
  1301. * coprocessors, trace data, etc.
  1302. */
  1303. address = xscale->handler_address;
  1304. for (unsigned binary_size = sizeof xscale_debug_handler - 1;
  1305. binary_size > 0;
  1306. binary_size -= buf_cnt, buffer += buf_cnt)
  1307. {
  1308. uint32_t cache_line[8];
  1309. unsigned i;
  1310. buf_cnt = binary_size;
  1311. if (buf_cnt > 32)
  1312. buf_cnt = 32;
  1313. for (i = 0; i < buf_cnt; i += 4)
  1314. {
  1315. /* convert LE buffer to host-endian uint32_t */
  1316. cache_line[i / 4] = le_to_h_u32(&buffer[i]);
  1317. }
  1318. for (; i < 32; i += 4)
  1319. {
  1320. cache_line[i / 4] = 0xe1a08008; /* pad: "mov r8, r8" (NOP) */
  1321. }
  1322. /* only load addresses other than the reset vectors */
  1323. if ((address % 0x400) != 0x0)
  1324. {
  1325. retval = xscale_load_ic(target, address,
  1326. cache_line);
  1327. if (retval != ERROR_OK)
  1328. return retval;
  1329. }
  1330. address += buf_cnt;
  1331. }
  1332. retval = xscale_load_ic(target, 0x0,
  1333. xscale->low_vectors);
  1334. if (retval != ERROR_OK)
  1335. return retval;
  1336. retval = xscale_load_ic(target, 0xffff0000,
  1337. xscale->high_vectors);
  1338. if (retval != ERROR_OK)
  1339. return retval;
  1340. jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
  1341. jtag_add_sleep(100000);
  1342. /* set Hold reset, Halt mode and Trap Reset */
  1343. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
  1344. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
  1345. xscale_write_dcsr(target, 1, 0);
  1346. /* clear Hold reset to let the target run (should enter debug handler) */
  1347. xscale_write_dcsr(target, 0, 1);
  1348. target->state = TARGET_RUNNING;
  1349. if (!target->reset_halt)
  1350. {
  1351. jtag_add_sleep(10000);
  1352. /* we should have entered debug now */
  1353. xscale_debug_entry(target);
  1354. target->state = TARGET_HALTED;
  1355. /* resume the target */
  1356. xscale_resume(target, 1, 0x0, 1, 0);
  1357. }
  1358. }
  1359. return ERROR_OK;
  1360. }
  1361. static int xscale_read_core_reg(struct target *target, struct reg *r,
  1362. int num, enum arm_mode mode)
  1363. {
  1364. /** \todo add debug handler support for core register reads */
  1365. LOG_ERROR("not implemented");
  1366. return ERROR_OK;
  1367. }
  1368. static int xscale_write_core_reg(struct target *target, struct reg *r,
  1369. int num, enum arm_mode mode, uint32_t value)
  1370. {
  1371. /** \todo add debug handler support for core register writes */
  1372. LOG_ERROR("not implemented");
  1373. return ERROR_OK;
  1374. }
  1375. static int xscale_full_context(struct target *target)
  1376. {
  1377. struct arm *armv4_5 = target_to_arm(target);
  1378. uint32_t *buffer;
  1379. int i, j;
  1380. LOG_DEBUG("-");
  1381. if (target->state != TARGET_HALTED)
  1382. {
  1383. LOG_WARNING("target not halted");
  1384. return ERROR_TARGET_NOT_HALTED;
  1385. }
  1386. buffer = malloc(4 * 8);
  1387. /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS);
  1388. * we can't enter User mode on an XScale (unpredictable),
  1389. * but User shares registers with SYS
  1390. */
  1391. for (i = 1; i < 7; i++)
  1392. {
  1393. enum arm_mode mode = armv4_5_number_to_mode(i);
  1394. bool valid = true;
  1395. struct reg *r;
  1396. if (mode == ARM_MODE_USR)
  1397. continue;
  1398. /* check if there are invalid registers in the current mode
  1399. */
  1400. for (j = 0; valid && j <= 16; j++)
  1401. {
  1402. if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
  1403. mode, j).valid)
  1404. valid = false;
  1405. }
  1406. if (valid)
  1407. continue;
  1408. /* request banked registers */
  1409. xscale_send_u32(target, 0x0);
  1410. /* send CPSR for desired bank mode */
  1411. xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
  1412. /* get banked registers: r8 to r14; and SPSR
  1413. * except in USR/SYS mode
  1414. */
  1415. if (mode != ARM_MODE_SYS) {
  1416. /* SPSR */
  1417. r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
  1418. mode, 16);
  1419. xscale_receive(target, buffer, 8);
  1420. buf_set_u32(r->value, 0, 32, buffer[7]);
  1421. r->dirty = false;
  1422. r->valid = true;
  1423. } else {
  1424. xscale_receive(target, buffer, 7);
  1425. }
  1426. /* move data from buffer to register cache */
  1427. for (j = 8; j <= 14; j++)
  1428. {
  1429. r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
  1430. mode, j);
  1431. buf_set_u32(r->value, 0, 32, buffer[j - 8]);
  1432. r->dirty = false;
  1433. r->valid = true;
  1434. }
  1435. }
  1436. free(buffer);
  1437. return ERROR_OK;
  1438. }
  1439. static int xscale_restore_banked(struct target *target)
  1440. {
  1441. struct arm *armv4_5 = target_to_arm(target);
  1442. int i, j;
  1443. if (target->state != TARGET_HALTED)
  1444. {
  1445. LOG_WARNING("target not halted");
  1446. return ERROR_TARGET_NOT_HALTED;
  1447. }
  1448. /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
  1449. * and check if any banked registers need to be written. Ignore
  1450. * USR mode (number 0) in favor of SYS; we can't enter User mode on
  1451. * an XScale (unpredictable), but they share all registers.
  1452. */
  1453. for (i = 1; i < 7; i++)
  1454. {
  1455. enum arm_mode mode = armv4_5_number_to_mode(i);
  1456. struct reg *r;
  1457. if (mode == ARM_MODE_USR)
  1458. continue;
  1459. /* check if there are dirty registers in this mode */
  1460. for (j = 8; j <= 14; j++)
  1461. {
  1462. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
  1463. mode, j).dirty)
  1464. goto dirty;
  1465. }
  1466. /* if not USR/SYS, check if the SPSR needs to be written */
  1467. if (mode != ARM_MODE_SYS)
  1468. {
  1469. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
  1470. mode, 16).dirty)
  1471. goto dirty;
  1472. }
  1473. /* there's nothing to flush for this mode */
  1474. continue;
  1475. dirty:
  1476. /* command 0x1: "send banked registers" */
  1477. xscale_send_u32(target, 0x1);
  1478. /* send CPSR for desired mode */
  1479. xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
  1480. /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
  1481. * but this protocol doesn't understand that nuance.
  1482. */
  1483. for (j = 8; j <= 14; j++) {
  1484. r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
  1485. mode, j);
  1486. xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
  1487. r->dirty = false;
  1488. }
  1489. /* send spsr if not in USR/SYS mode */
  1490. if (mode != ARM_MODE_SYS) {
  1491. r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
  1492. mode, 16);
  1493. xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
  1494. r->dirty = false;
  1495. }
  1496. }
  1497. return ERROR_OK;
  1498. }
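/* Illustration only -- not part of the driver. Assuming ARM_MODE_FIQ carries
 * the architectural mode number 0x11, flushing dirty FIQ state with the
 * protocol above puts this word sequence on the wire:
 */
#if 0
static void example_flush_fiq_banked(struct target *target,
		const uint32_t regs[7], uint32_t spsr)
{
	int j;
	xscale_send_u32(target, 0x1);		/* command: "send banked registers" */
	xscale_send_u32(target, 0x11 | 0xc0);	/* CPSR image: FIQ mode, IRQ/FIQ masked */
	for (j = 0; j < 7; j++)			/* r8_fiq .. r14_fiq */
		xscale_send_u32(target, regs[j]);
	xscale_send_u32(target, spsr);		/* SPSR_fiq (FIQ is not SYS mode) */
}
#endif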
  1499. static int xscale_read_memory(struct target *target, uint32_t address,
  1500. uint32_t size, uint32_t count, uint8_t *buffer)
  1501. {
  1502. struct xscale_common *xscale = target_to_xscale(target);
  1503. uint32_t *buf32;
  1504. uint32_t i;
  1505. int retval;
  1506. LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
  1507. if (target->state != TARGET_HALTED)
  1508. {
  1509. LOG_WARNING("target not halted");
  1510. return ERROR_TARGET_NOT_HALTED;
  1511. }
  1512. /* sanitize arguments */
  1513. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1514. return ERROR_INVALID_ARGUMENTS;
  1515. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1516. return ERROR_TARGET_UNALIGNED_ACCESS;
  1517. /* send memory read request (command 0x1n, n: access size) */
  1518. if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
  1519. return retval;
  1520. /* send base address for read request */
  1521. if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
  1522. return retval;
  1523. /* send number of requested data words */
  1524. if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
  1525. return retval;
  1526. /* receive data from target (count times 32-bit words in host endianness) */
  1527. buf32 = malloc(4 * count);
  1528. if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1529. { free(buf32); return retval; } /* don't leak buf32 on a receive error */
  1530. /* extract data from host-endian buffer into byte stream */
  1531. for (i = 0; i < count; i++)
  1532. {
  1533. switch (size)
  1534. {
  1535. case 4:
  1536. target_buffer_set_u32(target, buffer, buf32[i]);
  1537. buffer += 4;
  1538. break;
  1539. case 2:
  1540. target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
  1541. buffer += 2;
  1542. break;
  1543. case 1:
  1544. *buffer++ = buf32[i] & 0xff;
  1545. break;
  1546. default:
  1547. LOG_ERROR("invalid read size");
1548. free(buf32); return ERROR_INVALID_ARGUMENTS;
  1549. }
  1550. }
  1551. free(buf32);
  1552. /* examine DCSR, to see if Sticky Abort (SA) got set */
  1553. if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
  1554. return retval;
  1555. if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
  1556. {
  1557. /* clear SA bit */
  1558. if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
  1559. return retval;
  1560. return ERROR_TARGET_DATA_ABORT;
  1561. }
  1562. return ERROR_OK;
  1563. }
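/* Illustration only: the low command nibble is the access size, so reads use
 * 0x11/0x12/0x14 for byte/halfword/word. A hypothetical single-word helper
 * built on the routine above (not part of this driver):
 */
#if 0
static int example_read_u32(struct target *target, uint32_t address,
		uint32_t *value)
{
	uint8_t buf[4];
	int retval = xscale_read_memory(target, address, 4, 1, buf);
	if (retval != ERROR_OK)
		return retval;
	*value = target_buffer_get_u32(target, buf);	/* target to host order */
	return ERROR_OK;
}
#endif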
  1564. static int xscale_read_phys_memory(struct target *target, uint32_t address,
  1565. uint32_t size, uint32_t count, uint8_t *buffer)
  1566. {
  1567. struct xscale_common *xscale = target_to_xscale(target);
  1568. /* with MMU inactive, there are only physical addresses */
  1569. if (!xscale->armv4_5_mmu.mmu_enabled)
  1570. return xscale_read_memory(target, address, size, count, buffer);
  1571. /** \todo: provide a non-stub implementation of this routine. */
  1572. LOG_ERROR("%s: %s is not implemented. Disable MMU?",
  1573. target_name(target), __func__);
  1574. return ERROR_FAIL;
  1575. }
  1576. static int xscale_write_memory(struct target *target, uint32_t address,
  1577. uint32_t size, uint32_t count, uint8_t *buffer)
  1578. {
  1579. struct xscale_common *xscale = target_to_xscale(target);
  1580. int retval;
  1581. LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
  1582. if (target->state != TARGET_HALTED)
  1583. {
  1584. LOG_WARNING("target not halted");
  1585. return ERROR_TARGET_NOT_HALTED;
  1586. }
  1587. /* sanitize arguments */
  1588. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1589. return ERROR_INVALID_ARGUMENTS;
  1590. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1591. return ERROR_TARGET_UNALIGNED_ACCESS;
  1592. /* send memory write request (command 0x2n, n: access size) */
  1593. if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
  1594. return retval;
1595. /* send base address for write request */
  1596. if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
  1597. return retval;
1598. /* send number of requested data words to be written */
  1599. if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
  1600. return retval;
1601. /* unused word-by-word transmit path below; xscale_send() streams the whole buffer instead */
  1602. #if 0
  1603. for (i = 0; i < count; i++)
  1604. {
  1605. switch (size)
  1606. {
  1607. case 4:
  1608. value = target_buffer_get_u32(target, buffer);
  1609. xscale_send_u32(target, value);
  1610. buffer += 4;
  1611. break;
  1612. case 2:
  1613. value = target_buffer_get_u16(target, buffer);
  1614. xscale_send_u32(target, value);
  1615. buffer += 2;
  1616. break;
  1617. case 1:
  1618. value = *buffer;
  1619. xscale_send_u32(target, value);
  1620. buffer += 1;
  1621. break;
  1622. default:
  1623. LOG_ERROR("should never get here");
  1624. exit(-1);
  1625. }
  1626. }
  1627. #endif
  1628. if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
  1629. return retval;
  1630. /* examine DCSR, to see if Sticky Abort (SA) got set */
  1631. if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
  1632. return retval;
  1633. if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
  1634. {
  1635. /* clear SA bit */
  1636. if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
  1637. return retval;
  1638. return ERROR_TARGET_DATA_ABORT;
  1639. }
  1640. return ERROR_OK;
  1641. }
  1642. static int xscale_write_phys_memory(struct target *target, uint32_t address,
  1643. uint32_t size, uint32_t count, uint8_t *buffer)
  1644. {
  1645. struct xscale_common *xscale = target_to_xscale(target);
  1646. /* with MMU inactive, there are only physical addresses */
  1647. if (!xscale->armv4_5_mmu.mmu_enabled)
1648. return xscale_write_memory(target, address, size, count, buffer);
  1649. /** \todo: provide a non-stub implementation of this routine. */
  1650. LOG_ERROR("%s: %s is not implemented. Disable MMU?",
  1651. target_name(target), __func__);
  1652. return ERROR_FAIL;
  1653. }
  1654. static int xscale_bulk_write_memory(struct target *target, uint32_t address,
  1655. uint32_t count, uint8_t *buffer)
  1656. {
  1657. return xscale_write_memory(target, address, 4, count, buffer);
  1658. }
  1659. static uint32_t xscale_get_ttb(struct target *target)
  1660. {
  1661. struct xscale_common *xscale = target_to_xscale(target);
  1662. uint32_t ttb;
  1663. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
  1664. ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
  1665. return ttb;
  1666. }
  1667. static void xscale_disable_mmu_caches(struct target *target, int mmu,
  1668. int d_u_cache, int i_cache)
  1669. {
  1670. struct xscale_common *xscale = target_to_xscale(target);
  1671. uint32_t cp15_control;
  1672. /* read cp15 control register */
  1673. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  1674. cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  1675. if (mmu)
  1676. cp15_control &= ~0x1U;
  1677. if (d_u_cache)
  1678. {
  1679. /* clean DCache */
  1680. xscale_send_u32(target, 0x50);
  1681. xscale_send_u32(target, xscale->cache_clean_address);
  1682. /* invalidate DCache */
  1683. xscale_send_u32(target, 0x51);
  1684. cp15_control &= ~0x4U;
  1685. }
  1686. if (i_cache)
  1687. {
  1688. /* invalidate ICache */
  1689. xscale_send_u32(target, 0x52);
  1690. cp15_control &= ~0x1000U;
  1691. }
  1692. /* write new cp15 control register */
  1693. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
  1694. /* execute cpwait to ensure outstanding operations complete */
  1695. xscale_send_u32(target, 0x53);
  1696. }
  1697. static void xscale_enable_mmu_caches(struct target *target, int mmu,
  1698. int d_u_cache, int i_cache)
  1699. {
  1700. struct xscale_common *xscale = target_to_xscale(target);
  1701. uint32_t cp15_control;
  1702. /* read cp15 control register */
  1703. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  1704. cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  1705. if (mmu)
  1706. cp15_control |= 0x1U;
  1707. if (d_u_cache)
  1708. cp15_control |= 0x4U;
  1709. if (i_cache)
  1710. cp15_control |= 0x1000U;
  1711. /* write new cp15 control register */
  1712. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
  1713. /* execute cpwait to ensure outstanding operations complete */
  1714. xscale_send_u32(target, 0x53);
  1715. }
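/* For reference, the CP15 control register bits toggled by the two helpers
 * above (sketch; the names are illustrative, not from this driver):
 */
#if 0
#define EXAMPLE_CP15_CTRL_MMU_EN	(1u << 0)	/* 0x1: MMU enable */
#define EXAMPLE_CP15_CTRL_DCACHE_EN	(1u << 2)	/* 0x4: data/unified cache */
#define EXAMPLE_CP15_CTRL_ICACHE_EN	(1u << 12)	/* 0x1000: instruction cache */
#endif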
  1716. static int xscale_set_breakpoint(struct target *target,
  1717. struct breakpoint *breakpoint)
  1718. {
  1719. int retval;
  1720. struct xscale_common *xscale = target_to_xscale(target);
  1721. if (target->state != TARGET_HALTED)
  1722. {
  1723. LOG_WARNING("target not halted");
  1724. return ERROR_TARGET_NOT_HALTED;
  1725. }
  1726. if (breakpoint->set)
  1727. {
  1728. LOG_WARNING("breakpoint already set");
  1729. return ERROR_OK;
  1730. }
  1731. if (breakpoint->type == BKPT_HARD)
  1732. {
  1733. uint32_t value = breakpoint->address | 1;
  1734. if (!xscale->ibcr0_used)
  1735. {
  1736. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
  1737. xscale->ibcr0_used = 1;
  1738. breakpoint->set = 1; /* breakpoint set on first breakpoint register */
  1739. }
  1740. else if (!xscale->ibcr1_used)
  1741. {
  1742. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
  1743. xscale->ibcr1_used = 1;
  1744. breakpoint->set = 2; /* breakpoint set on second breakpoint register */
  1745. }
  1746. else
  1747. {
  1748. LOG_ERROR("BUG: no hardware comparator available");
  1749. return ERROR_OK;
  1750. }
  1751. }
  1752. else if (breakpoint->type == BKPT_SOFT)
  1753. {
  1754. if (breakpoint->length == 4)
  1755. {
  1756. /* keep the original instruction in target endianness */
  1757. if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
  1758. {
  1759. return retval;
  1760. }
1761. /* replace it with the ARM breakpoint instruction (xscale->arm_bkpt is host endian) */
  1762. if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
  1763. {
  1764. return retval;
  1765. }
  1766. }
  1767. else
  1768. {
  1769. /* keep the original instruction in target endianness */
  1770. if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
  1771. {
  1772. return retval;
  1773. }
1774. /* replace it with the Thumb breakpoint instruction (xscale->thumb_bkpt is host endian) */
  1775. if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
  1776. {
  1777. return retval;
  1778. }
  1779. }
  1780. breakpoint->set = 1;
  1781. }
  1782. return ERROR_OK;
  1783. }
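/* For reference: with a zero immediate, ARMV5_BKPT(0x0) is the ARM opcode
 * 0xe1200070 and ARMV5_T_BKPT(0x0) is the Thumb opcode 0xbe00 -- these are
 * the words planted over the original instruction above.
 */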
  1784. static int xscale_add_breakpoint(struct target *target,
  1785. struct breakpoint *breakpoint)
  1786. {
  1787. struct xscale_common *xscale = target_to_xscale(target);
  1788. if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
  1789. {
  1790. LOG_INFO("no breakpoint unit available for hardware breakpoint");
  1791. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1792. }
  1793. if ((breakpoint->length != 2) && (breakpoint->length != 4))
  1794. {
  1795. LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
  1796. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1797. }
  1798. if (breakpoint->type == BKPT_HARD)
  1799. {
  1800. xscale->ibcr_available--;
  1801. }
  1802. return ERROR_OK;
  1803. }
  1804. static int xscale_unset_breakpoint(struct target *target,
  1805. struct breakpoint *breakpoint)
  1806. {
  1807. int retval;
  1808. struct xscale_common *xscale = target_to_xscale(target);
  1809. if (target->state != TARGET_HALTED)
  1810. {
  1811. LOG_WARNING("target not halted");
  1812. return ERROR_TARGET_NOT_HALTED;
  1813. }
  1814. if (!breakpoint->set)
  1815. {
  1816. LOG_WARNING("breakpoint not set");
  1817. return ERROR_OK;
  1818. }
  1819. if (breakpoint->type == BKPT_HARD)
  1820. {
  1821. if (breakpoint->set == 1)
  1822. {
  1823. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
  1824. xscale->ibcr0_used = 0;
  1825. }
  1826. else if (breakpoint->set == 2)
  1827. {
  1828. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
  1829. xscale->ibcr1_used = 0;
  1830. }
  1831. breakpoint->set = 0;
  1832. }
  1833. else
  1834. {
  1835. /* restore original instruction (kept in target endianness) */
  1836. if (breakpoint->length == 4)
  1837. {
  1838. if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
  1839. {
  1840. return retval;
  1841. }
  1842. }
  1843. else
  1844. {
  1845. if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
  1846. {
  1847. return retval;
  1848. }
  1849. }
  1850. breakpoint->set = 0;
  1851. }
  1852. return ERROR_OK;
  1853. }
  1854. static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
  1855. {
  1856. struct xscale_common *xscale = target_to_xscale(target);
  1857. if (target->state != TARGET_HALTED)
  1858. {
  1859. LOG_WARNING("target not halted");
  1860. return ERROR_TARGET_NOT_HALTED;
  1861. }
  1862. if (breakpoint->set)
  1863. {
  1864. xscale_unset_breakpoint(target, breakpoint);
  1865. }
  1866. if (breakpoint->type == BKPT_HARD)
  1867. xscale->ibcr_available++;
  1868. return ERROR_OK;
  1869. }
  1870. static int xscale_set_watchpoint(struct target *target,
  1871. struct watchpoint *watchpoint)
  1872. {
  1873. struct xscale_common *xscale = target_to_xscale(target);
  1874. uint8_t enable = 0;
  1875. struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
1876. uint32_t dbcon_value;
  1877. if (target->state != TARGET_HALTED)
  1878. {
  1879. LOG_WARNING("target not halted");
  1880. return ERROR_TARGET_NOT_HALTED;
  1881. }
1882. xscale_get_reg(dbcon); dbcon_value = buf_get_u32(dbcon->value, 0, 32); /* read DBCON only after refreshing it */
  1883. switch (watchpoint->rw)
  1884. {
  1885. case WPT_READ:
  1886. enable = 0x3;
  1887. break;
  1888. case WPT_ACCESS:
  1889. enable = 0x2;
  1890. break;
  1891. case WPT_WRITE:
  1892. enable = 0x1;
  1893. break;
  1894. default:
  1895. LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
  1896. }
  1897. if (!xscale->dbr0_used)
  1898. {
  1899. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
  1900. dbcon_value |= enable;
  1901. xscale_set_reg_u32(dbcon, dbcon_value);
  1902. watchpoint->set = 1;
  1903. xscale->dbr0_used = 1;
  1904. }
  1905. else if (!xscale->dbr1_used)
  1906. {
  1907. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
  1908. dbcon_value |= enable << 2;
  1909. xscale_set_reg_u32(dbcon, dbcon_value);
  1910. watchpoint->set = 2;
  1911. xscale->dbr1_used = 1;
  1912. }
  1913. else
  1914. {
  1915. LOG_ERROR("BUG: no hardware comparator available");
  1916. return ERROR_OK;
  1917. }
  1918. return ERROR_OK;
  1919. }
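/* DBCON encoding used above: two enable bits per data breakpoint register
 * (DBR0 in bits 1:0, DBR1 in bits 3:2), where 0x1 traps stores (WPT_WRITE),
 * 0x2 traps any access (WPT_ACCESS) and 0x3 traps loads (WPT_READ).
 */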
  1920. static int xscale_add_watchpoint(struct target *target,
  1921. struct watchpoint *watchpoint)
  1922. {
  1923. struct xscale_common *xscale = target_to_xscale(target);
  1924. if (xscale->dbr_available < 1)
  1925. {
  1926. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1927. }
  1928. if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
  1929. {
  1930. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1931. }
  1932. xscale->dbr_available--;
  1933. return ERROR_OK;
  1934. }
  1935. static int xscale_unset_watchpoint(struct target *target,
  1936. struct watchpoint *watchpoint)
  1937. {
  1938. struct xscale_common *xscale = target_to_xscale(target);
  1939. struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
  1940. uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
  1941. if (target->state != TARGET_HALTED)
  1942. {
  1943. LOG_WARNING("target not halted");
  1944. return ERROR_TARGET_NOT_HALTED;
  1945. }
  1946. if (!watchpoint->set)
  1947. {
  1948. LOG_WARNING("breakpoint not set");
  1949. return ERROR_OK;
  1950. }
  1951. if (watchpoint->set == 1)
  1952. {
  1953. dbcon_value &= ~0x3;
  1954. xscale_set_reg_u32(dbcon, dbcon_value);
  1955. xscale->dbr0_used = 0;
  1956. }
  1957. else if (watchpoint->set == 2)
  1958. {
  1959. dbcon_value &= ~0xc;
  1960. xscale_set_reg_u32(dbcon, dbcon_value);
  1961. xscale->dbr1_used = 0;
  1962. }
  1963. watchpoint->set = 0;
  1964. return ERROR_OK;
  1965. }
  1966. static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
  1967. {
  1968. struct xscale_common *xscale = target_to_xscale(target);
  1969. if (target->state != TARGET_HALTED)
  1970. {
  1971. LOG_WARNING("target not halted");
  1972. return ERROR_TARGET_NOT_HALTED;
  1973. }
  1974. if (watchpoint->set)
  1975. {
  1976. xscale_unset_watchpoint(target, watchpoint);
  1977. }
  1978. xscale->dbr_available++;
  1979. return ERROR_OK;
  1980. }
  1981. static int xscale_get_reg(struct reg *reg)
  1982. {
  1983. struct xscale_reg *arch_info = reg->arch_info;
  1984. struct target *target = arch_info->target;
  1985. struct xscale_common *xscale = target_to_xscale(target);
  1986. /* DCSR, TX and RX are accessible via JTAG */
  1987. if (strcmp(reg->name, "XSCALE_DCSR") == 0)
  1988. {
  1989. return xscale_read_dcsr(arch_info->target);
  1990. }
  1991. else if (strcmp(reg->name, "XSCALE_TX") == 0)
  1992. {
  1993. /* 1 = consume register content */
  1994. return xscale_read_tx(arch_info->target, 1);
  1995. }
  1996. else if (strcmp(reg->name, "XSCALE_RX") == 0)
  1997. {
  1998. /* can't read from RX register (host -> debug handler) */
  1999. return ERROR_OK;
  2000. }
  2001. else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
  2002. {
  2003. /* can't (explicitly) read from TXRXCTRL register */
  2004. return ERROR_OK;
  2005. }
2006. else /* Other DBG registers have to be transferred by the debug handler */
  2007. {
  2008. /* send CP read request (command 0x40) */
  2009. xscale_send_u32(target, 0x40);
  2010. /* send CP register number */
  2011. xscale_send_u32(target, arch_info->dbg_handler_number);
  2012. /* read register value */
  2013. xscale_read_tx(target, 1);
  2014. buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
  2015. reg->dirty = 0;
  2016. reg->valid = 1;
  2017. }
  2018. return ERROR_OK;
  2019. }
  2020. static int xscale_set_reg(struct reg *reg, uint8_t* buf)
  2021. {
  2022. struct xscale_reg *arch_info = reg->arch_info;
  2023. struct target *target = arch_info->target;
  2024. struct xscale_common *xscale = target_to_xscale(target);
  2025. uint32_t value = buf_get_u32(buf, 0, 32);
  2026. /* DCSR, TX and RX are accessible via JTAG */
  2027. if (strcmp(reg->name, "XSCALE_DCSR") == 0)
  2028. {
  2029. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
  2030. return xscale_write_dcsr(arch_info->target, -1, -1);
  2031. }
  2032. else if (strcmp(reg->name, "XSCALE_RX") == 0)
  2033. {
  2034. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
  2035. return xscale_write_rx(arch_info->target);
  2036. }
  2037. else if (strcmp(reg->name, "XSCALE_TX") == 0)
  2038. {
  2039. /* can't write to TX register (debug-handler -> host) */
  2040. return ERROR_OK;
  2041. }
  2042. else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
  2043. {
  2044. /* can't (explicitly) write to TXRXCTRL register */
  2045. return ERROR_OK;
  2046. }
2047. else /* Other DBG registers have to be transferred by the debug handler */
  2048. {
  2049. /* send CP write request (command 0x41) */
  2050. xscale_send_u32(target, 0x41);
  2051. /* send CP register number */
  2052. xscale_send_u32(target, arch_info->dbg_handler_number);
  2053. /* send CP register value */
  2054. xscale_send_u32(target, value);
  2055. buf_set_u32(reg->value, 0, 32, value);
  2056. }
  2057. return ERROR_OK;
  2058. }
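/* Sketch of the debug-handler register access protocol used above: reads
 * send {0x40, reg#} and collect the value from TX; writes send
 * {0x41, reg#, value}. A hypothetical raw read helper, for illustration:
 */
#if 0
static int example_read_dbg_reg(struct target *target, uint32_t reg_no,
		uint32_t *value)
{
	struct xscale_common *xscale = target_to_xscale(target);
	xscale_send_u32(target, 0x40);		/* CP read request */
	xscale_send_u32(target, reg_no);	/* debug-handler register number */
	xscale_read_tx(target, 1);		/* 1 = consume the TX content */
	*value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TX].value, 0, 32);
	return ERROR_OK;
}
#endif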
  2059. static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
  2060. {
  2061. struct xscale_common *xscale = target_to_xscale(target);
  2062. struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
  2063. struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
  2064. /* send CP write request (command 0x41) */
  2065. xscale_send_u32(target, 0x41);
  2066. /* send CP register number */
  2067. xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
  2068. /* send CP register value */
  2069. xscale_send_u32(target, value);
  2070. buf_set_u32(dcsr->value, 0, 32, value);
  2071. return ERROR_OK;
  2072. }
  2073. static int xscale_read_trace(struct target *target)
  2074. {
  2075. struct xscale_common *xscale = target_to_xscale(target);
  2076. struct arm *armv4_5 = &xscale->armv4_5_common;
  2077. struct xscale_trace_data **trace_data_p;
  2078. /* 258 words from debug handler
  2079. * 256 trace buffer entries
  2080. * 2 checkpoint addresses
  2081. */
  2082. uint32_t trace_buffer[258];
  2083. int is_address[256];
  2084. int i, j;
  2085. if (target->state != TARGET_HALTED)
  2086. {
  2087. LOG_WARNING("target must be stopped to read trace data");
  2088. return ERROR_TARGET_NOT_HALTED;
  2089. }
  2090. /* send read trace buffer command (command 0x61) */
  2091. xscale_send_u32(target, 0x61);
  2092. /* receive trace buffer content */
  2093. xscale_receive(target, trace_buffer, 258);
  2094. /* parse buffer backwards to identify address entries */
  2095. for (i = 255; i >= 0; i--)
  2096. {
  2097. is_address[i] = 0;
  2098. if (((trace_buffer[i] & 0xf0) == 0x90) ||
  2099. ((trace_buffer[i] & 0xf0) == 0xd0))
  2100. {
  2101. if (i >= 3)
  2102. is_address[--i] = 1;
  2103. if (i >= 2)
  2104. is_address[--i] = 1;
  2105. if (i >= 1)
  2106. is_address[--i] = 1;
  2107. if (i >= 0)
  2108. is_address[--i] = 1;
  2109. }
  2110. }
  2111. /* search first non-zero entry */
  2112. for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
  2113. ;
  2114. if (j == 256)
  2115. {
  2116. LOG_DEBUG("no trace data collected");
  2117. return ERROR_XSCALE_NO_TRACE_DATA;
  2118. }
  2119. for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
  2120. ;
  2121. *trace_data_p = malloc(sizeof(struct xscale_trace_data));
  2122. (*trace_data_p)->next = NULL;
  2123. (*trace_data_p)->chkpt0 = trace_buffer[256];
  2124. (*trace_data_p)->chkpt1 = trace_buffer[257];
  2125. (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  2126. (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
  2127. (*trace_data_p)->depth = 256 - j;
  2128. for (i = j; i < 256; i++)
  2129. {
  2130. (*trace_data_p)->entries[i - j].data = trace_buffer[i];
  2131. if (is_address[i])
  2132. (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
  2133. else
  2134. (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
  2135. }
  2136. return ERROR_OK;
  2137. }
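/* Sketch of the trace message byte layout the parser above depends on: the
 * high nibble is the message type (0x9n and 0xDn are indirect branches, each
 * preceded by four address bytes), the low nibble an incremental instruction
 * count. Illustrative decoder, not used by the driver:
 */
#if 0
static const char *example_trace_msg_type(uint8_t entry)
{
	unsigned type = (entry & 0xf0) >> 4;
	if (type <= 7)
		return "exception";
	switch (type) {
	case 8: return "direct branch";
	case 9: return "indirect branch";
	case 12: return "checkpointed direct branch";
	case 13: return "checkpointed indirect branch";
	case 15: return "roll-over";
	default: return "reserved";
	}
}
#endif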
  2138. static int xscale_read_instruction(struct target *target,
  2139. struct arm_instruction *instruction)
  2140. {
  2141. struct xscale_common *xscale = target_to_xscale(target);
  2142. int i;
  2143. int section = -1;
  2144. size_t size_read;
  2145. uint32_t opcode;
  2146. int retval;
  2147. if (!xscale->trace.image)
  2148. return ERROR_TRACE_IMAGE_UNAVAILABLE;
  2149. /* search for the section the current instruction belongs to */
  2150. for (i = 0; i < xscale->trace.image->num_sections; i++)
  2151. {
  2152. if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
  2153. (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
  2154. {
  2155. section = i;
  2156. break;
  2157. }
  2158. }
  2159. if (section == -1)
  2160. {
  2161. /* current instruction couldn't be found in the image */
  2162. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2163. }
  2164. if (xscale->trace.core_state == ARM_STATE_ARM)
  2165. {
  2166. uint8_t buf[4];
  2167. if ((retval = image_read_section(xscale->trace.image, section,
  2168. xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
  2169. 4, buf, &size_read)) != ERROR_OK)
  2170. {
  2171. LOG_ERROR("error while reading instruction: %i", retval);
  2172. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2173. }
  2174. opcode = target_buffer_get_u32(target, buf);
  2175. arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
  2176. }
  2177. else if (xscale->trace.core_state == ARM_STATE_THUMB)
  2178. {
  2179. uint8_t buf[2];
  2180. if ((retval = image_read_section(xscale->trace.image, section,
  2181. xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
  2182. 2, buf, &size_read)) != ERROR_OK)
  2183. {
  2184. LOG_ERROR("error while reading instruction: %i", retval);
  2185. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2186. }
  2187. opcode = target_buffer_get_u16(target, buf);
  2188. thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
  2189. }
  2190. else
  2191. {
  2192. LOG_ERROR("BUG: unknown core state encountered");
  2193. exit(-1);
  2194. }
  2195. return ERROR_OK;
  2196. }
  2197. static int xscale_branch_address(struct xscale_trace_data *trace_data,
  2198. int i, uint32_t *target)
  2199. {
2200. /* if there are fewer than four entries prior to the indirect branch message
2201. * we can't extract the address */
  2202. if (i < 4)
  2203. {
  2204. return -1;
  2205. }
  2206. *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
  2207. (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
  2208. return 0;
  2209. }
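/* Worked example of the reconstruction above: the four entries preceding an
 * indirect-branch message hold the target address MSB first, so data bytes
 * 0x12, 0x34, 0x56, 0x78 (oldest to newest) yield 0x12345678.
 */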
  2210. static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
  2211. {
  2212. struct xscale_common *xscale = target_to_xscale(target);
  2213. int next_pc_ok = 0;
  2214. uint32_t next_pc = 0x0;
  2215. struct xscale_trace_data *trace_data = xscale->trace.data;
  2216. int retval;
  2217. while (trace_data)
  2218. {
  2219. int i, chkpt;
  2220. int rollover;
  2221. int branch;
  2222. int exception;
  2223. xscale->trace.core_state = ARM_STATE_ARM;
  2224. chkpt = 0;
  2225. rollover = 0;
  2226. for (i = 0; i < trace_data->depth; i++)
  2227. {
  2228. next_pc_ok = 0;
  2229. branch = 0;
  2230. exception = 0;
  2231. if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
  2232. continue;
  2233. switch ((trace_data->entries[i].data & 0xf0) >> 4)
  2234. {
  2235. case 0: /* Exceptions */
  2236. case 1:
  2237. case 2:
  2238. case 3:
  2239. case 4:
  2240. case 5:
  2241. case 6:
  2242. case 7:
  2243. exception = (trace_data->entries[i].data & 0x70) >> 4;
  2244. next_pc_ok = 1;
  2245. next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
  2246. command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
  2247. break;
  2248. case 8: /* Direct Branch */
  2249. branch = 1;
  2250. break;
  2251. case 9: /* Indirect Branch */
  2252. branch = 1;
  2253. if (xscale_branch_address(trace_data, i, &next_pc) == 0)
  2254. {
  2255. next_pc_ok = 1;
  2256. }
  2257. break;
  2258. case 13: /* Checkpointed Indirect Branch */
  2259. if (xscale_branch_address(trace_data, i, &next_pc) == 0)
  2260. {
  2261. next_pc_ok = 1;
  2262. if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
  2263. || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
  2264. LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
  2265. }
  2266. /* explicit fall-through */
  2267. case 12: /* Checkpointed Direct Branch */
  2268. branch = 1;
  2269. if (chkpt == 0)
  2270. {
  2271. next_pc_ok = 1;
  2272. next_pc = trace_data->chkpt0;
  2273. chkpt++;
  2274. }
  2275. else if (chkpt == 1)
  2276. {
  2277. next_pc_ok = 1;
  2278. next_pc = trace_data->chkpt0;
  2279. chkpt++;
  2280. }
  2281. else
  2282. {
  2283. LOG_WARNING("more than two checkpointed branches encountered");
  2284. }
  2285. break;
  2286. case 15: /* Roll-over */
  2287. rollover++;
  2288. continue;
  2289. default: /* Reserved */
  2290. command_print(cmd_ctx, "--- reserved trace message ---");
  2291. LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
  2292. return ERROR_OK;
  2293. }
  2294. if (xscale->trace.pc_ok)
  2295. {
  2296. int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
  2297. struct arm_instruction instruction;
  2298. if ((exception == 6) || (exception == 7))
  2299. {
  2300. /* IRQ or FIQ exception, no instruction executed */
  2301. executed -= 1;
  2302. }
  2303. while (executed-- >= 0)
  2304. {
  2305. if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
  2306. {
  2307. /* can't continue tracing with no image available */
  2308. if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
  2309. {
  2310. return retval;
  2311. }
  2312. else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
  2313. {
  2314. /* TODO: handle incomplete images */
  2315. }
  2316. }
  2317. /* a precise abort on a load to the PC is included in the incremental
  2318. * word count, other instructions causing data aborts are not included
  2319. */
  2320. if ((executed == 0) && (exception == 4)
  2321. && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
  2322. {
  2323. if ((instruction.type == ARM_LDM)
  2324. && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
  2325. {
  2326. executed--;
  2327. }
  2328. else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
  2329. && (instruction.info.load_store.Rd != 15))
  2330. {
  2331. executed--;
  2332. }
  2333. }
  2334. /* only the last instruction executed
  2335. * (the one that caused the control flow change)
  2336. * could be a taken branch
  2337. */
  2338. if (((executed == -1) && (branch == 1)) &&
  2339. (((instruction.type == ARM_B) ||
  2340. (instruction.type == ARM_BL) ||
  2341. (instruction.type == ARM_BLX)) &&
  2342. (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
  2343. {
  2344. xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
  2345. }
  2346. else
  2347. {
  2348. xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2;
  2349. }
  2350. command_print(cmd_ctx, "%s", instruction.text);
  2351. }
  2352. rollover = 0;
  2353. }
  2354. if (next_pc_ok)
  2355. {
  2356. xscale->trace.current_pc = next_pc;
  2357. xscale->trace.pc_ok = 1;
  2358. }
  2359. }
  2360. for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2)
  2361. {
  2362. struct arm_instruction instruction;
  2363. if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
  2364. {
  2365. /* can't continue tracing with no image available */
  2366. if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
  2367. {
  2368. return retval;
  2369. }
  2370. else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
  2371. {
  2372. /* TODO: handle incomplete images */
  2373. }
  2374. }
  2375. command_print(cmd_ctx, "%s", instruction.text);
  2376. }
  2377. trace_data = trace_data->next;
  2378. }
  2379. return ERROR_OK;
  2380. }
  2381. static const struct reg_arch_type xscale_reg_type = {
  2382. .get = xscale_get_reg,
  2383. .set = xscale_set_reg,
  2384. };
  2385. static void xscale_build_reg_cache(struct target *target)
  2386. {
  2387. struct xscale_common *xscale = target_to_xscale(target);
  2388. struct arm *armv4_5 = &xscale->armv4_5_common;
  2389. struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
  2390. struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
  2391. int i;
  2392. int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
  2393. (*cache_p) = arm_build_reg_cache(target, armv4_5);
  2394. (*cache_p)->next = malloc(sizeof(struct reg_cache));
  2395. cache_p = &(*cache_p)->next;
  2396. /* fill in values for the xscale reg cache */
  2397. (*cache_p)->name = "XScale registers";
  2398. (*cache_p)->next = NULL;
  2399. (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
  2400. (*cache_p)->num_regs = num_regs;
  2401. for (i = 0; i < num_regs; i++)
  2402. {
  2403. (*cache_p)->reg_list[i].name = xscale_reg_list[i];
  2404. (*cache_p)->reg_list[i].value = calloc(4, 1);
  2405. (*cache_p)->reg_list[i].dirty = 0;
  2406. (*cache_p)->reg_list[i].valid = 0;
  2407. (*cache_p)->reg_list[i].size = 32;
  2408. (*cache_p)->reg_list[i].arch_info = &arch_info[i];
  2409. (*cache_p)->reg_list[i].type = &xscale_reg_type;
  2410. arch_info[i] = xscale_reg_arch_info[i];
  2411. arch_info[i].target = target;
  2412. }
  2413. xscale->reg_cache = (*cache_p);
  2414. }
  2415. static int xscale_init_target(struct command_context *cmd_ctx,
  2416. struct target *target)
  2417. {
  2418. xscale_build_reg_cache(target);
  2419. return ERROR_OK;
  2420. }
  2421. static int xscale_init_arch_info(struct target *target,
  2422. struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
  2423. {
  2424. struct arm *armv4_5;
  2425. uint32_t high_reset_branch, low_reset_branch;
  2426. int i;
  2427. armv4_5 = &xscale->armv4_5_common;
2428. /* store architecture-specific data */
  2429. xscale->common_magic = XSCALE_COMMON_MAGIC;
  2430. /* we don't really *need* a variant param ... */
  2431. if (variant) {
  2432. int ir_length = 0;
  2433. if (strcmp(variant, "pxa250") == 0
  2434. || strcmp(variant, "pxa255") == 0
  2435. || strcmp(variant, "pxa26x") == 0)
  2436. ir_length = 5;
  2437. else if (strcmp(variant, "pxa27x") == 0
  2438. || strcmp(variant, "ixp42x") == 0
  2439. || strcmp(variant, "ixp45x") == 0
  2440. || strcmp(variant, "ixp46x") == 0)
  2441. ir_length = 7;
  2442. else if (strcmp(variant, "pxa3xx") == 0)
  2443. ir_length = 11;
  2444. else
  2445. LOG_WARNING("%s: unrecognized variant %s",
  2446. tap->dotted_name, variant);
  2447. if (ir_length && ir_length != tap->ir_length) {
  2448. LOG_WARNING("%s: IR length for %s is %d; fixing",
  2449. tap->dotted_name, variant, ir_length);
  2450. tap->ir_length = ir_length;
  2451. }
  2452. }
  2453. /* PXA3xx shifts the JTAG instructions */
  2454. if (tap->ir_length == 11)
  2455. xscale->xscale_variant = XSCALE_PXA3XX;
  2456. else
  2457. xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
  2458. /* the debug handler isn't installed (and thus not running) at this time */
  2459. xscale->handler_address = 0xfe000800;
  2460. /* clear the vectors we keep locally for reference */
  2461. memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
  2462. memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
  2463. /* no user-specified vectors have been configured yet */
  2464. xscale->static_low_vectors_set = 0x0;
  2465. xscale->static_high_vectors_set = 0x0;
  2466. /* calculate branches to debug handler */
  2467. low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
  2468. high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
  2469. xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
  2470. xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
  2471. for (i = 1; i <= 7; i++)
  2472. {
  2473. xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  2474. xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  2475. }
  2476. /* 64kB aligned region used for DCache cleaning */
  2477. xscale->cache_clean_address = 0xfffe0000;
  2478. xscale->hold_rst = 0;
  2479. xscale->external_debug_break = 0;
  2480. xscale->ibcr_available = 2;
  2481. xscale->ibcr0_used = 0;
  2482. xscale->ibcr1_used = 0;
  2483. xscale->dbr_available = 2;
  2484. xscale->dbr0_used = 0;
  2485. xscale->dbr1_used = 0;
  2486. LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
  2487. target_name(target));
  2488. xscale->arm_bkpt = ARMV5_BKPT(0x0);
  2489. xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
  2490. xscale->vector_catch = 0x1;
  2491. xscale->trace.capture_status = TRACE_IDLE;
  2492. xscale->trace.data = NULL;
  2493. xscale->trace.image = NULL;
  2494. xscale->trace.buffer_enabled = 0;
  2495. xscale->trace.buffer_fill = 0;
  2496. /* prepare ARMv4/5 specific information */
  2497. armv4_5->arch_info = xscale;
  2498. armv4_5->read_core_reg = xscale_read_core_reg;
  2499. armv4_5->write_core_reg = xscale_write_core_reg;
  2500. armv4_5->full_context = xscale_full_context;
  2501. arm_init_arch_info(target, armv4_5);
  2502. xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
  2503. xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
  2504. xscale->armv4_5_mmu.read_memory = xscale_read_memory;
  2505. xscale->armv4_5_mmu.write_memory = xscale_write_memory;
  2506. xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
  2507. xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
  2508. xscale->armv4_5_mmu.has_tiny_pages = 1;
  2509. xscale->armv4_5_mmu.mmu_enabled = 0;
  2510. return ERROR_OK;
  2511. }
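/* Worked example of the vector setup above, using the default
 * handler_address of 0xfe000800: the handler entry point is +0x20, so the
 * low reset vector at 0x0 must branch by (0xfe000820 - 0x0 - 0x8) >> 2
 * = 0x3f800206 words; ARMV4_5_B() keeps the low 24 bits, 0x800206.
 */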
  2512. static int xscale_target_create(struct target *target, Jim_Interp *interp)
  2513. {
  2514. struct xscale_common *xscale;
  2515. if (sizeof xscale_debug_handler - 1 > 0x800) {
  2516. LOG_ERROR("debug_handler.bin: larger than 2kb");
  2517. return ERROR_FAIL;
  2518. }
  2519. xscale = calloc(1, sizeof(*xscale));
  2520. if (!xscale)
  2521. return ERROR_FAIL;
  2522. return xscale_init_arch_info(target, xscale, target->tap,
  2523. target->variant);
  2524. }
  2525. COMMAND_HANDLER(xscale_handle_debug_handler_command)
  2526. {
  2527. struct target *target = NULL;
  2528. struct xscale_common *xscale;
  2529. int retval;
  2530. uint32_t handler_address;
  2531. if (CMD_ARGC < 2)
  2532. {
  2533. LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
  2534. return ERROR_OK;
  2535. }
  2536. if ((target = get_target(CMD_ARGV[0])) == NULL)
  2537. {
  2538. LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
  2539. return ERROR_FAIL;
  2540. }
  2541. xscale = target_to_xscale(target);
  2542. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2543. if (retval != ERROR_OK)
  2544. return retval;
  2545. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
  2546. if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
  2547. ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
  2548. {
  2549. xscale->handler_address = handler_address;
  2550. }
  2551. else
  2552. {
  2553. LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
  2554. return ERROR_FAIL;
  2555. }
  2556. return ERROR_OK;
  2557. }
  2558. COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
  2559. {
  2560. struct target *target = NULL;
  2561. struct xscale_common *xscale;
  2562. int retval;
  2563. uint32_t cache_clean_address;
  2564. if (CMD_ARGC < 2)
  2565. {
  2566. return ERROR_COMMAND_SYNTAX_ERROR;
  2567. }
  2568. target = get_target(CMD_ARGV[0]);
  2569. if (target == NULL)
  2570. {
  2571. LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
  2572. return ERROR_FAIL;
  2573. }
  2574. xscale = target_to_xscale(target);
  2575. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2576. if (retval != ERROR_OK)
  2577. return retval;
  2578. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
  2579. if (cache_clean_address & 0xffff)
  2580. {
  2581. LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
  2582. }
  2583. else
  2584. {
  2585. xscale->cache_clean_address = cache_clean_address;
  2586. }
  2587. return ERROR_OK;
  2588. }
  2589. COMMAND_HANDLER(xscale_handle_cache_info_command)
  2590. {
  2591. struct target *target = get_current_target(CMD_CTX);
  2592. struct xscale_common *xscale = target_to_xscale(target);
  2593. int retval;
  2594. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2595. if (retval != ERROR_OK)
  2596. return retval;
  2597. return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
  2598. }
  2599. static int xscale_virt2phys(struct target *target,
  2600. uint32_t virtual, uint32_t *physical)
  2601. {
  2602. struct xscale_common *xscale = target_to_xscale(target);
  2603. int type;
  2604. uint32_t cb;
  2605. int domain;
  2606. uint32_t ap;
  2607. if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
  2608. LOG_ERROR(xscale_not);
  2609. return ERROR_TARGET_INVALID;
  2610. }
  2611. uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
  2612. if (type == -1)
  2613. {
  2614. return ret;
  2615. }
  2616. *physical = ret;
  2617. return ERROR_OK;
  2618. }
  2619. static int xscale_mmu(struct target *target, int *enabled)
  2620. {
  2621. struct xscale_common *xscale = target_to_xscale(target);
  2622. if (target->state != TARGET_HALTED)
  2623. {
  2624. LOG_ERROR("Target not halted");
  2625. return ERROR_TARGET_INVALID;
  2626. }
  2627. *enabled = xscale->armv4_5_mmu.mmu_enabled;
  2628. return ERROR_OK;
  2629. }
  2630. COMMAND_HANDLER(xscale_handle_mmu_command)
  2631. {
  2632. struct target *target = get_current_target(CMD_CTX);
  2633. struct xscale_common *xscale = target_to_xscale(target);
  2634. int retval;
  2635. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2636. if (retval != ERROR_OK)
  2637. return retval;
  2638. if (target->state != TARGET_HALTED)
  2639. {
  2640. command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
  2641. return ERROR_OK;
  2642. }
  2643. if (CMD_ARGC >= 1)
  2644. {
  2645. bool enable;
  2646. COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
  2647. if (enable)
  2648. xscale_enable_mmu_caches(target, 1, 0, 0);
  2649. else
  2650. xscale_disable_mmu_caches(target, 1, 0, 0);
  2651. xscale->armv4_5_mmu.mmu_enabled = enable;
  2652. }
  2653. command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
  2654. return ERROR_OK;
  2655. }
  2656. COMMAND_HANDLER(xscale_handle_idcache_command)
  2657. {
  2658. struct target *target = get_current_target(CMD_CTX);
  2659. struct xscale_common *xscale = target_to_xscale(target);
  2660. int retval = xscale_verify_pointer(CMD_CTX, xscale);
  2661. if (retval != ERROR_OK)
  2662. return retval;
  2663. if (target->state != TARGET_HALTED)
  2664. {
  2665. command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
  2666. return ERROR_OK;
  2667. }
  2668. bool icache;
  2669. COMMAND_PARSE_BOOL(CMD_NAME, icache, "icache", "dcache");
  2670. if (CMD_ARGC >= 1)
  2671. {
  2672. bool enable;
  2673. COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
2674. if (enable)
2675. xscale_enable_mmu_caches(target, 0, icache ? 0 : 1, icache ? 1 : 0);
2676. else
2677. xscale_disable_mmu_caches(target, 0, icache ? 0 : 1, icache ? 1 : 0);
  2678. if (icache)
  2679. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
  2680. else
  2681. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
  2682. }
  2683. bool enabled = icache ?
  2684. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
  2685. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
  2686. const char *msg = enabled ? "enabled" : "disabled";
  2687. command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
  2688. return ERROR_OK;
  2689. }
  2690. COMMAND_HANDLER(xscale_handle_vector_catch_command)
  2691. {
  2692. struct target *target = get_current_target(CMD_CTX);
  2693. struct xscale_common *xscale = target_to_xscale(target);
  2694. int retval;
  2695. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2696. if (retval != ERROR_OK)
  2697. return retval;
  2698. if (CMD_ARGC < 1)
  2699. {
  2700. command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
  2701. }
  2702. else
  2703. {
  2704. COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
  2705. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
  2706. xscale_write_dcsr(target, -1, -1);
  2707. }
  2708. command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
  2709. return ERROR_OK;
  2710. }
  2711. COMMAND_HANDLER(xscale_handle_vector_table_command)
  2712. {
  2713. struct target *target = get_current_target(CMD_CTX);
  2714. struct xscale_common *xscale = target_to_xscale(target);
  2715. int err = 0;
  2716. int retval;
  2717. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2718. if (retval != ERROR_OK)
  2719. return retval;
  2720. if (CMD_ARGC == 0) /* print current settings */
  2721. {
  2722. int idx;
  2723. command_print(CMD_CTX, "active user-set static vectors:");
  2724. for (idx = 1; idx < 8; idx++)
  2725. if (xscale->static_low_vectors_set & (1 << idx))
  2726. command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
  2727. for (idx = 1; idx < 8; idx++)
  2728. if (xscale->static_high_vectors_set & (1 << idx))
  2729. command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
  2730. return ERROR_OK;
  2731. }
  2732. if (CMD_ARGC != 3)
  2733. err = 1;
  2734. else
  2735. {
  2736. int idx;
  2737. COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
  2738. uint32_t vec;
  2739. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
  2740. if (idx < 1 || idx >= 8)
  2741. err = 1;
  2742. if (!err && strcmp(CMD_ARGV[0], "low") == 0)
  2743. {
  2744. xscale->static_low_vectors_set |= (1<<idx);
  2745. xscale->static_low_vectors[idx] = vec;
  2746. }
  2747. else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
  2748. {
  2749. xscale->static_high_vectors_set |= (1<<idx);
  2750. xscale->static_high_vectors[idx] = vec;
  2751. }
  2752. else
  2753. err = 1;
  2754. }
  2755. if (err)
  2756. command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
  2757. return ERROR_OK;
  2758. }
  2759. COMMAND_HANDLER(xscale_handle_trace_buffer_command)
  2760. {
  2761. struct target *target = get_current_target(CMD_CTX);
  2762. struct xscale_common *xscale = target_to_xscale(target);
  2763. struct arm *armv4_5 = &xscale->armv4_5_common;
  2764. uint32_t dcsr_value;
  2765. int retval;
  2766. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2767. if (retval != ERROR_OK)
  2768. return retval;
  2769. if (target->state != TARGET_HALTED)
  2770. {
  2771. command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
  2772. return ERROR_OK;
  2773. }
  2774. if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
  2775. {
  2776. struct xscale_trace_data *td, *next_td;
  2777. xscale->trace.buffer_enabled = 1;
  2778. /* free old trace data */
  2779. td = xscale->trace.data;
  2780. while (td)
  2781. {
  2782. next_td = td->next;
  2783. if (td->entries)
  2784. free(td->entries);
  2785. free(td);
  2786. td = next_td;
  2787. }
  2788. xscale->trace.data = NULL;
  2789. }
  2790. else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
  2791. {
  2792. xscale->trace.buffer_enabled = 0;
  2793. }
  2794. if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
  2795. {
  2796. uint32_t fill = 1;
  2797. if (CMD_ARGC >= 3)
  2798. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
  2799. xscale->trace.buffer_fill = fill;
  2800. }
  2801. else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
  2802. {
  2803. xscale->trace.buffer_fill = -1;
  2804. }
  2805. if (xscale->trace.buffer_enabled)
  2806. {
  2807. /* if we enable the trace buffer in fill-once
  2808. * mode we know the address of the first instruction */
  2809. xscale->trace.pc_ok = 1;
  2810. xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  2811. }
  2812. else
  2813. {
  2814. /* otherwise the address is unknown, and we have no known good PC */
  2815. xscale->trace.pc_ok = 0;
  2816. }
  2817. command_print(CMD_CTX, "trace buffer %s (%s)",
  2818. (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
  2819. (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
  2820. dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
  2821. if (xscale->trace.buffer_fill >= 0)
  2822. xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
  2823. else
  2824. xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
  2825. return ERROR_OK;
  2826. }
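/* Typical console usage of the command above (example):
 *   xscale trace_buffer enable fill 1    -- capture one buffer, then break
 *   xscale trace_buffer enable wrap      -- trace continuously, overwriting
 *   xscale trace_buffer disable
 */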
  2827. COMMAND_HANDLER(xscale_handle_trace_image_command)
  2828. {
  2829. struct target *target = get_current_target(CMD_CTX);
  2830. struct xscale_common *xscale = target_to_xscale(target);
  2831. int retval;
  2832. if (CMD_ARGC < 1)
  2833. {
  2834. command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
  2835. return ERROR_OK;
  2836. }
  2837. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2838. if (retval != ERROR_OK)
  2839. return retval;
  2840. if (xscale->trace.image)
  2841. {
  2842. image_close(xscale->trace.image);
  2843. free(xscale->trace.image);
  2844. command_print(CMD_CTX, "previously loaded image found and closed");
  2845. }
  2846. xscale->trace.image = malloc(sizeof(struct image));
  2847. xscale->trace.image->base_address_set = 0;
  2848. xscale->trace.image->start_address_set = 0;
  2849. /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
  2850. if (CMD_ARGC >= 2)
  2851. {
  2852. xscale->trace.image->base_address_set = 1;
  2853. COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], xscale->trace.image->base_address);
  2854. }
  2855. else
  2856. {
  2857. xscale->trace.image->base_address_set = 0;
  2858. }
  2859. if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
  2860. {
  2861. free(xscale->trace.image);
  2862. xscale->trace.image = NULL;
  2863. return ERROR_OK;
  2864. }
  2865. return ERROR_OK;
  2866. }
  2867. COMMAND_HANDLER(xscale_handle_dump_trace_command)
  2868. {
  2869. struct target *target = get_current_target(CMD_CTX);
  2870. struct xscale_common *xscale = target_to_xscale(target);
  2871. struct xscale_trace_data *trace_data;
  2872. struct fileio file;
  2873. int retval;
  2874. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2875. if (retval != ERROR_OK)
  2876. return retval;
  2877. if (target->state != TARGET_HALTED)
  2878. {
  2879. command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
  2880. return ERROR_OK;
  2881. }
  2882. if (CMD_ARGC < 1)
  2883. {
  2884. command_print(CMD_CTX, "usage: xscale dump_trace <file>");
  2885. return ERROR_OK;
  2886. }
  2887. trace_data = xscale->trace.data;
  2888. if (!trace_data)
  2889. {
  2890. command_print(CMD_CTX, "no trace data collected");
  2891. return ERROR_OK;
  2892. }
  2893. if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
  2894. {
  2895. return ERROR_OK;
  2896. }
  2897. while (trace_data)
  2898. {
  2899. int i;
  2900. fileio_write_u32(&file, trace_data->chkpt0);
  2901. fileio_write_u32(&file, trace_data->chkpt1);
  2902. fileio_write_u32(&file, trace_data->last_instruction);
  2903. fileio_write_u32(&file, trace_data->depth);
  2904. for (i = 0; i < trace_data->depth; i++)
  2905. fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
  2906. trace_data = trace_data->next;
  2907. }
  2908. fileio_close(&file);
  2909. return ERROR_OK;
  2910. }
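/* File layout produced above, one record per trace-data block, each field a
 * 32-bit word: chkpt0, chkpt1, last_instruction, depth, then `depth' words
 * of (type << 16) | data.
 */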
  2911. COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
  2912. {
  2913. struct target *target = get_current_target(CMD_CTX);
  2914. struct xscale_common *xscale = target_to_xscale(target);
  2915. int retval;
  2916. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2917. if (retval != ERROR_OK)
  2918. return retval;
  2919. xscale_analyze_trace(target, CMD_CTX);
  2920. return ERROR_OK;
  2921. }
  2922. COMMAND_HANDLER(xscale_handle_cp15)
  2923. {
  2924. struct target *target = get_current_target(CMD_CTX);
  2925. struct xscale_common *xscale = target_to_xscale(target);
  2926. int retval;
  2927. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2928. if (retval != ERROR_OK)
  2929. return retval;
  2930. if (target->state != TARGET_HALTED)
  2931. {
  2932. command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
  2933. return ERROR_OK;
  2934. }
  2935. uint32_t reg_no = 0;
  2936. struct reg *reg = NULL;
  2937. if (CMD_ARGC > 0)
  2938. {
  2939. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
  2940. /*translate from xscale cp15 register no to openocd register*/
  2941. switch (reg_no)
  2942. {
  2943. case 0:
  2944. reg_no = XSCALE_MAINID;
  2945. break;
  2946. case 1:
  2947. reg_no = XSCALE_CTRL;
  2948. break;
  2949. case 2:
  2950. reg_no = XSCALE_TTB;
  2951. break;
  2952. case 3:
  2953. reg_no = XSCALE_DAC;
  2954. break;
  2955. case 5:
  2956. reg_no = XSCALE_FSR;
  2957. break;
  2958. case 6:
  2959. reg_no = XSCALE_FAR;
  2960. break;
  2961. case 13:
  2962. reg_no = XSCALE_PID;
  2963. break;
  2964. case 15:
  2965. reg_no = XSCALE_CPACCESS;
  2966. break;
  2967. default:
  2968. command_print(CMD_CTX, "invalid register number");
  2969. return ERROR_INVALID_ARGUMENTS;
  2970. }
  2971. reg = &xscale->reg_cache->reg_list[reg_no];
  2972. }
  2973. if (CMD_ARGC == 1)
  2974. {
  2975. uint32_t value;
2976. /* read the selected cp15 register */
  2977. xscale_get_reg(reg);
  2978. value = buf_get_u32(reg->value, 0, 32);
  2979. command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
  2980. }
  2981. else if (CMD_ARGC == 2)
  2982. {
  2983. uint32_t value;
  2984. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
  2985. /* send CP write request (command 0x41) */
  2986. xscale_send_u32(target, 0x41);
  2987. /* send CP register number */
  2988. xscale_send_u32(target, reg_no);
  2989. /* send CP register value */
  2990. xscale_send_u32(target, value);
  2991. /* execute cpwait to ensure outstanding operations complete */
  2992. xscale_send_u32(target, 0x53);
  2993. }
  2994. else
  2995. {
  2996. command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
  2997. }
  2998. return ERROR_OK;
  2999. }
  3000. static const struct command_registration xscale_exec_command_handlers[] = {
  3001. {
  3002. .name = "cache_info",
  3003. .handler = xscale_handle_cache_info_command,
  3004. .mode = COMMAND_EXEC,
  3005. .help = "display information about CPU caches",
  3006. },
  3007. {
  3008. .name = "mmu",
  3009. .handler = xscale_handle_mmu_command,
  3010. .mode = COMMAND_EXEC,
  3011. .help = "enable or disable the MMU",
  3012. .usage = "['enable'|'disable']",
  3013. },
  3014. {
  3015. .name = "icache",
  3016. .handler = xscale_handle_idcache_command,
  3017. .mode = COMMAND_EXEC,
  3018. .help = "display ICache state, optionally enabling or "
  3019. "disabling it",
  3020. .usage = "['enable'|'disable']",
  3021. },
  3022. {
  3023. .name = "dcache",
  3024. .handler = xscale_handle_idcache_command,
  3025. .mode = COMMAND_EXEC,
  3026. .help = "display DCache state, optionally enabling or "
  3027. "disabling it",
  3028. .usage = "['enable'|'disable']",
  3029. },
  3030. {
  3031. .name = "vector_catch",
  3032. .handler = xscale_handle_vector_catch_command,
  3033. .mode = COMMAND_EXEC,
  3034. .help = "set or display 8-bit mask of vectors "
  3035. "that should trigger debug entry",
  3036. .usage = "[mask]",
  3037. },
  3038. {
  3039. .name = "vector_table",
  3040. .handler = xscale_handle_vector_table_command,
  3041. .mode = COMMAND_EXEC,
  3042. .help = "set vector table entry in mini-ICache, "
  3043. "or display current tables",
  3044. .usage = "[('high'|'low') index code]",
  3045. },
  3046. {
  3047. .name = "trace_buffer",
  3048. .handler = xscale_handle_trace_buffer_command,
  3049. .mode = COMMAND_EXEC,
  3050. .help = "display trace buffer status, enable or disable "
  3051. "tracing, and optionally reconfigure trace mode",
  3052. .usage = "['enable'|'disable' ['fill' number|'wrap']]",
  3053. },
  3054. {
  3055. .name = "dump_trace",
  3056. .handler = xscale_handle_dump_trace_command,
  3057. .mode = COMMAND_EXEC,
  3058. .help = "dump content of trace buffer to file",
  3059. .usage = "filename",
  3060. },
  3061. {
  3062. .name = "analyze_trace",
  3063. .handler = xscale_handle_analyze_trace_buffer_command,
  3064. .mode = COMMAND_EXEC,
  3065. .help = "analyze content of trace buffer",
  3066. .usage = "",
  3067. },
  3068. {
  3069. .name = "trace_image",
  3070. .handler = xscale_handle_trace_image_command,
  3071. .mode = COMMAND_EXEC,
  3072. .help = "load image from file to address (default 0)",
  3073. .usage = "filename [offset [filetype]]",
  3074. },
  3075. {
  3076. .name = "cp15",
  3077. .handler = xscale_handle_cp15,
  3078. .mode = COMMAND_EXEC,
  3079. .help = "Read or write coprocessor 15 register.",
  3080. .usage = "register [value]",
  3081. },
  3082. COMMAND_REGISTRATION_DONE
  3083. };
  3084. static const struct command_registration xscale_any_command_handlers[] = {
  3085. {
  3086. .name = "debug_handler",
  3087. .handler = xscale_handle_debug_handler_command,
  3088. .mode = COMMAND_ANY,
  3089. .help = "Change address used for debug handler.",
  3090. .usage = "target address",
  3091. },
  3092. {
  3093. .name = "cache_clean_address",
  3094. .handler = xscale_handle_cache_clean_address_command,
  3095. .mode = COMMAND_ANY,
  3096. .help = "Change address used for cleaning data cache.",
  3097. .usage = "address",
  3098. },
  3099. {
  3100. .chain = xscale_exec_command_handlers,
  3101. },
  3102. COMMAND_REGISTRATION_DONE
  3103. };
  3104. static const struct command_registration xscale_command_handlers[] = {
  3105. {
  3106. .chain = arm_command_handlers,
  3107. },
  3108. {
  3109. .name = "xscale",
  3110. .mode = COMMAND_ANY,
  3111. .help = "xscale command group",
  3112. .chain = xscale_any_command_handlers,
  3113. },
  3114. COMMAND_REGISTRATION_DONE
  3115. };
  3116. struct target_type xscale_target =
  3117. {
  3118. .name = "xscale",
  3119. .poll = xscale_poll,
  3120. .arch_state = xscale_arch_state,
  3121. .target_request_data = NULL,
  3122. .halt = xscale_halt,
  3123. .resume = xscale_resume,
  3124. .step = xscale_step,
  3125. .assert_reset = xscale_assert_reset,
  3126. .deassert_reset = xscale_deassert_reset,
  3127. .soft_reset_halt = NULL,
  3128. /* REVISIT on some cores, allow exporting iwmmxt registers ... */
  3129. .get_gdb_reg_list = arm_get_gdb_reg_list,
  3130. .read_memory = xscale_read_memory,
  3131. .read_phys_memory = xscale_read_phys_memory,
  3132. .write_memory = xscale_write_memory,
  3133. .write_phys_memory = xscale_write_phys_memory,
  3134. .bulk_write_memory = xscale_bulk_write_memory,
  3135. .checksum_memory = arm_checksum_memory,
  3136. .blank_check_memory = arm_blank_check_memory,
  3137. .run_algorithm = armv4_5_run_algorithm,
  3138. .add_breakpoint = xscale_add_breakpoint,
  3139. .remove_breakpoint = xscale_remove_breakpoint,
  3140. .add_watchpoint = xscale_add_watchpoint,
  3141. .remove_watchpoint = xscale_remove_watchpoint,
  3142. .commands = xscale_command_handlers,
  3143. .target_create = xscale_target_create,
  3144. .init_target = xscale_init_target,
  3145. .virt2phys = xscale_virt2phys,
  3146. .mmu = xscale_mmu
  3147. };