/***************************************************************************
 *   Copyright (C) 2006, 2007 by Dominic Rath                              *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2007,2008 Øyvind Harboe                                 *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   Copyright (C) 2009 Michael Schwingen                                  *
 *   michael@schwingen.org                                                 *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             *
 ***************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "xscale.h"
#include "target_type.h"
#include "arm_jtag.h"
#include "arm_simulator.h"
#include "arm_disassembler.h"
#include <helper/time_support.h>
#include "register.h"
#include "image.h"
#include "arm_opcodes.h"
#include "armv4_5.h"
/*
 * Important XScale documents available as of October 2009 include:
 *
 * Intel XScale® Core Developer's Manual, January 2004
 *     Order Number: 273473-002
 *   This has a chapter detailing debug facilities, and punts some
 *   details to chip-specific microarchitecture documents.
 *
 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
 *     Document Number: 273539-005
 *   Less detailed than the developer's manual, but summarizes those
 *   missing details (for most XScales) and gives LOTS of notes about
 *   debugger/handler interaction issues.  Presents a simpler reset
 *   and load-handler sequence than the arch doc.  (Note, OpenOCD
 *   doesn't currently support "Hot-Debug" as defined there.)
 *
 * Chip-specific microarchitecture documents may also be useful.
 */
/* forward declarations */
static int xscale_resume(struct target *, int current,
        uint32_t address, int handle_breakpoints, int debug_execution);
static int xscale_debug_entry(struct target *);
static int xscale_restore_banked(struct target *);
static int xscale_get_reg(struct reg *reg);
static int xscale_set_reg(struct reg *reg, uint8_t *buf);
static int xscale_set_breakpoint(struct target *, struct breakpoint *);
static int xscale_set_watchpoint(struct target *, struct watchpoint *);
static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
static int xscale_read_trace(struct target *);

/* This XScale "debug handler" is loaded into the processor's
 * mini-ICache, which is 2K of code writable only via JTAG.
 *
 * FIXME  the OpenOCD "bin2char" utility currently doesn't handle
 * binary files cleanly.  It's string oriented, and terminates them
 * with a NUL character.  Better would be to generate the constants
 * and let other code decide names, scoping, and other housekeeping.
 */
static /* unsigned const char xscale_debug_handler[] = ... */
#include "xscale_debug.h"
static char *const xscale_reg_list[] =
{
    "XSCALE_MAINID",        /* 0 */
    "XSCALE_CACHETYPE",
    "XSCALE_CTRL",
    "XSCALE_AUXCTRL",
    "XSCALE_TTB",
    "XSCALE_DAC",
    "XSCALE_FSR",
    "XSCALE_FAR",
    "XSCALE_PID",
    "XSCALE_CPACCESS",
    "XSCALE_IBCR0",         /* 10 */
    "XSCALE_IBCR1",
    "XSCALE_DBR0",
    "XSCALE_DBR1",
    "XSCALE_DBCON",
    "XSCALE_TBREG",
    "XSCALE_CHKPT0",
    "XSCALE_CHKPT1",
    "XSCALE_DCSR",
    "XSCALE_TX",
    "XSCALE_RX",            /* 20 */
    "XSCALE_TXRXCTRL",
};
static const struct xscale_reg xscale_reg_arch_info[] =
{
    {XSCALE_MAINID, NULL},
    {XSCALE_CACHETYPE, NULL},
    {XSCALE_CTRL, NULL},
    {XSCALE_AUXCTRL, NULL},
    {XSCALE_TTB, NULL},
    {XSCALE_DAC, NULL},
    {XSCALE_FSR, NULL},
    {XSCALE_FAR, NULL},
    {XSCALE_PID, NULL},
    {XSCALE_CPACCESS, NULL},
    {XSCALE_IBCR0, NULL},
    {XSCALE_IBCR1, NULL},
    {XSCALE_DBR0, NULL},
    {XSCALE_DBR1, NULL},
    {XSCALE_DBCON, NULL},
    {XSCALE_TBREG, NULL},
    {XSCALE_CHKPT0, NULL},
    {XSCALE_CHKPT1, NULL},
    {XSCALE_DCSR, NULL},    /* DCSR accessed via JTAG or SW */
    {-1, NULL},             /* TX accessed via JTAG */
    {-1, NULL},             /* RX accessed via JTAG */
    {-1, NULL},             /* TXRXCTRL implicit access via JTAG */
};
/* convenience wrapper to access XScale specific registers */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
    uint8_t buf[4];

    buf_set_u32(buf, 0, 32, value);

    return xscale_set_reg(reg, buf);
}

static const char xscale_not[] = "target is not an XScale";

static int xscale_verify_pointer(struct command_context *cmd_ctx,
        struct xscale_common *xscale)
{
    if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
        command_print(cmd_ctx, xscale_not);
        return ERROR_TARGET_INVALID;
    }
    return ERROR_OK;
}
static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
{
    assert(tap != NULL);

    if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
    {
        struct scan_field field;
        uint8_t scratch[4];

        memset(&field, 0, sizeof field);
        field.num_bits = tap->ir_length;
        field.out_value = scratch;
        buf_set_u32(scratch, 0, field.num_bits, new_instr);

        jtag_add_ir_scan(tap, &field, end_state);
    }

    return ERROR_OK;
}
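
/* Note: throughout this file, debug instructions are passed to
 * xscale_jtag_set_instr() as (XSCALE_xxx << xscale->xscale_variant);
 * the per-variant shift repositions the instruction within the IR on
 * parts whose instruction register layout differs from the original
 * core's. */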
static int xscale_read_dcsr(struct target *target)
{
    struct xscale_common *xscale = target_to_xscale(target);
    int retval;
    struct scan_field fields[3];
    uint8_t field0 = 0x0;
    uint8_t field0_check_value = 0x2;
    uint8_t field0_check_mask = 0x7;
    uint8_t field2 = 0x0;
    uint8_t field2_check_value = 0x0;
    uint8_t field2_check_mask = 0x1;

    xscale_jtag_set_instr(target->tap,
        XSCALE_SELDCSR << xscale->xscale_variant,
        TAP_DRPAUSE);

    buf_set_u32(&field0, 1, 1, xscale->hold_rst);
    buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

    memset(&fields, 0, sizeof fields);

    fields[0].num_bits = 3;
    fields[0].out_value = &field0;
    uint8_t tmp;
    fields[0].in_value = &tmp;

    fields[1].num_bits = 32;
    fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

    fields[2].num_bits = 1;
    fields[2].out_value = &field2;
    uint8_t tmp2;
    fields[2].in_value = &tmp2;

    jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

    jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
    jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

    if ((retval = jtag_execute_queue()) != ERROR_OK)
    {
        LOG_ERROR("JTAG error while reading DCSR");
        return retval;
    }

    xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
    xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

    /* write the register with the value we just read
     * (on this second pass, only the first bit of field0 is guaranteed to be 0)
     */
    field0_check_mask = 0x1;
    fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
    fields[1].in_value = NULL;

    jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);

    /* DANGER!!! this must be here. It will make sure that the arguments
     * to jtag_set_check_value() do not go out of scope! */
    return jtag_execute_queue();
}
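
/* JTAG callback: once the scan queue has executed, convert the raw
 * 32-bit scan buffer to a host-endian uint32_t in place. */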
static void xscale_getbuf(jtag_callback_data_t arg)
{
    uint8_t *in = (uint8_t *)arg;
    *((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}
static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
{
    if (num_words == 0)
        return ERROR_INVALID_ARGUMENTS;

    struct xscale_common *xscale = target_to_xscale(target);
    int retval = ERROR_OK;
    tap_state_t path[3];
    struct scan_field fields[3];
    uint8_t *field0 = malloc(num_words * 1);
    uint8_t field0_check_value = 0x2;
    uint8_t field0_check_mask = 0x6;
    uint32_t *field1 = malloc(num_words * 4);
    uint8_t field2_check_value = 0x0;
    uint8_t field2_check_mask = 0x1;
    int words_done = 0;
    int words_scheduled = 0;
    int i;

    path[0] = TAP_DRSELECT;
    path[1] = TAP_DRCAPTURE;
    path[2] = TAP_DRSHIFT;

    memset(&fields, 0, sizeof fields);

    fields[0].num_bits = 3;
    fields[0].check_value = &field0_check_value;
    fields[0].check_mask = &field0_check_mask;

    fields[1].num_bits = 32;

    fields[2].num_bits = 1;
    fields[2].check_value = &field2_check_value;
    fields[2].check_mask = &field2_check_mask;

    xscale_jtag_set_instr(target->tap,
        XSCALE_DBGTX << xscale->xscale_variant,
        TAP_IDLE);
    jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */

    /* repeat until all words have been collected */
    int attempts = 0;
    while (words_done < num_words)
    {
        /* schedule reads */
        words_scheduled = 0;
        for (i = words_done; i < num_words; i++)
        {
            fields[0].in_value = &field0[i];

            jtag_add_pathmove(3, path);

            fields[1].in_value = (uint8_t *)(field1 + i);

            jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);

            jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));

            words_scheduled++;
        }

        if ((retval = jtag_execute_queue()) != ERROR_OK)
        {
            LOG_ERROR("JTAG error while receiving data from debug handler");
            break;
        }

        /* examine results */
        for (i = words_done; i < num_words; i++)
        {
            if (!(field0[i] & 1))
            {
                /* move backwards if necessary */
                int j;
                for (j = i; j < num_words - 1; j++)
                {
                    field0[j] = field0[j + 1];
                    field1[j] = field1[j + 1];
                }
                words_scheduled--;
            }
        }
        if (words_scheduled == 0)
        {
            if (attempts++ == 1000)
            {
                LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
                retval = ERROR_TARGET_TIMEOUT;
                break;
            }
        }

        words_done += words_scheduled;
    }

    for (i = 0; i < num_words; i++)
        *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);

    free(field0);
    free(field1);
    return retval;
}
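
/* In xscale_receive() above, the 3-bit status field scanned out with each
 * DBGTX word carries a valid flag in bit 0; words that come back without
 * it are compacted out of the result arrays and rescheduled until the
 * debug handler has actually produced them. */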
static int xscale_read_tx(struct target *target, int consume)
{
    struct xscale_common *xscale = target_to_xscale(target);
    tap_state_t path[3];
    tap_state_t noconsume_path[6];
    int retval;
    struct timeval timeout, now;
    struct scan_field fields[3];
    uint8_t field0_in = 0x0;
    uint8_t field0_check_value = 0x2;
    uint8_t field0_check_mask = 0x6;
    uint8_t field2_check_value = 0x0;
    uint8_t field2_check_mask = 0x1;

    xscale_jtag_set_instr(target->tap,
        XSCALE_DBGTX << xscale->xscale_variant,
        TAP_IDLE);

    path[0] = TAP_DRSELECT;
    path[1] = TAP_DRCAPTURE;
    path[2] = TAP_DRSHIFT;

    noconsume_path[0] = TAP_DRSELECT;
    noconsume_path[1] = TAP_DRCAPTURE;
    noconsume_path[2] = TAP_DREXIT1;
    noconsume_path[3] = TAP_DRPAUSE;
    noconsume_path[4] = TAP_DREXIT2;
    noconsume_path[5] = TAP_DRSHIFT;

    memset(&fields, 0, sizeof fields);

    fields[0].num_bits = 3;
    fields[0].in_value = &field0_in;

    fields[1].num_bits = 32;
    fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

    fields[2].num_bits = 1;
    uint8_t tmp;
    fields[2].in_value = &tmp;

    gettimeofday(&timeout, NULL);
    timeval_add_time(&timeout, 1, 0);

    for (;;)
    {
        /* if we want to consume the register content (i.e. clear TX_READY),
         * we have to go straight from Capture-DR to Shift-DR
         * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
         */
        if (consume)
            jtag_add_pathmove(3, path);
        else
        {
            jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
        }

        jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

        jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
        jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

        if ((retval = jtag_execute_queue()) != ERROR_OK)
        {
            LOG_ERROR("JTAG error while reading TX");
            return ERROR_TARGET_TIMEOUT;
        }

        gettimeofday(&now, NULL);
        if ((now.tv_sec > timeout.tv_sec) ||
            ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
        {
            LOG_ERROR("time out reading TX register");
            return ERROR_TARGET_TIMEOUT;
        }
        if (!((!(field0_in & 1)) && consume))
        {
            goto done;
        }
        if (debug_level >= 3)
        {
            LOG_DEBUG("waiting 100ms");
            alive_sleep(100); /* avoid flooding the logs */
        } else
        {
            keep_alive();
        }
    }
done:

    if (!(field0_in & 1))
        return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

    return ERROR_OK;
}
static int xscale_write_rx(struct target *target)
{
    struct xscale_common *xscale = target_to_xscale(target);
    int retval;
    struct timeval timeout, now;
    struct scan_field fields[3];
    uint8_t field0_out = 0x0;
    uint8_t field0_in = 0x0;
    uint8_t field0_check_value = 0x2;
    uint8_t field0_check_mask = 0x6;
    uint8_t field2 = 0x0;
    uint8_t field2_check_value = 0x0;
    uint8_t field2_check_mask = 0x1;

    xscale_jtag_set_instr(target->tap,
        XSCALE_DBGRX << xscale->xscale_variant,
        TAP_IDLE);

    memset(&fields, 0, sizeof fields);

    fields[0].num_bits = 3;
    fields[0].out_value = &field0_out;
    fields[0].in_value = &field0_in;

    fields[1].num_bits = 32;
    fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

    fields[2].num_bits = 1;
    fields[2].out_value = &field2;
    uint8_t tmp;
    fields[2].in_value = &tmp;

    gettimeofday(&timeout, NULL);
    timeval_add_time(&timeout, 1, 0);

    /* poll until rx_read is low */
    LOG_DEBUG("polling RX");
    for (;;)
    {
        jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

        jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
        jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

        if ((retval = jtag_execute_queue()) != ERROR_OK)
        {
            LOG_ERROR("JTAG error while writing RX");
            return retval;
        }

        gettimeofday(&now, NULL);
        if ((now.tv_sec > timeout.tv_sec) ||
            ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
        {
            LOG_ERROR("time out writing RX register");
            return ERROR_TARGET_TIMEOUT;
        }
        if (!(field0_in & 1))
            goto done;
        if (debug_level >= 3)
        {
            LOG_DEBUG("waiting 100ms");
            alive_sleep(100); /* avoid flooding the logs */
        } else
        {
            keep_alive();
        }
    }
done:

    /* set rx_valid */
    field2 = 0x1;
    jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
    if ((retval = jtag_execute_queue()) != ERROR_OK)
    {
        LOG_ERROR("JTAG error while writing RX");
        return retval;
    }

    return ERROR_OK;
}
/* send count elements of size bytes each to the debug handler */
static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
{
    struct xscale_common *xscale = target_to_xscale(target);
    uint32_t t[3];
    int bits[3];
    int retval;
    int done_count = 0;

    xscale_jtag_set_instr(target->tap,
        XSCALE_DBGRX << xscale->xscale_variant,
        TAP_IDLE);

    bits[0] = 3;
    t[0] = 0;
    bits[1] = 32;
    t[2] = 1;
    bits[2] = 1;

    int endianness = target->endianness;
    while (done_count++ < count)
    {
        switch (size)
        {
        case 4:
            if (endianness == TARGET_LITTLE_ENDIAN)
            {
                t[1] = le_to_h_u32(buffer);
            } else
            {
                t[1] = be_to_h_u32(buffer);
            }
            break;
        case 2:
            if (endianness == TARGET_LITTLE_ENDIAN)
            {
                t[1] = le_to_h_u16(buffer);
            } else
            {
                t[1] = be_to_h_u16(buffer);
            }
            break;
        case 1:
            t[1] = buffer[0];
            break;
        default:
            LOG_ERROR("BUG: size neither 4, 2 nor 1");
            return ERROR_INVALID_ARGUMENTS;
        }
        jtag_add_dr_out(target->tap,
            3,
            bits,
            t,
            TAP_IDLE);
        buffer += size;
    }

    if ((retval = jtag_execute_queue()) != ERROR_OK)
    {
        LOG_ERROR("JTAG error while sending data to debug handler");
        return retval;
    }

    return ERROR_OK;
}
static int xscale_send_u32(struct target *target, uint32_t value)
{
    struct xscale_common *xscale = target_to_xscale(target);

    buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
    return xscale_write_rx(target);
}
static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
{
    struct xscale_common *xscale = target_to_xscale(target);
    int retval;
    struct scan_field fields[3];
    uint8_t field0 = 0x0;
    uint8_t field0_check_value = 0x2;
    uint8_t field0_check_mask = 0x7;
    uint8_t field2 = 0x0;
    uint8_t field2_check_value = 0x0;
    uint8_t field2_check_mask = 0x1;

    if (hold_rst != -1)
        xscale->hold_rst = hold_rst;

    if (ext_dbg_brk != -1)
        xscale->external_debug_break = ext_dbg_brk;

    xscale_jtag_set_instr(target->tap,
        XSCALE_SELDCSR << xscale->xscale_variant,
        TAP_IDLE);

    buf_set_u32(&field0, 1, 1, xscale->hold_rst);
    buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

    memset(&fields, 0, sizeof fields);

    fields[0].num_bits = 3;
    fields[0].out_value = &field0;
    uint8_t tmp;
    fields[0].in_value = &tmp;

    fields[1].num_bits = 32;
    fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

    fields[2].num_bits = 1;
    fields[2].out_value = &field2;
    uint8_t tmp2;
    fields[2].in_value = &tmp2;

    jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);

    jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
    jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

    if ((retval = jtag_execute_queue()) != ERROR_OK)
    {
        LOG_ERROR("JTAG error while writing DCSR");
        return retval;
    }

    xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
    xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

    return ERROR_OK;
}
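
/* The parity helper below folds a 32-bit word onto itself so the XOR of
 * all 32 bits lands in the low nibble, then uses 0x6996 as a 16-entry
 * lookup table of 4-bit parities.  Worked example: v = 0x80000001 folds
 * to 0x9, and (0x6996 >> 9) & 1 == 0, i.e. even parity (two bits set). */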
/* parity of a 32-bit word: 0 if the number of set bits is even, 1 if odd */
static unsigned int parity(unsigned int v)
{
    // unsigned int ov = v;
    v ^= v >> 16;
    v ^= v >> 8;
    v ^= v >> 4;
    v &= 0xf;
    // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
    return (0x6996 >> v) & 1;
}
static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
{
    struct xscale_common *xscale = target_to_xscale(target);
    uint8_t packet[4];
    uint8_t cmd;
    int word;
    struct scan_field fields[2];

    LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

    /* LDIC into IR */
    xscale_jtag_set_instr(target->tap,
        XSCALE_LDIC << xscale->xscale_variant,
        TAP_IDLE);

    /* CMD is b011 to load a cacheline into the Mini ICache.
     * Loading into the main ICache is deprecated, and unused.
     * It's followed by three zero bits, and 27 address bits.
     */
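    /* Resulting 33-bit header scan (per the layout described above):
     *   6-bit field:   CMD = b011 plus three zero bits
     *   27-bit field:  VA[31:5] = va >> 5, i.e. the 32-byte cache line
     *                  address with the line offset dropped
     */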
    buf_set_u32(&cmd, 0, 6, 0x3);

    /* virtual address of desired cache line */
    buf_set_u32(packet, 0, 27, va >> 5);

    memset(&fields, 0, sizeof fields);

    fields[0].num_bits = 6;
    fields[0].out_value = &cmd;

    fields[1].num_bits = 27;
    fields[1].out_value = packet;

    jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

    /* rest of packet is a cacheline: 8 instructions, with parity */
    fields[0].num_bits = 32;
    fields[0].out_value = packet;

    fields[1].num_bits = 1;
    fields[1].out_value = &cmd;

    for (word = 0; word < 8; word++)
    {
        buf_set_u32(packet, 0, 32, buffer[word]);

        uint32_t value;
        memcpy(&value, packet, sizeof(uint32_t));
        cmd = parity(value);

        jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
    }

    return jtag_execute_queue();
}
static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
{
    struct xscale_common *xscale = target_to_xscale(target);
    uint8_t packet[4];
    uint8_t cmd;
    struct scan_field fields[2];

    xscale_jtag_set_instr(target->tap,
        XSCALE_LDIC << xscale->xscale_variant,
        TAP_IDLE);

    /* CMD for invalidate IC line b000, bits [6:4] b000 */
    buf_set_u32(&cmd, 0, 6, 0x0);

    /* virtual address of desired cache line */
    buf_set_u32(packet, 0, 27, va >> 5);

    memset(&fields, 0, sizeof fields);

    fields[0].num_bits = 6;
    fields[0].out_value = &cmd;

    fields[1].num_bits = 27;
    fields[1].out_value = packet;

    jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);

    return ERROR_OK;
}
static int xscale_update_vectors(struct target *target)
{
    struct xscale_common *xscale = target_to_xscale(target);
    int i;
    int retval;

    uint32_t low_reset_branch, high_reset_branch;

    for (i = 1; i < 8; i++)
    {
        /* if there's a static vector specified for this exception, override */
        if (xscale->static_high_vectors_set & (1 << i))
        {
            xscale->high_vectors[i] = xscale->static_high_vectors[i];
        }
        else
        {
            retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
            if (retval == ERROR_TARGET_TIMEOUT)
                return retval;
            if (retval != ERROR_OK)
            {
                /* Some of these reads will fail as part of normal execution */
                xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
            }
        }
    }

    for (i = 1; i < 8; i++)
    {
        if (xscale->static_low_vectors_set & (1 << i))
        {
            xscale->low_vectors[i] = xscale->static_low_vectors[i];
        }
        else
        {
            retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
            if (retval == ERROR_TARGET_TIMEOUT)
                return retval;
            if (retval != ERROR_OK)
            {
                /* Some of these reads will fail as part of normal execution */
                xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
            }
        }
    }

    /* calculate branches to debug handler */
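    /* An ARM B instruction encodes a signed word offset relative to
     * PC + 8, so the 24-bit immediate is (target - vector_address - 8) >> 2.
     * E.g. with the handler entry point at handler_address + 0x20, the
     * branch placed at the low reset vector (address 0x0) needs offset
     * (handler_address + 0x20 - 0x0 - 0x8) >> 2, exactly as computed below. */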
    low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
    high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

    xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
    xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

    /* invalidate and load exception vectors in mini i-cache */
    xscale_invalidate_ic_line(target, 0x0);
    xscale_invalidate_ic_line(target, 0xffff0000);

    xscale_load_ic(target, 0x0, xscale->low_vectors);
    xscale_load_ic(target, 0xffff0000, xscale->high_vectors);

    return ERROR_OK;
}
static int xscale_arch_state(struct target *target)
{
    struct xscale_common *xscale = target_to_xscale(target);
    struct arm *armv4_5 = &xscale->armv4_5_common;

    static const char *state[] =
    {
        "disabled", "enabled"
    };

    static const char *arch_dbg_reason[] =
    {
        "", "\n(processor reset)", "\n(trace buffer full)"
    };

    if (armv4_5->common_magic != ARM_COMMON_MAGIC)
    {
        LOG_ERROR("BUG: called for a non-ARMv4/5 target");
        return ERROR_INVALID_ARGUMENTS;
    }

    arm_arch_state(target);
    LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
            state[xscale->armv4_5_mmu.mmu_enabled],
            state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
            state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
            arch_dbg_reason[xscale->arch_debug_reason]);

    return ERROR_OK;
}
static int xscale_poll(struct target *target)
{
    int retval = ERROR_OK;

    if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
    {
        enum target_state previous_state = target->state;
        if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
        {
            /* there's data to read from the tx register, we entered debug state */
            target->state = TARGET_HALTED;

            /* process debug entry, fetching current mode regs */
            retval = xscale_debug_entry(target);
        }
        else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
        {
            LOG_USER("error while polling TX register, reset CPU");
            /* here we "lie" so GDB won't get stuck and a reset can be performed */
            target->state = TARGET_HALTED;
        }

        /* debug_entry could have overwritten target state (i.e. immediate resume)
         * don't signal event handlers in that case
         */
        if (target->state != TARGET_HALTED)
            return ERROR_OK;

        /* if target was running, signal that we halted
         * otherwise we reentered from debug execution */
        if (previous_state == TARGET_RUNNING)
            target_call_event_callbacks(target, TARGET_EVENT_HALTED);
        else
            target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
    }

    return retval;
}
static int xscale_debug_entry(struct target *target)
{
    struct xscale_common *xscale = target_to_xscale(target);
    struct arm *armv4_5 = &xscale->armv4_5_common;
    uint32_t pc;
    uint32_t buffer[10];
    unsigned i;
    int retval;
    uint32_t moe;

    /* clear external dbg break (will be written on next DCSR read) */
    xscale->external_debug_break = 0;
    if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
        return retval;

    /* get r0, pc, r1 to r7 and cpsr */
    if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
        return retval;

    /* move r0 from buffer to register cache */
    buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
    armv4_5->core_cache->reg_list[0].dirty = 1;
    armv4_5->core_cache->reg_list[0].valid = 1;
    LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

    /* move pc from buffer to register cache */
    buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
    armv4_5->pc->dirty = 1;
    armv4_5->pc->valid = 1;
    LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

    /* move data from buffer to register cache */
    for (i = 1; i <= 7; i++)
    {
        buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
        armv4_5->core_cache->reg_list[i].dirty = 1;
        armv4_5->core_cache->reg_list[i].valid = 1;
        LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
    }

    arm_set_cpsr(armv4_5, buffer[9]);
    LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

    if (!is_arm_mode(armv4_5->core_mode))
    {
        target->state = TARGET_UNKNOWN;
        LOG_ERROR("cpsr contains invalid mode value - communication failure");
        return ERROR_TARGET_FAILURE;
    }
    LOG_DEBUG("target entered debug state in %s mode",
            arm_mode_name(armv4_5->core_mode));

    /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
    if (armv4_5->spsr) {
        xscale_receive(target, buffer, 8);
        buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
        armv4_5->spsr->dirty = false;
        armv4_5->spsr->valid = true;
    }
    else
    {
        /* r8 to r14, but no spsr */
        xscale_receive(target, buffer, 7);
    }

    /* move data from buffer to right banked register in cache */
    for (i = 8; i <= 14; i++)
    {
        struct reg *r = arm_reg_current(armv4_5, i);

        buf_set_u32(r->value, 0, 32, buffer[i - 8]);
        r->dirty = false;
        r->valid = true;
    }

    /* mark xscale regs invalid to ensure they are retrieved from the
     * debug handler if requested */
    for (i = 0; i < xscale->reg_cache->num_regs; i++)
        xscale->reg_cache->reg_list[i].valid = 0;

    /* examine debug reason */
    xscale_read_dcsr(target);
    moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

    /* stored PC (for calculating fixup) */
    pc = buf_get_u32(armv4_5->pc->value, 0, 32);
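
    /* MOE, the "method of entry" field (DCSR bits [4:2]), tells us why
     * the core took a debug exception; the cases below match the
     * encodings listed in the Intel XScale Core Developer's Manual. */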
    switch (moe)
    {
        case 0x0: /* Processor reset */
            target->debug_reason = DBG_REASON_DBGRQ;
            xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
            pc -= 4;
            break;
        case 0x1: /* Instruction breakpoint hit */
            target->debug_reason = DBG_REASON_BREAKPOINT;
            xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
            pc -= 4;
            break;
        case 0x2: /* Data breakpoint hit */
            target->debug_reason = DBG_REASON_WATCHPOINT;
            xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
            pc -= 4;
            break;
        case 0x3: /* BKPT instruction executed */
            target->debug_reason = DBG_REASON_BREAKPOINT;
            xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
            pc -= 4;
            break;
        case 0x4: /* Ext. debug event */
            target->debug_reason = DBG_REASON_DBGRQ;
            xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
            pc -= 4;
            break;
        case 0x5: /* Vector trap occurred */
            target->debug_reason = DBG_REASON_BREAKPOINT;
            xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
            pc -= 4;
            break;
        case 0x6: /* Trace buffer full break */
            target->debug_reason = DBG_REASON_DBGRQ;
            xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
            pc -= 4;
            break;
        case 0x7: /* Reserved (may flag Hot-Debug support) */
        default:
            LOG_ERROR("Method of Entry is 'Reserved'");
            exit(-1);
            break;
    }

    /* apply PC fixup */
    buf_set_u32(armv4_5->pc->value, 0, 32, pc);

    /* on the first debug entry, identify cache type */
    if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
    {
        uint32_t cache_type_reg;

        /* read cp15 cache type register */
        xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
        cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

        armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
    }

    /* examine MMU and Cache settings */
    /* read cp15 control register */
    xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
    xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
    xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
    xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
    xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

    /* tracing enabled, read collected trace data */
    if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
    {
        xscale_read_trace(target);

        /* Resume if entered debug due to buffer fill and we're still collecting
         * trace data.  Note that a debug exception due to trace buffer full
         * can only happen in fill mode. */
        if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
        {
            if (--xscale->trace.fill_counter > 0)
                xscale_resume(target, 1, 0x0, 1, 0);
        }
        else    /* entered debug for other reason; reset counter */
            xscale->trace.fill_counter = 0;
    }

    return ERROR_OK;
}
static int xscale_halt(struct target *target)
{
    struct xscale_common *xscale = target_to_xscale(target);

    LOG_DEBUG("target->state: %s",
          target_state_name(target));

    if (target->state == TARGET_HALTED)
    {
        LOG_DEBUG("target was already halted");
        return ERROR_OK;
    }
    else if (target->state == TARGET_UNKNOWN)
    {
        /* this must not happen for an XScale target */
        LOG_ERROR("target was in unknown state when halt was requested");
        return ERROR_TARGET_INVALID;
    }
    else if (target->state == TARGET_RESET)
    {
        LOG_DEBUG("target->state == TARGET_RESET");
    }
    else
    {
        /* assert external dbg break */
        xscale->external_debug_break = 1;
        xscale_read_dcsr(target);

        target->debug_reason = DBG_REASON_DBGRQ;
    }

    return ERROR_OK;
}
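
/* Single-stepping is emulated: hardware instruction breakpoint 0 (IBCR0)
 * is programmed with the address of the next instruction, with bit 0 set
 * as the enable bit (which is why addresses are masked with 0xfffffffe
 * when looking the breakpoint back up). */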
static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
{
    struct xscale_common *xscale = target_to_xscale(target);
    struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
    int retval;

    if (xscale->ibcr0_used)
    {
        struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);

        if (ibcr0_bp)
        {
            xscale_unset_breakpoint(target, ibcr0_bp);
        }
        else
        {
            LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
            exit(-1);
        }
    }

    if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
        return retval;

    return ERROR_OK;
}

static int xscale_disable_single_step(struct target *target)
{
    struct xscale_common *xscale = target_to_xscale(target);
    struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
    int retval;

    if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
        return retval;

    return ERROR_OK;
}
static void xscale_enable_watchpoints(struct target *target)
{
    struct watchpoint *watchpoint = target->watchpoints;

    while (watchpoint)
    {
        if (watchpoint->set == 0)
            xscale_set_watchpoint(target, watchpoint);
        watchpoint = watchpoint->next;
    }
}

static void xscale_enable_breakpoints(struct target *target)
{
    struct breakpoint *breakpoint = target->breakpoints;

    /* set any pending breakpoints */
    while (breakpoint)
    {
        if (breakpoint->set == 0)
            xscale_set_breakpoint(target, breakpoint);
        breakpoint = breakpoint->next;
    }
}

static void xscale_free_trace_data(struct xscale_common *xscale)
{
    struct xscale_trace_data *td = xscale->trace.data;
    while (td)
    {
        struct xscale_trace_data *next_td = td->next;
        if (td->entries)
            free(td->entries);
        free(td);
        td = next_td;
    }
    xscale->trace.data = NULL;
}
static int xscale_resume(struct target *target, int current,
        uint32_t address, int handle_breakpoints, int debug_execution)
{
    struct xscale_common *xscale = target_to_xscale(target);
    struct arm *armv4_5 = &xscale->armv4_5_common;
    struct breakpoint *breakpoint = target->breakpoints;
    uint32_t current_pc;
    int retval;
    int i;

    LOG_DEBUG("-");

    if (target->state != TARGET_HALTED)
    {
        LOG_WARNING("target not halted");
        return ERROR_TARGET_NOT_HALTED;
    }

    if (!debug_execution)
    {
        target_free_all_working_areas(target);
    }

    /* update vector tables */
    if ((retval = xscale_update_vectors(target)) != ERROR_OK)
        return retval;

    /* current = 1: continue on current pc, otherwise continue at <address> */
    if (!current)
        buf_set_u32(armv4_5->pc->value, 0, 32, address);

    current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

    /* if we're at the reset vector, we have to simulate the branch */
    if (current_pc == 0x0)
    {
        arm_simulate_step(target, NULL);
        current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
    }

    /* the front-end may request us not to handle breakpoints */
    if (handle_breakpoints)
    {
        breakpoint = breakpoint_find(target,
                buf_get_u32(armv4_5->pc->value, 0, 32));
        if (breakpoint != NULL)
        {
            uint32_t next_pc;
            enum trace_mode saved_trace_mode;

            /* there's a breakpoint at the current PC, we have to step over it */
            LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
            xscale_unset_breakpoint(target, breakpoint);

            /* calculate PC of next instruction */
            if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
            {
                uint32_t current_opcode;
                target_read_u32(target, current_pc, &current_opcode);
                LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
            }

            LOG_DEBUG("enable single-step");
            xscale_enable_single_step(target, next_pc);

            /* restore banked registers */
            retval = xscale_restore_banked(target);

            /* send resume request */
            xscale_send_u32(target, 0x30);

            /* send CPSR */
            xscale_send_u32(target,
                buf_get_u32(armv4_5->cpsr->value, 0, 32));
            LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
                buf_get_u32(armv4_5->cpsr->value, 0, 32));

            for (i = 7; i >= 0; i--)
            {
                /* send register */
                xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
                LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
            }

            /* send PC */
            xscale_send_u32(target,
                buf_get_u32(armv4_5->pc->value, 0, 32));
            LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
                buf_get_u32(armv4_5->pc->value, 0, 32));

            /* disable trace data collection in xscale_debug_entry() */
            saved_trace_mode = xscale->trace.mode;
            xscale->trace.mode = XSCALE_TRACE_DISABLED;

            /* wait for and process debug entry */
            xscale_debug_entry(target);

            /* re-enable trace buffer, if enabled previously */
            xscale->trace.mode = saved_trace_mode;

            LOG_DEBUG("disable single-step");
            xscale_disable_single_step(target);

            LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
            xscale_set_breakpoint(target, breakpoint);
        }
    }

    /* enable any pending breakpoints and watchpoints */
    xscale_enable_breakpoints(target);
    xscale_enable_watchpoints(target);

    /* restore banked registers */
    retval = xscale_restore_banked(target);

    /* send resume request (command 0x30 or 0x31)
     * clean the trace buffer if it is to be enabled (0x62) */
    if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
    {
        if (xscale->trace.mode == XSCALE_TRACE_FILL)
        {
            /* If trace enabled in fill mode and starting collection of new set
             * of buffers, initialize buffer counter and free previous buffers */
            if (xscale->trace.fill_counter == 0)
            {
                xscale->trace.fill_counter = xscale->trace.buffer_fill;
                xscale_free_trace_data(xscale);
            }
        }
        else    /* wrap mode; free previous buffer */
            xscale_free_trace_data(xscale);

        xscale_send_u32(target, 0x62);
        xscale_send_u32(target, 0x31);
    }
    else
        xscale_send_u32(target, 0x30);

    /* send CPSR */
    xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
    LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
        buf_get_u32(armv4_5->cpsr->value, 0, 32));

    for (i = 7; i >= 0; i--)
    {
        /* send register */
        xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
        LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
    }

    /* send PC */
    xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
    LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
        buf_get_u32(armv4_5->pc->value, 0, 32));

    target->debug_reason = DBG_REASON_NOTHALTED;

    if (!debug_execution)
    {
        /* registers are now invalid */
        register_cache_invalidate(armv4_5->core_cache);
        target->state = TARGET_RUNNING;
        target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
    }
    else
    {
        target->state = TARGET_DEBUG_RUNNING;
        target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
    }

    LOG_DEBUG("target resumed");

    return ERROR_OK;
}
static int xscale_step_inner(struct target *target, int current,
        uint32_t address, int handle_breakpoints)
{
    struct xscale_common *xscale = target_to_xscale(target);
    struct arm *armv4_5 = &xscale->armv4_5_common;
    uint32_t next_pc;
    int retval;
    int i;

    target->debug_reason = DBG_REASON_SINGLESTEP;

    /* calculate PC of next instruction */
    if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
    {
        uint32_t current_opcode, current_pc;
        current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

        target_read_u32(target, current_pc, &current_opcode);
        LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
        return retval;
    }

    LOG_DEBUG("enable single-step");
    if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
        return retval;

    /* restore banked registers */
    if ((retval = xscale_restore_banked(target)) != ERROR_OK)
        return retval;

    /* send resume request (command 0x30 or 0x31)
     * clean the trace buffer if it is to be enabled (0x62) */
    if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
    {
        if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
            return retval;
        if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
            return retval;
    }
    else
        if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
            return retval;

    /* send CPSR */
    retval = xscale_send_u32(target,
            buf_get_u32(armv4_5->cpsr->value, 0, 32));
    if (retval != ERROR_OK)
        return retval;
    LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
            buf_get_u32(armv4_5->cpsr->value, 0, 32));

    for (i = 7; i >= 0; i--)
    {
        /* send register */
        if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
            return retval;
        LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
    }

    /* send PC */
    retval = xscale_send_u32(target,
            buf_get_u32(armv4_5->pc->value, 0, 32));
    if (retval != ERROR_OK)
        return retval;
    LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
            buf_get_u32(armv4_5->pc->value, 0, 32));

    target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

    /* registers are now invalid */
    register_cache_invalidate(armv4_5->core_cache);

    /* wait for and process debug entry */
    if ((retval = xscale_debug_entry(target)) != ERROR_OK)
        return retval;

    LOG_DEBUG("disable single-step");
    if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
        return retval;

    target_call_event_callbacks(target, TARGET_EVENT_HALTED);

    return ERROR_OK;
}
static int xscale_step(struct target *target, int current,
        uint32_t address, int handle_breakpoints)
{
    struct arm *armv4_5 = target_to_arm(target);
    struct breakpoint *breakpoint = NULL;

    uint32_t current_pc;
    int retval;

    if (target->state != TARGET_HALTED)
    {
        LOG_WARNING("target not halted");
        return ERROR_TARGET_NOT_HALTED;
    }

    /* current = 1: continue on current pc, otherwise continue at <address> */
    if (!current)
        buf_set_u32(armv4_5->pc->value, 0, 32, address);

    current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

    /* if we're at the reset vector, we have to simulate the step */
    if (current_pc == 0x0)
    {
        if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
            return retval;
        current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);

        target->debug_reason = DBG_REASON_SINGLESTEP;
        target_call_event_callbacks(target, TARGET_EVENT_HALTED);

        return ERROR_OK;
    }

    /* the front-end may request us not to handle breakpoints */
    if (handle_breakpoints)
        breakpoint = breakpoint_find(target,
                buf_get_u32(armv4_5->pc->value, 0, 32));
    if (breakpoint != NULL) {
        retval = xscale_unset_breakpoint(target, breakpoint);
        if (retval != ERROR_OK)
            return retval;
    }

    retval = xscale_step_inner(target, current, address, handle_breakpoints);

    if (breakpoint)
    {
        xscale_set_breakpoint(target, breakpoint);
    }

    LOG_DEBUG("target stepped");

    return ERROR_OK;
}
static int xscale_assert_reset(struct target *target)
{
    struct xscale_common *xscale = target_to_xscale(target);

    LOG_DEBUG("target->state: %s",
          target_state_name(target));

    /* select DCSR instruction (set endstate to R-T-I to ensure we don't
     * end up in T-L-R, which would reset JTAG)
     */
    xscale_jtag_set_instr(target->tap,
        XSCALE_SELDCSR << xscale->xscale_variant,
        TAP_IDLE);

    /* set Hold reset, Halt mode and Trap Reset */
    buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
    buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
    xscale_write_dcsr(target, 1, 0);

    /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
    xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
    jtag_execute_queue();

    /* assert reset */
    jtag_add_reset(0, 1);

    /* sleep 1ms, to be sure we fulfill any requirements */
    jtag_add_sleep(1000);
    jtag_execute_queue();

    target->state = TARGET_RESET;

    if (target->reset_halt)
    {
        int retval;
        if ((retval = target_halt(target)) != ERROR_OK)
            return retval;
    }

    return ERROR_OK;
}
static int xscale_deassert_reset(struct target *target)
{
    struct xscale_common *xscale = target_to_xscale(target);
    struct breakpoint *breakpoint = target->breakpoints;

    LOG_DEBUG("-");

    xscale->ibcr_available = 2;
    xscale->ibcr0_used = 0;
    xscale->ibcr1_used = 0;

    xscale->dbr_available = 2;
    xscale->dbr0_used = 0;
    xscale->dbr1_used = 0;

    /* mark all hardware breakpoints as unset */
    while (breakpoint)
    {
        if (breakpoint->type == BKPT_HARD)
        {
            breakpoint->set = 0;
        }
        breakpoint = breakpoint->next;
    }

    xscale->trace.mode = XSCALE_TRACE_DISABLED;
    xscale_free_trace_data(xscale);

    register_cache_invalidate(xscale->armv4_5_common.core_cache);

    /* FIXME mark hardware watchpoints got unset too.  Also,
     * at least some of the XScale registers are invalid...
     */

    /*
     * REVISIT:  *assumes* we had a SRST+TRST reset so the mini-icache
     * contents got invalidated.  Safer to force that, so writing new
     * contents can't ever fail..
     */
    {
        uint32_t address;
        unsigned buf_cnt;
        const uint8_t *buffer = xscale_debug_handler;
        int retval;

        /* release SRST */
        jtag_add_reset(0, 0);

        /* wait 300ms; 150 and 100ms were not enough */
        jtag_add_sleep(300*1000);

        jtag_add_runtest(2030, TAP_IDLE);
        jtag_execute_queue();

        /* set Hold reset, Halt mode and Trap Reset */
        buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
        buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
        xscale_write_dcsr(target, 1, 0);

        /* Load the debug handler into the mini-icache.  Since
         * it's using halt mode (not monitor mode), it runs in
         * "Special Debug State" for access to registers, memory,
         * coprocessors, trace data, etc.
         */
        address = xscale->handler_address;
        for (unsigned binary_size = sizeof xscale_debug_handler - 1;
                binary_size > 0;
                binary_size -= buf_cnt, buffer += buf_cnt)
        {
            uint32_t cache_line[8];
            unsigned i;

            buf_cnt = binary_size;
            if (buf_cnt > 32)
                buf_cnt = 32;

            for (i = 0; i < buf_cnt; i += 4)
            {
                /* convert LE buffer to host-endian uint32_t */
                cache_line[i / 4] = le_to_h_u32(&buffer[i]);
            }

            for (; i < 32; i += 4)
            {
                cache_line[i / 4] = 0xe1a08008;
            }
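            /* Note: 0xe1a08008 above is "mov r8, r8", an ARM NOP used to
             * pad a partial cache line out to its full 32 bytes. */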
            /* only load addresses other than the reset vectors */
            if ((address % 0x400) != 0x0)
            {
                retval = xscale_load_ic(target, address,
                        cache_line);
                if (retval != ERROR_OK)
                    return retval;
            }

            address += buf_cnt;
        }

        retval = xscale_load_ic(target, 0x0,
                    xscale->low_vectors);
        if (retval != ERROR_OK)
            return retval;
        retval = xscale_load_ic(target, 0xffff0000,
                    xscale->high_vectors);
        if (retval != ERROR_OK)
            return retval;

        jtag_add_runtest(30, TAP_IDLE);

        jtag_add_sleep(100000);

        /* set Hold reset, Halt mode and Trap Reset */
        buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
        buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
        xscale_write_dcsr(target, 1, 0);

        /* clear Hold reset to let the target run (should enter debug handler) */
        xscale_write_dcsr(target, 0, 1);
        target->state = TARGET_RUNNING;

        if (!target->reset_halt)
        {
            jtag_add_sleep(10000);

            /* we should have entered debug now */
            xscale_debug_entry(target);
            target->state = TARGET_HALTED;

            /* resume the target */
            xscale_resume(target, 1, 0x0, 1, 0);
        }
    }

    return ERROR_OK;
}
static int xscale_read_core_reg(struct target *target, struct reg *r,
        int num, enum arm_mode mode)
{
    /** \todo add debug handler support for core register reads */
    LOG_ERROR("not implemented");
    return ERROR_OK;
}

static int xscale_write_core_reg(struct target *target, struct reg *r,
        int num, enum arm_mode mode, uint32_t value)
{
    /** \todo add debug handler support for core register writes */
    LOG_ERROR("not implemented");
    return ERROR_OK;
}
static int xscale_full_context(struct target *target)
{
    struct arm *armv4_5 = target_to_arm(target);

    uint32_t *buffer;

    int i, j;

    LOG_DEBUG("-");

    if (target->state != TARGET_HALTED)
    {
        LOG_WARNING("target not halted");
        return ERROR_TARGET_NOT_HALTED;
    }

    buffer = malloc(4 * 8);

    /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS);
     * we can't enter User mode on an XScale (unpredictable),
     * but User shares registers with SYS
     */
    for (i = 1; i < 7; i++)
    {
        enum arm_mode mode = armv4_5_number_to_mode(i);
        bool valid = true;
        struct reg *r;

        if (mode == ARM_MODE_USR)
            continue;

        /* check if there are invalid registers in the current mode
         */
        for (j = 0; valid && j <= 16; j++)
        {
            if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
                    mode, j).valid)
                valid = false;
        }
        if (valid)
            continue;

        /* request banked registers */
        xscale_send_u32(target, 0x0);

        /* send CPSR for desired bank mode */
        xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

        /* get banked registers:  r8 to r14; and SPSR
         * except in USR/SYS mode
         */
        if (mode != ARM_MODE_SYS) {
            /* SPSR */
            r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
                    mode, 16);

            xscale_receive(target, buffer, 8);

            buf_set_u32(r->value, 0, 32, buffer[7]);
            r->dirty = false;
            r->valid = true;
        } else {
            xscale_receive(target, buffer, 7);
        }

        /* move data from buffer to register cache */
        for (j = 8; j <= 14; j++)
        {
            r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
                    mode, j);
            buf_set_u32(r->value, 0, 32, buffer[j - 8]);
            r->dirty = false;
            r->valid = true;
        }
    }

    free(buffer);

    return ERROR_OK;
}
static int xscale_restore_banked(struct target *target)
{
    struct arm *armv4_5 = target_to_arm(target);

    int i, j;

    if (target->state != TARGET_HALTED)
    {
        LOG_WARNING("target not halted");
        return ERROR_TARGET_NOT_HALTED;
    }

    /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
     * and check if any banked registers need to be written.  Ignore
     * USR mode (number 0) in favor of SYS; we can't enter User mode on
     * an XScale (unpredictable), but they share all registers.
     */
    for (i = 1; i < 7; i++)
    {
        enum arm_mode mode = armv4_5_number_to_mode(i);
        struct reg *r;

        if (mode == ARM_MODE_USR)
            continue;

        /* check if there are dirty registers in this mode */
        for (j = 8; j <= 14; j++)
        {
            if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
                    mode, j).dirty)
                goto dirty;
        }

        /* if not USR/SYS, check if the SPSR needs to be written */
        if (mode != ARM_MODE_SYS)
        {
            if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
                    mode, 16).dirty)
                goto dirty;
        }

        /* there's nothing to flush for this mode */
        continue;
dirty:
        /* command 0x1:  "send banked registers" */
        xscale_send_u32(target, 0x1);

        /* send CPSR for desired mode */
        xscale_send_u32(target, mode | 0xc0 /* I/F bits */);

        /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
         * but this protocol doesn't understand that nuance.
         */
        for (j = 8; j <= 14; j++) {
            r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
                    mode, j);
            xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
            r->dirty = false;
        }

        /* send spsr if not in USR/SYS mode */
        if (mode != ARM_MODE_SYS) {
            r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
                    mode, 16);
            xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
            r->dirty = false;
        }
    }

    return ERROR_OK;
}
static int xscale_read_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t *buf32;
	uint32_t i;
	int retval;

	LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
			address, size, count);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* sanitize arguments */
	if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
		return ERROR_INVALID_ARGUMENTS;

	if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
		return ERROR_TARGET_UNALIGNED_ACCESS;

	/* send memory read request (command 0x1n, n: access size) */
	if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
		return retval;

	/* send base address for read request */
	if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
		return retval;

	/* send number of requested data words */
	if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
		return retval;

	/* receive data from target (count times 32-bit words in host endianness) */
	buf32 = malloc(4 * count);
	if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
	{
		free(buf32);
		return retval;
	}

	/* extract data from host-endian buffer into byte stream */
	for (i = 0; i < count; i++)
	{
		switch (size)
		{
			case 4:
				target_buffer_set_u32(target, buffer, buf32[i]);
				buffer += 4;
				break;
			case 2:
				target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
				buffer += 2;
				break;
			case 1:
				*buffer++ = buf32[i] & 0xff;
				break;
			default:
				LOG_ERROR("invalid read size");
				free(buf32);
				return ERROR_INVALID_ARGUMENTS;
		}
	}

	free(buf32);

	/* examine DCSR, to see if Sticky Abort (SA) got set */
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
	{
		/* clear SA bit */
		if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
			return retval;

		return ERROR_TARGET_DATA_ABORT;
	}

	return ERROR_OK;
}
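
/* Example (illustrative only):  reading four words starting at 0x1000
 * sends the handler 0x14 (command 0x10 | size 4), 0x00001000 and 4, then
 * collects four data words back:
 *
 *	uint8_t data[16];
 *	retval = xscale_read_memory(target, 0x1000, 4, 4, data);
 *
 * Sub-word accesses still arrive as one 32-bit word per element; only the
 * low byte/halfword is meaningful, which is why the loop above masks them.
 */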
static int xscale_read_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct xscale_common *xscale = target_to_xscale(target);

	/* with MMU inactive, there are only physical addresses */
	if (!xscale->armv4_5_mmu.mmu_enabled)
		return xscale_read_memory(target, address, size, count, buffer);

	/** \todo: provide a non-stub implementation of this routine. */
	LOG_ERROR("%s: %s is not implemented.  Disable MMU?",
			target_name(target), __func__);
	return ERROR_FAIL;
}

static int xscale_write_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
			address, size, count);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* sanitize arguments */
	if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
		return ERROR_INVALID_ARGUMENTS;

	if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
		return ERROR_TARGET_UNALIGNED_ACCESS;

	/* send memory write request (command 0x2n, n: access size) */
	if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
		return retval;

	/* send base address for write request */
	if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
		return retval;

	/* send number of data words to be written */
	if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
		return retval;

	/* extract data from host-endian buffer into byte stream */
#if 0
	for (i = 0; i < count; i++)
	{
		switch (size)
		{
			case 4:
				value = target_buffer_get_u32(target, buffer);
				xscale_send_u32(target, value);
				buffer += 4;
				break;
			case 2:
				value = target_buffer_get_u16(target, buffer);
				xscale_send_u32(target, value);
				buffer += 2;
				break;
			case 1:
				value = *buffer;
				xscale_send_u32(target, value);
				buffer += 1;
				break;
			default:
				LOG_ERROR("should never get here");
				exit(-1);
		}
	}
#endif
	if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
		return retval;

	/* examine DCSR, to see if Sticky Abort (SA) got set */
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
	{
		/* clear SA bit */
		if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
			return retval;

		LOG_ERROR("data abort writing memory");
		return ERROR_TARGET_DATA_ABORT;
	}

	return ERROR_OK;
}

static int xscale_write_phys_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	struct xscale_common *xscale = target_to_xscale(target);

	/* with MMU inactive, there are only physical addresses */
	if (!xscale->armv4_5_mmu.mmu_enabled)
		return xscale_write_memory(target, address, size, count, buffer);

	/** \todo: provide a non-stub implementation of this routine. */
	LOG_ERROR("%s: %s is not implemented.  Disable MMU?",
			target_name(target), __func__);
	return ERROR_FAIL;
}

static int xscale_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, const uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}

static int xscale_get_ttb(struct target *target, uint32_t *result)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t ttb;
	int retval;

	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
	if (retval != ERROR_OK)
		return retval;
	ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);

	*result = ttb;

	return ERROR_OK;
}
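
/* The helpers below twiddle the standard ARM CP15 control register bits:
 * 0x1 = M (MMU enable), 0x4 = C (data/unified cache), 0x1000 = I (ICache).
 * Handler commands 0x50..0x53 are "clean DCache", "invalidate DCache",
 * "invalidate ICache" and "CPWAIT", in that order.
 */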
static int xscale_disable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;
	int retval;

	/* read cp15 control register */
	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	if (retval != ERROR_OK)
		return retval;
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

	if (mmu)
		cp15_control &= ~0x1U;

	if (d_u_cache)
	{
		/* clean DCache */
		retval = xscale_send_u32(target, 0x50);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_send_u32(target, xscale->cache_clean_address);
		if (retval != ERROR_OK)
			return retval;

		/* invalidate DCache */
		retval = xscale_send_u32(target, 0x51);
		if (retval != ERROR_OK)
			return retval;

		cp15_control &= ~0x4U;
	}

	if (i_cache)
	{
		/* invalidate ICache */
		retval = xscale_send_u32(target, 0x52);
		if (retval != ERROR_OK)
			return retval;
		cp15_control &= ~0x1000U;
	}

	/* write new cp15 control register */
	retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
	if (retval != ERROR_OK)
		return retval;

	/* execute cpwait to ensure outstanding operations complete */
	retval = xscale_send_u32(target, 0x53);
	return retval;
}

static int xscale_enable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;
	int retval;

	/* read cp15 control register */
	retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	if (retval != ERROR_OK)
		return retval;
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

	if (mmu)
		cp15_control |= 0x1U;

	if (d_u_cache)
		cp15_control |= 0x4U;

	if (i_cache)
		cp15_control |= 0x1000U;

	/* write new cp15 control register */
	retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
	if (retval != ERROR_OK)
		return retval;

	/* execute cpwait to ensure outstanding operations complete */
	retval = xscale_send_u32(target, 0x53);
	return retval;
}
static int xscale_set_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set)
	{
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		uint32_t value = breakpoint->address | 1;
		if (!xscale->ibcr0_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
			xscale->ibcr0_used = 1;
			breakpoint->set = 1;	/* breakpoint set on first breakpoint register */
		}
		else if (!xscale->ibcr1_used)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
			xscale->ibcr1_used = 1;
			breakpoint->set = 2;	/* breakpoint set on second breakpoint register */
		}
		else
		{	/* bug: availability previously verified in xscale_add_breakpoint() */
			LOG_ERROR("BUG: no hardware comparator available");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
	}
	else if (breakpoint->type == BKPT_SOFT)
	{
		if (breakpoint->length == 4)
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
			/* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
			if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
			{
				return retval;
			}
		}
		else
		{
			/* keep the original instruction in target endianness */
			if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
			/* write the bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
			if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
			{
				return retval;
			}
		}
		breakpoint->set = 1;

		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}
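
/* The IBCR registers pack the comparator address and its enable into one
 * word:  bit 0 is the enable flag, which is why a hardware breakpoint is
 * armed with "address | 1" above and disarmed by writing 0 below.
 */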
static int xscale_add_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
	{
		LOG_ERROR("no breakpoint unit available for hardware breakpoint");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if ((breakpoint->length != 2) && (breakpoint->length != 4))
	{
		LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length are supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		xscale->ibcr_available--;
	}

	return xscale_set_breakpoint(target, breakpoint);
}

static int xscale_unset_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	int retval;
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!breakpoint->set)
	{
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		if (breakpoint->set == 1)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
			xscale->ibcr0_used = 0;
		}
		else if (breakpoint->set == 2)
		{
			xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
			xscale->ibcr1_used = 0;
		}
		breakpoint->set = 0;
	}
	else
	{
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4)
		{
			if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
		}
		else
		{
			if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
		}
		breakpoint->set = 0;

		xscale_send_u32(target, 0x50);	/* clean dcache */
		xscale_send_u32(target, xscale->cache_clean_address);
		xscale_send_u32(target, 0x51);	/* invalidate dcache */
		xscale_send_u32(target, 0x52);	/* invalidate icache and flush fetch buffers */
	}

	return ERROR_OK;
}

static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set)
	{
		xscale_unset_breakpoint(target, breakpoint);
	}

	if (breakpoint->type == BKPT_HARD)
		xscale->ibcr_available++;

	return ERROR_OK;
}
static int xscale_set_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t enable = 0;
	struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
	uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	switch (watchpoint->rw)
	{
		case WPT_READ:
			enable = 0x3;
			break;
		case WPT_ACCESS:
			enable = 0x2;
			break;
		case WPT_WRITE:
			enable = 0x1;
			break;
		default:
			LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
			break;
	}

	/* For watchpoints spanning more than one word, both DBR registers must
	 * be enlisted, with the second one used as a mask. */
	if (watchpoint->length > 4)
	{
		if (xscale->dbr0_used || xscale->dbr1_used)
		{
			LOG_ERROR("BUG: sufficient hardware comparators unavailable");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}

		/* Write mask value to DBR1, based on the length argument.
		 * Address bits ignored by the comparator are those set in the mask. */
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
				watchpoint->length - 1);
		xscale->dbr1_used = 1;
		enable |= 0x100;	/* DBCON[M] */
	}

	if (!xscale->dbr0_used)
	{
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
		dbcon_value |= enable;
		xscale_set_reg_u32(dbcon, dbcon_value);
		watchpoint->set = 1;
		xscale->dbr0_used = 1;
	}
	else if (!xscale->dbr1_used)
	{
		xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
		dbcon_value |= enable << 2;
		xscale_set_reg_u32(dbcon, dbcon_value);
		watchpoint->set = 2;
		xscale->dbr1_used = 1;
	}
	else
	{
		LOG_ERROR("BUG: no hardware comparator available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	return ERROR_OK;
}
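
/* DBCON layout, as used above:  bits 1:0 (E0) enable DBR0, bits 3:2 (E1)
 * enable DBR1 (hence "enable << 2" for the second comparator), and bit 8
 * (M, 0x100) turns DBR1 into an address mask for DBR0 instead of an
 * independent comparator.
 */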
static int xscale_add_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if (xscale->dbr_available < 1)
	{
		LOG_ERROR("no more watchpoint registers available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (watchpoint->value)
		LOG_WARNING("xscale does not support value, mask arguments; ignoring");

	/* check that length is a power of two */
	for (uint32_t len = watchpoint->length; len != 1; len /= 2)
	{
		if (len % 2)
		{
			LOG_ERROR("xscale requires that watchpoint length is a power of two");
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}
	}

	if (watchpoint->length == 4)	/* single word watchpoint */
	{
		xscale->dbr_available--;	/* one DBR reg used */
		return ERROR_OK;
	}

	/* watchpoints across multiple words require both DBR registers */
	if (xscale->dbr_available < 2)
	{
		LOG_ERROR("insufficient watchpoint registers available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (watchpoint->length > watchpoint->address)
	{
		LOG_ERROR("xscale does not support watchpoints with length "
				"greater than address");
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	xscale->dbr_available = 0;
	return ERROR_OK;
}
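
/* A plausible reading of the length/address check above:  since DBR1 holds
 * a mask of "don't care" address bits (length - 1), the watched range is
 * only exact when the base address is aligned to the power-of-two length,
 * and an address numerically smaller than the length can never be.
 */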
static int xscale_unset_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
	uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!watchpoint->set)
	{
		LOG_WARNING("watchpoint not set");
		return ERROR_OK;
	}

	if (watchpoint->set == 1)
	{
		if (watchpoint->length > 4)
		{
			dbcon_value &= ~0x103;	/* clear DBCON[M] as well */
			xscale->dbr1_used = 0;	/* DBR1 was used for mask */
		}
		else
			dbcon_value &= ~0x3;

		xscale_set_reg_u32(dbcon, dbcon_value);
		xscale->dbr0_used = 0;
	}
	else if (watchpoint->set == 2)
	{
		dbcon_value &= ~0xc;
		xscale_set_reg_u32(dbcon, dbcon_value);
		xscale->dbr1_used = 0;
	}
	watchpoint->set = 0;

	return ERROR_OK;
}

static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (watchpoint->set)
	{
		xscale_unset_watchpoint(target, watchpoint);
	}

	if (watchpoint->length > 4)
		xscale->dbr_available++;	/* both DBR regs now available */

	xscale->dbr_available++;

	return ERROR_OK;
}
static int xscale_get_reg(struct reg *reg)
{
	struct xscale_reg *arch_info = reg->arch_info;
	struct target *target = arch_info->target;
	struct xscale_common *xscale = target_to_xscale(target);

	/* DCSR, TX and RX are accessible via JTAG */
	if (strcmp(reg->name, "XSCALE_DCSR") == 0)
	{
		return xscale_read_dcsr(arch_info->target);
	}
	else if (strcmp(reg->name, "XSCALE_TX") == 0)
	{
		/* 1 = consume register content */
		return xscale_read_tx(arch_info->target, 1);
	}
	else if (strcmp(reg->name, "XSCALE_RX") == 0)
	{
		/* can't read from RX register (host -> debug handler) */
		return ERROR_OK;
	}
	else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
	{
		/* can't (explicitly) read from TXRXCTRL register */
		return ERROR_OK;
	}
	else	/* Other DBG registers have to be transferred by the debug handler */
	{
		/* send CP read request (command 0x40) */
		xscale_send_u32(target, 0x40);

		/* send CP register number */
		xscale_send_u32(target, arch_info->dbg_handler_number);

		/* read register value */
		xscale_read_tx(target, 1);
		buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);

		reg->dirty = 0;
		reg->valid = 1;
	}

	return ERROR_OK;
}

static int xscale_set_reg(struct reg *reg, uint8_t *buf)
{
	struct xscale_reg *arch_info = reg->arch_info;
	struct target *target = arch_info->target;
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t value = buf_get_u32(buf, 0, 32);

	/* DCSR, TX and RX are accessible via JTAG */
	if (strcmp(reg->name, "XSCALE_DCSR") == 0)
	{
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
		return xscale_write_dcsr(arch_info->target, -1, -1);
	}
	else if (strcmp(reg->name, "XSCALE_RX") == 0)
	{
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
		return xscale_write_rx(arch_info->target);
	}
	else if (strcmp(reg->name, "XSCALE_TX") == 0)
	{
		/* can't write to TX register (debug handler -> host) */
		return ERROR_OK;
	}
	else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
	{
		/* can't (explicitly) write to TXRXCTRL register */
		return ERROR_OK;
	}
	else	/* Other DBG registers have to be transferred by the debug handler */
	{
		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, arch_info->dbg_handler_number);

		/* send CP register value */
		xscale_send_u32(target, value);
		buf_set_u32(reg->value, 0, 32, value);
	}

	return ERROR_OK;
}

static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
	struct xscale_reg *dcsr_arch_info = dcsr->arch_info;

	/* send CP write request (command 0x41) */
	xscale_send_u32(target, 0x41);

	/* send CP register number */
	xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);

	/* send CP register value */
	xscale_send_u32(target, value);
	buf_set_u32(dcsr->value, 0, 32, value);

	return ERROR_OK;
}
static int xscale_read_trace(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct xscale_trace_data **trace_data_p;

	/* 258 words from debug handler
	 * 256 trace buffer entries
	 * 2 checkpoint addresses
	 */
	uint32_t trace_buffer[258];
	int is_address[256];
	int i, j;
	unsigned int num_checkpoints = 0;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target must be stopped to read trace data");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* send read trace buffer command (command 0x61) */
	xscale_send_u32(target, 0x61);

	/* receive trace buffer content */
	xscale_receive(target, trace_buffer, 258);

	/* parse buffer backwards to identify address entries */
	for (i = 255; i >= 0; i--)
	{
		/* also count number of checkpointed entries */
		if ((trace_buffer[i] & 0xe0) == 0xc0)
			num_checkpoints++;

		is_address[i] = 0;
		if (((trace_buffer[i] & 0xf0) == 0x90) ||
			((trace_buffer[i] & 0xf0) == 0xd0))
		{
			if (i > 0)
				is_address[--i] = 1;
			if (i > 0)
				is_address[--i] = 1;
			if (i > 0)
				is_address[--i] = 1;
			if (i > 0)
				is_address[--i] = 1;
		}
	}

	/* search for the first non-zero entry that is not part of an address */
	for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
		;

	if (j == 256)
	{
		LOG_DEBUG("no trace data collected");
		return ERROR_XSCALE_NO_TRACE_DATA;
	}

	/* account for a possible partial address at buffer start (wrap mode only) */
	if (is_address[0])
	{	/* first entry is an address; complete set of 4? */
		i = 1;
		while (i < 4)
			if (!is_address[i++])
				break;
		if (i < 4)
			j += i;	/* partial address; can't use it */
	}

	/* if the first valid entry is an indirect branch, we can't use it either (no address) */
	if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
		j++;

	/* walk linked list to terminating entry */
	for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
		;

	*trace_data_p = malloc(sizeof(struct xscale_trace_data));
	(*trace_data_p)->next = NULL;
	(*trace_data_p)->chkpt0 = trace_buffer[256];
	(*trace_data_p)->chkpt1 = trace_buffer[257];
	(*trace_data_p)->last_instruction =
		buf_get_u32(armv4_5->pc->value, 0, 32);
	(*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
	(*trace_data_p)->depth = 256 - j;
	(*trace_data_p)->num_checkpoints = num_checkpoints;

	for (i = j; i < 256; i++)
	{
		(*trace_data_p)->entries[i - j].data = trace_buffer[i];
		if (is_address[i])
			(*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
		else
			(*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
	}

	return ERROR_OK;
}
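
/* Layout of the 258-word dump handled above:  words 0..255 are the trace
 * buffer itself, words 256/257 the two checkpoint registers.  A message
 * byte's upper nybble gives its type:  0x9n/0xDn are indirect branches,
 * each preceded in the buffer by four address bytes, and entries matching
 * (byte & 0xe0) == 0xc0 are the checkpointed variants counted here.
 */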
static int xscale_read_instruction(struct target *target, uint32_t pc,
		struct arm_instruction *instruction)
{
	struct xscale_common *const xscale = target_to_xscale(target);
	int i;
	int section = -1;
	size_t size_read;
	uint32_t opcode;
	int retval;

	if (!xscale->trace.image)
		return ERROR_TRACE_IMAGE_UNAVAILABLE;

	/* search for the section the current instruction belongs to */
	for (i = 0; i < xscale->trace.image->num_sections; i++)
	{
		if ((xscale->trace.image->sections[i].base_address <= pc) &&
			(xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > pc))
		{
			section = i;
			break;
		}
	}

	if (section == -1)
	{
		/* current instruction couldn't be found in the image */
		return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
	}

	if (xscale->trace.core_state == ARM_STATE_ARM)
	{
		uint8_t buf[4];
		if ((retval = image_read_section(xscale->trace.image, section,
				pc - xscale->trace.image->sections[section].base_address,
				4, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction");
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u32(target, buf);
		arm_evaluate_opcode(opcode, pc, instruction);
	}
	else if (xscale->trace.core_state == ARM_STATE_THUMB)
	{
		uint8_t buf[2];
		if ((retval = image_read_section(xscale->trace.image, section,
				pc - xscale->trace.image->sections[section].base_address,
				2, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction");
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u16(target, buf);
		thumb_evaluate_opcode(opcode, pc, instruction);
	}
	else
	{
		LOG_ERROR("BUG: unknown core state encountered");
		exit(-1);
	}

	return ERROR_OK;
}

/* Extract the address encoded into trace data.
 * Write the result to the location referenced by argument 'target', or 0 if incomplete. */
static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
		int i, uint32_t *target)
{
	/* if there are fewer than four entries prior to the indirect branch message
	 * we can't extract the address */
	if (i < 4)
		*target = 0;
	else
		*target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
			(trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
}

static inline void xscale_display_instruction(struct target *target, uint32_t pc,
		struct arm_instruction *instruction,
		struct command_context *cmd_ctx)
{
	int retval = xscale_read_instruction(target, pc, instruction);
	if (retval == ERROR_OK)
		command_print(cmd_ctx, "%s", instruction->text);
	else
		command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
}
static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct xscale_trace_data *trace_data = xscale->trace.data;
	int i, retval;
	uint32_t breakpoint_pc;
	struct arm_instruction instruction;
	uint32_t current_pc = 0;	/* initialized when address determined */

	if (!xscale->trace.image)
		LOG_WARNING("No trace image loaded; use 'xscale trace_image'");

	/* loop for each trace buffer that was loaded from target */
	while (trace_data)
	{
		int chkpt = 0;	/* incremented as checkpointed entries found */
		int j;

		/* FIXME: set this to correct mode when trace buffer is first enabled */
		xscale->trace.core_state = ARM_STATE_ARM;

		/* loop for each entry in this trace buffer */
		for (i = 0; i < trace_data->depth; i++)
		{
			int exception = 0;
			uint32_t chkpt_reg = 0x0;
			uint32_t branch_target = 0;
			int count;

			/* trace entry type is upper nybble of 'message byte' */
			int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;

			/* Target addresses of indirect branches are written into buffer
			 * before the message byte representing the branch.  Skip past it. */
			if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
				continue;

			switch (trace_msg_type)
			{
				case 0:	/* Exceptions */
				case 1:
				case 2:
				case 3:
				case 4:
				case 5:
				case 6:
				case 7:
					exception = (trace_data->entries[i].data & 0x70) >> 4;

					/* FIXME: vector table may be at ffff0000 */
					branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
					break;

				case 8:	/* Direct Branch */
					break;

				case 9:	/* Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					break;

				case 13:	/* Checkpointed Indirect Branch */
					xscale_branch_address(trace_data, i, &branch_target);
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1;	/* 2 chkpts, this is oldest */
					else
						chkpt_reg = trace_data->chkpt0;	/* 1 chkpt, or 2 and newest */

					chkpt++;
					break;

				case 12:	/* Checkpointed Direct Branch */
					if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
						chkpt_reg = trace_data->chkpt1;	/* 2 chkpts, this is oldest */
					else
						chkpt_reg = trace_data->chkpt0;	/* 1 chkpt, or 2 and newest */

					/* if we don't have a current_pc yet, the checkpoint is our starting point */
					if (current_pc == 0)
						branch_target = chkpt_reg;

					chkpt++;
					break;

				case 15:	/* Roll-over */
					break;

				default:	/* Reserved */
					LOG_WARNING("trace is suspect: invalid trace message byte");
					continue;
			}

			/* If we don't have the current_pc yet, but we did get the branch target
			 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
			 * then we can start displaying instructions at the next iteration, with
			 * branch_target as the starting point.
			 */
			if (current_pc == 0)
			{
				current_pc = branch_target;	/* remains 0 unless branch_target obtained */
				continue;
			}

			/* We have current_pc.  Read and display the instructions from the image.
			 * First, display count instructions (lower nybble of message byte). */
			count = trace_data->entries[i].data & 0x0f;
			for (j = 0; j < count; j++)
			{
				xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			/* An additional instruction is implicitly added to count for
			 * rollover and some exceptions: undef, swi, prefetch abort. */
			if ((trace_msg_type == 15) || (exception > 0 && exception < 4))
			{
				xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
				current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
			}

			if (trace_msg_type == 15)	/* rollover */
				continue;

			if (exception)
			{
				command_print(cmd_ctx, "--- exception %i ---", exception);
				continue;
			}

			/* not an exception or rollover; the next instruction is a branch and is
			 * not included in the count */
			xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);

			/* for direct branches, extract branch destination from instruction */
			if ((trace_msg_type == 8) || (trace_msg_type == 12))
			{
				retval = xscale_read_instruction(target, current_pc, &instruction);
				if (retval == ERROR_OK)
					current_pc = instruction.info.b_bl_bx_blx.target_address;
				else
					current_pc = 0;	/* branch destination unknown */

				/* direct branch w/ checkpoint; can also get it from the checkpoint reg */
				if (trace_msg_type == 12)
				{
					if (current_pc == 0)
						current_pc = chkpt_reg;
					else if (current_pc != chkpt_reg)	/* sanity check */
						LOG_WARNING("trace is suspect: checkpoint register "
							"inconsistent with address from image");
				}

				if (current_pc == 0)
					command_print(cmd_ctx, "address unknown");

				continue;
			}

			/* indirect branch; the branch destination was read from the trace buffer */
			if ((trace_msg_type == 9) || (trace_msg_type == 13))
			{
				current_pc = branch_target;

				/* sanity check (checkpoint reg is redundant) */
				if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
					LOG_WARNING("trace is suspect: checkpoint register "
						"inconsistent with address from trace buffer");
			}

		}	/* END: for (i = 0; i < trace_data->depth; i++) */

		breakpoint_pc = trace_data->last_instruction;	/* used below */
		trace_data = trace_data->next;

	}	/* END: while (trace_data) */

	/* Finally... display all instructions up to the value of the pc when the
	 * debug break occurred (saved when trace data was collected from target).
	 * This is necessary because the trace only records execution branches and 16
	 * consecutive instructions (rollovers), so the last few instructions are
	 * typically missing.
	 */
	if (current_pc == 0)
		return ERROR_OK;	/* current_pc was never found */

	/* how many instructions remaining? */
	int gap_count = (breakpoint_pc - current_pc) /
		(xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);

	/* should never be negative or over 16, but verify */
	if (gap_count < 0 || gap_count > 16)
	{
		LOG_WARNING("trace is suspect: excessive gap at end of trace");
		return ERROR_OK;	/* bail; a large or negative value is no good */
	}

	/* display remaining instructions */
	for (i = 0; i < gap_count; i++)
	{
		xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
		current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
	}

	return ERROR_OK;
}
static const struct reg_arch_type xscale_reg_type = {
	.get = xscale_get_reg,
	.set = xscale_set_reg,
};

static void xscale_build_reg_cache(struct target *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct arm *armv4_5 = &xscale->armv4_5_common;
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
	int i;
	int num_regs = ARRAY_SIZE(xscale_reg_arch_info);

	(*cache_p) = arm_build_reg_cache(target, armv4_5);

	(*cache_p)->next = malloc(sizeof(struct reg_cache));
	cache_p = &(*cache_p)->next;

	/* fill in values for the xscale reg cache */
	(*cache_p)->name = "XScale registers";
	(*cache_p)->next = NULL;
	(*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
	(*cache_p)->num_regs = num_regs;

	for (i = 0; i < num_regs; i++)
	{
		(*cache_p)->reg_list[i].name = xscale_reg_list[i];
		(*cache_p)->reg_list[i].value = calloc(4, 1);
		(*cache_p)->reg_list[i].dirty = 0;
		(*cache_p)->reg_list[i].valid = 0;
		(*cache_p)->reg_list[i].size = 32;
		(*cache_p)->reg_list[i].arch_info = &arch_info[i];
		(*cache_p)->reg_list[i].type = &xscale_reg_type;
		arch_info[i] = xscale_reg_arch_info[i];
		arch_info[i].target = target;
	}

	xscale->reg_cache = (*cache_p);
}
static int xscale_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}

static int xscale_init_arch_info(struct target *target,
		struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	struct arm *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specific data */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* a variant param ... */
	if (variant) {
		int ir_length = 0;

		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else if (strcmp(variant, "pxa3xx") == 0)
			ir_length = 11;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* PXA3xx shifts the JTAG instructions */
	if (tap->ir_length == 11)
		xscale->xscale_variant = XSCALE_PXA3XX;
	else
		xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;
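
	/* ARM "B" encodes a signed word offset relative to PC + 8, so each
	 * immediate below is (destination - vector_address - 8) >> 2.  Vector 0
	 * (reset) branches into the handler at handler_address + 0x20; the other
	 * seven get offset 0xfffffe (-2 words), i.e. a branch-to-self, until
	 * real vectors are installed.
	 */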
	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
			target_name(target));

	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.mode = XSCALE_TRACE_DISABLED;
	xscale->trace.buffer_fill = 0;
	xscale->trace.fill_counter = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	arm_init_arch_info(target, armv4_5);

	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
static int xscale_target_create(struct target *target, Jim_Interp *interp)
{
	struct xscale_common *xscale;

	if (sizeof xscale_debug_handler - 1 > 0x800) {
		LOG_ERROR("debug_handler.bin: larger than 2kB");
		return ERROR_FAIL;
	}

	xscale = calloc(1, sizeof(*xscale));
	if (!xscale)
		return ERROR_FAIL;

	return xscale_init_arch_info(target, xscale, target->tap,
			target->variant);
}

COMMAND_HANDLER(xscale_handle_debug_handler_command)
{
	struct target *target = NULL;
	struct xscale_common *xscale;
	int retval;
	uint32_t handler_address;

	if (CMD_ARGC < 2)
	{
		LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
		return ERROR_OK;
	}

	if ((target = get_target(CMD_ARGV[0])) == NULL)
	{
		LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
		return ERROR_FAIL;
	}

	xscale = target_to_xscale(target);
	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);

	if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
		((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
	{
		xscale->handler_address = handler_address;
	}
	else
	{
		LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
		return ERROR_FAIL;
	}

	return ERROR_OK;
}
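
/* Illustrative usage from a config script (target name/number is the first
 * argument, handler address the second, inside one of the windows checked
 * above):
 *
 *	xscale debug_handler 0 0xfe000800
 */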
COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
{
	struct target *target = NULL;
	struct xscale_common *xscale;
	int retval;
	uint32_t cache_clean_address;

	if (CMD_ARGC < 2)
	{
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	target = get_target(CMD_ARGV[0]);
	if (target == NULL)
	{
		LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
		return ERROR_FAIL;
	}
	xscale = target_to_xscale(target);
	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);

	if (cache_clean_address & 0xffff)
	{
		LOG_ERROR("xscale cache_clean_address <address> must be 64kB aligned");
	}
	else
	{
		xscale->cache_clean_address = cache_clean_address;
	}

	return ERROR_OK;
}

COMMAND_HANDLER(xscale_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
}

static int xscale_virt2phys(struct target *target,
		uint32_t virtual, uint32_t *physical)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cb;

	if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
		LOG_ERROR(xscale_not);
		return ERROR_TARGET_INVALID;
	}

	uint32_t ret;
	int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
			virtual, &cb, &ret);
	if (retval != ERROR_OK)
		return retval;
	*physical = ret;
	return ERROR_OK;
}

static int xscale_mmu(struct target *target, int *enabled)
{
	struct xscale_common *xscale = target_to_xscale(target);

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_INVALID;
	}
	*enabled = xscale->armv4_5_mmu.mmu_enabled;
	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_mmu_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC >= 1)
	{
		bool enable;
		COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
		if (enable)
			xscale_enable_mmu_caches(target, 1, 0, 0);
		else
			xscale_disable_mmu_caches(target, 1, 0, 0);
		xscale->armv4_5_mmu.mmu_enabled = enable;
	}

	command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");

	return ERROR_OK;
}

COMMAND_HANDLER(xscale_handle_idcache_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);

	int retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	bool icache = false;
	if (strcmp(CMD_NAME, "icache") == 0)
		icache = true;

	if (CMD_ARGC >= 1)
	{
		bool enable;
		COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
		if (icache) {
			xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
			if (enable)
				xscale_enable_mmu_caches(target, 0, 0, 1);
			else
				xscale_disable_mmu_caches(target, 0, 0, 1);
		} else {
			xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
			if (enable)
				xscale_enable_mmu_caches(target, 0, 1, 0);
			else
				xscale_disable_mmu_caches(target, 0, 1, 0);
		}
	}

	bool enabled = icache ?
		xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
		xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
	const char *msg = enabled ? "enabled" : "disabled";
	command_print(CMD_CTX, "%s %s", CMD_NAME, msg);

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC < 1)
	{
		command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
	}
	else
	{
		COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
		xscale_write_dcsr(target, -1, -1);
	}

	command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);

	return ERROR_OK;
}

COMMAND_HANDLER(xscale_handle_vector_table_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int err = 0;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC == 0)	/* print current settings */
	{
		int idx;

		command_print(CMD_CTX, "active user-set static vectors:");
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_low_vectors_set & (1 << idx))
				command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
		for (idx = 1; idx < 8; idx++)
			if (xscale->static_high_vectors_set & (1 << idx))
				command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
		return ERROR_OK;
	}

	if (CMD_ARGC != 3)
		err = 1;
	else
	{
		int idx;
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
		uint32_t vec;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);

		if (idx < 1 || idx >= 8)
			err = 1;

		if (!err && strcmp(CMD_ARGV[0], "low") == 0)
		{
			xscale->static_low_vectors_set |= (1 << idx);
			xscale->static_low_vectors[idx] = vec;
		}
		else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
		{
			xscale->static_high_vectors_set |= (1 << idx);
			xscale->static_high_vectors[idx] = vec;
		}
		else
			err = 1;
	}

	if (err)
		command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");

	return ERROR_OK;
}
COMMAND_HANDLER(xscale_handle_trace_buffer_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t dcsr_value;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC >= 1)
	{
		if (strcmp("enable", CMD_ARGV[0]) == 0)
			xscale->trace.mode = XSCALE_TRACE_WRAP;	/* default */
		else if (strcmp("disable", CMD_ARGV[0]) == 0)
			xscale->trace.mode = XSCALE_TRACE_DISABLED;
		else
			return ERROR_INVALID_ARGUMENTS;
	}

	if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED)
	{
		if (strcmp("fill", CMD_ARGV[1]) == 0)
		{
			int buffcount = 1;	/* default */
			if (CMD_ARGC >= 3)
				COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
			if (buffcount < 1)	/* invalid */
			{
				command_print(CMD_CTX, "fill buffer count must be > 0");
				xscale->trace.mode = XSCALE_TRACE_DISABLED;
				return ERROR_INVALID_ARGUMENTS;
			}
			xscale->trace.buffer_fill = buffcount;
			xscale->trace.mode = XSCALE_TRACE_FILL;
		}
		else if (strcmp("wrap", CMD_ARGV[1]) == 0)
			xscale->trace.mode = XSCALE_TRACE_WRAP;
		else
		{
			xscale->trace.mode = XSCALE_TRACE_DISABLED;
			return ERROR_INVALID_ARGUMENTS;
		}
	}

	if (xscale->trace.mode != XSCALE_TRACE_DISABLED)
	{
		char fill_string[20];	/* large enough for "fill " plus any 32-bit value */
		sprintf(fill_string, "fill %" PRId32, xscale->trace.buffer_fill);
		command_print(CMD_CTX, "trace buffer enabled (%s)",
			(xscale->trace.mode == XSCALE_TRACE_FILL)
				? fill_string : "wrap");
	}
	else
		command_print(CMD_CTX, "trace buffer disabled");

	dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
	if (xscale->trace.mode == XSCALE_TRACE_FILL)
		xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
	else
		xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);

	return ERROR_OK;
}
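
/* Illustrative invocations accepted by the parser above:
 *
 *	xscale trace_buffer enable fill 2	;# capture two buffers, then stop
 *	xscale trace_buffer enable wrap		;# continuous capture, overwrite oldest
 *	xscale trace_buffer disable
 *
 * The final DCSR write sets bit 1 for fill mode and clears bits 1:0
 * otherwise.
 */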
  2949. COMMAND_HANDLER(xscale_handle_trace_image_command)
  2950. {
  2951. struct target *target = get_current_target(CMD_CTX);
  2952. struct xscale_common *xscale = target_to_xscale(target);
  2953. int retval;
  2954. if (CMD_ARGC < 1)
  2955. {
  2956. command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
  2957. return ERROR_OK;
  2958. }
  2959. retval = xscale_verify_pointer(CMD_CTX, xscale);
  2960. if (retval != ERROR_OK)
  2961. return retval;
  2962. if (xscale->trace.image)
  2963. {
  2964. image_close(xscale->trace.image);
  2965. free(xscale->trace.image);
  2966. command_print(CMD_CTX, "previously loaded image found and closed");
  2967. }
  2968. xscale->trace.image = malloc(sizeof(struct image));
  2969. xscale->trace.image->base_address_set = 0;
  2970. xscale->trace.image->start_address_set = 0;
  2971. /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
  2972. if (CMD_ARGC >= 2)
  2973. {
  2974. xscale->trace.image->base_address_set = 1;
  2975. COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
  2976. }
  2977. else
  2978. {
  2979. xscale->trace.image->base_address_set = 0;
  2980. }
  2981. if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
  2982. {
  2983. free(xscale->trace.image);
  2984. xscale->trace.image = NULL;
  2985. return ERROR_OK;
  2986. }
  2987. return ERROR_OK;
  2988. }

COMMAND_HANDLER(xscale_handle_dump_trace_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	struct xscale_trace_data *trace_data;
	struct fileio file;
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC < 1)
	{
		command_print(CMD_CTX, "usage: xscale dump_trace <file>");
		return ERROR_OK;
	}
	trace_data = xscale->trace.data;
	if (!trace_data)
	{
		command_print(CMD_CTX, "no trace data collected");
		return ERROR_OK;
	}

	if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
	{
		return ERROR_OK;
	}

	while (trace_data)
	{
		int i;

		fileio_write_u32(&file, trace_data->chkpt0);
		fileio_write_u32(&file, trace_data->chkpt1);
		fileio_write_u32(&file, trace_data->last_instruction);
		fileio_write_u32(&file, trace_data->depth);

		for (i = 0; i < trace_data->depth; i++)
			fileio_write_u32(&file, trace_data->entries[i].data
					| ((trace_data->entries[i].type & 0xffff) << 16));

		trace_data = trace_data->next;
	}

	fileio_close(&file);

	return ERROR_OK;
}
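
/*
 * Note on the dump format produced above: each trace_data node is written
 * as a sequence of 32-bit words -- chkpt0, chkpt1, last_instruction,
 * depth -- followed by `depth' packed entries, with the entry type in
 * bits 31:16 and the entry data in bits 15:0.
 */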

COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	xscale_analyze_trace(target, CMD_CTX);

	return ERROR_OK;
}

COMMAND_HANDLER(xscale_handle_cp15)
{
	struct target *target = get_current_target(CMD_CTX);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(CMD_CTX, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}
	uint32_t reg_no = 0;
	struct reg *reg = NULL;

	if (CMD_ARGC > 0)
	{
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
		/* translate from XScale cp15 register number to the OpenOCD register enum */
		switch (reg_no)
		{
			case 0:
				reg_no = XSCALE_MAINID;
				break;
			case 1:
				reg_no = XSCALE_CTRL;
				break;
			case 2:
				reg_no = XSCALE_TTB;
				break;
			case 3:
				reg_no = XSCALE_DAC;
				break;
			case 5:
				reg_no = XSCALE_FSR;
				break;
			case 6:
				reg_no = XSCALE_FAR;
				break;
			case 13:
				reg_no = XSCALE_PID;
				break;
			case 15:
				reg_no = XSCALE_CPACCESS;
				break;
			default:
				command_print(CMD_CTX, "invalid register number");
				return ERROR_INVALID_ARGUMENTS;
		}

		reg = &xscale->reg_cache->reg_list[reg_no];
	}
	if (CMD_ARGC == 1)
	{
		uint32_t value;

		/* read the selected cp15 register */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32, reg->name,
				(int)(reg->size), value);
	}
	else if (CMD_ARGC == 2)
	{
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);

		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);
	}
	else
		command_print(CMD_CTX, "usage: cp15 <register> [value]");

	return ERROR_OK;
}
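
/*
 * Examples for the handler above (the written value is illustrative):
 * read the translation table base (cp15 register 2), then open up the
 * domain access control register (cp15 register 3):
 *
 *   xscale cp15 2
 *   xscale cp15 3 0xffffffff
 */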

static const struct command_registration xscale_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = xscale_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about CPU caches",
	},
	{
		.name = "mmu",
		.handler = xscale_handle_mmu_command,
		.mode = COMMAND_EXEC,
		.help = "enable or disable the MMU",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "icache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display ICache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "dcache",
		.handler = xscale_handle_idcache_command,
		.mode = COMMAND_EXEC,
		.help = "display DCache state, optionally enabling or "
			"disabling it",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "vector_catch",
		.handler = xscale_handle_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "set or display 8-bit mask of vectors "
			"that should trigger debug entry",
		.usage = "[mask]",
	},
	{
		.name = "vector_table",
		.handler = xscale_handle_vector_table_command,
		.mode = COMMAND_EXEC,
		.help = "set vector table entry in mini-ICache, "
			"or display current tables",
		.usage = "[('high'|'low') index code]",
	},
	{
		.name = "trace_buffer",
		.handler = xscale_handle_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "display trace buffer status, enable or disable "
			"tracing, and optionally reconfigure trace mode",
		.usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
	},
	{
		.name = "dump_trace",
		.handler = xscale_handle_dump_trace_command,
		.mode = COMMAND_EXEC,
		.help = "dump content of trace buffer to file",
		.usage = "filename",
	},
	{
		.name = "analyze_trace",
		.handler = xscale_handle_analyze_trace_buffer_command,
		.mode = COMMAND_EXEC,
		.help = "analyze content of trace buffer",
		.usage = "",
	},
	{
		.name = "trace_image",
		.handler = xscale_handle_trace_image_command,
		.mode = COMMAND_EXEC,
		.help = "load image from file to address (default 0)",
		.usage = "filename [offset [filetype]]",
	},
	{
		.name = "cp15",
		.handler = xscale_handle_cp15,
		.mode = COMMAND_EXEC,
		.help = "Read or write coprocessor 15 register.",
		.usage = "register [value]",
	},
	COMMAND_REGISTRATION_DONE
};

static const struct command_registration xscale_any_command_handlers[] = {
	{
		.name = "debug_handler",
		.handler = xscale_handle_debug_handler_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for debug handler.",
		.usage = "target address",
	},
	{
		.name = "cache_clean_address",
		.handler = xscale_handle_cache_clean_address_command,
		.mode = COMMAND_ANY,
		.help = "Change address used for cleaning data cache.",
		.usage = "address",
	},
	{
		.chain = xscale_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};

static const struct command_registration xscale_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "xscale",
		.mode = COMMAND_ANY,
		.help = "xscale command group",
		.chain = xscale_any_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
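
/*
 * The chained registrations above expose everything as subcommands of
 * "xscale", alongside the generic "arm" commands.  Typical use (target
 * name/number and addresses are illustrative, not defaults):
 *
 *   xscale debug_handler 0 0xfe000800
 *   xscale cache_clean_address 0xfffe0000
 */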

struct target_type xscale_target =
{
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT on some cores, allow exporting iwmmxt registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.read_phys_memory = xscale_read_phys_memory,
	.write_memory = xscale_write_memory,
	.write_phys_memory = xscale_write_phys_memory,
	.bulk_write_memory = xscale_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.commands = xscale_command_handlers,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};