You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

3648 lines
96 KiB

  1. /***************************************************************************
  2. * Copyright (C) 2006, 2007 by Dominic Rath *
  3. * Dominic.Rath@gmx.de *
  4. * *
  5. * Copyright (C) 2007,2008 Øyvind Harboe *
  6. * oyvind.harboe@zylin.com *
  7. * *
  8. * Copyright (C) 2009 Michael Schwingen *
  9. * michael@schwingen.org *
  10. * *
  11. * This program is free software; you can redistribute it and/or modify *
  12. * it under the terms of the GNU General Public License as published by *
  13. * the Free Software Foundation; either version 2 of the License, or *
  14. * (at your option) any later version. *
  15. * *
  16. * This program is distributed in the hope that it will be useful, *
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  19. * GNU General Public License for more details. *
  20. * *
  21. * You should have received a copy of the GNU General Public License *
  22. * along with this program; if not, write to the *
  23. * Free Software Foundation, Inc., *
  24. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
  25. ***************************************************************************/
  26. #ifdef HAVE_CONFIG_H
  27. #include "config.h"
  28. #endif
  29. #include "xscale.h"
  30. #include "target_type.h"
  31. #include "arm7_9_common.h"
  32. #include "arm_simulator.h"
  33. #include "arm_disassembler.h"
  34. #include "time_support.h"
  35. #include "image.h"
  36. /*
  37. * Important XScale documents available as of October 2009 include:
  38. *
  39. * Intel XScale® Core Developer’s Manual, January 2004
  40. * Order Number: 273473-002
  41. * This has a chapter detailing debug facilities, and punts some
  42. * details to chip-specific microarchitecture documents.
  43. *
  44. * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
  45. * Document Number: 273539-005
  46. * Less detailed than the developer's manual, but summarizes those
  47. * missing details (for most XScales) and gives LOTS of notes about
  48. * debugger/handler interaction issues. Presents a simpler reset
  49. * and load-handler sequence than the arch doc. (Note, OpenOCD
  50. * doesn't currently support "Hot-Debug" as defined there.)
  51. *
  52. * Chip-specific microarchitecture documents may also be useful.
  53. */
  54. /* forward declarations */
  55. static int xscale_resume(struct target_s *, int current,
  56. uint32_t address, int handle_breakpoints, int debug_execution);
  57. static int xscale_debug_entry(target_t *);
  58. static int xscale_restore_context(target_t *);
  59. static int xscale_get_reg(reg_t *reg);
  60. static int xscale_set_reg(reg_t *reg, uint8_t *buf);
  61. static int xscale_set_breakpoint(struct target_s *, struct breakpoint *);
  62. static int xscale_set_watchpoint(struct target_s *, struct watchpoint *);
  63. static int xscale_unset_breakpoint(struct target_s *, struct breakpoint *);
  64. static int xscale_read_trace(target_t *);
  65. /* This XScale "debug handler" is loaded into the processor's
  66. * mini-ICache, which is 2K of code writable only via JTAG.
  67. *
  68. * FIXME the OpenOCD "bin2char" utility currently doesn't handle
  69. * binary files cleanly. It's string oriented, and terminates them
  70. * with a NUL character. Better would be to generate the constants
  71. * and let other code decide names, scoping, and other housekeeping.
  72. */
  73. static /* unsigned const char xscale_debug_handler[] = ... */
  74. #include "xscale_debug.h"
/* Display names for the XScale-specific registers exposed through the
 * register cache.  Order must match xscale_reg_arch_info[] below and the
 * XSCALE_* register index constants.
 */
static char *const xscale_reg_list[] =
{
	"XSCALE_MAINID",	/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",		/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",		/* 20 */
	"XSCALE_TXRXCTRL",
};
/* Per-register architecture info, parallel to xscale_reg_list[].  The
 * first member is the register number (-1 for TX/RX/TXRXCTRL, which are
 * not CP15 registers and are only reachable over JTAG); the second
 * member is NULL here and presumably gets filled in when the register
 * cache is built -- confirm against the cache-setup code.
 */
static const struct xscale_reg xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL},	/* DCSR accessed via JTAG or SW */
	{-1, NULL},	/* TX accessed via JTAG */
	{-1, NULL},	/* RX accessed via JTAG */
	{-1, NULL},	/* TXRXCTRL implicit access via JTAG */
};

/* register-type handle for XScale registers; -1 means "not yet
 * registered" (registration happens elsewhere -- not visible here) */
static int xscale_reg_arch_type = -1;
  126. /* convenience wrapper to access XScale specific registers */
  127. static int xscale_set_reg_u32(reg_t *reg, uint32_t value)
  128. {
  129. uint8_t buf[4];
  130. buf_set_u32(buf, 0, 32, value);
  131. return xscale_set_reg(reg, buf);
  132. }
  133. static const char xscale_not[] = "target is not an XScale";
  134. static int xscale_verify_pointer(struct command_context_s *cmd_ctx,
  135. struct xscale_common *xscale)
  136. {
  137. if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
  138. command_print(cmd_ctx, xscale_not);
  139. return ERROR_TARGET_INVALID;
  140. }
  141. return ERROR_OK;
  142. }
/* Queue an IR scan selecting @a new_instr on @a tap, skipping the scan
 * entirely when the TAP already holds that instruction.
 *
 * Returns ERROR_FAIL for a NULL tap, otherwise ERROR_OK.  The scan is
 * only queued; the caller must eventually run jtag_execute_queue().
 * NOTE(review): assumes jtag_add_ir_scan() copies out_value before this
 * stack buffer (scratch) goes out of scope -- confirm in the JTAG core.
 */
static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr)
{
	if (tap == NULL)
		return ERROR_FAIL;

	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
	{
		struct scan_field field;
		uint8_t scratch[4];

		memset(&field, 0, sizeof field);
		field.tap = tap;
		field.num_bits = tap->ir_length;
		field.out_value = scratch;
		buf_set_u32(field.out_value, 0, field.num_bits, new_instr);

		jtag_add_ir_scan(1, &field, jtag_get_end_state());
	}

	return ERROR_OK;
}
/* Read the Debug Control and Status Register via the SELDCSR scan chain.
 *
 * Two 36-bit DR scans are performed: the first captures the current DCSR
 * into the register cache (while shifting out the cached hold_rst /
 * external_debug_break bits in the 3-bit control field), the second
 * writes the value just read back so the DCSR contents end up unchanged.
 * Returns ERROR_OK or a JTAG error code.
 */
static int xscale_read_dcsr(target_t *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_DRPAUSE);
	xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);

	/* control field: bit 1 = hold_rst, bit 2 = external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	/* capture DCSR straight into the register cache */
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_set_end_state(TAP_IDLE);

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
  211. static void xscale_getbuf(jtag_callback_data_t arg)
  212. {
  213. uint8_t *in = (uint8_t *)arg;
  214. *((uint32_t *)in) = buf_get_u32(in, 0, 32);
  215. }
  216. static int xscale_receive(target_t *target, uint32_t *buffer, int num_words)
  217. {
  218. if (num_words == 0)
  219. return ERROR_INVALID_ARGUMENTS;
  220. int retval = ERROR_OK;
  221. tap_state_t path[3];
  222. struct scan_field fields[3];
  223. uint8_t *field0 = malloc(num_words * 1);
  224. uint8_t field0_check_value = 0x2;
  225. uint8_t field0_check_mask = 0x6;
  226. uint32_t *field1 = malloc(num_words * 4);
  227. uint8_t field2_check_value = 0x0;
  228. uint8_t field2_check_mask = 0x1;
  229. int words_done = 0;
  230. int words_scheduled = 0;
  231. int i;
  232. path[0] = TAP_DRSELECT;
  233. path[1] = TAP_DRCAPTURE;
  234. path[2] = TAP_DRSHIFT;
  235. memset(&fields, 0, sizeof fields);
  236. fields[0].tap = target->tap;
  237. fields[0].num_bits = 3;
  238. fields[0].check_value = &field0_check_value;
  239. fields[0].check_mask = &field0_check_mask;
  240. fields[1].tap = target->tap;
  241. fields[1].num_bits = 32;
  242. fields[2].tap = target->tap;
  243. fields[2].num_bits = 1;
  244. fields[2].check_value = &field2_check_value;
  245. fields[2].check_mask = &field2_check_mask;
  246. jtag_set_end_state(TAP_IDLE);
  247. xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
  248. jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
  249. /* repeat until all words have been collected */
  250. int attempts = 0;
  251. while (words_done < num_words)
  252. {
  253. /* schedule reads */
  254. words_scheduled = 0;
  255. for (i = words_done; i < num_words; i++)
  256. {
  257. fields[0].in_value = &field0[i];
  258. jtag_add_pathmove(3, path);
  259. fields[1].in_value = (uint8_t *)(field1 + i);
  260. jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
  261. jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
  262. words_scheduled++;
  263. }
  264. if ((retval = jtag_execute_queue()) != ERROR_OK)
  265. {
  266. LOG_ERROR("JTAG error while receiving data from debug handler");
  267. break;
  268. }
  269. /* examine results */
  270. for (i = words_done; i < num_words; i++)
  271. {
  272. if (!(field0[0] & 1))
  273. {
  274. /* move backwards if necessary */
  275. int j;
  276. for (j = i; j < num_words - 1; j++)
  277. {
  278. field0[j] = field0[j + 1];
  279. field1[j] = field1[j + 1];
  280. }
  281. words_scheduled--;
  282. }
  283. }
  284. if (words_scheduled == 0)
  285. {
  286. if (attempts++==1000)
  287. {
  288. LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
  289. retval = ERROR_TARGET_TIMEOUT;
  290. break;
  291. }
  292. }
  293. words_done += words_scheduled;
  294. }
  295. for (i = 0; i < num_words; i++)
  296. *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
  297. free(field1);
  298. return retval;
  299. }
/* Read the debug handler's TX register via the DBGTX scan chain.
 *
 * When @a consume is nonzero, the TAP goes Capture-DR -> Shift-DR, which
 * clears TX_READY on the target; otherwise the path detours through
 * Exit1-DR/Pause-DR so the value is sampled without being consumed.
 * Consuming reads poll for up to ~1 second; non-consuming reads sample
 * once and return.
 *
 * Returns ERROR_OK when a valid word was captured into the cached TX
 * register, ERROR_TARGET_RESOURCE_NOT_AVAILABLE when TX_READY was clear,
 * or ERROR_TARGET_TIMEOUT on JTAG error / timeout.
 */
static int xscale_read_tx(target_t *target, int consume)
{
	struct xscale_common *xscale = target_to_xscale(target);
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);

	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	memset(&fields, 0, sizeof fields);

	/* 3-bit status; bit 0 is TX_READY */
	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].in_value = &field0_in;

	/* 32-bit payload goes straight into the cached TX register */
	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);	/* ~1 second poll budget */

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}

		/* leave the loop unless we are consuming and TX was empty */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}

		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
/* Write the cached RX register value to the debug handler via DBGRX.
 *
 * Polls (with a ~1 second timeout) until the handler has drained the
 * previous word -- rx_read, bit 0 of the status field, reads back low --
 * then performs one final scan with the rx_valid flag set to hand the
 * new word over.  Returns ERROR_OK, ERROR_TARGET_TIMEOUT, or a JTAG
 * error code.
 */
static int xscale_write_rx(target_t *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct timeval timeout, now;
	struct scan_field fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;

	/* single rx_valid bit; kept 0 while polling, set for the handover */
	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}

		if (!(field0_in & 1))
			goto done;

		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
/* Send @a count elements of @a size bytes (1, 2 or 4) from @a buffer to
 * the debug handler via the DBGRX chain.
 *
 * Each element becomes one queued 36-bit DR scan: a 3-bit control field
 * of 0, the 32-bit data word (byte-swapped per target endianness for
 * 2/4-byte sizes), and the 1-bit rx_valid flag set to 1.  The whole
 * batch is flushed with a single jtag_execute_queue().  An unsupported
 * @a size is a caller bug and aborts the process.
 */
static int xscale_send(target_t *target, uint8_t *buffer, int count, int size)
{
	uint32_t t[3];
	int bits[3];
	int retval;
	int done_count = 0;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);

	bits[0]=3;
	t[0]=0;		/* control field: all zero */
	bits[1]=32;
	t[2]=1;		/* rx_valid */
	bits[2]=1;

	int endianness = target->endianness;
	while (done_count++ < count)
	{
		switch (size)
		{
		case 4:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u32(buffer);
			} else
			{
				t[1]=be_to_h_u32(buffer);
			}
			break;
		case 2:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u16(buffer);
			} else
			{
				t[1]=be_to_h_u16(buffer);
			}
			break;
		case 1:
			t[1]=buffer[0];
			break;
		default:
			LOG_ERROR("BUG: size neither 4, 2 nor 1");
			exit(-1);
		}
		jtag_add_dr_out(target->tap,
				3,
				bits,
				t,
				jtag_set_end_state(TAP_IDLE));
		buffer += size;
	}

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
  509. static int xscale_send_u32(target_t *target, uint32_t value)
  510. {
  511. struct xscale_common *xscale = target_to_xscale(target);
  512. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
  513. return xscale_write_rx(target);
  514. }
/* Write the cached DCSR value to the target via the SELDCSR scan chain.
 *
 * @a hold_rst / @a ext_dbg_brk update the corresponding cached flags
 * unless passed as -1 ("leave unchanged"); both are mirrored into the
 * 3-bit control field of the scan.  Marks the cached DCSR clean on
 * success.  Returns ERROR_OK or a JTAG error code.
 */
static int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;
	struct scan_field fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);

	/* control field: bit 1 = hold_rst, bit 2 = external debug break */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = target->tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = target->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
  560. /* parity of the number of bits 0 if even; 1 if odd. for 32 bit words */
  561. static unsigned int parity (unsigned int v)
  562. {
  563. // unsigned int ov = v;
  564. v ^= v >> 16;
  565. v ^= v >> 8;
  566. v ^= v >> 4;
  567. v &= 0xf;
  568. // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
  569. return (0x6996 >> v) & 1;
  570. }
/* Load one 8-word cache line into the mini instruction cache via the
 * LDIC JTAG instruction.
 *
 * @param va     virtual address of the cache line (only VA[31:5] is sent)
 * @param buffer the eight instruction words to load
 *
 * Sends a 33-bit command packet (6-bit cmd b000011 = "load mini ICache",
 * 27-bit VA[31:5]), then eight 33-bit data packets of one instruction
 * word plus its parity bit, and flushes the queue.
 */
static int xscale_load_ic(target_t *target, uint32_t va, uint32_t buffer[8])
{
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	struct scan_field fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	/* LDIC into IR */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_LDIC);

	/* CMD is b011 to load a cacheline into the Mini ICache.
	 * Loading into the main ICache is deprecated, and unused.
	 * It's followed by three zero bits, and 27 address bits.
	 */
	buf_set_u32(&cmd, 0, 6, 0x3);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	/* rest of packet is a cacheline: 8 instructions, with parity */
	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		/* memcpy (not a cast) avoids strict-aliasing problems when
		 * recovering the host-order word for the parity calculation */
		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(2, fields, jtag_get_end_state());
	}

	return jtag_execute_queue();
}
/* Queue an LDIC "invalidate IC line" command (6-bit cmd b000000 plus
 * 27-bit VA[31:5]) for the cache line containing @a va.
 *
 * Note: the scan is only queued -- the caller must eventually run
 * jtag_execute_queue() (e.g. via a following xscale_load_ic()).
 */
static int xscale_invalidate_ic_line(target_t *target, uint32_t va)
{
	uint8_t packet[4];
	uint8_t cmd;
	struct scan_field fields[2];

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_LDIC);

	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	memset(&fields, 0, sizeof fields);

	fields[0].tap = target->tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;

	fields[1].tap = target->tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	return ERROR_OK;
}
  632. static int xscale_update_vectors(target_t *target)
  633. {
  634. struct xscale_common *xscale = target_to_xscale(target);
  635. int i;
  636. int retval;
  637. uint32_t low_reset_branch, high_reset_branch;
  638. for (i = 1; i < 8; i++)
  639. {
  640. /* if there's a static vector specified for this exception, override */
  641. if (xscale->static_high_vectors_set & (1 << i))
  642. {
  643. xscale->high_vectors[i] = xscale->static_high_vectors[i];
  644. }
  645. else
  646. {
  647. retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
  648. if (retval == ERROR_TARGET_TIMEOUT)
  649. return retval;
  650. if (retval != ERROR_OK)
  651. {
  652. /* Some of these reads will fail as part of normal execution */
  653. xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  654. }
  655. }
  656. }
  657. for (i = 1; i < 8; i++)
  658. {
  659. if (xscale->static_low_vectors_set & (1 << i))
  660. {
  661. xscale->low_vectors[i] = xscale->static_low_vectors[i];
  662. }
  663. else
  664. {
  665. retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
  666. if (retval == ERROR_TARGET_TIMEOUT)
  667. return retval;
  668. if (retval != ERROR_OK)
  669. {
  670. /* Some of these reads will fail as part of normal execution */
  671. xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  672. }
  673. }
  674. }
  675. /* calculate branches to debug handler */
  676. low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
  677. high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
  678. xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
  679. xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
  680. /* invalidate and load exception vectors in mini i-cache */
  681. xscale_invalidate_ic_line(target, 0x0);
  682. xscale_invalidate_ic_line(target, 0xffff0000);
  683. xscale_load_ic(target, 0x0, xscale->low_vectors);
  684. xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
  685. return ERROR_OK;
  686. }
/* Report the halted target's state to the user: core state, debug
 * reason, CPU mode, CPSR, PC, and the MMU / D-cache / I-cache enables,
 * plus any XScale-specific reason (reset, trace buffer full).
 */
static int xscale_arch_state(struct target_s *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;

	static const char *state[] =
	{
		"disabled", "enabled"
	};

	static const char *arch_dbg_reason[] =
	{
		"", "\n(processor reset)", "\n(trace buffer full)"
	};

	if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
	{
		LOG_ERROR("BUG: called for a non-ARMv4/5 target");
		exit(-1);
	}

	/* NOTE(review): armv4_5_mode_to_number() can return -1 for an
	 * invalid mode, which would index armv4_5_mode_strings out of
	 * bounds here; callers appear to only invoke this on a valid
	 * halted state -- confirm. */
	LOG_USER("target halted in %s state due to %s, current mode: %s\n"
			"cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
			"MMU: %s, D-Cache: %s, I-Cache: %s"
			"%s",
			 armv4_5_state_strings[armv4_5->core_state],
			 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
			 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
			 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
			 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
			 state[xscale->armv4_5_mmu.mmu_enabled],
			 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
			 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
			 arch_dbg_reason[xscale->arch_debug_reason]);

	return ERROR_OK;
}
/* Poll the target state.  While the core is (debug-)running, probe the
 * TX register without consuming it: data present means the debug handler
 * is executing, i.e. the core entered debug state.  On a halt, runs the
 * debug-entry sequence and fires the HALTED or DEBUG_HALTED event
 * callbacks as appropriate.
 */
static int xscale_poll(target_t *target)
{
	int retval = ERROR_OK;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{

			/* there's data to read from the tx register, we entered debug state */
			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be perfomed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
  752. static int xscale_debug_entry(target_t *target)
  753. {
  754. struct xscale_common *xscale = target_to_xscale(target);
  755. struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
  756. uint32_t pc;
  757. uint32_t buffer[10];
  758. int i;
  759. int retval;
  760. uint32_t moe;
  761. /* clear external dbg break (will be written on next DCSR read) */
  762. xscale->external_debug_break = 0;
  763. if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
  764. return retval;
  765. /* get r0, pc, r1 to r7 and cpsr */
  766. if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
  767. return retval;
  768. /* move r0 from buffer to register cache */
  769. buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
  770. armv4_5->core_cache->reg_list[0].dirty = 1;
  771. armv4_5->core_cache->reg_list[0].valid = 1;
  772. LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
  773. /* move pc from buffer to register cache */
  774. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
  775. armv4_5->core_cache->reg_list[15].dirty = 1;
  776. armv4_5->core_cache->reg_list[15].valid = 1;
  777. LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
  778. /* move data from buffer to register cache */
  779. for (i = 1; i <= 7; i++)
  780. {
  781. buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
  782. armv4_5->core_cache->reg_list[i].dirty = 1;
  783. armv4_5->core_cache->reg_list[i].valid = 1;
  784. LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
  785. }
  786. buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
  787. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
  788. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
  789. LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
  790. armv4_5->core_mode = buffer[9] & 0x1f;
  791. if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
  792. {
  793. target->state = TARGET_UNKNOWN;
  794. LOG_ERROR("cpsr contains invalid mode value - communication failure");
  795. return ERROR_TARGET_FAILURE;
  796. }
  797. LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
  798. if (buffer[9] & 0x20)
  799. armv4_5->core_state = ARMV4_5_STATE_THUMB;
  800. else
  801. armv4_5->core_state = ARMV4_5_STATE_ARM;
  802. if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
  803. return ERROR_FAIL;
  804. /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
  805. if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
  806. {
  807. xscale_receive(target, buffer, 8);
  808. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
  809. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
  810. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
  811. }
  812. else
  813. {
  814. /* r8 to r14, but no spsr */
  815. xscale_receive(target, buffer, 7);
  816. }
  817. /* move data from buffer to register cache */
  818. for (i = 8; i <= 14; i++)
  819. {
  820. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
  821. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
  822. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
  823. }
  824. /* examine debug reason */
  825. xscale_read_dcsr(target);
  826. moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
  827. /* stored PC (for calculating fixup) */
  828. pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  829. switch (moe)
  830. {
  831. case 0x0: /* Processor reset */
  832. target->debug_reason = DBG_REASON_DBGRQ;
  833. xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
  834. pc -= 4;
  835. break;
  836. case 0x1: /* Instruction breakpoint hit */
  837. target->debug_reason = DBG_REASON_BREAKPOINT;
  838. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  839. pc -= 4;
  840. break;
  841. case 0x2: /* Data breakpoint hit */
  842. target->debug_reason = DBG_REASON_WATCHPOINT;
  843. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  844. pc -= 4;
  845. break;
  846. case 0x3: /* BKPT instruction executed */
  847. target->debug_reason = DBG_REASON_BREAKPOINT;
  848. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  849. pc -= 4;
  850. break;
  851. case 0x4: /* Ext. debug event */
  852. target->debug_reason = DBG_REASON_DBGRQ;
  853. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  854. pc -= 4;
  855. break;
  856. case 0x5: /* Vector trap occured */
  857. target->debug_reason = DBG_REASON_BREAKPOINT;
  858. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  859. pc -= 4;
  860. break;
  861. case 0x6: /* Trace buffer full break */
  862. target->debug_reason = DBG_REASON_DBGRQ;
  863. xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
  864. pc -= 4;
  865. break;
  866. case 0x7: /* Reserved (may flag Hot-Debug support) */
  867. default:
  868. LOG_ERROR("Method of Entry is 'Reserved'");
  869. exit(-1);
  870. break;
  871. }
  872. /* apply PC fixup */
  873. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
  874. /* on the first debug entry, identify cache type */
  875. if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
  876. {
  877. uint32_t cache_type_reg;
  878. /* read cp15 cache type register */
  879. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
  880. cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
  881. armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
  882. }
  883. /* examine MMU and Cache settings */
  884. /* read cp15 control register */
  885. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  886. xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  887. xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
  888. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
  889. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
  890. /* tracing enabled, read collected trace data */
  891. if (xscale->trace.buffer_enabled)
  892. {
  893. xscale_read_trace(target);
  894. xscale->trace.buffer_fill--;
  895. /* resume if we're still collecting trace data */
  896. if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
  897. && (xscale->trace.buffer_fill > 0))
  898. {
  899. xscale_resume(target, 1, 0x0, 1, 0);
  900. }
  901. else
  902. {
  903. xscale->trace.buffer_enabled = 0;
  904. }
  905. }
  906. return ERROR_OK;
  907. }
  908. static int xscale_halt(target_t *target)
  909. {
  910. struct xscale_common *xscale = target_to_xscale(target);
  911. LOG_DEBUG("target->state: %s",
  912. target_state_name(target));
  913. if (target->state == TARGET_HALTED)
  914. {
  915. LOG_DEBUG("target was already halted");
  916. return ERROR_OK;
  917. }
  918. else if (target->state == TARGET_UNKNOWN)
  919. {
  920. /* this must not happen for a xscale target */
  921. LOG_ERROR("target was in unknown state when halt was requested");
  922. return ERROR_TARGET_INVALID;
  923. }
  924. else if (target->state == TARGET_RESET)
  925. {
  926. LOG_DEBUG("target->state == TARGET_RESET");
  927. }
  928. else
  929. {
  930. /* assert external dbg break */
  931. xscale->external_debug_break = 1;
  932. xscale_read_dcsr(target);
  933. target->debug_reason = DBG_REASON_DBGRQ;
  934. }
  935. return ERROR_OK;
  936. }
  937. static int xscale_enable_single_step(struct target_s *target, uint32_t next_pc)
  938. {
  939. struct xscale_common *xscale = target_to_xscale(target);
  940. reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
  941. int retval;
  942. if (xscale->ibcr0_used)
  943. {
  944. struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
  945. if (ibcr0_bp)
  946. {
  947. xscale_unset_breakpoint(target, ibcr0_bp);
  948. }
  949. else
  950. {
  951. LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
  952. exit(-1);
  953. }
  954. }
  955. if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
  956. return retval;
  957. return ERROR_OK;
  958. }
  959. static int xscale_disable_single_step(struct target_s *target)
  960. {
  961. struct xscale_common *xscale = target_to_xscale(target);
  962. reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
  963. int retval;
  964. if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
  965. return retval;
  966. return ERROR_OK;
  967. }
  968. static void xscale_enable_watchpoints(struct target_s *target)
  969. {
  970. struct watchpoint *watchpoint = target->watchpoints;
  971. while (watchpoint)
  972. {
  973. if (watchpoint->set == 0)
  974. xscale_set_watchpoint(target, watchpoint);
  975. watchpoint = watchpoint->next;
  976. }
  977. }
  978. static void xscale_enable_breakpoints(struct target_s *target)
  979. {
  980. struct breakpoint *breakpoint = target->breakpoints;
  981. /* set any pending breakpoints */
  982. while (breakpoint)
  983. {
  984. if (breakpoint->set == 0)
  985. xscale_set_breakpoint(target, breakpoint);
  986. breakpoint = breakpoint->next;
  987. }
  988. }
/**
 * Resume execution.
 *
 * @param current 1 = resume at current PC, 0 = resume at @a address
 * @param address resume address (used only when @a current is 0)
 * @param handle_breakpoints non-zero: a breakpoint at the resume PC is
 *	removed, single-stepped over, and re-installed before resuming
 * @param debug_execution non-zero: resume "invisibly" (state becomes
 *	TARGET_DEBUG_RUNNING and working areas are preserved)
 *
 * Control is handed back to the on-chip debug handler by sending a
 * resume command followed by CPSR, r0..r7 and PC over the debug channel.
 * NOTE(review): most sends in the step-over path ignore their return
 * values; the near-identical sequence in xscale_step_inner checks them.
 */
static int xscale_resume(struct target_s *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	struct breakpoint *breakpoint = target->breakpoints;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
		{
			uint32_t next_pc;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			}

			/* single-step over the breakpointed instruction via IBCR0 */
			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			xscale_restore_context(target);

			/* send resume request (command 0x30 or 0x31)
			 * clean the trace buffer if it is to be enabled (0x62) */
			if (xscale->trace.buffer_enabled)
			{
				xscale_send_u32(target, 0x62);
				xscale_send_u32(target, 0x31);
			}
			else
				xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));

			/* send r7 down to r0, as the debug handler expects */
			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

			/* wait for and process debug entry */
			xscale_debug_entry(target);

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			/* put the user breakpoint back before the real resume */
			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	xscale_restore_context(target);

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));

	/* send r7 down to r0 */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		armv4_5_invalidate_core_regs(target);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	return ERROR_OK;
}
/**
 * Core of xscale_step: execute exactly one instruction.
 *
 * Computes the next PC by simulation, arms a hardware breakpoint there
 * (IBCR0), resumes via the debug handler (same command/register sequence
 * as xscale_resume), waits for re-entry into debug state, and disarms the
 * single-step breakpoint.  Every debug-channel transfer is checked and
 * errors are propagated to the caller.
 */
static int xscale_step_inner(struct target_s *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_context(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
		if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
			return retval;

	/* send CPSR */
	if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32))) != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));

	/* send r7 down to r0, the order the debug handler expects */
	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
		return retval;
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	if ((retval = armv4_5_invalidate_core_regs(target)) != ERROR_OK)
		return retval;

	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}
  1176. static int xscale_step(struct target_s *target, int current,
  1177. uint32_t address, int handle_breakpoints)
  1178. {
  1179. struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
  1180. struct breakpoint *breakpoint = target->breakpoints;
  1181. uint32_t current_pc;
  1182. int retval;
  1183. if (target->state != TARGET_HALTED)
  1184. {
  1185. LOG_WARNING("target not halted");
  1186. return ERROR_TARGET_NOT_HALTED;
  1187. }
  1188. /* current = 1: continue on current pc, otherwise continue at <address> */
  1189. if (!current)
  1190. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
  1191. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1192. /* if we're at the reset vector, we have to simulate the step */
  1193. if (current_pc == 0x0)
  1194. {
  1195. if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
  1196. return retval;
  1197. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1198. target->debug_reason = DBG_REASON_SINGLESTEP;
  1199. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  1200. return ERROR_OK;
  1201. }
  1202. /* the front-end may request us not to handle breakpoints */
  1203. if (handle_breakpoints)
  1204. if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
  1205. {
  1206. if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
  1207. return retval;
  1208. }
  1209. retval = xscale_step_inner(target, current, address, handle_breakpoints);
  1210. if (breakpoint)
  1211. {
  1212. xscale_set_breakpoint(target, breakpoint);
  1213. }
  1214. LOG_DEBUG("target stepped");
  1215. return ERROR_OK;
  1216. }
/* Assert SRST, after first configuring DCSR (Hold Reset, Halt Mode,
 * Trap Reset) so the core will trap into halt-mode debug when the
 * reset is later released by xscale_deassert_reset(). */
static int xscale_assert_reset(target_t *target)
{
	struct xscale_common *xscale = target_to_xscale(target);

	LOG_DEBUG("target->state: %s",
		  target_state_name(target));

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(target->tap, 0x7f);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
/* Release SRST and bring up the debug handler.
 *
 * While the core is still held in reset-trap, the debug handler binary
 * is written into the mini-icache one 32-byte cache line at a time and
 * the low/high exception vector stubs are installed; then the Hold Reset
 * bit is cleared so the core starts executing (and, unless reset_halt is
 * requested, immediately enters and is resumed from the handler).
 */
static int xscale_deassert_reset(target_t *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct breakpoint *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	/* after reset, all hardware debug resources are free again */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	armv4_5_invalidate_core_regs(target);

	/* FIXME mark hardware watchpoints got unset too. Also,
	 * at least some of the XScale registers are invalid...
	 */

	/*
	 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
	 * contents got invalidated. Safer to force that, so writing new
	 * contents can't ever fail..
	 */
	{
		uint32_t address;
		unsigned buf_cnt;
		const uint8_t *buffer = xscale_debug_handler;
		int retval;

		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load the debug handler into the mini-icache. Since
		 * it's using halt mode (not monitor mode), it runs in
		 * "Special Debug State" for access to registers, memory,
		 * coprocessors, trace data, etc.
		 */
		address = xscale->handler_address;
		for (unsigned binary_size = sizeof xscale_debug_handler - 1;
				binary_size > 0;
				binary_size -= buf_cnt, buffer += buf_cnt)
		{
			uint32_t cache_line[8];
			unsigned i;

			/* a mini-icache line holds 32 bytes (8 words) */
			buf_cnt = binary_size;
			if (buf_cnt > 32)
				buf_cnt = 32;

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			for (; i < 32; i += 4)
			{
				/* pad the final partial line with 0xe1a08008
				 * ("mov r8, r8" no-ops) */
				cache_line[i / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				retval = xscale_load_ic(target, address,
						cache_line);
				if (retval != ERROR_OK)
					return retval;
			}

			address += buf_cnt;
		};

		/* install the low and high exception vector stubs */
		retval = xscale_load_ic(target, 0x0,
					xscale->low_vectors);
		if (retval != ERROR_OK)
			return retval;
		retval = xscale_load_ic(target, 0xffff0000,
					xscale->high_vectors);
		if (retval != ERROR_OK)
			return retval;

		jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}
	}

	return ERROR_OK;
}
/* Per-register read hook required by the target framework.
 * Intentionally unimplemented for XScale: registers are transferred in
 * bulk by the debug-entry / restore-context paths instead.
 * NOTE(review): returns ERROR_OK despite doing nothing — callers get no
 * failure indication; confirm no caller relies on this. */
static int xscale_read_core_reg(struct target_s *target, int num,
		enum armv4_5_mode mode)
{
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
/* Per-register write hook required by the target framework.
 * Intentionally unimplemented for XScale (see xscale_read_core_reg);
 * NOTE(review): also returns ERROR_OK despite doing nothing. */
static int xscale_write_core_reg(struct target_s *target, int num,
		enum armv4_5_mode mode, uint32_t value)
{
	LOG_ERROR("not implemented");
	return ERROR_OK;
}
  1367. static int xscale_full_context(target_t *target)
  1368. {
  1369. struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
  1370. uint32_t *buffer;
  1371. int i, j;
  1372. LOG_DEBUG("-");
  1373. if (target->state != TARGET_HALTED)
  1374. {
  1375. LOG_WARNING("target not halted");
  1376. return ERROR_TARGET_NOT_HALTED;
  1377. }
  1378. buffer = malloc(4 * 8);
  1379. /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
  1380. * we can't enter User mode on an XScale (unpredictable),
  1381. * but User shares registers with SYS
  1382. */
  1383. for (i = 1; i < 7; i++)
  1384. {
  1385. int valid = 1;
  1386. /* check if there are invalid registers in the current mode
  1387. */
  1388. for (j = 0; j <= 16; j++)
  1389. {
  1390. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
  1391. valid = 0;
  1392. }
  1393. if (!valid)
  1394. {
  1395. uint32_t tmp_cpsr;
  1396. /* request banked registers */
  1397. xscale_send_u32(target, 0x0);
  1398. tmp_cpsr = 0x0;
  1399. tmp_cpsr |= armv4_5_number_to_mode(i);
  1400. tmp_cpsr |= 0xc0; /* I/F bits */
  1401. /* send CPSR for desired mode */
  1402. xscale_send_u32(target, tmp_cpsr);
  1403. /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
  1404. if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
  1405. {
  1406. xscale_receive(target, buffer, 8);
  1407. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
  1408. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
  1409. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
  1410. }
  1411. else
  1412. {
  1413. xscale_receive(target, buffer, 7);
  1414. }
  1415. /* move data from buffer to register cache */
  1416. for (j = 8; j <= 14; j++)
  1417. {
  1418. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
  1419. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
  1420. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
  1421. }
  1422. }
  1423. }
  1424. free(buffer);
  1425. return ERROR_OK;
  1426. }
  1427. static int xscale_restore_context(target_t *target)
  1428. {
  1429. struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
  1430. int i, j;
  1431. if (target->state != TARGET_HALTED)
  1432. {
  1433. LOG_WARNING("target not halted");
  1434. return ERROR_TARGET_NOT_HALTED;
  1435. }
  1436. /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
  1437. * we can't enter User mode on an XScale (unpredictable),
  1438. * but User shares registers with SYS
  1439. */
  1440. for (i = 1; i < 7; i++)
  1441. {
  1442. int dirty = 0;
  1443. /* check if there are invalid registers in the current mode
  1444. */
  1445. for (j = 8; j <= 14; j++)
  1446. {
  1447. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
  1448. dirty = 1;
  1449. }
  1450. /* if not USR/SYS, check if the SPSR needs to be written */
  1451. if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
  1452. {
  1453. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
  1454. dirty = 1;
  1455. }
  1456. if (dirty)
  1457. {
  1458. uint32_t tmp_cpsr;
  1459. /* send banked registers */
  1460. xscale_send_u32(target, 0x1);
  1461. tmp_cpsr = 0x0;
  1462. tmp_cpsr |= armv4_5_number_to_mode(i);
  1463. tmp_cpsr |= 0xc0; /* I/F bits */
  1464. /* send CPSR for desired mode */
  1465. xscale_send_u32(target, tmp_cpsr);
  1466. /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
  1467. for (j = 8; j <= 14; j++)
  1468. {
  1469. xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, j).value, 0, 32));
  1470. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
  1471. }
  1472. if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
  1473. {
  1474. xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32));
  1475. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
  1476. }
  1477. }
  1478. }
  1479. return ERROR_OK;
  1480. }
  1481. static int xscale_read_memory(struct target_s *target, uint32_t address,
  1482. uint32_t size, uint32_t count, uint8_t *buffer)
  1483. {
  1484. struct xscale_common *xscale = target_to_xscale(target);
  1485. uint32_t *buf32;
  1486. uint32_t i;
  1487. int retval;
  1488. LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
  1489. if (target->state != TARGET_HALTED)
  1490. {
  1491. LOG_WARNING("target not halted");
  1492. return ERROR_TARGET_NOT_HALTED;
  1493. }
  1494. /* sanitize arguments */
  1495. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1496. return ERROR_INVALID_ARGUMENTS;
  1497. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1498. return ERROR_TARGET_UNALIGNED_ACCESS;
  1499. /* send memory read request (command 0x1n, n: access size) */
  1500. if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
  1501. return retval;
  1502. /* send base address for read request */
  1503. if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
  1504. return retval;
  1505. /* send number of requested data words */
  1506. if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
  1507. return retval;
  1508. /* receive data from target (count times 32-bit words in host endianness) */
  1509. buf32 = malloc(4 * count);
  1510. if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
  1511. return retval;
  1512. /* extract data from host-endian buffer into byte stream */
  1513. for (i = 0; i < count; i++)
  1514. {
  1515. switch (size)
  1516. {
  1517. case 4:
  1518. target_buffer_set_u32(target, buffer, buf32[i]);
  1519. buffer += 4;
  1520. break;
  1521. case 2:
  1522. target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
  1523. buffer += 2;
  1524. break;
  1525. case 1:
  1526. *buffer++ = buf32[i] & 0xff;
  1527. break;
  1528. default:
  1529. LOG_ERROR("should never get here");
  1530. exit(-1);
  1531. }
  1532. }
  1533. free(buf32);
  1534. /* examine DCSR, to see if Sticky Abort (SA) got set */
  1535. if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
  1536. return retval;
  1537. if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
  1538. {
  1539. /* clear SA bit */
  1540. if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
  1541. return retval;
  1542. return ERROR_TARGET_DATA_ABORT;
  1543. }
  1544. return ERROR_OK;
  1545. }
  1546. static int xscale_write_memory(struct target_s *target, uint32_t address,
  1547. uint32_t size, uint32_t count, uint8_t *buffer)
  1548. {
  1549. struct xscale_common *xscale = target_to_xscale(target);
  1550. int retval;
  1551. LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
  1552. if (target->state != TARGET_HALTED)
  1553. {
  1554. LOG_WARNING("target not halted");
  1555. return ERROR_TARGET_NOT_HALTED;
  1556. }
  1557. /* sanitize arguments */
  1558. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1559. return ERROR_INVALID_ARGUMENTS;
  1560. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1561. return ERROR_TARGET_UNALIGNED_ACCESS;
  1562. /* send memory write request (command 0x2n, n: access size) */
  1563. if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
  1564. return retval;
  1565. /* send base address for read request */
  1566. if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
  1567. return retval;
  1568. /* send number of requested data words to be written*/
  1569. if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
  1570. return retval;
  1571. /* extract data from host-endian buffer into byte stream */
  1572. #if 0
  1573. for (i = 0; i < count; i++)
  1574. {
  1575. switch (size)
  1576. {
  1577. case 4:
  1578. value = target_buffer_get_u32(target, buffer);
  1579. xscale_send_u32(target, value);
  1580. buffer += 4;
  1581. break;
  1582. case 2:
  1583. value = target_buffer_get_u16(target, buffer);
  1584. xscale_send_u32(target, value);
  1585. buffer += 2;
  1586. break;
  1587. case 1:
  1588. value = *buffer;
  1589. xscale_send_u32(target, value);
  1590. buffer += 1;
  1591. break;
  1592. default:
  1593. LOG_ERROR("should never get here");
  1594. exit(-1);
  1595. }
  1596. }
  1597. #endif
  1598. if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
  1599. return retval;
  1600. /* examine DCSR, to see if Sticky Abort (SA) got set */
  1601. if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
  1602. return retval;
  1603. if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
  1604. {
  1605. /* clear SA bit */
  1606. if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
  1607. return retval;
  1608. return ERROR_TARGET_DATA_ABORT;
  1609. }
  1610. return ERROR_OK;
  1611. }
/* Bulk write hook: implemented as a plain 32-bit-wide write; there is no
 * separate fast path in this driver's debug-handler protocol. */
static int xscale_bulk_write_memory(target_t *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return xscale_write_memory(target, address, 4, count, buffer);
}
  1617. static uint32_t xscale_get_ttb(target_t *target)
  1618. {
  1619. struct xscale_common *xscale = target_to_xscale(target);
  1620. uint32_t ttb;
  1621. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
  1622. ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
  1623. return ttb;
  1624. }
/* Disable the MMU and/or caches via the cp15 control register.
 * Order matters: the DCache is cleaned (handler command 0x50, using the
 * reserved cache-clean area) and invalidated (0x51) BEFORE the C bit is
 * cleared; the ICache is invalidated (0x52).  Ends with cpwait (0x53)
 * so all outstanding coprocessor operations complete. */
static void xscale_disable_mmu_caches(target_t *target, int mmu,
		int d_u_cache, int i_cache)
{
	struct xscale_common *xscale = target_to_xscale(target);
	uint32_t cp15_control;

	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);

	if (mmu)
		cp15_control &= ~0x1U;	/* clear M bit */

	if (d_u_cache)
	{
		/* clean DCache */
		xscale_send_u32(target, 0x50);
		xscale_send_u32(target, xscale->cache_clean_address);

		/* invalidate DCache */
		xscale_send_u32(target, 0x51);

		cp15_control &= ~0x4U;	/* clear C bit */
	}

	if (i_cache)
	{
		/* invalidate ICache */
		xscale_send_u32(target, 0x52);
		cp15_control &= ~0x1000U;	/* clear I bit */
	}

	/* write new cp15 control register */
	xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);

	/* execute cpwait to ensure outstanding operations complete */
	xscale_send_u32(target, 0x53);
}
  1655. static void xscale_enable_mmu_caches(target_t *target, int mmu,
  1656. int d_u_cache, int i_cache)
  1657. {
  1658. struct xscale_common *xscale = target_to_xscale(target);
  1659. uint32_t cp15_control;
  1660. /* read cp15 control register */
  1661. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  1662. cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  1663. if (mmu)
  1664. cp15_control |= 0x1U;
  1665. if (d_u_cache)
  1666. cp15_control |= 0x4U;
  1667. if (i_cache)
  1668. cp15_control |= 0x1000U;
  1669. /* write new cp15 control register */
  1670. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
  1671. /* execute cpwait to ensure outstanding operations complete */
  1672. xscale_send_u32(target, 0x53);
  1673. }
  1674. static int xscale_set_breakpoint(struct target_s *target,
  1675. struct breakpoint *breakpoint)
  1676. {
  1677. int retval;
  1678. struct xscale_common *xscale = target_to_xscale(target);
  1679. if (target->state != TARGET_HALTED)
  1680. {
  1681. LOG_WARNING("target not halted");
  1682. return ERROR_TARGET_NOT_HALTED;
  1683. }
  1684. if (breakpoint->set)
  1685. {
  1686. LOG_WARNING("breakpoint already set");
  1687. return ERROR_OK;
  1688. }
  1689. if (breakpoint->type == BKPT_HARD)
  1690. {
  1691. uint32_t value = breakpoint->address | 1;
  1692. if (!xscale->ibcr0_used)
  1693. {
  1694. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
  1695. xscale->ibcr0_used = 1;
  1696. breakpoint->set = 1; /* breakpoint set on first breakpoint register */
  1697. }
  1698. else if (!xscale->ibcr1_used)
  1699. {
  1700. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
  1701. xscale->ibcr1_used = 1;
  1702. breakpoint->set = 2; /* breakpoint set on second breakpoint register */
  1703. }
  1704. else
  1705. {
  1706. LOG_ERROR("BUG: no hardware comparator available");
  1707. return ERROR_OK;
  1708. }
  1709. }
  1710. else if (breakpoint->type == BKPT_SOFT)
  1711. {
  1712. if (breakpoint->length == 4)
  1713. {
  1714. /* keep the original instruction in target endianness */
  1715. if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
  1716. {
  1717. return retval;
  1718. }
  1719. /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
  1720. if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
  1721. {
  1722. return retval;
  1723. }
  1724. }
  1725. else
  1726. {
  1727. /* keep the original instruction in target endianness */
  1728. if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
  1729. {
  1730. return retval;
  1731. }
  1732. /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
  1733. if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
  1734. {
  1735. return retval;
  1736. }
  1737. }
  1738. breakpoint->set = 1;
  1739. }
  1740. return ERROR_OK;
  1741. }
  1742. static int xscale_add_breakpoint(struct target_s *target,
  1743. struct breakpoint *breakpoint)
  1744. {
  1745. struct xscale_common *xscale = target_to_xscale(target);
  1746. if (target->state != TARGET_HALTED)
  1747. {
  1748. LOG_WARNING("target not halted");
  1749. return ERROR_TARGET_NOT_HALTED;
  1750. }
  1751. if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
  1752. {
  1753. LOG_INFO("no breakpoint unit available for hardware breakpoint");
  1754. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1755. }
  1756. if ((breakpoint->length != 2) && (breakpoint->length != 4))
  1757. {
  1758. LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
  1759. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1760. }
  1761. if (breakpoint->type == BKPT_HARD)
  1762. {
  1763. xscale->ibcr_available--;
  1764. }
  1765. return ERROR_OK;
  1766. }
  1767. static int xscale_unset_breakpoint(struct target_s *target,
  1768. struct breakpoint *breakpoint)
  1769. {
  1770. int retval;
  1771. struct xscale_common *xscale = target_to_xscale(target);
  1772. if (target->state != TARGET_HALTED)
  1773. {
  1774. LOG_WARNING("target not halted");
  1775. return ERROR_TARGET_NOT_HALTED;
  1776. }
  1777. if (!breakpoint->set)
  1778. {
  1779. LOG_WARNING("breakpoint not set");
  1780. return ERROR_OK;
  1781. }
  1782. if (breakpoint->type == BKPT_HARD)
  1783. {
  1784. if (breakpoint->set == 1)
  1785. {
  1786. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
  1787. xscale->ibcr0_used = 0;
  1788. }
  1789. else if (breakpoint->set == 2)
  1790. {
  1791. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
  1792. xscale->ibcr1_used = 0;
  1793. }
  1794. breakpoint->set = 0;
  1795. }
  1796. else
  1797. {
  1798. /* restore original instruction (kept in target endianness) */
  1799. if (breakpoint->length == 4)
  1800. {
  1801. if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
  1802. {
  1803. return retval;
  1804. }
  1805. }
  1806. else
  1807. {
  1808. if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
  1809. {
  1810. return retval;
  1811. }
  1812. }
  1813. breakpoint->set = 0;
  1814. }
  1815. return ERROR_OK;
  1816. }
  1817. static int xscale_remove_breakpoint(struct target_s *target, struct breakpoint *breakpoint)
  1818. {
  1819. struct xscale_common *xscale = target_to_xscale(target);
  1820. if (target->state != TARGET_HALTED)
  1821. {
  1822. LOG_WARNING("target not halted");
  1823. return ERROR_TARGET_NOT_HALTED;
  1824. }
  1825. if (breakpoint->set)
  1826. {
  1827. xscale_unset_breakpoint(target, breakpoint);
  1828. }
  1829. if (breakpoint->type == BKPT_HARD)
  1830. xscale->ibcr_available++;
  1831. return ERROR_OK;
  1832. }
  1833. static int xscale_set_watchpoint(struct target_s *target,
  1834. struct watchpoint *watchpoint)
  1835. {
  1836. struct xscale_common *xscale = target_to_xscale(target);
  1837. uint8_t enable = 0;
  1838. reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
  1839. uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
  1840. if (target->state != TARGET_HALTED)
  1841. {
  1842. LOG_WARNING("target not halted");
  1843. return ERROR_TARGET_NOT_HALTED;
  1844. }
  1845. xscale_get_reg(dbcon);
  1846. switch (watchpoint->rw)
  1847. {
  1848. case WPT_READ:
  1849. enable = 0x3;
  1850. break;
  1851. case WPT_ACCESS:
  1852. enable = 0x2;
  1853. break;
  1854. case WPT_WRITE:
  1855. enable = 0x1;
  1856. break;
  1857. default:
  1858. LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
  1859. }
  1860. if (!xscale->dbr0_used)
  1861. {
  1862. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
  1863. dbcon_value |= enable;
  1864. xscale_set_reg_u32(dbcon, dbcon_value);
  1865. watchpoint->set = 1;
  1866. xscale->dbr0_used = 1;
  1867. }
  1868. else if (!xscale->dbr1_used)
  1869. {
  1870. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
  1871. dbcon_value |= enable << 2;
  1872. xscale_set_reg_u32(dbcon, dbcon_value);
  1873. watchpoint->set = 2;
  1874. xscale->dbr1_used = 1;
  1875. }
  1876. else
  1877. {
  1878. LOG_ERROR("BUG: no hardware comparator available");
  1879. return ERROR_OK;
  1880. }
  1881. return ERROR_OK;
  1882. }
  1883. static int xscale_add_watchpoint(struct target_s *target,
  1884. struct watchpoint *watchpoint)
  1885. {
  1886. struct xscale_common *xscale = target_to_xscale(target);
  1887. if (target->state != TARGET_HALTED)
  1888. {
  1889. LOG_WARNING("target not halted");
  1890. return ERROR_TARGET_NOT_HALTED;
  1891. }
  1892. if (xscale->dbr_available < 1)
  1893. {
  1894. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1895. }
  1896. if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
  1897. {
  1898. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1899. }
  1900. xscale->dbr_available--;
  1901. return ERROR_OK;
  1902. }
  1903. static int xscale_unset_watchpoint(struct target_s *target,
  1904. struct watchpoint *watchpoint)
  1905. {
  1906. struct xscale_common *xscale = target_to_xscale(target);
  1907. reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
  1908. uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
  1909. if (target->state != TARGET_HALTED)
  1910. {
  1911. LOG_WARNING("target not halted");
  1912. return ERROR_TARGET_NOT_HALTED;
  1913. }
  1914. if (!watchpoint->set)
  1915. {
  1916. LOG_WARNING("breakpoint not set");
  1917. return ERROR_OK;
  1918. }
  1919. if (watchpoint->set == 1)
  1920. {
  1921. dbcon_value &= ~0x3;
  1922. xscale_set_reg_u32(dbcon, dbcon_value);
  1923. xscale->dbr0_used = 0;
  1924. }
  1925. else if (watchpoint->set == 2)
  1926. {
  1927. dbcon_value &= ~0xc;
  1928. xscale_set_reg_u32(dbcon, dbcon_value);
  1929. xscale->dbr1_used = 0;
  1930. }
  1931. watchpoint->set = 0;
  1932. return ERROR_OK;
  1933. }
  1934. static int xscale_remove_watchpoint(struct target_s *target, struct watchpoint *watchpoint)
  1935. {
  1936. struct xscale_common *xscale = target_to_xscale(target);
  1937. if (target->state != TARGET_HALTED)
  1938. {
  1939. LOG_WARNING("target not halted");
  1940. return ERROR_TARGET_NOT_HALTED;
  1941. }
  1942. if (watchpoint->set)
  1943. {
  1944. xscale_unset_watchpoint(target, watchpoint);
  1945. }
  1946. xscale->dbr_available++;
  1947. return ERROR_OK;
  1948. }
  1949. static int xscale_get_reg(reg_t *reg)
  1950. {
  1951. struct xscale_reg *arch_info = reg->arch_info;
  1952. target_t *target = arch_info->target;
  1953. struct xscale_common *xscale = target_to_xscale(target);
  1954. /* DCSR, TX and RX are accessible via JTAG */
  1955. if (strcmp(reg->name, "XSCALE_DCSR") == 0)
  1956. {
  1957. return xscale_read_dcsr(arch_info->target);
  1958. }
  1959. else if (strcmp(reg->name, "XSCALE_TX") == 0)
  1960. {
  1961. /* 1 = consume register content */
  1962. return xscale_read_tx(arch_info->target, 1);
  1963. }
  1964. else if (strcmp(reg->name, "XSCALE_RX") == 0)
  1965. {
  1966. /* can't read from RX register (host -> debug handler) */
  1967. return ERROR_OK;
  1968. }
  1969. else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
  1970. {
  1971. /* can't (explicitly) read from TXRXCTRL register */
  1972. return ERROR_OK;
  1973. }
  1974. else /* Other DBG registers have to be transfered by the debug handler */
  1975. {
  1976. /* send CP read request (command 0x40) */
  1977. xscale_send_u32(target, 0x40);
  1978. /* send CP register number */
  1979. xscale_send_u32(target, arch_info->dbg_handler_number);
  1980. /* read register value */
  1981. xscale_read_tx(target, 1);
  1982. buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
  1983. reg->dirty = 0;
  1984. reg->valid = 1;
  1985. }
  1986. return ERROR_OK;
  1987. }
  1988. static int xscale_set_reg(reg_t *reg, uint8_t* buf)
  1989. {
  1990. struct xscale_reg *arch_info = reg->arch_info;
  1991. target_t *target = arch_info->target;
  1992. struct xscale_common *xscale = target_to_xscale(target);
  1993. uint32_t value = buf_get_u32(buf, 0, 32);
  1994. /* DCSR, TX and RX are accessible via JTAG */
  1995. if (strcmp(reg->name, "XSCALE_DCSR") == 0)
  1996. {
  1997. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
  1998. return xscale_write_dcsr(arch_info->target, -1, -1);
  1999. }
  2000. else if (strcmp(reg->name, "XSCALE_RX") == 0)
  2001. {
  2002. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
  2003. return xscale_write_rx(arch_info->target);
  2004. }
  2005. else if (strcmp(reg->name, "XSCALE_TX") == 0)
  2006. {
  2007. /* can't write to TX register (debug-handler -> host) */
  2008. return ERROR_OK;
  2009. }
  2010. else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
  2011. {
  2012. /* can't (explicitly) write to TXRXCTRL register */
  2013. return ERROR_OK;
  2014. }
  2015. else /* Other DBG registers have to be transfered by the debug handler */
  2016. {
  2017. /* send CP write request (command 0x41) */
  2018. xscale_send_u32(target, 0x41);
  2019. /* send CP register number */
  2020. xscale_send_u32(target, arch_info->dbg_handler_number);
  2021. /* send CP register value */
  2022. xscale_send_u32(target, value);
  2023. buf_set_u32(reg->value, 0, 32, value);
  2024. }
  2025. return ERROR_OK;
  2026. }
  2027. static int xscale_write_dcsr_sw(target_t *target, uint32_t value)
  2028. {
  2029. struct xscale_common *xscale = target_to_xscale(target);
  2030. reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
  2031. struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
  2032. /* send CP write request (command 0x41) */
  2033. xscale_send_u32(target, 0x41);
  2034. /* send CP register number */
  2035. xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
  2036. /* send CP register value */
  2037. xscale_send_u32(target, value);
  2038. buf_set_u32(dcsr->value, 0, 32, value);
  2039. return ERROR_OK;
  2040. }
  2041. static int xscale_read_trace(target_t *target)
  2042. {
  2043. struct xscale_common *xscale = target_to_xscale(target);
  2044. struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
  2045. struct xscale_trace_data **trace_data_p;
  2046. /* 258 words from debug handler
  2047. * 256 trace buffer entries
  2048. * 2 checkpoint addresses
  2049. */
  2050. uint32_t trace_buffer[258];
  2051. int is_address[256];
  2052. int i, j;
  2053. if (target->state != TARGET_HALTED)
  2054. {
  2055. LOG_WARNING("target must be stopped to read trace data");
  2056. return ERROR_TARGET_NOT_HALTED;
  2057. }
  2058. /* send read trace buffer command (command 0x61) */
  2059. xscale_send_u32(target, 0x61);
  2060. /* receive trace buffer content */
  2061. xscale_receive(target, trace_buffer, 258);
  2062. /* parse buffer backwards to identify address entries */
  2063. for (i = 255; i >= 0; i--)
  2064. {
  2065. is_address[i] = 0;
  2066. if (((trace_buffer[i] & 0xf0) == 0x90) ||
  2067. ((trace_buffer[i] & 0xf0) == 0xd0))
  2068. {
  2069. if (i >= 3)
  2070. is_address[--i] = 1;
  2071. if (i >= 2)
  2072. is_address[--i] = 1;
  2073. if (i >= 1)
  2074. is_address[--i] = 1;
  2075. if (i >= 0)
  2076. is_address[--i] = 1;
  2077. }
  2078. }
  2079. /* search first non-zero entry */
  2080. for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
  2081. ;
  2082. if (j == 256)
  2083. {
  2084. LOG_DEBUG("no trace data collected");
  2085. return ERROR_XSCALE_NO_TRACE_DATA;
  2086. }
  2087. for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
  2088. ;
  2089. *trace_data_p = malloc(sizeof(struct xscale_trace_data));
  2090. (*trace_data_p)->next = NULL;
  2091. (*trace_data_p)->chkpt0 = trace_buffer[256];
  2092. (*trace_data_p)->chkpt1 = trace_buffer[257];
  2093. (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  2094. (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
  2095. (*trace_data_p)->depth = 256 - j;
  2096. for (i = j; i < 256; i++)
  2097. {
  2098. (*trace_data_p)->entries[i - j].data = trace_buffer[i];
  2099. if (is_address[i])
  2100. (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
  2101. else
  2102. (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
  2103. }
  2104. return ERROR_OK;
  2105. }
/* Fetch and decode the instruction at trace.current_pc from the loaded
 * trace image.  The image section containing the PC is located, the
 * opcode is read at the PC's offset within that section, and it is
 * decoded as ARM (32-bit) or Thumb (16-bit) depending on
 * trace.core_state.
 *
 * Returns ERROR_TRACE_IMAGE_UNAVAILABLE when no image is loaded, and
 * ERROR_TRACE_INSTRUCTION_UNAVAILABLE when the PC falls outside every
 * section or the section read fails. */
static int xscale_read_instruction(target_t *target,
		struct arm_instruction *instruction)
{
	struct xscale_common *xscale = target_to_xscale(target);
	int i;
	int section = -1;
	uint32_t size_read;
	uint32_t opcode;
	int retval;

	if (!xscale->trace.image)
		return ERROR_TRACE_IMAGE_UNAVAILABLE;

	/* search for the section the current instruction belongs to */
	for (i = 0; i < xscale->trace.image->num_sections; i++)
	{
		/* PC must lie in [base_address, base_address + size) */
		if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
			(xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
		{
			section = i;
			break;
		}
	}

	if (section == -1)
	{
		/* current instruction couldn't be found in the image */
		return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
	}

	if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
	{
		uint8_t buf[4];
		/* ARM state: read a 32-bit opcode at the PC's section offset */
		if ((retval = image_read_section(xscale->trace.image, section,
			xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
			4, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction: %i", retval);
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u32(target, buf);
		arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
	}
	else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
	{
		uint8_t buf[2];
		/* Thumb state: read a 16-bit opcode */
		if ((retval = image_read_section(xscale->trace.image, section,
			xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
			2, buf, &size_read)) != ERROR_OK)
		{
			LOG_ERROR("error while reading instruction: %i", retval);
			return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
		}
		opcode = target_buffer_get_u16(target, buf);
		thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
	}
	else
	{
		LOG_ERROR("BUG: unknown core state encountered");
		exit(-1);
	}

	return ERROR_OK;
}
  2165. static int xscale_branch_address(struct xscale_trace_data *trace_data,
  2166. int i, uint32_t *target)
  2167. {
  2168. /* if there are less than four entries prior to the indirect branch message
  2169. * we can't extract the address */
  2170. if (i < 4)
  2171. {
  2172. return -1;
  2173. }
  2174. *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
  2175. (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
  2176. return 0;
  2177. }
  2178. static int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
  2179. {
  2180. struct xscale_common *xscale = target_to_xscale(target);
  2181. int next_pc_ok = 0;
  2182. uint32_t next_pc = 0x0;
  2183. struct xscale_trace_data *trace_data = xscale->trace.data;
  2184. int retval;
  2185. while (trace_data)
  2186. {
  2187. int i, chkpt;
  2188. int rollover;
  2189. int branch;
  2190. int exception;
  2191. xscale->trace.core_state = ARMV4_5_STATE_ARM;
  2192. chkpt = 0;
  2193. rollover = 0;
  2194. for (i = 0; i < trace_data->depth; i++)
  2195. {
  2196. next_pc_ok = 0;
  2197. branch = 0;
  2198. exception = 0;
  2199. if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
  2200. continue;
  2201. switch ((trace_data->entries[i].data & 0xf0) >> 4)
  2202. {
  2203. case 0: /* Exceptions */
  2204. case 1:
  2205. case 2:
  2206. case 3:
  2207. case 4:
  2208. case 5:
  2209. case 6:
  2210. case 7:
  2211. exception = (trace_data->entries[i].data & 0x70) >> 4;
  2212. next_pc_ok = 1;
  2213. next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
  2214. command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
  2215. break;
  2216. case 8: /* Direct Branch */
  2217. branch = 1;
  2218. break;
  2219. case 9: /* Indirect Branch */
  2220. branch = 1;
  2221. if (xscale_branch_address(trace_data, i, &next_pc) == 0)
  2222. {
  2223. next_pc_ok = 1;
  2224. }
  2225. break;
  2226. case 13: /* Checkpointed Indirect Branch */
  2227. if (xscale_branch_address(trace_data, i, &next_pc) == 0)
  2228. {
  2229. next_pc_ok = 1;
  2230. if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
  2231. || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
  2232. LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
  2233. }
  2234. /* explicit fall-through */
  2235. case 12: /* Checkpointed Direct Branch */
  2236. branch = 1;
  2237. if (chkpt == 0)
  2238. {
  2239. next_pc_ok = 1;
  2240. next_pc = trace_data->chkpt0;
  2241. chkpt++;
  2242. }
  2243. else if (chkpt == 1)
  2244. {
  2245. next_pc_ok = 1;
  2246. next_pc = trace_data->chkpt0;
  2247. chkpt++;
  2248. }
  2249. else
  2250. {
  2251. LOG_WARNING("more than two checkpointed branches encountered");
  2252. }
  2253. break;
  2254. case 15: /* Roll-over */
  2255. rollover++;
  2256. continue;
  2257. default: /* Reserved */
  2258. command_print(cmd_ctx, "--- reserved trace message ---");
  2259. LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
  2260. return ERROR_OK;
  2261. }
  2262. if (xscale->trace.pc_ok)
  2263. {
  2264. int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
  2265. struct arm_instruction instruction;
  2266. if ((exception == 6) || (exception == 7))
  2267. {
  2268. /* IRQ or FIQ exception, no instruction executed */
  2269. executed -= 1;
  2270. }
  2271. while (executed-- >= 0)
  2272. {
  2273. if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
  2274. {
  2275. /* can't continue tracing with no image available */
  2276. if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
  2277. {
  2278. return retval;
  2279. }
  2280. else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
  2281. {
  2282. /* TODO: handle incomplete images */
  2283. }
  2284. }
  2285. /* a precise abort on a load to the PC is included in the incremental
  2286. * word count, other instructions causing data aborts are not included
  2287. */
  2288. if ((executed == 0) && (exception == 4)
  2289. && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
  2290. {
  2291. if ((instruction.type == ARM_LDM)
  2292. && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
  2293. {
  2294. executed--;
  2295. }
  2296. else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
  2297. && (instruction.info.load_store.Rd != 15))
  2298. {
  2299. executed--;
  2300. }
  2301. }
  2302. /* only the last instruction executed
  2303. * (the one that caused the control flow change)
  2304. * could be a taken branch
  2305. */
  2306. if (((executed == -1) && (branch == 1)) &&
  2307. (((instruction.type == ARM_B) ||
  2308. (instruction.type == ARM_BL) ||
  2309. (instruction.type == ARM_BLX)) &&
  2310. (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
  2311. {
  2312. xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
  2313. }
  2314. else
  2315. {
  2316. xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
  2317. }
  2318. command_print(cmd_ctx, "%s", instruction.text);
  2319. }
  2320. rollover = 0;
  2321. }
  2322. if (next_pc_ok)
  2323. {
  2324. xscale->trace.current_pc = next_pc;
  2325. xscale->trace.pc_ok = 1;
  2326. }
  2327. }
  2328. for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
  2329. {
  2330. struct arm_instruction instruction;
  2331. if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
  2332. {
  2333. /* can't continue tracing with no image available */
  2334. if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
  2335. {
  2336. return retval;
  2337. }
  2338. else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
  2339. {
  2340. /* TODO: handle incomplete images */
  2341. }
  2342. }
  2343. command_print(cmd_ctx, "%s", instruction.text);
  2344. }
  2345. trace_data = trace_data->next;
  2346. }
  2347. return ERROR_OK;
  2348. }
/* Build the register caches for this target: first the standard ARMv4/5
 * core cache, then a second cache holding the XScale debug registers,
 * each backed by a per-target copy of the static arch_info template. */
static void xscale_build_reg_cache(target_t *target)
{
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	/* one arch_info slot per entry of the static template array */
	struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
	int i;
	int num_regs = sizeof(xscale_reg_arch_info) / sizeof(struct xscale_reg);

	(*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
	armv4_5->core_cache = (*cache_p);

	/* register a register arch-type for XScale dbg registers only once */
	if (xscale_reg_arch_type == -1)
		xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);

	/* chain the XScale cache after the ARMv4/5 core cache */
	(*cache_p)->next = malloc(sizeof(struct reg_cache));
	cache_p = &(*cache_p)->next;

	/* fill in values for the xscale reg cache */
	(*cache_p)->name = "XScale registers";
	(*cache_p)->next = NULL;
	(*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
	(*cache_p)->num_regs = num_regs;

	for (i = 0; i < num_regs; i++)
	{
		/* each register starts invalid, with a zeroed 32-bit value buffer */
		(*cache_p)->reg_list[i].name = xscale_reg_list[i];
		(*cache_p)->reg_list[i].value = calloc(4, 1);
		(*cache_p)->reg_list[i].dirty = 0;
		(*cache_p)->reg_list[i].valid = 0;
		(*cache_p)->reg_list[i].size = 32;
		(*cache_p)->reg_list[i].bitfield_desc = NULL;
		(*cache_p)->reg_list[i].num_bitfields = 0;
		(*cache_p)->reg_list[i].arch_info = &arch_info[i];
		(*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
		/* copy the template and bind it to this target */
		arch_info[i] = xscale_reg_arch_info[i];
		arch_info[i].target = target;
	}

	xscale->reg_cache = (*cache_p);
}
/* Target "init" hook: create the ARMv4/5 and XScale register caches. */
static int xscale_init_target(struct command_context_s *cmd_ctx,
		struct target_s *target)
{
	xscale_build_reg_cache(target);
	return ERROR_OK;
}
/* One-time initialization of the per-target XScale state: optional TAP
 * IR-length fixup from the variant name, default debug handler address,
 * reset/exception vector stubs, breakpoint/watchpoint bookkeeping,
 * trace state, and the ARMv4/5 core + MMU glue. */
static int xscale_init_arch_info(target_t *target,
		struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
{
	armv4_5_common_t *armv4_5;
	uint32_t high_reset_branch, low_reset_branch;
	int i;

	armv4_5 = &xscale->armv4_5_common;

	/* store architecture specfic data (none so far) */
	xscale->common_magic = XSCALE_COMMON_MAGIC;

	/* we don't really *need* variant info ... */
	if (variant) {
		int ir_length = 0;

		/* known variants imply a fixed IR length: 5 or 7 bits */
		if (strcmp(variant, "pxa250") == 0
				|| strcmp(variant, "pxa255") == 0
				|| strcmp(variant, "pxa26x") == 0)
			ir_length = 5;
		else if (strcmp(variant, "pxa27x") == 0
				|| strcmp(variant, "ixp42x") == 0
				|| strcmp(variant, "ixp45x") == 0
				|| strcmp(variant, "ixp46x") == 0)
			ir_length = 7;
		else
			LOG_WARNING("%s: unrecognized variant %s",
				tap->dotted_name, variant);

		/* override a mismatched user-configured IR length */
		if (ir_length && ir_length != tap->ir_length) {
			LOG_WARNING("%s: IR length for %s is %d; fixing",
				tap->dotted_name, variant, ir_length);
			tap->ir_length = ir_length;
		}
	}

	/* the debug handler isn't installed (and thus not running) at this time */
	xscale->handler_address = 0xfe000800;

	/* clear the vectors we keep locally for reference */
	memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
	memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

	/* no user-specified vectors have been configured yet */
	xscale->static_low_vectors_set = 0x0;
	xscale->static_high_vectors_set = 0x0;

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	/* reset vector (slot 0) branches into the debug handler */
	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* remaining vectors branch to themselves (0xfffffe offset) */
	for (i = 1; i <= 7; i++)
	{
		xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
		xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
	}

	/* 64kB aligned region used for DCache cleaning */
	xscale->cache_clean_address = 0xfffe0000;

	xscale->hold_rst = 0;
	xscale->external_debug_break = 0;

	/* two instruction-breakpoint and two data-breakpoint comparators */
	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* software breakpoint opcodes (ARM and Thumb BKPT) */
	xscale->arm_bkpt = ARMV5_BKPT(0x0);
	xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

	xscale->vector_catch = 0x1;

	xscale->trace.capture_status = TRACE_IDLE;
	xscale->trace.data = NULL;
	xscale->trace.image = NULL;
	xscale->trace.buffer_enabled = 0;
	xscale->trace.buffer_fill = 0;

	/* prepare ARMv4/5 specific information */
	armv4_5->arch_info = xscale;
	armv4_5->read_core_reg = xscale_read_core_reg;
	armv4_5->write_core_reg = xscale_write_core_reg;
	armv4_5->full_context = xscale_full_context;

	armv4_5_init_arch_info(target, armv4_5);

	/* MMU glue; cache type unknown (-1) until the target is examined */
	xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
	xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
	xscale->armv4_5_mmu.read_memory = xscale_read_memory;
	xscale->armv4_5_mmu.write_memory = xscale_write_memory;
	xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
	xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
	xscale->armv4_5_mmu.has_tiny_pages = 1;
	xscale->armv4_5_mmu.mmu_enabled = 0;

	return ERROR_OK;
}
  2473. static int xscale_target_create(struct target_s *target, Jim_Interp *interp)
  2474. {
  2475. struct xscale_common *xscale;
  2476. if (sizeof xscale_debug_handler - 1 > 0x800) {
  2477. LOG_ERROR("debug_handler.bin: larger than 2kb");
  2478. return ERROR_FAIL;
  2479. }
  2480. xscale = calloc(1, sizeof(*xscale));
  2481. if (!xscale)
  2482. return ERROR_FAIL;
  2483. return xscale_init_arch_info(target, xscale, target->tap,
  2484. target->variant);
  2485. }
  2486. COMMAND_HANDLER(xscale_handle_debug_handler_command)
  2487. {
  2488. target_t *target = NULL;
  2489. struct xscale_common *xscale;
  2490. int retval;
  2491. uint32_t handler_address;
  2492. if (argc < 2)
  2493. {
  2494. LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
  2495. return ERROR_OK;
  2496. }
  2497. if ((target = get_target(args[0])) == NULL)
  2498. {
  2499. LOG_ERROR("target '%s' not defined", args[0]);
  2500. return ERROR_FAIL;
  2501. }
  2502. xscale = target_to_xscale(target);
  2503. retval = xscale_verify_pointer(cmd_ctx, xscale);
  2504. if (retval != ERROR_OK)
  2505. return retval;
  2506. COMMAND_PARSE_NUMBER(u32, args[1], handler_address);
  2507. if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
  2508. ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
  2509. {
  2510. xscale->handler_address = handler_address;
  2511. }
  2512. else
  2513. {
  2514. LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
  2515. return ERROR_FAIL;
  2516. }
  2517. return ERROR_OK;
  2518. }
  2519. COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
  2520. {
  2521. target_t *target = NULL;
  2522. struct xscale_common *xscale;
  2523. int retval;
  2524. uint32_t cache_clean_address;
  2525. if (argc < 2)
  2526. {
  2527. return ERROR_COMMAND_SYNTAX_ERROR;
  2528. }
  2529. target = get_target(args[0]);
  2530. if (target == NULL)
  2531. {
  2532. LOG_ERROR("target '%s' not defined", args[0]);
  2533. return ERROR_FAIL;
  2534. }
  2535. xscale = target_to_xscale(target);
  2536. retval = xscale_verify_pointer(cmd_ctx, xscale);
  2537. if (retval != ERROR_OK)
  2538. return retval;
  2539. COMMAND_PARSE_NUMBER(u32, args[1], cache_clean_address);
  2540. if (cache_clean_address & 0xffff)
  2541. {
  2542. LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
  2543. }
  2544. else
  2545. {
  2546. xscale->cache_clean_address = cache_clean_address;
  2547. }
  2548. return ERROR_OK;
  2549. }
  2550. COMMAND_HANDLER(xscale_handle_cache_info_command)
  2551. {
  2552. target_t *target = get_current_target(cmd_ctx);
  2553. struct xscale_common *xscale = target_to_xscale(target);
  2554. int retval;
  2555. retval = xscale_verify_pointer(cmd_ctx, xscale);
  2556. if (retval != ERROR_OK)
  2557. return retval;
  2558. return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
  2559. }
  2560. static int xscale_virt2phys(struct target_s *target,
  2561. uint32_t virtual, uint32_t *physical)
  2562. {
  2563. struct xscale_common *xscale = target_to_xscale(target);
  2564. int type;
  2565. uint32_t cb;
  2566. int domain;
  2567. uint32_t ap;
  2568. if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
  2569. LOG_ERROR(xscale_not);
  2570. return ERROR_TARGET_INVALID;
  2571. }
  2572. uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
  2573. if (type == -1)
  2574. {
  2575. return ret;
  2576. }
  2577. *physical = ret;
  2578. return ERROR_OK;
  2579. }
  2580. static int xscale_mmu(struct target_s *target, int *enabled)
  2581. {
  2582. struct xscale_common *xscale = target_to_xscale(target);
  2583. if (target->state != TARGET_HALTED)
  2584. {
  2585. LOG_ERROR("Target not halted");
  2586. return ERROR_TARGET_INVALID;
  2587. }
  2588. *enabled = xscale->armv4_5_mmu.mmu_enabled;
  2589. return ERROR_OK;
  2590. }
  2591. COMMAND_HANDLER(xscale_handle_mmu_command)
  2592. {
  2593. target_t *target = get_current_target(cmd_ctx);
  2594. struct xscale_common *xscale = target_to_xscale(target);
  2595. int retval;
  2596. retval = xscale_verify_pointer(cmd_ctx, xscale);
  2597. if (retval != ERROR_OK)
  2598. return retval;
  2599. if (target->state != TARGET_HALTED)
  2600. {
  2601. command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
  2602. return ERROR_OK;
  2603. }
  2604. if (argc >= 1)
  2605. {
  2606. if (strcmp("enable", args[0]) == 0)
  2607. {
  2608. xscale_enable_mmu_caches(target, 1, 0, 0);
  2609. xscale->armv4_5_mmu.mmu_enabled = 1;
  2610. }
  2611. else if (strcmp("disable", args[0]) == 0)
  2612. {
  2613. xscale_disable_mmu_caches(target, 1, 0, 0);
  2614. xscale->armv4_5_mmu.mmu_enabled = 0;
  2615. }
  2616. }
  2617. command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
  2618. return ERROR_OK;
  2619. }
  2620. COMMAND_HANDLER(xscale_handle_idcache_command)
  2621. {
  2622. target_t *target = get_current_target(cmd_ctx);
  2623. struct xscale_common *xscale = target_to_xscale(target);
  2624. int icache = 0, dcache = 0;
  2625. int retval;
  2626. retval = xscale_verify_pointer(cmd_ctx, xscale);
  2627. if (retval != ERROR_OK)
  2628. return retval;
  2629. if (target->state != TARGET_HALTED)
  2630. {
  2631. command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
  2632. return ERROR_OK;
  2633. }
  2634. if (strcmp(CMD_NAME, "icache") == 0)
  2635. icache = 1;
  2636. else if (strcmp(CMD_NAME, "dcache") == 0)
  2637. dcache = 1;
  2638. if (argc >= 1)
  2639. {
  2640. if (strcmp("enable", args[0]) == 0)
  2641. {
  2642. xscale_enable_mmu_caches(target, 0, dcache, icache);
  2643. if (icache)
  2644. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
  2645. else if (dcache)
  2646. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
  2647. }
  2648. else if (strcmp("disable", args[0]) == 0)
  2649. {
  2650. xscale_disable_mmu_caches(target, 0, dcache, icache);
  2651. if (icache)
  2652. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
  2653. else if (dcache)
  2654. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
  2655. }
  2656. }
  2657. if (icache)
  2658. command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
  2659. if (dcache)
  2660. command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
  2661. return ERROR_OK;
  2662. }
  2663. COMMAND_HANDLER(xscale_handle_vector_catch_command)
  2664. {
  2665. target_t *target = get_current_target(cmd_ctx);
  2666. struct xscale_common *xscale = target_to_xscale(target);
  2667. int retval;
  2668. retval = xscale_verify_pointer(cmd_ctx, xscale);
  2669. if (retval != ERROR_OK)
  2670. return retval;
  2671. if (argc < 1)
  2672. {
  2673. command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
  2674. }
  2675. else
  2676. {
  2677. COMMAND_PARSE_NUMBER(u8, args[0], xscale->vector_catch);
  2678. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
  2679. xscale_write_dcsr(target, -1, -1);
  2680. }
  2681. command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
  2682. return ERROR_OK;
  2683. }
  2684. COMMAND_HANDLER(xscale_handle_vector_table_command)
  2685. {
  2686. target_t *target = get_current_target(cmd_ctx);
  2687. struct xscale_common *xscale = target_to_xscale(target);
  2688. int err = 0;
  2689. int retval;
  2690. retval = xscale_verify_pointer(cmd_ctx, xscale);
  2691. if (retval != ERROR_OK)
  2692. return retval;
  2693. if (argc == 0) /* print current settings */
  2694. {
  2695. int idx;
  2696. command_print(cmd_ctx, "active user-set static vectors:");
  2697. for (idx = 1; idx < 8; idx++)
  2698. if (xscale->static_low_vectors_set & (1 << idx))
  2699. command_print(cmd_ctx, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
  2700. for (idx = 1; idx < 8; idx++)
  2701. if (xscale->static_high_vectors_set & (1 << idx))
  2702. command_print(cmd_ctx, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
  2703. return ERROR_OK;
  2704. }
  2705. if (argc != 3)
  2706. err = 1;
  2707. else
  2708. {
  2709. int idx;
  2710. COMMAND_PARSE_NUMBER(int, args[1], idx);
  2711. uint32_t vec;
  2712. COMMAND_PARSE_NUMBER(u32, args[2], vec);
  2713. if (idx < 1 || idx >= 8)
  2714. err = 1;
  2715. if (!err && strcmp(args[0], "low") == 0)
  2716. {
  2717. xscale->static_low_vectors_set |= (1<<idx);
  2718. xscale->static_low_vectors[idx] = vec;
  2719. }
  2720. else if (!err && (strcmp(args[0], "high") == 0))
  2721. {
  2722. xscale->static_high_vectors_set |= (1<<idx);
  2723. xscale->static_high_vectors[idx] = vec;
  2724. }
  2725. else
  2726. err = 1;
  2727. }
  2728. if (err)
  2729. command_print(cmd_ctx, "usage: xscale vector_table <high|low> <index> <code>");
  2730. return ERROR_OK;
  2731. }
/* Implements "xscale trace_buffer [<enable|disable> ['fill' [n]|'wrap']]".
 *
 * Configures the on-chip trace buffer.  Enabling discards any previously
 * collected trace data; the fill/wrap choice is recorded in
 * trace.buffer_fill (n >= 0 means fill-once n times, -1 means wrap) and
 * mirrored into the low two bits of DCSR on the target.
 */
COMMAND_HANDLER(xscale_handle_trace_buffer_command)
{
	target_t *target = get_current_target(cmd_ctx);
	struct xscale_common *xscale = target_to_xscale(target);
	struct armv4_5_common_s *armv4_5 = &xscale->armv4_5_common;
	uint32_t dcsr_value;
	int retval;

	retval = xscale_verify_pointer(cmd_ctx, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
	{
		struct xscale_trace_data *td, *next_td;
		xscale->trace.buffer_enabled = 1;

		/* free old trace data */
		td = xscale->trace.data;
		while (td)
		{
			next_td = td->next;
			if (td->entries)
				free(td->entries);
			free(td);
			td = next_td;
		}
		xscale->trace.data = NULL;
	}
	else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
	{
		xscale->trace.buffer_enabled = 0;
	}

	/* second argument selects the capture mode */
	if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
	{
		uint32_t fill = 1;	/* default: fill the buffer once */
		if (argc >= 3)
			COMMAND_PARSE_NUMBER(u32, args[2], fill);
		xscale->trace.buffer_fill = fill;
	}
	else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
	{
		/* negative buffer_fill encodes wrap-around mode */
		xscale->trace.buffer_fill = -1;
	}

	if (xscale->trace.buffer_enabled)
	{
		/* if we enable the trace buffer in fill-once
		 * mode we know the address of the first instruction */
		xscale->trace.pc_ok = 1;
		xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	}
	else
	{
		/* otherwise the address is unknown, and we have no known good PC */
		xscale->trace.pc_ok = 0;
	}

	command_print(cmd_ctx, "trace buffer %s (%s)",
		(xscale->trace.buffer_enabled) ? "enabled" : "disabled",
		(xscale->trace.buffer_fill > 0) ? "fill" : "wrap");

	/* clear DCSR bits [1:0]; set bit 1 when in fill mode */
	dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
	if (xscale->trace.buffer_fill >= 0)
		xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
	else
		xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);

	return ERROR_OK;
}
  2800. COMMAND_HANDLER(xscale_handle_trace_image_command)
  2801. {
  2802. target_t *target = get_current_target(cmd_ctx);
  2803. struct xscale_common *xscale = target_to_xscale(target);
  2804. int retval;
  2805. if (argc < 1)
  2806. {
  2807. command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
  2808. return ERROR_OK;
  2809. }
  2810. retval = xscale_verify_pointer(cmd_ctx, xscale);
  2811. if (retval != ERROR_OK)
  2812. return retval;
  2813. if (xscale->trace.image)
  2814. {
  2815. image_close(xscale->trace.image);
  2816. free(xscale->trace.image);
  2817. command_print(cmd_ctx, "previously loaded image found and closed");
  2818. }
  2819. xscale->trace.image = malloc(sizeof(image_t));
  2820. xscale->trace.image->base_address_set = 0;
  2821. xscale->trace.image->start_address_set = 0;
  2822. /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
  2823. if (argc >= 2)
  2824. {
  2825. xscale->trace.image->base_address_set = 1;
  2826. COMMAND_PARSE_NUMBER(int, args[1], xscale->trace.image->base_address);
  2827. }
  2828. else
  2829. {
  2830. xscale->trace.image->base_address_set = 0;
  2831. }
  2832. if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
  2833. {
  2834. free(xscale->trace.image);
  2835. xscale->trace.image = NULL;
  2836. return ERROR_OK;
  2837. }
  2838. return ERROR_OK;
  2839. }
  2840. COMMAND_HANDLER(xscale_handle_dump_trace_command)
  2841. {
  2842. target_t *target = get_current_target(cmd_ctx);
  2843. struct xscale_common *xscale = target_to_xscale(target);
  2844. struct xscale_trace_data *trace_data;
  2845. struct fileio file;
  2846. int retval;
  2847. retval = xscale_verify_pointer(cmd_ctx, xscale);
  2848. if (retval != ERROR_OK)
  2849. return retval;
  2850. if (target->state != TARGET_HALTED)
  2851. {
  2852. command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
  2853. return ERROR_OK;
  2854. }
  2855. if (argc < 1)
  2856. {
  2857. command_print(cmd_ctx, "usage: xscale dump_trace <file>");
  2858. return ERROR_OK;
  2859. }
  2860. trace_data = xscale->trace.data;
  2861. if (!trace_data)
  2862. {
  2863. command_print(cmd_ctx, "no trace data collected");
  2864. return ERROR_OK;
  2865. }
  2866. if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
  2867. {
  2868. return ERROR_OK;
  2869. }
  2870. while (trace_data)
  2871. {
  2872. int i;
  2873. fileio_write_u32(&file, trace_data->chkpt0);
  2874. fileio_write_u32(&file, trace_data->chkpt1);
  2875. fileio_write_u32(&file, trace_data->last_instruction);
  2876. fileio_write_u32(&file, trace_data->depth);
  2877. for (i = 0; i < trace_data->depth; i++)
  2878. fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
  2879. trace_data = trace_data->next;
  2880. }
  2881. fileio_close(&file);
  2882. return ERROR_OK;
  2883. }
  2884. COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
  2885. {
  2886. target_t *target = get_current_target(cmd_ctx);
  2887. struct xscale_common *xscale = target_to_xscale(target);
  2888. int retval;
  2889. retval = xscale_verify_pointer(cmd_ctx, xscale);
  2890. if (retval != ERROR_OK)
  2891. return retval;
  2892. xscale_analyze_trace(target, cmd_ctx);
  2893. return ERROR_OK;
  2894. }
/* Implements "xscale cp15 [register] [value]".
 *
 * With one argument, reads the given CP15 register through the cached
 * register mirror; with two, writes it via the debug handler protocol.
 * Only the CP15 registers mirrored in the register cache are accepted.
 */
COMMAND_HANDLER(xscale_handle_cp15)
{
	target_t *target = get_current_target(cmd_ctx);
	struct xscale_common *xscale = target_to_xscale(target);
	int retval;

	retval = xscale_verify_pointer(cmd_ctx, xscale);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED)
	{
		command_print(cmd_ctx, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}
	uint32_t reg_no = 0;
	reg_t *reg = NULL;
	if (argc > 0)
	{
		COMMAND_PARSE_NUMBER(u32, args[0], reg_no);
		/* translate from xscale cp15 register number to the
		 * corresponding index in the OpenOCD register cache */
		switch (reg_no)
		{
		case 0:
			reg_no = XSCALE_MAINID;		/* main ID */
			break;
		case 1:
			reg_no = XSCALE_CTRL;		/* control */
			break;
		case 2:
			reg_no = XSCALE_TTB;		/* translation table base */
			break;
		case 3:
			reg_no = XSCALE_DAC;		/* domain access control */
			break;
		case 5:
			reg_no = XSCALE_FSR;		/* fault status */
			break;
		case 6:
			reg_no = XSCALE_FAR;		/* fault address */
			break;
		case 13:
			reg_no = XSCALE_PID;		/* process ID */
			break;
		case 15:
			reg_no = XSCALE_CPACCESS;	/* coprocessor access */
			break;
		default:
			command_print(cmd_ctx, "invalid register number");
			return ERROR_INVALID_ARGUMENTS;
		}
		reg = &xscale->reg_cache->reg_list[reg_no];
	}
	if (argc == 1)
	{
		uint32_t value;

		/* read cp15 control register */
		xscale_get_reg(reg);
		value = buf_get_u32(reg->value, 0, 32);
		command_print(cmd_ctx, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
	}
	else if (argc == 2)
	{
		uint32_t value;
		COMMAND_PARSE_NUMBER(u32, args[1], value);

		/* write sequence spoken to the on-target debug handler;
		 * the order of these four words is fixed by its protocol */
		/* send CP write request (command 0x41) */
		xscale_send_u32(target, 0x41);

		/* send CP register number */
		xscale_send_u32(target, reg_no);

		/* send CP register value */
		xscale_send_u32(target, value);

		/* execute cpwait to ensure outstanding operations complete */
		xscale_send_u32(target, 0x53);
	}
	else
	{
		command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
	}

	return ERROR_OK;
}
/* Register the "xscale" command group and all of its subcommands, then
 * pull in the generic ARMv4/5 commands.  Called once per command
 * context via the target_type vtable. */
static int xscale_register_commands(struct command_context_s *cmd_ctx)
{
	command_t *xscale_cmd;

	/* parent node: all subcommands hang off "xscale" */
	xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");

	/* configuration commands usable at any time */
	register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
	register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);

	/* cache/MMU commands (require a halted target at exec time) */
	register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
	register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
	register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
	register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");

	/* debug/vector configuration */
	register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
	register_command(cmd_ctx, xscale_cmd, "vector_table", xscale_handle_vector_table_command, COMMAND_EXEC, "<high|low> <index> <code> set static code for exception handler entry");

	/* trace buffer handling */
	register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable | disable> ['fill' [n]|'wrap']");
	register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
	register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
	register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
			COMMAND_EXEC, "load image from <file> [base address]");

	/* raw coprocessor access */
	register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");

	armv4_5_register_commands(cmd_ctx);

	return ERROR_OK;
}
/* Target-type vtable: hooks the XScale driver into the generic target
 * layer.  Unset operations (NULL) fall back to core defaults. */
target_type_t xscale_target =
{
	.name = "xscale",

	/* run control */
	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	/* reset handling; no soft-reset-halt support */
	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = NULL,

	/* register access via the generic ARMv4/5 layer */
	.get_gdb_reg_list = armv4_5_get_gdb_reg_list,

	/* memory access */
	.read_memory = xscale_read_memory,
	.write_memory = xscale_write_memory,
	.bulk_write_memory = xscale_bulk_write_memory,
	.checksum_memory = arm7_9_checksum_memory,
	.blank_check_memory = arm7_9_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* breakpoints and watchpoints */
	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	/* lifecycle and address translation */
	.register_commands = xscale_register_commands,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,
	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};