You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

3742 lines
98 KiB

  1. /***************************************************************************
  2. * Copyright (C) 2006, 2007 by Dominic Rath *
  3. * Dominic.Rath@gmx.de *
  4. * *
  5. * Copyright (C) 2007,2008 Øyvind Harboe *
  6. * oyvind.harboe@zylin.com *
  7. * *
  8. * Copyright (C) 2009 Michael Schwingen *
  9. * michael@schwingen.org *
  10. * *
  11. * This program is free software; you can redistribute it and/or modify *
  12. * it under the terms of the GNU General Public License as published by *
  13. * the Free Software Foundation; either version 2 of the License, or *
  14. * (at your option) any later version. *
  15. * *
  16. * This program is distributed in the hope that it will be useful, *
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  19. * GNU General Public License for more details. *
  20. * *
  21. * You should have received a copy of the GNU General Public License *
  22. * along with this program; if not, write to the *
  23. * Free Software Foundation, Inc., *
  24. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
  25. ***************************************************************************/
  26. #ifdef HAVE_CONFIG_H
  27. #include "config.h"
  28. #endif
  29. #include "xscale.h"
  30. #include "target_type.h"
  31. #include "arm7_9_common.h"
  32. #include "arm_simulator.h"
  33. #include "arm_disassembler.h"
  34. #include "time_support.h"
  35. #include "image.h"
  36. /*
  37. * Important XScale documents available as of October 2009 include:
  38. *
  39. * Intel XScale® Core Developer’s Manual, January 2004
  40. * Order Number: 273473-002
  41. * This has a chapter detailing debug facilities, and punts some
  42. * details to chip-specific microarchitecture documents.
  43. *
  44. * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
  45. * Document Number: 273539-005
  46. * Less detailed than the developer's manual, but summarizes those
  47. * missing details (for most XScales) and gives LOTS of notes about
  48. * debugger/handler interaction issues. Presents a simpler reset
  49. * and load-handler sequence than the arch doc. (Note, OpenOCD
  50. * doesn't currently support "Hot-Debug" as defined there.)
  51. *
  52. * Chip-specific microarchitecture documents may also be useful.
  53. */
  54. /* forward declarations */
  55. static int xscale_resume(struct target_s *, int current,
  56. uint32_t address, int handle_breakpoints, int debug_execution);
  57. static int xscale_debug_entry(target_t *);
  58. static int xscale_restore_context(target_t *);
  59. static int xscale_get_reg(reg_t *reg);
  60. static int xscale_set_reg(reg_t *reg, uint8_t *buf);
  61. static int xscale_set_breakpoint(struct target_s *, breakpoint_t *);
  62. static int xscale_set_watchpoint(struct target_s *, watchpoint_t *);
  63. static int xscale_unset_breakpoint(struct target_s *, breakpoint_t *);
  64. static int xscale_read_trace(target_t *);
  65. /* This XScale "debug handler" is loaded into the processor's
  66. * mini-ICache, which is 2K of code writable only via JTAG.
  67. *
  68. * FIXME the OpenOCD "bin2char" utility currently doesn't handle
  69. * binary files cleanly. It's string oriented, and terminates them
  70. * with a NUL character. Better would be to generate the constants
  71. * and let other code decide names, scoping, and other housekeeping.
  72. */
  73. static /* unsigned const char xscale_debug_handler[] = ... */
  74. #include "xscale_debug.h"
  75. static char *const xscale_reg_list[] =
  76. {
  77. "XSCALE_MAINID", /* 0 */
  78. "XSCALE_CACHETYPE",
  79. "XSCALE_CTRL",
  80. "XSCALE_AUXCTRL",
  81. "XSCALE_TTB",
  82. "XSCALE_DAC",
  83. "XSCALE_FSR",
  84. "XSCALE_FAR",
  85. "XSCALE_PID",
  86. "XSCALE_CPACCESS",
  87. "XSCALE_IBCR0", /* 10 */
  88. "XSCALE_IBCR1",
  89. "XSCALE_DBR0",
  90. "XSCALE_DBR1",
  91. "XSCALE_DBCON",
  92. "XSCALE_TBREG",
  93. "XSCALE_CHKPT0",
  94. "XSCALE_CHKPT1",
  95. "XSCALE_DCSR",
  96. "XSCALE_TX",
  97. "XSCALE_RX", /* 20 */
  98. "XSCALE_TXRXCTRL",
  99. };
  100. static const xscale_reg_t xscale_reg_arch_info[] =
  101. {
  102. {XSCALE_MAINID, NULL},
  103. {XSCALE_CACHETYPE, NULL},
  104. {XSCALE_CTRL, NULL},
  105. {XSCALE_AUXCTRL, NULL},
  106. {XSCALE_TTB, NULL},
  107. {XSCALE_DAC, NULL},
  108. {XSCALE_FSR, NULL},
  109. {XSCALE_FAR, NULL},
  110. {XSCALE_PID, NULL},
  111. {XSCALE_CPACCESS, NULL},
  112. {XSCALE_IBCR0, NULL},
  113. {XSCALE_IBCR1, NULL},
  114. {XSCALE_DBR0, NULL},
  115. {XSCALE_DBR1, NULL},
  116. {XSCALE_DBCON, NULL},
  117. {XSCALE_TBREG, NULL},
  118. {XSCALE_CHKPT0, NULL},
  119. {XSCALE_CHKPT1, NULL},
  120. {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
  121. {-1, NULL}, /* TX accessed via JTAG */
  122. {-1, NULL}, /* RX accessed via JTAG */
  123. {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
  124. };
  125. static int xscale_reg_arch_type = -1;
  126. /* convenience wrapper to access XScale specific registers */
  127. static int xscale_set_reg_u32(reg_t *reg, uint32_t value)
  128. {
  129. uint8_t buf[4];
  130. buf_set_u32(buf, 0, 32, value);
  131. return xscale_set_reg(reg, buf);
  132. }
  133. static int xscale_get_arch_pointers(target_t *target,
  134. armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
  135. {
  136. armv4_5_common_t *armv4_5 = target->arch_info;
  137. xscale_common_t *xscale = armv4_5->arch_info;
  138. if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
  139. {
  140. LOG_ERROR("target isn't an XScale target");
  141. return -1;
  142. }
  143. if (xscale->common_magic != XSCALE_COMMON_MAGIC)
  144. {
  145. LOG_ERROR("target isn't an XScale target");
  146. return -1;
  147. }
  148. *armv4_5_p = armv4_5;
  149. *xscale_p = xscale;
  150. return ERROR_OK;
  151. }
  152. static int xscale_jtag_set_instr(jtag_tap_t *tap, uint32_t new_instr)
  153. {
  154. if (tap == NULL)
  155. return ERROR_FAIL;
  156. if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
  157. {
  158. scan_field_t field;
  159. uint8_t scratch[4];
  160. memset(&field, 0, sizeof field);
  161. field.tap = tap;
  162. field.num_bits = tap->ir_length;
  163. field.out_value = scratch;
  164. buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
  165. jtag_add_ir_scan(1, &field, jtag_get_end_state());
  166. }
  167. return ERROR_OK;
  168. }
  169. static int xscale_read_dcsr(target_t *target)
  170. {
  171. armv4_5_common_t *armv4_5 = target->arch_info;
  172. xscale_common_t *xscale = armv4_5->arch_info;
  173. int retval;
  174. scan_field_t fields[3];
  175. uint8_t field0 = 0x0;
  176. uint8_t field0_check_value = 0x2;
  177. uint8_t field0_check_mask = 0x7;
  178. uint8_t field2 = 0x0;
  179. uint8_t field2_check_value = 0x0;
  180. uint8_t field2_check_mask = 0x1;
  181. jtag_set_end_state(TAP_DRPAUSE);
  182. xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
  183. buf_set_u32(&field0, 1, 1, xscale->hold_rst);
  184. buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
  185. memset(&fields, 0, sizeof fields);
  186. fields[0].tap = target->tap;
  187. fields[0].num_bits = 3;
  188. fields[0].out_value = &field0;
  189. uint8_t tmp;
  190. fields[0].in_value = &tmp;
  191. fields[1].tap = target->tap;
  192. fields[1].num_bits = 32;
  193. fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
  194. fields[2].tap = target->tap;
  195. fields[2].num_bits = 1;
  196. fields[2].out_value = &field2;
  197. uint8_t tmp2;
  198. fields[2].in_value = &tmp2;
  199. jtag_add_dr_scan(3, fields, jtag_get_end_state());
  200. jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
  201. jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
  202. if ((retval = jtag_execute_queue()) != ERROR_OK)
  203. {
  204. LOG_ERROR("JTAG error while reading DCSR");
  205. return retval;
  206. }
  207. xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
  208. xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
  209. /* write the register with the value we just read
  210. * on this second pass, only the first bit of field0 is guaranteed to be 0)
  211. */
  212. field0_check_mask = 0x1;
  213. fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
  214. fields[1].in_value = NULL;
  215. jtag_set_end_state(TAP_IDLE);
  216. jtag_add_dr_scan(3, fields, jtag_get_end_state());
  217. /* DANGER!!! this must be here. It will make sure that the arguments
  218. * to jtag_set_check_value() does not go out of scope! */
  219. return jtag_execute_queue();
  220. }
  221. static void xscale_getbuf(jtag_callback_data_t arg)
  222. {
  223. uint8_t *in = (uint8_t *)arg;
  224. *((uint32_t *)in) = buf_get_u32(in, 0, 32);
  225. }
  226. static int xscale_receive(target_t *target, uint32_t *buffer, int num_words)
  227. {
  228. if (num_words == 0)
  229. return ERROR_INVALID_ARGUMENTS;
  230. int retval = ERROR_OK;
  231. tap_state_t path[3];
  232. scan_field_t fields[3];
  233. uint8_t *field0 = malloc(num_words * 1);
  234. uint8_t field0_check_value = 0x2;
  235. uint8_t field0_check_mask = 0x6;
  236. uint32_t *field1 = malloc(num_words * 4);
  237. uint8_t field2_check_value = 0x0;
  238. uint8_t field2_check_mask = 0x1;
  239. int words_done = 0;
  240. int words_scheduled = 0;
  241. int i;
  242. path[0] = TAP_DRSELECT;
  243. path[1] = TAP_DRCAPTURE;
  244. path[2] = TAP_DRSHIFT;
  245. memset(&fields, 0, sizeof fields);
  246. fields[0].tap = target->tap;
  247. fields[0].num_bits = 3;
  248. fields[0].check_value = &field0_check_value;
  249. fields[0].check_mask = &field0_check_mask;
  250. fields[1].tap = target->tap;
  251. fields[1].num_bits = 32;
  252. fields[2].tap = target->tap;
  253. fields[2].num_bits = 1;
  254. fields[2].check_value = &field2_check_value;
  255. fields[2].check_mask = &field2_check_mask;
  256. jtag_set_end_state(TAP_IDLE);
  257. xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
  258. jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
  259. /* repeat until all words have been collected */
  260. int attempts = 0;
  261. while (words_done < num_words)
  262. {
  263. /* schedule reads */
  264. words_scheduled = 0;
  265. for (i = words_done; i < num_words; i++)
  266. {
  267. fields[0].in_value = &field0[i];
  268. jtag_add_pathmove(3, path);
  269. fields[1].in_value = (uint8_t *)(field1 + i);
  270. jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
  271. jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
  272. words_scheduled++;
  273. }
  274. if ((retval = jtag_execute_queue()) != ERROR_OK)
  275. {
  276. LOG_ERROR("JTAG error while receiving data from debug handler");
  277. break;
  278. }
  279. /* examine results */
  280. for (i = words_done; i < num_words; i++)
  281. {
  282. if (!(field0[0] & 1))
  283. {
  284. /* move backwards if necessary */
  285. int j;
  286. for (j = i; j < num_words - 1; j++)
  287. {
  288. field0[j] = field0[j + 1];
  289. field1[j] = field1[j + 1];
  290. }
  291. words_scheduled--;
  292. }
  293. }
  294. if (words_scheduled == 0)
  295. {
  296. if (attempts++==1000)
  297. {
  298. LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
  299. retval = ERROR_TARGET_TIMEOUT;
  300. break;
  301. }
  302. }
  303. words_done += words_scheduled;
  304. }
  305. for (i = 0; i < num_words; i++)
  306. *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
  307. free(field1);
  308. return retval;
  309. }
  310. static int xscale_read_tx(target_t *target, int consume)
  311. {
  312. armv4_5_common_t *armv4_5 = target->arch_info;
  313. xscale_common_t *xscale = armv4_5->arch_info;
  314. tap_state_t path[3];
  315. tap_state_t noconsume_path[6];
  316. int retval;
  317. struct timeval timeout, now;
  318. scan_field_t fields[3];
  319. uint8_t field0_in = 0x0;
  320. uint8_t field0_check_value = 0x2;
  321. uint8_t field0_check_mask = 0x6;
  322. uint8_t field2_check_value = 0x0;
  323. uint8_t field2_check_mask = 0x1;
  324. jtag_set_end_state(TAP_IDLE);
  325. xscale_jtag_set_instr(target->tap, XSCALE_DBGTX);
  326. path[0] = TAP_DRSELECT;
  327. path[1] = TAP_DRCAPTURE;
  328. path[2] = TAP_DRSHIFT;
  329. noconsume_path[0] = TAP_DRSELECT;
  330. noconsume_path[1] = TAP_DRCAPTURE;
  331. noconsume_path[2] = TAP_DREXIT1;
  332. noconsume_path[3] = TAP_DRPAUSE;
  333. noconsume_path[4] = TAP_DREXIT2;
  334. noconsume_path[5] = TAP_DRSHIFT;
  335. memset(&fields, 0, sizeof fields);
  336. fields[0].tap = target->tap;
  337. fields[0].num_bits = 3;
  338. fields[0].in_value = &field0_in;
  339. fields[1].tap = target->tap;
  340. fields[1].num_bits = 32;
  341. fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
  342. fields[2].tap = target->tap;
  343. fields[2].num_bits = 1;
  344. uint8_t tmp;
  345. fields[2].in_value = &tmp;
  346. gettimeofday(&timeout, NULL);
  347. timeval_add_time(&timeout, 1, 0);
  348. for (;;)
  349. {
  350. /* if we want to consume the register content (i.e. clear TX_READY),
  351. * we have to go straight from Capture-DR to Shift-DR
  352. * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
  353. */
  354. if (consume)
  355. jtag_add_pathmove(3, path);
  356. else
  357. {
  358. jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
  359. }
  360. jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
  361. jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
  362. jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
  363. if ((retval = jtag_execute_queue()) != ERROR_OK)
  364. {
  365. LOG_ERROR("JTAG error while reading TX");
  366. return ERROR_TARGET_TIMEOUT;
  367. }
  368. gettimeofday(&now, NULL);
  369. if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
  370. {
  371. LOG_ERROR("time out reading TX register");
  372. return ERROR_TARGET_TIMEOUT;
  373. }
  374. if (!((!(field0_in & 1)) && consume))
  375. {
  376. goto done;
  377. }
  378. if (debug_level >= 3)
  379. {
  380. LOG_DEBUG("waiting 100ms");
  381. alive_sleep(100); /* avoid flooding the logs */
  382. } else
  383. {
  384. keep_alive();
  385. }
  386. }
  387. done:
  388. if (!(field0_in & 1))
  389. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  390. return ERROR_OK;
  391. }
  392. static int xscale_write_rx(target_t *target)
  393. {
  394. armv4_5_common_t *armv4_5 = target->arch_info;
  395. xscale_common_t *xscale = armv4_5->arch_info;
  396. int retval;
  397. struct timeval timeout, now;
  398. scan_field_t fields[3];
  399. uint8_t field0_out = 0x0;
  400. uint8_t field0_in = 0x0;
  401. uint8_t field0_check_value = 0x2;
  402. uint8_t field0_check_mask = 0x6;
  403. uint8_t field2 = 0x0;
  404. uint8_t field2_check_value = 0x0;
  405. uint8_t field2_check_mask = 0x1;
  406. jtag_set_end_state(TAP_IDLE);
  407. xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
  408. memset(&fields, 0, sizeof fields);
  409. fields[0].tap = target->tap;
  410. fields[0].num_bits = 3;
  411. fields[0].out_value = &field0_out;
  412. fields[0].in_value = &field0_in;
  413. fields[1].tap = target->tap;
  414. fields[1].num_bits = 32;
  415. fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
  416. fields[2].tap = target->tap;
  417. fields[2].num_bits = 1;
  418. fields[2].out_value = &field2;
  419. uint8_t tmp;
  420. fields[2].in_value = &tmp;
  421. gettimeofday(&timeout, NULL);
  422. timeval_add_time(&timeout, 1, 0);
  423. /* poll until rx_read is low */
  424. LOG_DEBUG("polling RX");
  425. for (;;)
  426. {
  427. jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
  428. jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
  429. jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
  430. if ((retval = jtag_execute_queue()) != ERROR_OK)
  431. {
  432. LOG_ERROR("JTAG error while writing RX");
  433. return retval;
  434. }
  435. gettimeofday(&now, NULL);
  436. if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
  437. {
  438. LOG_ERROR("time out writing RX register");
  439. return ERROR_TARGET_TIMEOUT;
  440. }
  441. if (!(field0_in & 1))
  442. goto done;
  443. if (debug_level >= 3)
  444. {
  445. LOG_DEBUG("waiting 100ms");
  446. alive_sleep(100); /* avoid flooding the logs */
  447. } else
  448. {
  449. keep_alive();
  450. }
  451. }
  452. done:
  453. /* set rx_valid */
  454. field2 = 0x1;
  455. jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
  456. if ((retval = jtag_execute_queue()) != ERROR_OK)
  457. {
  458. LOG_ERROR("JTAG error while writing RX");
  459. return retval;
  460. }
  461. return ERROR_OK;
  462. }
  463. /* send count elements of size byte to the debug handler */
  464. static int xscale_send(target_t *target, uint8_t *buffer, int count, int size)
  465. {
  466. uint32_t t[3];
  467. int bits[3];
  468. int retval;
  469. int done_count = 0;
  470. jtag_set_end_state(TAP_IDLE);
  471. xscale_jtag_set_instr(target->tap, XSCALE_DBGRX);
  472. bits[0]=3;
  473. t[0]=0;
  474. bits[1]=32;
  475. t[2]=1;
  476. bits[2]=1;
  477. int endianness = target->endianness;
  478. while (done_count++ < count)
  479. {
  480. switch (size)
  481. {
  482. case 4:
  483. if (endianness == TARGET_LITTLE_ENDIAN)
  484. {
  485. t[1]=le_to_h_u32(buffer);
  486. } else
  487. {
  488. t[1]=be_to_h_u32(buffer);
  489. }
  490. break;
  491. case 2:
  492. if (endianness == TARGET_LITTLE_ENDIAN)
  493. {
  494. t[1]=le_to_h_u16(buffer);
  495. } else
  496. {
  497. t[1]=be_to_h_u16(buffer);
  498. }
  499. break;
  500. case 1:
  501. t[1]=buffer[0];
  502. break;
  503. default:
  504. LOG_ERROR("BUG: size neither 4, 2 nor 1");
  505. exit(-1);
  506. }
  507. jtag_add_dr_out(target->tap,
  508. 3,
  509. bits,
  510. t,
  511. jtag_set_end_state(TAP_IDLE));
  512. buffer += size;
  513. }
  514. if ((retval = jtag_execute_queue()) != ERROR_OK)
  515. {
  516. LOG_ERROR("JTAG error while sending data to debug handler");
  517. return retval;
  518. }
  519. return ERROR_OK;
  520. }
  521. static int xscale_send_u32(target_t *target, uint32_t value)
  522. {
  523. armv4_5_common_t *armv4_5 = target->arch_info;
  524. xscale_common_t *xscale = armv4_5->arch_info;
  525. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
  526. return xscale_write_rx(target);
  527. }
  528. static int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
  529. {
  530. armv4_5_common_t *armv4_5 = target->arch_info;
  531. xscale_common_t *xscale = armv4_5->arch_info;
  532. int retval;
  533. scan_field_t fields[3];
  534. uint8_t field0 = 0x0;
  535. uint8_t field0_check_value = 0x2;
  536. uint8_t field0_check_mask = 0x7;
  537. uint8_t field2 = 0x0;
  538. uint8_t field2_check_value = 0x0;
  539. uint8_t field2_check_mask = 0x1;
  540. if (hold_rst != -1)
  541. xscale->hold_rst = hold_rst;
  542. if (ext_dbg_brk != -1)
  543. xscale->external_debug_break = ext_dbg_brk;
  544. jtag_set_end_state(TAP_IDLE);
  545. xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
  546. buf_set_u32(&field0, 1, 1, xscale->hold_rst);
  547. buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
  548. memset(&fields, 0, sizeof fields);
  549. fields[0].tap = target->tap;
  550. fields[0].num_bits = 3;
  551. fields[0].out_value = &field0;
  552. uint8_t tmp;
  553. fields[0].in_value = &tmp;
  554. fields[1].tap = target->tap;
  555. fields[1].num_bits = 32;
  556. fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
  557. fields[2].tap = target->tap;
  558. fields[2].num_bits = 1;
  559. fields[2].out_value = &field2;
  560. uint8_t tmp2;
  561. fields[2].in_value = &tmp2;
  562. jtag_add_dr_scan(3, fields, jtag_get_end_state());
  563. jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
  564. jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
  565. if ((retval = jtag_execute_queue()) != ERROR_OK)
  566. {
  567. LOG_ERROR("JTAG error while writing DCSR");
  568. return retval;
  569. }
  570. xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
  571. xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
  572. return ERROR_OK;
  573. }
  574. /* parity of the number of bits 0 if even; 1 if odd. for 32 bit words */
  575. static unsigned int parity (unsigned int v)
  576. {
  577. // unsigned int ov = v;
  578. v ^= v >> 16;
  579. v ^= v >> 8;
  580. v ^= v >> 4;
  581. v &= 0xf;
  582. // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
  583. return (0x6996 >> v) & 1;
  584. }
  585. static int xscale_load_ic(target_t *target, uint32_t va, uint32_t buffer[8])
  586. {
  587. uint8_t packet[4];
  588. uint8_t cmd;
  589. int word;
  590. scan_field_t fields[2];
  591. LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
  592. /* LDIC into IR */
  593. jtag_set_end_state(TAP_IDLE);
  594. xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
  595. /* CMD is b011 to load a cacheline into the Mini ICache.
  596. * Loading into the main ICache is deprecated, and unused.
  597. * It's followed by three zero bits, and 27 address bits.
  598. */
  599. buf_set_u32(&cmd, 0, 6, 0x3);
  600. /* virtual address of desired cache line */
  601. buf_set_u32(packet, 0, 27, va >> 5);
  602. memset(&fields, 0, sizeof fields);
  603. fields[0].tap = target->tap;
  604. fields[0].num_bits = 6;
  605. fields[0].out_value = &cmd;
  606. fields[1].tap = target->tap;
  607. fields[1].num_bits = 27;
  608. fields[1].out_value = packet;
  609. jtag_add_dr_scan(2, fields, jtag_get_end_state());
  610. /* rest of packet is a cacheline: 8 instructions, with parity */
  611. fields[0].num_bits = 32;
  612. fields[0].out_value = packet;
  613. fields[1].num_bits = 1;
  614. fields[1].out_value = &cmd;
  615. for (word = 0; word < 8; word++)
  616. {
  617. buf_set_u32(packet, 0, 32, buffer[word]);
  618. uint32_t value;
  619. memcpy(&value, packet, sizeof(uint32_t));
  620. cmd = parity(value);
  621. jtag_add_dr_scan(2, fields, jtag_get_end_state());
  622. }
  623. return jtag_execute_queue();
  624. }
  625. static int xscale_invalidate_ic_line(target_t *target, uint32_t va)
  626. {
  627. uint8_t packet[4];
  628. uint8_t cmd;
  629. scan_field_t fields[2];
  630. jtag_set_end_state(TAP_IDLE);
  631. xscale_jtag_set_instr(target->tap, XSCALE_LDIC);
  632. /* CMD for invalidate IC line b000, bits [6:4] b000 */
  633. buf_set_u32(&cmd, 0, 6, 0x0);
  634. /* virtual address of desired cache line */
  635. buf_set_u32(packet, 0, 27, va >> 5);
  636. memset(&fields, 0, sizeof fields);
  637. fields[0].tap = target->tap;
  638. fields[0].num_bits = 6;
  639. fields[0].out_value = &cmd;
  640. fields[1].tap = target->tap;
  641. fields[1].num_bits = 27;
  642. fields[1].out_value = packet;
  643. jtag_add_dr_scan(2, fields, jtag_get_end_state());
  644. return ERROR_OK;
  645. }
  646. static int xscale_update_vectors(target_t *target)
  647. {
  648. armv4_5_common_t *armv4_5 = target->arch_info;
  649. xscale_common_t *xscale = armv4_5->arch_info;
  650. int i;
  651. int retval;
  652. uint32_t low_reset_branch, high_reset_branch;
  653. for (i = 1; i < 8; i++)
  654. {
  655. /* if there's a static vector specified for this exception, override */
  656. if (xscale->static_high_vectors_set & (1 << i))
  657. {
  658. xscale->high_vectors[i] = xscale->static_high_vectors[i];
  659. }
  660. else
  661. {
  662. retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
  663. if (retval == ERROR_TARGET_TIMEOUT)
  664. return retval;
  665. if (retval != ERROR_OK)
  666. {
  667. /* Some of these reads will fail as part of normal execution */
  668. xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  669. }
  670. }
  671. }
  672. for (i = 1; i < 8; i++)
  673. {
  674. if (xscale->static_low_vectors_set & (1 << i))
  675. {
  676. xscale->low_vectors[i] = xscale->static_low_vectors[i];
  677. }
  678. else
  679. {
  680. retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
  681. if (retval == ERROR_TARGET_TIMEOUT)
  682. return retval;
  683. if (retval != ERROR_OK)
  684. {
  685. /* Some of these reads will fail as part of normal execution */
  686. xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  687. }
  688. }
  689. }
  690. /* calculate branches to debug handler */
  691. low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
  692. high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
  693. xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
  694. xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
  695. /* invalidate and load exception vectors in mini i-cache */
  696. xscale_invalidate_ic_line(target, 0x0);
  697. xscale_invalidate_ic_line(target, 0xffff0000);
  698. xscale_load_ic(target, 0x0, xscale->low_vectors);
  699. xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
  700. return ERROR_OK;
  701. }
  702. static int xscale_arch_state(struct target_s *target)
  703. {
  704. armv4_5_common_t *armv4_5 = target->arch_info;
  705. xscale_common_t *xscale = armv4_5->arch_info;
  706. static const char *state[] =
  707. {
  708. "disabled", "enabled"
  709. };
  710. static const char *arch_dbg_reason[] =
  711. {
  712. "", "\n(processor reset)", "\n(trace buffer full)"
  713. };
  714. if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
  715. {
  716. LOG_ERROR("BUG: called for a non-ARMv4/5 target");
  717. exit(-1);
  718. }
  719. LOG_USER("target halted in %s state due to %s, current mode: %s\n"
  720. "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
  721. "MMU: %s, D-Cache: %s, I-Cache: %s"
  722. "%s",
  723. armv4_5_state_strings[armv4_5->core_state],
  724. Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
  725. armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
  726. buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
  727. buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
  728. state[xscale->armv4_5_mmu.mmu_enabled],
  729. state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
  730. state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
  731. arch_dbg_reason[xscale->arch_debug_reason]);
  732. return ERROR_OK;
  733. }
  734. static int xscale_poll(target_t *target)
  735. {
  736. int retval = ERROR_OK;
  737. if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
  738. {
  739. enum target_state previous_state = target->state;
  740. if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
  741. {
  742. /* there's data to read from the tx register, we entered debug state */
  743. target->state = TARGET_HALTED;
  744. /* process debug entry, fetching current mode regs */
  745. retval = xscale_debug_entry(target);
  746. }
  747. else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
  748. {
  749. LOG_USER("error while polling TX register, reset CPU");
  750. /* here we "lie" so GDB won't get stuck and a reset can be perfomed */
  751. target->state = TARGET_HALTED;
  752. }
  753. /* debug_entry could have overwritten target state (i.e. immediate resume)
  754. * don't signal event handlers in that case
  755. */
  756. if (target->state != TARGET_HALTED)
  757. return ERROR_OK;
  758. /* if target was running, signal that we halted
  759. * otherwise we reentered from debug execution */
  760. if (previous_state == TARGET_RUNNING)
  761. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  762. else
  763. target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
  764. }
  765. return retval;
  766. }
  767. static int xscale_debug_entry(target_t *target)
  768. {
  769. armv4_5_common_t *armv4_5 = target->arch_info;
  770. xscale_common_t *xscale = armv4_5->arch_info;
  771. uint32_t pc;
  772. uint32_t buffer[10];
  773. int i;
  774. int retval;
  775. uint32_t moe;
  776. /* clear external dbg break (will be written on next DCSR read) */
  777. xscale->external_debug_break = 0;
  778. if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
  779. return retval;
  780. /* get r0, pc, r1 to r7 and cpsr */
  781. if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
  782. return retval;
  783. /* move r0 from buffer to register cache */
  784. buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
  785. armv4_5->core_cache->reg_list[0].dirty = 1;
  786. armv4_5->core_cache->reg_list[0].valid = 1;
  787. LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
  788. /* move pc from buffer to register cache */
  789. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
  790. armv4_5->core_cache->reg_list[15].dirty = 1;
  791. armv4_5->core_cache->reg_list[15].valid = 1;
  792. LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
  793. /* move data from buffer to register cache */
  794. for (i = 1; i <= 7; i++)
  795. {
  796. buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
  797. armv4_5->core_cache->reg_list[i].dirty = 1;
  798. armv4_5->core_cache->reg_list[i].valid = 1;
  799. LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
  800. }
  801. buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
  802. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
  803. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
  804. LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
  805. armv4_5->core_mode = buffer[9] & 0x1f;
  806. if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
  807. {
  808. target->state = TARGET_UNKNOWN;
  809. LOG_ERROR("cpsr contains invalid mode value - communication failure");
  810. return ERROR_TARGET_FAILURE;
  811. }
  812. LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
  813. if (buffer[9] & 0x20)
  814. armv4_5->core_state = ARMV4_5_STATE_THUMB;
  815. else
  816. armv4_5->core_state = ARMV4_5_STATE_ARM;
  817. if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
  818. return ERROR_FAIL;
  819. /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
  820. if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
  821. {
  822. xscale_receive(target, buffer, 8);
  823. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
  824. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
  825. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
  826. }
  827. else
  828. {
  829. /* r8 to r14, but no spsr */
  830. xscale_receive(target, buffer, 7);
  831. }
  832. /* move data from buffer to register cache */
  833. for (i = 8; i <= 14; i++)
  834. {
  835. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
  836. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
  837. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
  838. }
  839. /* examine debug reason */
  840. xscale_read_dcsr(target);
  841. moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
  842. /* stored PC (for calculating fixup) */
  843. pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  844. switch (moe)
  845. {
  846. case 0x0: /* Processor reset */
  847. target->debug_reason = DBG_REASON_DBGRQ;
  848. xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
  849. pc -= 4;
  850. break;
  851. case 0x1: /* Instruction breakpoint hit */
  852. target->debug_reason = DBG_REASON_BREAKPOINT;
  853. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  854. pc -= 4;
  855. break;
  856. case 0x2: /* Data breakpoint hit */
  857. target->debug_reason = DBG_REASON_WATCHPOINT;
  858. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  859. pc -= 4;
  860. break;
  861. case 0x3: /* BKPT instruction executed */
  862. target->debug_reason = DBG_REASON_BREAKPOINT;
  863. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  864. pc -= 4;
  865. break;
  866. case 0x4: /* Ext. debug event */
  867. target->debug_reason = DBG_REASON_DBGRQ;
  868. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  869. pc -= 4;
  870. break;
  871. case 0x5: /* Vector trap occured */
  872. target->debug_reason = DBG_REASON_BREAKPOINT;
  873. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  874. pc -= 4;
  875. break;
  876. case 0x6: /* Trace buffer full break */
  877. target->debug_reason = DBG_REASON_DBGRQ;
  878. xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
  879. pc -= 4;
  880. break;
  881. case 0x7: /* Reserved (may flag Hot-Debug support) */
  882. default:
  883. LOG_ERROR("Method of Entry is 'Reserved'");
  884. exit(-1);
  885. break;
  886. }
  887. /* apply PC fixup */
  888. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
  889. /* on the first debug entry, identify cache type */
  890. if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
  891. {
  892. uint32_t cache_type_reg;
  893. /* read cp15 cache type register */
  894. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
  895. cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
  896. armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
  897. }
  898. /* examine MMU and Cache settings */
  899. /* read cp15 control register */
  900. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  901. xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  902. xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
  903. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
  904. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
  905. /* tracing enabled, read collected trace data */
  906. if (xscale->trace.buffer_enabled)
  907. {
  908. xscale_read_trace(target);
  909. xscale->trace.buffer_fill--;
  910. /* resume if we're still collecting trace data */
  911. if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
  912. && (xscale->trace.buffer_fill > 0))
  913. {
  914. xscale_resume(target, 1, 0x0, 1, 0);
  915. }
  916. else
  917. {
  918. xscale->trace.buffer_enabled = 0;
  919. }
  920. }
  921. return ERROR_OK;
  922. }
  923. static int xscale_halt(target_t *target)
  924. {
  925. armv4_5_common_t *armv4_5 = target->arch_info;
  926. xscale_common_t *xscale = armv4_5->arch_info;
  927. LOG_DEBUG("target->state: %s",
  928. target_state_name(target));
  929. if (target->state == TARGET_HALTED)
  930. {
  931. LOG_DEBUG("target was already halted");
  932. return ERROR_OK;
  933. }
  934. else if (target->state == TARGET_UNKNOWN)
  935. {
  936. /* this must not happen for a xscale target */
  937. LOG_ERROR("target was in unknown state when halt was requested");
  938. return ERROR_TARGET_INVALID;
  939. }
  940. else if (target->state == TARGET_RESET)
  941. {
  942. LOG_DEBUG("target->state == TARGET_RESET");
  943. }
  944. else
  945. {
  946. /* assert external dbg break */
  947. xscale->external_debug_break = 1;
  948. xscale_read_dcsr(target);
  949. target->debug_reason = DBG_REASON_DBGRQ;
  950. }
  951. return ERROR_OK;
  952. }
  953. static int xscale_enable_single_step(struct target_s *target, uint32_t next_pc)
  954. {
  955. armv4_5_common_t *armv4_5 = target->arch_info;
  956. xscale_common_t *xscale= armv4_5->arch_info;
  957. reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
  958. int retval;
  959. if (xscale->ibcr0_used)
  960. {
  961. breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
  962. if (ibcr0_bp)
  963. {
  964. xscale_unset_breakpoint(target, ibcr0_bp);
  965. }
  966. else
  967. {
  968. LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
  969. exit(-1);
  970. }
  971. }
  972. if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
  973. return retval;
  974. return ERROR_OK;
  975. }
  976. static int xscale_disable_single_step(struct target_s *target)
  977. {
  978. armv4_5_common_t *armv4_5 = target->arch_info;
  979. xscale_common_t *xscale= armv4_5->arch_info;
  980. reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
  981. int retval;
  982. if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
  983. return retval;
  984. return ERROR_OK;
  985. }
  986. static void xscale_enable_watchpoints(struct target_s *target)
  987. {
  988. watchpoint_t *watchpoint = target->watchpoints;
  989. while (watchpoint)
  990. {
  991. if (watchpoint->set == 0)
  992. xscale_set_watchpoint(target, watchpoint);
  993. watchpoint = watchpoint->next;
  994. }
  995. }
  996. static void xscale_enable_breakpoints(struct target_s *target)
  997. {
  998. breakpoint_t *breakpoint = target->breakpoints;
  999. /* set any pending breakpoints */
  1000. while (breakpoint)
  1001. {
  1002. if (breakpoint->set == 0)
  1003. xscale_set_breakpoint(target, breakpoint);
  1004. breakpoint = breakpoint->next;
  1005. }
  1006. }
  1007. static int xscale_resume(struct target_s *target, int current,
  1008. uint32_t address, int handle_breakpoints, int debug_execution)
  1009. {
  1010. armv4_5_common_t *armv4_5 = target->arch_info;
  1011. xscale_common_t *xscale= armv4_5->arch_info;
  1012. breakpoint_t *breakpoint = target->breakpoints;
  1013. uint32_t current_pc;
  1014. int retval;
  1015. int i;
  1016. LOG_DEBUG("-");
  1017. if (target->state != TARGET_HALTED)
  1018. {
  1019. LOG_WARNING("target not halted");
  1020. return ERROR_TARGET_NOT_HALTED;
  1021. }
  1022. if (!debug_execution)
  1023. {
  1024. target_free_all_working_areas(target);
  1025. }
  1026. /* update vector tables */
  1027. if ((retval = xscale_update_vectors(target)) != ERROR_OK)
  1028. return retval;
  1029. /* current = 1: continue on current pc, otherwise continue at <address> */
  1030. if (!current)
  1031. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
  1032. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1033. /* if we're at the reset vector, we have to simulate the branch */
  1034. if (current_pc == 0x0)
  1035. {
  1036. arm_simulate_step(target, NULL);
  1037. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1038. }
  1039. /* the front-end may request us not to handle breakpoints */
  1040. if (handle_breakpoints)
  1041. {
  1042. if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
  1043. {
  1044. uint32_t next_pc;
  1045. /* there's a breakpoint at the current PC, we have to step over it */
  1046. LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
  1047. xscale_unset_breakpoint(target, breakpoint);
  1048. /* calculate PC of next instruction */
  1049. if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
  1050. {
  1051. uint32_t current_opcode;
  1052. target_read_u32(target, current_pc, &current_opcode);
  1053. LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
  1054. }
  1055. LOG_DEBUG("enable single-step");
  1056. xscale_enable_single_step(target, next_pc);
  1057. /* restore banked registers */
  1058. xscale_restore_context(target);
  1059. /* send resume request (command 0x30 or 0x31)
  1060. * clean the trace buffer if it is to be enabled (0x62) */
  1061. if (xscale->trace.buffer_enabled)
  1062. {
  1063. xscale_send_u32(target, 0x62);
  1064. xscale_send_u32(target, 0x31);
  1065. }
  1066. else
  1067. xscale_send_u32(target, 0x30);
  1068. /* send CPSR */
  1069. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1070. LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1071. for (i = 7; i >= 0; i--)
  1072. {
  1073. /* send register */
  1074. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1075. LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1076. }
  1077. /* send PC */
  1078. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1079. LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1080. /* wait for and process debug entry */
  1081. xscale_debug_entry(target);
  1082. LOG_DEBUG("disable single-step");
  1083. xscale_disable_single_step(target);
  1084. LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
  1085. xscale_set_breakpoint(target, breakpoint);
  1086. }
  1087. }
  1088. /* enable any pending breakpoints and watchpoints */
  1089. xscale_enable_breakpoints(target);
  1090. xscale_enable_watchpoints(target);
  1091. /* restore banked registers */
  1092. xscale_restore_context(target);
  1093. /* send resume request (command 0x30 or 0x31)
  1094. * clean the trace buffer if it is to be enabled (0x62) */
  1095. if (xscale->trace.buffer_enabled)
  1096. {
  1097. xscale_send_u32(target, 0x62);
  1098. xscale_send_u32(target, 0x31);
  1099. }
  1100. else
  1101. xscale_send_u32(target, 0x30);
  1102. /* send CPSR */
  1103. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1104. LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1105. for (i = 7; i >= 0; i--)
  1106. {
  1107. /* send register */
  1108. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1109. LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1110. }
  1111. /* send PC */
  1112. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1113. LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1114. target->debug_reason = DBG_REASON_NOTHALTED;
  1115. if (!debug_execution)
  1116. {
  1117. /* registers are now invalid */
  1118. armv4_5_invalidate_core_regs(target);
  1119. target->state = TARGET_RUNNING;
  1120. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  1121. }
  1122. else
  1123. {
  1124. target->state = TARGET_DEBUG_RUNNING;
  1125. target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
  1126. }
  1127. LOG_DEBUG("target resumed");
  1128. return ERROR_OK;
  1129. }
  1130. static int xscale_step_inner(struct target_s *target, int current,
  1131. uint32_t address, int handle_breakpoints)
  1132. {
  1133. armv4_5_common_t *armv4_5 = target->arch_info;
  1134. xscale_common_t *xscale = armv4_5->arch_info;
  1135. uint32_t next_pc;
  1136. int retval;
  1137. int i;
  1138. target->debug_reason = DBG_REASON_SINGLESTEP;
  1139. /* calculate PC of next instruction */
  1140. if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
  1141. {
  1142. uint32_t current_opcode, current_pc;
  1143. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1144. target_read_u32(target, current_pc, &current_opcode);
  1145. LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
  1146. return retval;
  1147. }
  1148. LOG_DEBUG("enable single-step");
  1149. if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
  1150. return retval;
  1151. /* restore banked registers */
  1152. if ((retval = xscale_restore_context(target)) != ERROR_OK)
  1153. return retval;
  1154. /* send resume request (command 0x30 or 0x31)
  1155. * clean the trace buffer if it is to be enabled (0x62) */
  1156. if (xscale->trace.buffer_enabled)
  1157. {
  1158. if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
  1159. return retval;
  1160. if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
  1161. return retval;
  1162. }
  1163. else
  1164. if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
  1165. return retval;
  1166. /* send CPSR */
  1167. if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32))) != ERROR_OK)
  1168. return retval;
  1169. LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1170. for (i = 7; i >= 0; i--)
  1171. {
  1172. /* send register */
  1173. if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
  1174. return retval;
  1175. LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1176. }
  1177. /* send PC */
  1178. if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
  1179. return retval;
  1180. LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1181. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  1182. /* registers are now invalid */
  1183. if ((retval = armv4_5_invalidate_core_regs(target)) != ERROR_OK)
  1184. return retval;
  1185. /* wait for and process debug entry */
  1186. if ((retval = xscale_debug_entry(target)) != ERROR_OK)
  1187. return retval;
  1188. LOG_DEBUG("disable single-step");
  1189. if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
  1190. return retval;
  1191. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  1192. return ERROR_OK;
  1193. }
  1194. static int xscale_step(struct target_s *target, int current,
  1195. uint32_t address, int handle_breakpoints)
  1196. {
  1197. armv4_5_common_t *armv4_5 = target->arch_info;
  1198. breakpoint_t *breakpoint = target->breakpoints;
  1199. uint32_t current_pc;
  1200. int retval;
  1201. if (target->state != TARGET_HALTED)
  1202. {
  1203. LOG_WARNING("target not halted");
  1204. return ERROR_TARGET_NOT_HALTED;
  1205. }
  1206. /* current = 1: continue on current pc, otherwise continue at <address> */
  1207. if (!current)
  1208. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
  1209. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1210. /* if we're at the reset vector, we have to simulate the step */
  1211. if (current_pc == 0x0)
  1212. {
  1213. if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
  1214. return retval;
  1215. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1216. target->debug_reason = DBG_REASON_SINGLESTEP;
  1217. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  1218. return ERROR_OK;
  1219. }
  1220. /* the front-end may request us not to handle breakpoints */
  1221. if (handle_breakpoints)
  1222. if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
  1223. {
  1224. if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
  1225. return retval;
  1226. }
  1227. retval = xscale_step_inner(target, current, address, handle_breakpoints);
  1228. if (breakpoint)
  1229. {
  1230. xscale_set_breakpoint(target, breakpoint);
  1231. }
  1232. LOG_DEBUG("target stepped");
  1233. return ERROR_OK;
  1234. }
  1235. static int xscale_assert_reset(target_t *target)
  1236. {
  1237. armv4_5_common_t *armv4_5 = target->arch_info;
  1238. xscale_common_t *xscale = armv4_5->arch_info;
  1239. LOG_DEBUG("target->state: %s",
  1240. target_state_name(target));
  1241. /* select DCSR instruction (set endstate to R-T-I to ensure we don't
  1242. * end up in T-L-R, which would reset JTAG
  1243. */
  1244. jtag_set_end_state(TAP_IDLE);
  1245. xscale_jtag_set_instr(target->tap, XSCALE_SELDCSR);
  1246. /* set Hold reset, Halt mode and Trap Reset */
  1247. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
  1248. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
  1249. xscale_write_dcsr(target, 1, 0);
  1250. /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
  1251. xscale_jtag_set_instr(target->tap, 0x7f);
  1252. jtag_execute_queue();
  1253. /* assert reset */
  1254. jtag_add_reset(0, 1);
  1255. /* sleep 1ms, to be sure we fulfill any requirements */
  1256. jtag_add_sleep(1000);
  1257. jtag_execute_queue();
  1258. target->state = TARGET_RESET;
  1259. if (target->reset_halt)
  1260. {
  1261. int retval;
  1262. if ((retval = target_halt(target)) != ERROR_OK)
  1263. return retval;
  1264. }
  1265. return ERROR_OK;
  1266. }
  1267. static int xscale_deassert_reset(target_t *target)
  1268. {
  1269. armv4_5_common_t *armv4_5 = target->arch_info;
  1270. xscale_common_t *xscale = armv4_5->arch_info;
  1271. breakpoint_t *breakpoint = target->breakpoints;
  1272. LOG_DEBUG("-");
  1273. xscale->ibcr_available = 2;
  1274. xscale->ibcr0_used = 0;
  1275. xscale->ibcr1_used = 0;
  1276. xscale->dbr_available = 2;
  1277. xscale->dbr0_used = 0;
  1278. xscale->dbr1_used = 0;
  1279. /* mark all hardware breakpoints as unset */
  1280. while (breakpoint)
  1281. {
  1282. if (breakpoint->type == BKPT_HARD)
  1283. {
  1284. breakpoint->set = 0;
  1285. }
  1286. breakpoint = breakpoint->next;
  1287. }
  1288. armv4_5_invalidate_core_regs(target);
  1289. /* FIXME mark hardware watchpoints got unset too. Also,
  1290. * at least some of the XScale registers are invalid...
  1291. */
  1292. /*
  1293. * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
  1294. * contents got invalidated. Safer to force that, so writing new
  1295. * contents can't ever fail..
  1296. */
  1297. {
  1298. uint32_t address;
  1299. unsigned buf_cnt;
  1300. const uint8_t *buffer = xscale_debug_handler;
  1301. int retval;
  1302. /* release SRST */
  1303. jtag_add_reset(0, 0);
  1304. /* wait 300ms; 150 and 100ms were not enough */
  1305. jtag_add_sleep(300*1000);
  1306. jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
  1307. jtag_execute_queue();
  1308. /* set Hold reset, Halt mode and Trap Reset */
  1309. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
  1310. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
  1311. xscale_write_dcsr(target, 1, 0);
  1312. /* Load the debug handler into the mini-icache. Since
  1313. * it's using halt mode (not monitor mode), it runs in
  1314. * "Special Debug State" for access to registers, memory,
  1315. * coprocessors, trace data, etc.
  1316. */
  1317. address = xscale->handler_address;
  1318. for (unsigned binary_size = sizeof xscale_debug_handler - 1;
  1319. binary_size > 0;
  1320. binary_size -= buf_cnt, buffer += buf_cnt)
  1321. {
  1322. uint32_t cache_line[8];
  1323. unsigned i;
  1324. buf_cnt = binary_size;
  1325. if (buf_cnt > 32)
  1326. buf_cnt = 32;
  1327. for (i = 0; i < buf_cnt; i += 4)
  1328. {
  1329. /* convert LE buffer to host-endian uint32_t */
  1330. cache_line[i / 4] = le_to_h_u32(&buffer[i]);
  1331. }
  1332. for (; i < 32; i += 4)
  1333. {
  1334. cache_line[i / 4] = 0xe1a08008;
  1335. }
  1336. /* only load addresses other than the reset vectors */
  1337. if ((address % 0x400) != 0x0)
  1338. {
  1339. retval = xscale_load_ic(target, address,
  1340. cache_line);
  1341. if (retval != ERROR_OK)
  1342. return retval;
  1343. }
  1344. address += buf_cnt;
  1345. };
  1346. retval = xscale_load_ic(target, 0x0,
  1347. xscale->low_vectors);
  1348. if (retval != ERROR_OK)
  1349. return retval;
  1350. retval = xscale_load_ic(target, 0xffff0000,
  1351. xscale->high_vectors);
  1352. if (retval != ERROR_OK)
  1353. return retval;
  1354. jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
  1355. jtag_add_sleep(100000);
  1356. /* set Hold reset, Halt mode and Trap Reset */
  1357. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
  1358. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
  1359. xscale_write_dcsr(target, 1, 0);
  1360. /* clear Hold reset to let the target run (should enter debug handler) */
  1361. xscale_write_dcsr(target, 0, 1);
  1362. target->state = TARGET_RUNNING;
  1363. if (!target->reset_halt)
  1364. {
  1365. jtag_add_sleep(10000);
  1366. /* we should have entered debug now */
  1367. xscale_debug_entry(target);
  1368. target->state = TARGET_HALTED;
  1369. /* resume the target */
  1370. xscale_resume(target, 1, 0x0, 1, 0);
  1371. }
  1372. }
  1373. return ERROR_OK;
  1374. }
  1375. static int xscale_read_core_reg(struct target_s *target, int num,
  1376. enum armv4_5_mode mode)
  1377. {
  1378. LOG_ERROR("not implemented");
  1379. return ERROR_OK;
  1380. }
  1381. static int xscale_write_core_reg(struct target_s *target, int num,
  1382. enum armv4_5_mode mode, uint32_t value)
  1383. {
  1384. LOG_ERROR("not implemented");
  1385. return ERROR_OK;
  1386. }
  1387. static int xscale_full_context(target_t *target)
  1388. {
  1389. armv4_5_common_t *armv4_5 = target->arch_info;
  1390. uint32_t *buffer;
  1391. int i, j;
  1392. LOG_DEBUG("-");
  1393. if (target->state != TARGET_HALTED)
  1394. {
  1395. LOG_WARNING("target not halted");
  1396. return ERROR_TARGET_NOT_HALTED;
  1397. }
  1398. buffer = malloc(4 * 8);
  1399. /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
  1400. * we can't enter User mode on an XScale (unpredictable),
  1401. * but User shares registers with SYS
  1402. */
  1403. for (i = 1; i < 7; i++)
  1404. {
  1405. int valid = 1;
  1406. /* check if there are invalid registers in the current mode
  1407. */
  1408. for (j = 0; j <= 16; j++)
  1409. {
  1410. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
  1411. valid = 0;
  1412. }
  1413. if (!valid)
  1414. {
  1415. uint32_t tmp_cpsr;
  1416. /* request banked registers */
  1417. xscale_send_u32(target, 0x0);
  1418. tmp_cpsr = 0x0;
  1419. tmp_cpsr |= armv4_5_number_to_mode(i);
  1420. tmp_cpsr |= 0xc0; /* I/F bits */
  1421. /* send CPSR for desired mode */
  1422. xscale_send_u32(target, tmp_cpsr);
  1423. /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
  1424. if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
  1425. {
  1426. xscale_receive(target, buffer, 8);
  1427. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
  1428. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
  1429. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
  1430. }
  1431. else
  1432. {
  1433. xscale_receive(target, buffer, 7);
  1434. }
  1435. /* move data from buffer to register cache */
  1436. for (j = 8; j <= 14; j++)
  1437. {
  1438. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
  1439. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
  1440. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
  1441. }
  1442. }
  1443. }
  1444. free(buffer);
  1445. return ERROR_OK;
  1446. }
  1447. static int xscale_restore_context(target_t *target)
  1448. {
  1449. armv4_5_common_t *armv4_5 = target->arch_info;
  1450. int i, j;
  1451. if (target->state != TARGET_HALTED)
  1452. {
  1453. LOG_WARNING("target not halted");
  1454. return ERROR_TARGET_NOT_HALTED;
  1455. }
  1456. /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
  1457. * we can't enter User mode on an XScale (unpredictable),
  1458. * but User shares registers with SYS
  1459. */
  1460. for (i = 1; i < 7; i++)
  1461. {
  1462. int dirty = 0;
  1463. /* check if there are invalid registers in the current mode
  1464. */
  1465. for (j = 8; j <= 14; j++)
  1466. {
  1467. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
  1468. dirty = 1;
  1469. }
  1470. /* if not USR/SYS, check if the SPSR needs to be written */
  1471. if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
  1472. {
  1473. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
  1474. dirty = 1;
  1475. }
  1476. if (dirty)
  1477. {
  1478. uint32_t tmp_cpsr;
  1479. /* send banked registers */
  1480. xscale_send_u32(target, 0x1);
  1481. tmp_cpsr = 0x0;
  1482. tmp_cpsr |= armv4_5_number_to_mode(i);
  1483. tmp_cpsr |= 0xc0; /* I/F bits */
  1484. /* send CPSR for desired mode */
  1485. xscale_send_u32(target, tmp_cpsr);
  1486. /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
  1487. for (j = 8; j <= 14; j++)
  1488. {
1489. xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
  1490. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
  1491. }
  1492. if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
  1493. {
1494. xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
  1495. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
  1496. }
  1497. }
  1498. }
  1499. return ERROR_OK;
  1500. }
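/* Memory read through the debug handler: the command byte is 0x10 ORed
 * with the access size (1, 2 or 4), followed by the base address and the
 * element count. The handler always returns one 32-bit word per element,
 * which is then narrowed to the requested size in target byte order.
 * Afterwards DCSR bit 5 (Sticky Abort) is checked; if set, it is cleared
 * with command 0x60 and a data abort is reported to the caller.
 */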
  1501. static int xscale_read_memory(struct target_s *target, uint32_t address,
  1502. uint32_t size, uint32_t count, uint8_t *buffer)
  1503. {
  1504. armv4_5_common_t *armv4_5 = target->arch_info;
  1505. xscale_common_t *xscale = armv4_5->arch_info;
  1506. uint32_t *buf32;
  1507. uint32_t i;
  1508. int retval;
  1509. LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
  1510. if (target->state != TARGET_HALTED)
  1511. {
  1512. LOG_WARNING("target not halted");
  1513. return ERROR_TARGET_NOT_HALTED;
  1514. }
  1515. /* sanitize arguments */
  1516. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1517. return ERROR_INVALID_ARGUMENTS;
  1518. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1519. return ERROR_TARGET_UNALIGNED_ACCESS;
  1520. /* send memory read request (command 0x1n, n: access size) */
  1521. if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
  1522. return retval;
  1523. /* send base address for read request */
  1524. if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
  1525. return retval;
  1526. /* send number of requested data words */
  1527. if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
  1528. return retval;
  1529. /* receive data from target (count times 32-bit words in host endianness) */
  1530. buf32 = malloc(4 * count);
1531. if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1532. { free(buf32); return retval; }
  1533. /* extract data from host-endian buffer into byte stream */
  1534. for (i = 0; i < count; i++)
  1535. {
  1536. switch (size)
  1537. {
  1538. case 4:
  1539. target_buffer_set_u32(target, buffer, buf32[i]);
  1540. buffer += 4;
  1541. break;
  1542. case 2:
  1543. target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
  1544. buffer += 2;
  1545. break;
  1546. case 1:
  1547. *buffer++ = buf32[i] & 0xff;
  1548. break;
  1549. default:
  1550. LOG_ERROR("should never get here");
  1551. exit(-1);
  1552. }
  1553. }
  1554. free(buf32);
  1555. /* examine DCSR, to see if Sticky Abort (SA) got set */
  1556. if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
  1557. return retval;
  1558. if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
  1559. {
  1560. /* clear SA bit */
  1561. if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
  1562. return retval;
  1563. return ERROR_TARGET_DATA_ABORT;
  1564. }
  1565. return ERROR_OK;
  1566. }
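/* Memory write through the debug handler: command 0x20 ORed with the
 * access size, then base address and element count, followed by the data
 * itself (sent via xscale_send()). The Sticky Abort handling mirrors
 * xscale_read_memory().
 */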
  1567. static int xscale_write_memory(struct target_s *target, uint32_t address,
  1568. uint32_t size, uint32_t count, uint8_t *buffer)
  1569. {
  1570. armv4_5_common_t *armv4_5 = target->arch_info;
  1571. xscale_common_t *xscale = armv4_5->arch_info;
  1572. int retval;
  1573. LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
  1574. if (target->state != TARGET_HALTED)
  1575. {
  1576. LOG_WARNING("target not halted");
  1577. return ERROR_TARGET_NOT_HALTED;
  1578. }
  1579. /* sanitize arguments */
  1580. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1581. return ERROR_INVALID_ARGUMENTS;
  1582. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1583. return ERROR_TARGET_UNALIGNED_ACCESS;
  1584. /* send memory write request (command 0x2n, n: access size) */
  1585. if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
  1586. return retval;
1587. /* send base address for write request */
1588. if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1589. return retval;
1590. /* send number of data words to be written */
  1591. if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
  1592. return retval;
  1593. /* extract data from host-endian buffer into byte stream */
  1594. #if 0
  1595. for (i = 0; i < count; i++)
  1596. {
  1597. switch (size)
  1598. {
  1599. case 4:
  1600. value = target_buffer_get_u32(target, buffer);
  1601. xscale_send_u32(target, value);
  1602. buffer += 4;
  1603. break;
  1604. case 2:
  1605. value = target_buffer_get_u16(target, buffer);
  1606. xscale_send_u32(target, value);
  1607. buffer += 2;
  1608. break;
  1609. case 1:
  1610. value = *buffer;
  1611. xscale_send_u32(target, value);
  1612. buffer += 1;
  1613. break;
  1614. default:
  1615. LOG_ERROR("should never get here");
  1616. exit(-1);
  1617. }
  1618. }
  1619. #endif
  1620. if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
  1621. return retval;
  1622. /* examine DCSR, to see if Sticky Abort (SA) got set */
  1623. if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
  1624. return retval;
  1625. if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
  1626. {
  1627. /* clear SA bit */
  1628. if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
  1629. return retval;
  1630. return ERROR_TARGET_DATA_ABORT;
  1631. }
  1632. return ERROR_OK;
  1633. }
  1634. static int xscale_bulk_write_memory(target_t *target, uint32_t address,
  1635. uint32_t count, uint8_t *buffer)
  1636. {
  1637. return xscale_write_memory(target, address, 4, count, buffer);
  1638. }
  1639. static uint32_t xscale_get_ttb(target_t *target)
  1640. {
  1641. armv4_5_common_t *armv4_5 = target->arch_info;
  1642. xscale_common_t *xscale = armv4_5->arch_info;
  1643. uint32_t ttb;
  1644. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
  1645. ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
  1646. return ttb;
  1647. }
  1648. static void xscale_disable_mmu_caches(target_t *target, int mmu,
  1649. int d_u_cache, int i_cache)
  1650. {
  1651. armv4_5_common_t *armv4_5 = target->arch_info;
  1652. xscale_common_t *xscale = armv4_5->arch_info;
  1653. uint32_t cp15_control;
  1654. /* read cp15 control register */
  1655. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  1656. cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  1657. if (mmu)
  1658. cp15_control &= ~0x1U;
  1659. if (d_u_cache)
  1660. {
  1661. /* clean DCache */
  1662. xscale_send_u32(target, 0x50);
  1663. xscale_send_u32(target, xscale->cache_clean_address);
  1664. /* invalidate DCache */
  1665. xscale_send_u32(target, 0x51);
  1666. cp15_control &= ~0x4U;
  1667. }
  1668. if (i_cache)
  1669. {
  1670. /* invalidate ICache */
  1671. xscale_send_u32(target, 0x52);
  1672. cp15_control &= ~0x1000U;
  1673. }
  1674. /* write new cp15 control register */
  1675. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
  1676. /* execute cpwait to ensure outstanding operations complete */
  1677. xscale_send_u32(target, 0x53);
  1678. }
  1679. static void xscale_enable_mmu_caches(target_t *target, int mmu,
  1680. int d_u_cache, int i_cache)
  1681. {
  1682. armv4_5_common_t *armv4_5 = target->arch_info;
  1683. xscale_common_t *xscale = armv4_5->arch_info;
  1684. uint32_t cp15_control;
  1685. /* read cp15 control register */
  1686. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  1687. cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  1688. if (mmu)
  1689. cp15_control |= 0x1U;
  1690. if (d_u_cache)
  1691. cp15_control |= 0x4U;
  1692. if (i_cache)
  1693. cp15_control |= 0x1000U;
  1694. /* write new cp15 control register */
  1695. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
  1696. /* execute cpwait to ensure outstanding operations complete */
  1697. xscale_send_u32(target, 0x53);
  1698. }
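/* Hardware breakpoints use the two instruction breakpoint registers
 * IBCR0/IBCR1: the breakpoint address with bit 0 set enables the
 * comparator, and breakpoint->set records which comparator (1 or 2) is
 * in use. Software breakpoints save the original opcode and overwrite
 * it with the ARM or Thumb BKPT pattern.
 */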
  1699. static int xscale_set_breakpoint(struct target_s *target,
  1700. breakpoint_t *breakpoint)
  1701. {
  1702. int retval;
  1703. armv4_5_common_t *armv4_5 = target->arch_info;
  1704. xscale_common_t *xscale = armv4_5->arch_info;
  1705. if (target->state != TARGET_HALTED)
  1706. {
  1707. LOG_WARNING("target not halted");
  1708. return ERROR_TARGET_NOT_HALTED;
  1709. }
  1710. if (breakpoint->set)
  1711. {
  1712. LOG_WARNING("breakpoint already set");
  1713. return ERROR_OK;
  1714. }
  1715. if (breakpoint->type == BKPT_HARD)
  1716. {
  1717. uint32_t value = breakpoint->address | 1;
  1718. if (!xscale->ibcr0_used)
  1719. {
  1720. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
  1721. xscale->ibcr0_used = 1;
  1722. breakpoint->set = 1; /* breakpoint set on first breakpoint register */
  1723. }
  1724. else if (!xscale->ibcr1_used)
  1725. {
  1726. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
  1727. xscale->ibcr1_used = 1;
  1728. breakpoint->set = 2; /* breakpoint set on second breakpoint register */
  1729. }
  1730. else
  1731. {
  1732. LOG_ERROR("BUG: no hardware comparator available");
  1733. return ERROR_OK;
  1734. }
  1735. }
  1736. else if (breakpoint->type == BKPT_SOFT)
  1737. {
  1738. if (breakpoint->length == 4)
  1739. {
  1740. /* keep the original instruction in target endianness */
  1741. if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
  1742. {
  1743. return retval;
  1744. }
1745. /* write the ARM breakpoint instruction in target endianness (xscale->arm_bkpt is host endian) */
  1746. if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
  1747. {
  1748. return retval;
  1749. }
  1750. }
  1751. else
  1752. {
  1753. /* keep the original instruction in target endianness */
  1754. if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
  1755. {
  1756. return retval;
  1757. }
1758. /* write the Thumb breakpoint instruction in target endianness (xscale->thumb_bkpt is host endian) */
  1759. if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
  1760. {
  1761. return retval;
  1762. }
  1763. }
  1764. breakpoint->set = 1;
  1765. }
  1766. return ERROR_OK;
  1767. }
  1768. static int xscale_add_breakpoint(struct target_s *target,
  1769. breakpoint_t *breakpoint)
  1770. {
  1771. armv4_5_common_t *armv4_5 = target->arch_info;
  1772. xscale_common_t *xscale = armv4_5->arch_info;
  1773. if (target->state != TARGET_HALTED)
  1774. {
  1775. LOG_WARNING("target not halted");
  1776. return ERROR_TARGET_NOT_HALTED;
  1777. }
  1778. if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
  1779. {
  1780. LOG_INFO("no breakpoint unit available for hardware breakpoint");
  1781. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1782. }
  1783. if ((breakpoint->length != 2) && (breakpoint->length != 4))
  1784. {
  1785. LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
  1786. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1787. }
  1788. if (breakpoint->type == BKPT_HARD)
  1789. {
  1790. xscale->ibcr_available--;
  1791. }
  1792. return ERROR_OK;
  1793. }
  1794. static int xscale_unset_breakpoint(struct target_s *target,
  1795. breakpoint_t *breakpoint)
  1796. {
  1797. int retval;
  1798. armv4_5_common_t *armv4_5 = target->arch_info;
  1799. xscale_common_t *xscale = armv4_5->arch_info;
  1800. if (target->state != TARGET_HALTED)
  1801. {
  1802. LOG_WARNING("target not halted");
  1803. return ERROR_TARGET_NOT_HALTED;
  1804. }
  1805. if (!breakpoint->set)
  1806. {
  1807. LOG_WARNING("breakpoint not set");
  1808. return ERROR_OK;
  1809. }
  1810. if (breakpoint->type == BKPT_HARD)
  1811. {
  1812. if (breakpoint->set == 1)
  1813. {
  1814. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
  1815. xscale->ibcr0_used = 0;
  1816. }
  1817. else if (breakpoint->set == 2)
  1818. {
  1819. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
  1820. xscale->ibcr1_used = 0;
  1821. }
  1822. breakpoint->set = 0;
  1823. }
  1824. else
  1825. {
  1826. /* restore original instruction (kept in target endianness) */
  1827. if (breakpoint->length == 4)
  1828. {
  1829. if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
  1830. {
  1831. return retval;
  1832. }
  1833. }
  1834. else
  1835. {
  1836. if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
  1837. {
  1838. return retval;
  1839. }
  1840. }
  1841. breakpoint->set = 0;
  1842. }
  1843. return ERROR_OK;
  1844. }
  1845. static int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  1846. {
  1847. armv4_5_common_t *armv4_5 = target->arch_info;
  1848. xscale_common_t *xscale = armv4_5->arch_info;
  1849. if (target->state != TARGET_HALTED)
  1850. {
  1851. LOG_WARNING("target not halted");
  1852. return ERROR_TARGET_NOT_HALTED;
  1853. }
  1854. if (breakpoint->set)
  1855. {
  1856. xscale_unset_breakpoint(target, breakpoint);
  1857. }
  1858. if (breakpoint->type == BKPT_HARD)
  1859. xscale->ibcr_available++;
  1860. return ERROR_OK;
  1861. }
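/* Watchpoints use the two data breakpoint registers DBR0/DBR1 for the
 * address; DBCON holds a two-bit enable/type field per register
 * (bits 1:0 for DBR0, bits 3:2 for DBR1) selecting write, any-access or
 * read monitoring.
 */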
  1862. static int xscale_set_watchpoint(struct target_s *target,
  1863. watchpoint_t *watchpoint)
  1864. {
  1865. armv4_5_common_t *armv4_5 = target->arch_info;
  1866. xscale_common_t *xscale = armv4_5->arch_info;
  1867. uint8_t enable = 0;
  1868. reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
1869. uint32_t dbcon_value;
  1870. if (target->state != TARGET_HALTED)
  1871. {
  1872. LOG_WARNING("target not halted");
  1873. return ERROR_TARGET_NOT_HALTED;
  1874. }
1875. xscale_get_reg(dbcon); dbcon_value = buf_get_u32(dbcon->value, 0, 32);
  1876. switch (watchpoint->rw)
  1877. {
  1878. case WPT_READ:
  1879. enable = 0x3;
  1880. break;
  1881. case WPT_ACCESS:
  1882. enable = 0x2;
  1883. break;
  1884. case WPT_WRITE:
  1885. enable = 0x1;
  1886. break;
  1887. default:
  1888. LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
  1889. }
  1890. if (!xscale->dbr0_used)
  1891. {
  1892. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
  1893. dbcon_value |= enable;
  1894. xscale_set_reg_u32(dbcon, dbcon_value);
  1895. watchpoint->set = 1;
  1896. xscale->dbr0_used = 1;
  1897. }
  1898. else if (!xscale->dbr1_used)
  1899. {
  1900. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
  1901. dbcon_value |= enable << 2;
  1902. xscale_set_reg_u32(dbcon, dbcon_value);
  1903. watchpoint->set = 2;
  1904. xscale->dbr1_used = 1;
  1905. }
  1906. else
  1907. {
  1908. LOG_ERROR("BUG: no hardware comparator available");
  1909. return ERROR_OK;
  1910. }
  1911. return ERROR_OK;
  1912. }
  1913. static int xscale_add_watchpoint(struct target_s *target,
  1914. watchpoint_t *watchpoint)
  1915. {
  1916. armv4_5_common_t *armv4_5 = target->arch_info;
  1917. xscale_common_t *xscale = armv4_5->arch_info;
  1918. if (target->state != TARGET_HALTED)
  1919. {
  1920. LOG_WARNING("target not halted");
  1921. return ERROR_TARGET_NOT_HALTED;
  1922. }
  1923. if (xscale->dbr_available < 1)
  1924. {
  1925. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1926. }
  1927. if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
  1928. {
  1929. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1930. }
  1931. xscale->dbr_available--;
  1932. return ERROR_OK;
  1933. }
  1934. static int xscale_unset_watchpoint(struct target_s *target,
  1935. watchpoint_t *watchpoint)
  1936. {
  1937. armv4_5_common_t *armv4_5 = target->arch_info;
  1938. xscale_common_t *xscale = armv4_5->arch_info;
  1939. reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
  1940. uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
  1941. if (target->state != TARGET_HALTED)
  1942. {
  1943. LOG_WARNING("target not halted");
  1944. return ERROR_TARGET_NOT_HALTED;
  1945. }
  1946. if (!watchpoint->set)
  1947. {
  1948. LOG_WARNING("breakpoint not set");
  1949. return ERROR_OK;
  1950. }
  1951. if (watchpoint->set == 1)
  1952. {
  1953. dbcon_value &= ~0x3;
  1954. xscale_set_reg_u32(dbcon, dbcon_value);
  1955. xscale->dbr0_used = 0;
  1956. }
  1957. else if (watchpoint->set == 2)
  1958. {
  1959. dbcon_value &= ~0xc;
  1960. xscale_set_reg_u32(dbcon, dbcon_value);
  1961. xscale->dbr1_used = 0;
  1962. }
  1963. watchpoint->set = 0;
  1964. return ERROR_OK;
  1965. }
  1966. static int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  1967. {
  1968. armv4_5_common_t *armv4_5 = target->arch_info;
  1969. xscale_common_t *xscale = armv4_5->arch_info;
  1970. if (target->state != TARGET_HALTED)
  1971. {
  1972. LOG_WARNING("target not halted");
  1973. return ERROR_TARGET_NOT_HALTED;
  1974. }
  1975. if (watchpoint->set)
  1976. {
  1977. xscale_unset_watchpoint(target, watchpoint);
  1978. }
  1979. xscale->dbr_available++;
  1980. return ERROR_OK;
  1981. }
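/* Register access: DCSR, TX and RX have dedicated JTAG data registers
 * and are read/written directly. All other XScale debug registers are
 * proxied through the debug handler, using command 0x40 (read, value
 * returned via TX) and command 0x41 (write).
 */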
  1982. static int xscale_get_reg(reg_t *reg)
  1983. {
  1984. xscale_reg_t *arch_info = reg->arch_info;
  1985. target_t *target = arch_info->target;
  1986. armv4_5_common_t *armv4_5 = target->arch_info;
  1987. xscale_common_t *xscale = armv4_5->arch_info;
  1988. /* DCSR, TX and RX are accessible via JTAG */
  1989. if (strcmp(reg->name, "XSCALE_DCSR") == 0)
  1990. {
  1991. return xscale_read_dcsr(arch_info->target);
  1992. }
  1993. else if (strcmp(reg->name, "XSCALE_TX") == 0)
  1994. {
  1995. /* 1 = consume register content */
  1996. return xscale_read_tx(arch_info->target, 1);
  1997. }
  1998. else if (strcmp(reg->name, "XSCALE_RX") == 0)
  1999. {
  2000. /* can't read from RX register (host -> debug handler) */
  2001. return ERROR_OK;
  2002. }
  2003. else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
  2004. {
  2005. /* can't (explicitly) read from TXRXCTRL register */
  2006. return ERROR_OK;
  2007. }
2008. else /* Other DBG registers have to be transferred by the debug handler */
  2009. {
  2010. /* send CP read request (command 0x40) */
  2011. xscale_send_u32(target, 0x40);
  2012. /* send CP register number */
  2013. xscale_send_u32(target, arch_info->dbg_handler_number);
  2014. /* read register value */
  2015. xscale_read_tx(target, 1);
  2016. buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
  2017. reg->dirty = 0;
  2018. reg->valid = 1;
  2019. }
  2020. return ERROR_OK;
  2021. }
  2022. static int xscale_set_reg(reg_t *reg, uint8_t* buf)
  2023. {
  2024. xscale_reg_t *arch_info = reg->arch_info;
  2025. target_t *target = arch_info->target;
  2026. armv4_5_common_t *armv4_5 = target->arch_info;
  2027. xscale_common_t *xscale = armv4_5->arch_info;
  2028. uint32_t value = buf_get_u32(buf, 0, 32);
  2029. /* DCSR, TX and RX are accessible via JTAG */
  2030. if (strcmp(reg->name, "XSCALE_DCSR") == 0)
  2031. {
  2032. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
  2033. return xscale_write_dcsr(arch_info->target, -1, -1);
  2034. }
  2035. else if (strcmp(reg->name, "XSCALE_RX") == 0)
  2036. {
  2037. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
  2038. return xscale_write_rx(arch_info->target);
  2039. }
  2040. else if (strcmp(reg->name, "XSCALE_TX") == 0)
  2041. {
  2042. /* can't write to TX register (debug-handler -> host) */
  2043. return ERROR_OK;
  2044. }
  2045. else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
  2046. {
  2047. /* can't (explicitly) write to TXRXCTRL register */
  2048. return ERROR_OK;
  2049. }
2050. else /* Other DBG registers have to be transferred by the debug handler */
  2051. {
  2052. /* send CP write request (command 0x41) */
  2053. xscale_send_u32(target, 0x41);
  2054. /* send CP register number */
  2055. xscale_send_u32(target, arch_info->dbg_handler_number);
  2056. /* send CP register value */
  2057. xscale_send_u32(target, value);
  2058. buf_set_u32(reg->value, 0, 32, value);
  2059. }
  2060. return ERROR_OK;
  2061. }
  2062. static int xscale_write_dcsr_sw(target_t *target, uint32_t value)
  2063. {
  2064. /* get pointers to arch-specific information */
  2065. armv4_5_common_t *armv4_5 = target->arch_info;
  2066. xscale_common_t *xscale = armv4_5->arch_info;
  2067. reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
  2068. xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
  2069. /* send CP write request (command 0x41) */
  2070. xscale_send_u32(target, 0x41);
  2071. /* send CP register number */
  2072. xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
  2073. /* send CP register value */
  2074. xscale_send_u32(target, value);
  2075. buf_set_u32(dcsr->value, 0, 32, value);
  2076. return ERROR_OK;
  2077. }
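/* Read out the trace buffer with command 0x61. The handler returns 258
 * words: 256 trace buffer entries followed by the two checkpoint
 * registers. The buffer is scanned backwards so the (up to) four address
 * bytes preceding each indirect-branch message can be flagged and later
 * reassembled into a branch target by xscale_branch_address().
 */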
  2078. static int xscale_read_trace(target_t *target)
  2079. {
  2080. /* get pointers to arch-specific information */
  2081. armv4_5_common_t *armv4_5 = target->arch_info;
  2082. xscale_common_t *xscale = armv4_5->arch_info;
  2083. xscale_trace_data_t **trace_data_p;
  2084. /* 258 words from debug handler
  2085. * 256 trace buffer entries
  2086. * 2 checkpoint addresses
  2087. */
  2088. uint32_t trace_buffer[258];
  2089. int is_address[256];
  2090. int i, j;
  2091. if (target->state != TARGET_HALTED)
  2092. {
  2093. LOG_WARNING("target must be stopped to read trace data");
  2094. return ERROR_TARGET_NOT_HALTED;
  2095. }
  2096. /* send read trace buffer command (command 0x61) */
  2097. xscale_send_u32(target, 0x61);
  2098. /* receive trace buffer content */
  2099. xscale_receive(target, trace_buffer, 258);
  2100. /* parse buffer backwards to identify address entries */
  2101. for (i = 255; i >= 0; i--)
  2102. {
  2103. is_address[i] = 0;
  2104. if (((trace_buffer[i] & 0xf0) == 0x90) ||
  2105. ((trace_buffer[i] & 0xf0) == 0xd0))
  2106. {
2107. if (i >= 1)
2108. is_address[--i] = 1;
2109. if (i >= 1)
2110. is_address[--i] = 1;
2111. if (i >= 1)
2112. is_address[--i] = 1;
2113. if (i >= 1)
2114. is_address[--i] = 1;
  2115. }
  2116. }
  2117. /* search first non-zero entry */
  2118. for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
  2119. ;
  2120. if (j == 256)
  2121. {
  2122. LOG_DEBUG("no trace data collected");
  2123. return ERROR_XSCALE_NO_TRACE_DATA;
  2124. }
  2125. for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
  2126. ;
  2127. *trace_data_p = malloc(sizeof(xscale_trace_data_t));
  2128. (*trace_data_p)->next = NULL;
  2129. (*trace_data_p)->chkpt0 = trace_buffer[256];
  2130. (*trace_data_p)->chkpt1 = trace_buffer[257];
  2131. (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  2132. (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
  2133. (*trace_data_p)->depth = 256 - j;
  2134. for (i = j; i < 256; i++)
  2135. {
  2136. (*trace_data_p)->entries[i - j].data = trace_buffer[i];
  2137. if (is_address[i])
  2138. (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
  2139. else
  2140. (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
  2141. }
  2142. return ERROR_OK;
  2143. }
  2144. static int xscale_read_instruction(target_t *target,
  2145. arm_instruction_t *instruction)
  2146. {
  2147. /* get pointers to arch-specific information */
  2148. armv4_5_common_t *armv4_5 = target->arch_info;
  2149. xscale_common_t *xscale = armv4_5->arch_info;
  2150. int i;
  2151. int section = -1;
  2152. uint32_t size_read;
  2153. uint32_t opcode;
  2154. int retval;
  2155. if (!xscale->trace.image)
  2156. return ERROR_TRACE_IMAGE_UNAVAILABLE;
  2157. /* search for the section the current instruction belongs to */
  2158. for (i = 0; i < xscale->trace.image->num_sections; i++)
  2159. {
  2160. if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
  2161. (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
  2162. {
  2163. section = i;
  2164. break;
  2165. }
  2166. }
  2167. if (section == -1)
  2168. {
  2169. /* current instruction couldn't be found in the image */
  2170. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2171. }
  2172. if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
  2173. {
  2174. uint8_t buf[4];
  2175. if ((retval = image_read_section(xscale->trace.image, section,
  2176. xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
  2177. 4, buf, &size_read)) != ERROR_OK)
  2178. {
  2179. LOG_ERROR("error while reading instruction: %i", retval);
  2180. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2181. }
  2182. opcode = target_buffer_get_u32(target, buf);
  2183. arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
  2184. }
  2185. else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
  2186. {
  2187. uint8_t buf[2];
  2188. if ((retval = image_read_section(xscale->trace.image, section,
  2189. xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
  2190. 2, buf, &size_read)) != ERROR_OK)
  2191. {
  2192. LOG_ERROR("error while reading instruction: %i", retval);
  2193. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2194. }
  2195. opcode = target_buffer_get_u16(target, buf);
  2196. thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
  2197. }
  2198. else
  2199. {
  2200. LOG_ERROR("BUG: unknown core state encountered");
  2201. exit(-1);
  2202. }
  2203. return ERROR_OK;
  2204. }
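/* Reassemble the 32-bit target of an indirect branch from the four
 * address bytes that precede the branch message in the trace buffer;
 * entry i-1 holds the least significant byte.
 */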
  2205. static int xscale_branch_address(xscale_trace_data_t *trace_data,
  2206. int i, uint32_t *target)
  2207. {
2208. /* if there are fewer than four entries prior to the indirect branch message
2209. * we can't extract the address */
  2210. if (i < 4)
  2211. {
  2212. return -1;
  2213. }
  2214. *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
  2215. (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
  2216. return 0;
  2217. }
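/* Walk the collected trace data and print the reconstructed instruction
 * stream. The high nibble of each trace message encodes its type
 * (exception, direct/indirect branch, checkpointed branch, roll-over);
 * the low nibble counts instructions executed since the previous
 * message, with each roll-over message adding another 16.
 */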
  2218. static int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
  2219. {
  2220. /* get pointers to arch-specific information */
  2221. armv4_5_common_t *armv4_5 = target->arch_info;
  2222. xscale_common_t *xscale = armv4_5->arch_info;
  2223. int next_pc_ok = 0;
  2224. uint32_t next_pc = 0x0;
  2225. xscale_trace_data_t *trace_data = xscale->trace.data;
  2226. int retval;
  2227. while (trace_data)
  2228. {
  2229. int i, chkpt;
  2230. int rollover;
  2231. int branch;
  2232. int exception;
  2233. xscale->trace.core_state = ARMV4_5_STATE_ARM;
  2234. chkpt = 0;
  2235. rollover = 0;
  2236. for (i = 0; i < trace_data->depth; i++)
  2237. {
  2238. next_pc_ok = 0;
  2239. branch = 0;
  2240. exception = 0;
  2241. if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
  2242. continue;
  2243. switch ((trace_data->entries[i].data & 0xf0) >> 4)
  2244. {
  2245. case 0: /* Exceptions */
  2246. case 1:
  2247. case 2:
  2248. case 3:
  2249. case 4:
  2250. case 5:
  2251. case 6:
  2252. case 7:
  2253. exception = (trace_data->entries[i].data & 0x70) >> 4;
  2254. next_pc_ok = 1;
  2255. next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
  2256. command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
  2257. break;
  2258. case 8: /* Direct Branch */
  2259. branch = 1;
  2260. break;
  2261. case 9: /* Indirect Branch */
  2262. branch = 1;
  2263. if (xscale_branch_address(trace_data, i, &next_pc) == 0)
  2264. {
  2265. next_pc_ok = 1;
  2266. }
  2267. break;
  2268. case 13: /* Checkpointed Indirect Branch */
  2269. if (xscale_branch_address(trace_data, i, &next_pc) == 0)
  2270. {
  2271. next_pc_ok = 1;
  2272. if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
  2273. || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
  2274. LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
  2275. }
  2276. /* explicit fall-through */
  2277. case 12: /* Checkpointed Direct Branch */
  2278. branch = 1;
  2279. if (chkpt == 0)
  2280. {
  2281. next_pc_ok = 1;
  2282. next_pc = trace_data->chkpt0;
  2283. chkpt++;
  2284. }
  2285. else if (chkpt == 1)
  2286. {
  2287. next_pc_ok = 1;
2288. next_pc = trace_data->chkpt1;
  2289. chkpt++;
  2290. }
  2291. else
  2292. {
  2293. LOG_WARNING("more than two checkpointed branches encountered");
  2294. }
  2295. break;
  2296. case 15: /* Roll-over */
  2297. rollover++;
  2298. continue;
  2299. default: /* Reserved */
  2300. command_print(cmd_ctx, "--- reserved trace message ---");
  2301. LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
  2302. return ERROR_OK;
  2303. }
  2304. if (xscale->trace.pc_ok)
  2305. {
  2306. int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
  2307. arm_instruction_t instruction;
  2308. if ((exception == 6) || (exception == 7))
  2309. {
  2310. /* IRQ or FIQ exception, no instruction executed */
  2311. executed -= 1;
  2312. }
  2313. while (executed-- >= 0)
  2314. {
  2315. if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
  2316. {
  2317. /* can't continue tracing with no image available */
  2318. if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
  2319. {
  2320. return retval;
  2321. }
  2322. else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
  2323. {
  2324. /* TODO: handle incomplete images */
  2325. }
  2326. }
  2327. /* a precise abort on a load to the PC is included in the incremental
  2328. * word count, other instructions causing data aborts are not included
  2329. */
  2330. if ((executed == 0) && (exception == 4)
  2331. && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
  2332. {
  2333. if ((instruction.type == ARM_LDM)
  2334. && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
  2335. {
  2336. executed--;
  2337. }
  2338. else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
  2339. && (instruction.info.load_store.Rd != 15))
  2340. {
  2341. executed--;
  2342. }
  2343. }
  2344. /* only the last instruction executed
  2345. * (the one that caused the control flow change)
  2346. * could be a taken branch
  2347. */
  2348. if (((executed == -1) && (branch == 1)) &&
  2349. (((instruction.type == ARM_B) ||
  2350. (instruction.type == ARM_BL) ||
  2351. (instruction.type == ARM_BLX)) &&
  2352. (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
  2353. {
  2354. xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
  2355. }
  2356. else
  2357. {
  2358. xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
  2359. }
  2360. command_print(cmd_ctx, "%s", instruction.text);
  2361. }
  2362. rollover = 0;
  2363. }
  2364. if (next_pc_ok)
  2365. {
  2366. xscale->trace.current_pc = next_pc;
  2367. xscale->trace.pc_ok = 1;
  2368. }
  2369. }
  2370. for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
  2371. {
  2372. arm_instruction_t instruction;
  2373. if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
  2374. {
  2375. /* can't continue tracing with no image available */
  2376. if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
  2377. {
  2378. return retval;
  2379. }
  2380. else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
  2381. {
  2382. /* TODO: handle incomplete images */
  2383. }
  2384. }
  2385. command_print(cmd_ctx, "%s", instruction.text);
  2386. }
  2387. trace_data = trace_data->next;
  2388. }
  2389. return ERROR_OK;
  2390. }
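/* Two register caches are chained on the target: the generic ARMv4/5
 * core register cache built by armv4_5_build_reg_cache(), followed by a
 * cache for the XScale debug registers, which are accessed through
 * xscale_get_reg()/xscale_set_reg().
 */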
  2391. static void xscale_build_reg_cache(target_t *target)
  2392. {
  2393. /* get pointers to arch-specific information */
  2394. armv4_5_common_t *armv4_5 = target->arch_info;
  2395. xscale_common_t *xscale = armv4_5->arch_info;
  2396. reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
  2397. xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
  2398. int i;
  2399. int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
  2400. (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
  2401. armv4_5->core_cache = (*cache_p);
  2402. /* register a register arch-type for XScale dbg registers only once */
  2403. if (xscale_reg_arch_type == -1)
  2404. xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
  2405. (*cache_p)->next = malloc(sizeof(reg_cache_t));
  2406. cache_p = &(*cache_p)->next;
  2407. /* fill in values for the xscale reg cache */
  2408. (*cache_p)->name = "XScale registers";
  2409. (*cache_p)->next = NULL;
  2410. (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
  2411. (*cache_p)->num_regs = num_regs;
  2412. for (i = 0; i < num_regs; i++)
  2413. {
  2414. (*cache_p)->reg_list[i].name = xscale_reg_list[i];
  2415. (*cache_p)->reg_list[i].value = calloc(4, 1);
  2416. (*cache_p)->reg_list[i].dirty = 0;
  2417. (*cache_p)->reg_list[i].valid = 0;
  2418. (*cache_p)->reg_list[i].size = 32;
  2419. (*cache_p)->reg_list[i].bitfield_desc = NULL;
  2420. (*cache_p)->reg_list[i].num_bitfields = 0;
  2421. (*cache_p)->reg_list[i].arch_info = &arch_info[i];
  2422. (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
  2423. arch_info[i] = xscale_reg_arch_info[i];
  2424. arch_info[i].target = target;
  2425. }
  2426. xscale->reg_cache = (*cache_p);
  2427. }
  2428. static int xscale_init_target(struct command_context_s *cmd_ctx,
  2429. struct target_s *target)
  2430. {
  2431. xscale_build_reg_cache(target);
  2432. return ERROR_OK;
  2433. }
  2434. static int xscale_quit(void)
  2435. {
  2436. jtag_add_runtest(100, TAP_RESET);
  2437. return ERROR_OK;
  2438. }
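/* One-time per-target setup: if a variant name is given, the JTAG IR
 * length is checked (5 bits for PXA25x/PXA26x, 7 bits for PXA27x and
 * IXP4xx parts) and corrected if the configuration disagrees. The
 * exception vectors are pre-computed as branches into the debug handler
 * at handler_address + 0x20, and the breakpoint/watchpoint bookkeeping
 * is reset.
 */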
  2439. static int xscale_init_arch_info(target_t *target,
  2440. xscale_common_t *xscale, jtag_tap_t *tap, const char *variant)
  2441. {
  2442. armv4_5_common_t *armv4_5;
  2443. uint32_t high_reset_branch, low_reset_branch;
  2444. int i;
  2445. armv4_5 = &xscale->armv4_5_common;
2446. /* store architecture specific data (none so far) */
  2447. xscale->arch_info = NULL;
  2448. xscale->common_magic = XSCALE_COMMON_MAGIC;
  2449. /* we don't really *need* variant info ... */
  2450. if (variant) {
  2451. int ir_length = 0;
  2452. if (strcmp(variant, "pxa250") == 0
  2453. || strcmp(variant, "pxa255") == 0
  2454. || strcmp(variant, "pxa26x") == 0)
  2455. ir_length = 5;
  2456. else if (strcmp(variant, "pxa27x") == 0
  2457. || strcmp(variant, "ixp42x") == 0
  2458. || strcmp(variant, "ixp45x") == 0
  2459. || strcmp(variant, "ixp46x") == 0)
  2460. ir_length = 7;
  2461. else
  2462. LOG_WARNING("%s: unrecognized variant %s",
  2463. tap->dotted_name, variant);
  2464. if (ir_length && ir_length != tap->ir_length) {
  2465. LOG_WARNING("%s: IR length for %s is %d; fixing",
  2466. tap->dotted_name, variant, ir_length);
  2467. tap->ir_length = ir_length;
  2468. }
  2469. }
  2470. /* the debug handler isn't installed (and thus not running) at this time */
  2471. xscale->handler_address = 0xfe000800;
  2472. /* clear the vectors we keep locally for reference */
  2473. memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
  2474. memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
  2475. /* no user-specified vectors have been configured yet */
  2476. xscale->static_low_vectors_set = 0x0;
  2477. xscale->static_high_vectors_set = 0x0;
  2478. /* calculate branches to debug handler */
  2479. low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
  2480. high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
  2481. xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
  2482. xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
  2483. for (i = 1; i <= 7; i++)
  2484. {
  2485. xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  2486. xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  2487. }
  2488. /* 64kB aligned region used for DCache cleaning */
  2489. xscale->cache_clean_address = 0xfffe0000;
  2490. xscale->hold_rst = 0;
  2491. xscale->external_debug_break = 0;
  2492. xscale->ibcr_available = 2;
  2493. xscale->ibcr0_used = 0;
  2494. xscale->ibcr1_used = 0;
  2495. xscale->dbr_available = 2;
  2496. xscale->dbr0_used = 0;
  2497. xscale->dbr1_used = 0;
  2498. xscale->arm_bkpt = ARMV5_BKPT(0x0);
  2499. xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
  2500. xscale->vector_catch = 0x1;
  2501. xscale->trace.capture_status = TRACE_IDLE;
  2502. xscale->trace.data = NULL;
  2503. xscale->trace.image = NULL;
  2504. xscale->trace.buffer_enabled = 0;
  2505. xscale->trace.buffer_fill = 0;
  2506. /* prepare ARMv4/5 specific information */
  2507. armv4_5->arch_info = xscale;
  2508. armv4_5->read_core_reg = xscale_read_core_reg;
  2509. armv4_5->write_core_reg = xscale_write_core_reg;
  2510. armv4_5->full_context = xscale_full_context;
  2511. armv4_5_init_arch_info(target, armv4_5);
  2512. xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
  2513. xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
  2514. xscale->armv4_5_mmu.read_memory = xscale_read_memory;
  2515. xscale->armv4_5_mmu.write_memory = xscale_write_memory;
  2516. xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
  2517. xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
  2518. xscale->armv4_5_mmu.has_tiny_pages = 1;
  2519. xscale->armv4_5_mmu.mmu_enabled = 0;
  2520. return ERROR_OK;
  2521. }
  2522. static int xscale_target_create(struct target_s *target, Jim_Interp *interp)
  2523. {
  2524. xscale_common_t *xscale;
  2525. if (sizeof xscale_debug_handler - 1 > 0x800) {
  2526. LOG_ERROR("debug_handler.bin: larger than 2kb");
  2527. return ERROR_FAIL;
  2528. }
  2529. xscale = calloc(1, sizeof(*xscale));
  2530. if (!xscale)
  2531. return ERROR_FAIL;
  2532. return xscale_init_arch_info(target, xscale, target->tap,
  2533. target->variant);
  2534. }
  2535. static int
  2536. xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx,
  2537. char *cmd, char **args, int argc)
  2538. {
  2539. target_t *target = NULL;
  2540. armv4_5_common_t *armv4_5;
  2541. xscale_common_t *xscale;
  2542. uint32_t handler_address;
  2543. if (argc < 2)
  2544. {
  2545. LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
  2546. return ERROR_OK;
  2547. }
  2548. if ((target = get_target(args[0])) == NULL)
  2549. {
  2550. LOG_ERROR("target '%s' not defined", args[0]);
  2551. return ERROR_FAIL;
  2552. }
  2553. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2554. {
  2555. return ERROR_FAIL;
  2556. }
  2557. handler_address = strtoul(args[1], NULL, 0);
  2558. if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
  2559. ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
  2560. {
  2561. xscale->handler_address = handler_address;
  2562. }
  2563. else
  2564. {
  2565. LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
  2566. return ERROR_FAIL;
  2567. }
  2568. return ERROR_OK;
  2569. }
  2570. static int
  2571. xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx,
  2572. char *cmd, char **args, int argc)
  2573. {
  2574. target_t *target = NULL;
  2575. armv4_5_common_t *armv4_5;
  2576. xscale_common_t *xscale;
  2577. uint32_t cache_clean_address;
  2578. if (argc < 2)
  2579. {
  2580. return ERROR_COMMAND_SYNTAX_ERROR;
  2581. }
  2582. target = get_target(args[0]);
  2583. if (target == NULL)
  2584. {
  2585. LOG_ERROR("target '%s' not defined", args[0]);
  2586. return ERROR_FAIL;
  2587. }
  2588. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2589. {
  2590. return ERROR_FAIL;
  2591. }
  2592. cache_clean_address = strtoul(args[1], NULL, 0);
  2593. if (cache_clean_address & 0xffff)
  2594. {
  2595. LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
  2596. }
  2597. else
  2598. {
  2599. xscale->cache_clean_address = cache_clean_address;
  2600. }
  2601. return ERROR_OK;
  2602. }
  2603. static int
  2604. xscale_handle_cache_info_command(struct command_context_s *cmd_ctx,
  2605. char *cmd, char **args, int argc)
  2606. {
  2607. target_t *target = get_current_target(cmd_ctx);
  2608. armv4_5_common_t *armv4_5;
  2609. xscale_common_t *xscale;
  2610. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2611. {
  2612. return ERROR_OK;
  2613. }
  2614. return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
  2615. }
  2616. static int xscale_virt2phys(struct target_s *target,
  2617. uint32_t virtual, uint32_t *physical)
  2618. {
  2619. armv4_5_common_t *armv4_5;
  2620. xscale_common_t *xscale;
  2621. int retval;
  2622. int type;
  2623. uint32_t cb;
  2624. int domain;
  2625. uint32_t ap;
  2626. if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
  2627. {
  2628. return retval;
  2629. }
  2630. uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
  2631. if (type == -1)
  2632. {
  2633. return ret;
  2634. }
  2635. *physical = ret;
  2636. return ERROR_OK;
  2637. }
  2638. static int xscale_mmu(struct target_s *target, int *enabled)
  2639. {
  2640. armv4_5_common_t *armv4_5 = target->arch_info;
  2641. xscale_common_t *xscale = armv4_5->arch_info;
  2642. if (target->state != TARGET_HALTED)
  2643. {
  2644. LOG_ERROR("Target not halted");
  2645. return ERROR_TARGET_INVALID;
  2646. }
  2647. *enabled = xscale->armv4_5_mmu.mmu_enabled;
  2648. return ERROR_OK;
  2649. }
  2650. static int xscale_handle_mmu_command(command_context_t *cmd_ctx,
  2651. char *cmd, char **args, int argc)
  2652. {
  2653. target_t *target = get_current_target(cmd_ctx);
  2654. armv4_5_common_t *armv4_5;
  2655. xscale_common_t *xscale;
  2656. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2657. {
  2658. return ERROR_OK;
  2659. }
  2660. if (target->state != TARGET_HALTED)
  2661. {
  2662. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2663. return ERROR_OK;
  2664. }
  2665. if (argc >= 1)
  2666. {
  2667. if (strcmp("enable", args[0]) == 0)
  2668. {
  2669. xscale_enable_mmu_caches(target, 1, 0, 0);
  2670. xscale->armv4_5_mmu.mmu_enabled = 1;
  2671. }
  2672. else if (strcmp("disable", args[0]) == 0)
  2673. {
  2674. xscale_disable_mmu_caches(target, 1, 0, 0);
  2675. xscale->armv4_5_mmu.mmu_enabled = 0;
  2676. }
  2677. }
  2678. command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
  2679. return ERROR_OK;
  2680. }
  2681. static int xscale_handle_idcache_command(command_context_t *cmd_ctx,
  2682. char *cmd, char **args, int argc)
  2683. {
  2684. target_t *target = get_current_target(cmd_ctx);
  2685. armv4_5_common_t *armv4_5;
  2686. xscale_common_t *xscale;
  2687. int icache = 0, dcache = 0;
  2688. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2689. {
  2690. return ERROR_OK;
  2691. }
  2692. if (target->state != TARGET_HALTED)
  2693. {
  2694. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2695. return ERROR_OK;
  2696. }
  2697. if (strcmp(cmd, "icache") == 0)
  2698. icache = 1;
  2699. else if (strcmp(cmd, "dcache") == 0)
  2700. dcache = 1;
  2701. if (argc >= 1)
  2702. {
  2703. if (strcmp("enable", args[0]) == 0)
  2704. {
  2705. xscale_enable_mmu_caches(target, 0, dcache, icache);
  2706. if (icache)
  2707. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
  2708. else if (dcache)
  2709. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
  2710. }
  2711. else if (strcmp("disable", args[0]) == 0)
  2712. {
  2713. xscale_disable_mmu_caches(target, 0, dcache, icache);
  2714. if (icache)
  2715. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
  2716. else if (dcache)
  2717. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
  2718. }
  2719. }
  2720. if (icache)
  2721. command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
  2722. if (dcache)
  2723. command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
  2724. return ERROR_OK;
  2725. }
  2726. static int xscale_handle_vector_catch_command(command_context_t *cmd_ctx,
  2727. char *cmd, char **args, int argc)
  2728. {
  2729. target_t *target = get_current_target(cmd_ctx);
  2730. armv4_5_common_t *armv4_5;
  2731. xscale_common_t *xscale;
  2732. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2733. {
  2734. return ERROR_OK;
  2735. }
  2736. if (argc < 1)
  2737. {
  2738. command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
  2739. }
  2740. else
  2741. {
  2742. xscale->vector_catch = strtoul(args[0], NULL, 0);
  2743. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
  2744. xscale_write_dcsr(target, -1, -1);
  2745. }
  2746. command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
  2747. return ERROR_OK;
  2748. }
  2749. static int xscale_handle_vector_table_command(command_context_t *cmd_ctx,
  2750. char *cmd, char **args, int argc)
  2751. {
  2752. target_t *target = get_current_target(cmd_ctx);
  2753. armv4_5_common_t *armv4_5;
  2754. xscale_common_t *xscale;
  2755. int err = 0;
  2756. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2757. {
  2758. return ERROR_OK;
  2759. }
  2760. if (argc == 0) /* print current settings */
  2761. {
  2762. int idx;
  2763. command_print(cmd_ctx, "active user-set static vectors:");
  2764. for (idx = 1; idx < 8; idx++)
  2765. if (xscale->static_low_vectors_set & (1 << idx))
  2766. command_print(cmd_ctx, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
  2767. for (idx = 1; idx < 8; idx++)
  2768. if (xscale->static_high_vectors_set & (1 << idx))
  2769. command_print(cmd_ctx, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
  2770. return ERROR_OK;
  2771. }
  2772. if (argc != 3)
  2773. err = 1;
  2774. else
  2775. {
  2776. int idx;
  2777. uint32_t vec;
  2778. idx = strtoul(args[1], NULL, 0);
  2779. vec = strtoul(args[2], NULL, 0);
  2780. if (idx < 1 || idx >= 8)
  2781. err = 1;
  2782. if (!err && strcmp(args[0], "low") == 0)
  2783. {
  2784. xscale->static_low_vectors_set |= (1<<idx);
  2785. xscale->static_low_vectors[idx] = vec;
  2786. }
  2787. else if (!err && (strcmp(args[0], "high") == 0))
  2788. {
  2789. xscale->static_high_vectors_set |= (1<<idx);
  2790. xscale->static_high_vectors[idx] = vec;
  2791. }
  2792. else
  2793. err = 1;
  2794. }
  2795. if (err)
  2796. command_print(cmd_ctx, "usage: xscale vector_table <high|low> <index> <code>");
  2797. return ERROR_OK;
  2798. }
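/* "xscale trace_buffer <enable|disable> ['fill' [n]|'wrap']": enabling
 * discards any previously collected trace data, and the low two DCSR
 * bits are rewritten accordingly (bit 1 set for fill-once mode, both
 * bits cleared for wrap-around).
 */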
  2799. static int
  2800. xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx,
  2801. char *cmd, char **args, int argc)
  2802. {
  2803. target_t *target = get_current_target(cmd_ctx);
  2804. armv4_5_common_t *armv4_5;
  2805. xscale_common_t *xscale;
  2806. uint32_t dcsr_value;
  2807. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2808. {
  2809. return ERROR_OK;
  2810. }
  2811. if (target->state != TARGET_HALTED)
  2812. {
  2813. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2814. return ERROR_OK;
  2815. }
  2816. if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
  2817. {
  2818. xscale_trace_data_t *td, *next_td;
  2819. xscale->trace.buffer_enabled = 1;
  2820. /* free old trace data */
  2821. td = xscale->trace.data;
  2822. while (td)
  2823. {
  2824. next_td = td->next;
  2825. if (td->entries)
  2826. free(td->entries);
  2827. free(td);
  2828. td = next_td;
  2829. }
  2830. xscale->trace.data = NULL;
  2831. }
  2832. else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
  2833. {
  2834. xscale->trace.buffer_enabled = 0;
  2835. }
  2836. if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
  2837. {
  2838. if (argc >= 3)
  2839. xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
  2840. else
  2841. xscale->trace.buffer_fill = 1;
  2842. }
  2843. else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
  2844. {
  2845. xscale->trace.buffer_fill = -1;
  2846. }
  2847. if (xscale->trace.buffer_enabled)
  2848. {
  2849. /* if we enable the trace buffer in fill-once
  2850. * mode we know the address of the first instruction */
  2851. xscale->trace.pc_ok = 1;
  2852. xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  2853. }
  2854. else
  2855. {
  2856. /* otherwise the address is unknown, and we have no known good PC */
  2857. xscale->trace.pc_ok = 0;
  2858. }
  2859. command_print(cmd_ctx, "trace buffer %s (%s)",
  2860. (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
  2861. (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
  2862. dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
  2863. if (xscale->trace.buffer_fill >= 0)
  2864. xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
  2865. else
  2866. xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
  2867. return ERROR_OK;
  2868. }
  2869. static int
  2870. xscale_handle_trace_image_command(struct command_context_s *cmd_ctx,
  2871. char *cmd, char **args, int argc)
  2872. {
  2873. target_t *target;
  2874. armv4_5_common_t *armv4_5;
  2875. xscale_common_t *xscale;
  2876. if (argc < 1)
  2877. {
  2878. command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
  2879. return ERROR_OK;
  2880. }
  2881. target = get_current_target(cmd_ctx);
  2882. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2883. {
  2884. return ERROR_OK;
  2885. }
  2886. if (xscale->trace.image)
  2887. {
  2888. image_close(xscale->trace.image);
  2889. free(xscale->trace.image);
  2890. command_print(cmd_ctx, "previously loaded image found and closed");
  2891. }
  2892. xscale->trace.image = malloc(sizeof(image_t));
  2893. xscale->trace.image->base_address_set = 0;
  2894. xscale->trace.image->start_address_set = 0;
  2895. /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
  2896. if (argc >= 2)
  2897. {
  2898. xscale->trace.image->base_address_set = 1;
  2899. xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
  2900. }
  2901. else
  2902. {
  2903. xscale->trace.image->base_address_set = 0;
  2904. }
  2905. if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
  2906. {
  2907. free(xscale->trace.image);
  2908. xscale->trace.image = NULL;
  2909. return ERROR_OK;
  2910. }
  2911. return ERROR_OK;
  2912. }
  2913. static int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx,
  2914. char *cmd, char **args, int argc)
  2915. {
  2916. target_t *target = get_current_target(cmd_ctx);
  2917. armv4_5_common_t *armv4_5;
  2918. xscale_common_t *xscale;
  2919. xscale_trace_data_t *trace_data;
  2920. fileio_t file;
  2921. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2922. {
  2923. return ERROR_OK;
  2924. }
  2925. if (target->state != TARGET_HALTED)
  2926. {
  2927. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2928. return ERROR_OK;
  2929. }
  2930. if (argc < 1)
  2931. {
  2932. command_print(cmd_ctx, "usage: xscale dump_trace <file>");
  2933. return ERROR_OK;
  2934. }
  2935. trace_data = xscale->trace.data;
  2936. if (!trace_data)
  2937. {
  2938. command_print(cmd_ctx, "no trace data collected");
  2939. return ERROR_OK;
  2940. }
  2941. if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
  2942. {
  2943. return ERROR_OK;
  2944. }
  2945. while (trace_data)
  2946. {
  2947. int i;
  2948. fileio_write_u32(&file, trace_data->chkpt0);
  2949. fileio_write_u32(&file, trace_data->chkpt1);
  2950. fileio_write_u32(&file, trace_data->last_instruction);
  2951. fileio_write_u32(&file, trace_data->depth);
  2952. for (i = 0; i < trace_data->depth; i++)
  2953. fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
  2954. trace_data = trace_data->next;
  2955. }
  2956. fileio_close(&file);
  2957. return ERROR_OK;
  2958. }
  2959. static int
  2960. xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx,
  2961. char *cmd, char **args, int argc)
  2962. {
  2963. target_t *target = get_current_target(cmd_ctx);
  2964. armv4_5_common_t *armv4_5;
  2965. xscale_common_t *xscale;
  2966. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2967. {
  2968. return ERROR_OK;
  2969. }
  2970. xscale_analyze_trace(target, cmd_ctx);
  2971. return ERROR_OK;
  2972. }
  2973. static int xscale_handle_cp15(command_context_t *cmd_ctx,
  2974. char *cmd, char **args, int argc)
  2975. {
  2976. target_t *target = get_current_target(cmd_ctx);
  2977. armv4_5_common_t *armv4_5;
  2978. xscale_common_t *xscale;
  2979. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2980. {
  2981. return ERROR_OK;
  2982. }
  2983. if (target->state != TARGET_HALTED)
  2984. {
  2985. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2986. return ERROR_OK;
  2987. }
  2988. uint32_t reg_no = 0;
  2989. reg_t *reg = NULL;
  2990. if (argc > 0)
  2991. {
  2992. reg_no = strtoul(args[0], NULL, 0);
  2993. /*translate from xscale cp15 register no to openocd register*/
  2994. switch (reg_no)
  2995. {
  2996. case 0:
  2997. reg_no = XSCALE_MAINID;
  2998. break;
  2999. case 1:
  3000. reg_no = XSCALE_CTRL;
  3001. break;
  3002. case 2:
  3003. reg_no = XSCALE_TTB;
  3004. break;
  3005. case 3:
  3006. reg_no = XSCALE_DAC;
  3007. break;
  3008. case 5:
  3009. reg_no = XSCALE_FSR;
  3010. break;
  3011. case 6:
  3012. reg_no = XSCALE_FAR;
  3013. break;
  3014. case 13:
  3015. reg_no = XSCALE_PID;
  3016. break;
  3017. case 15:
  3018. reg_no = XSCALE_CPACCESS;
  3019. break;
  3020. default:
  3021. command_print(cmd_ctx, "invalid register number");
  3022. return ERROR_INVALID_ARGUMENTS;
  3023. }
  3024. reg = &xscale->reg_cache->reg_list[reg_no];
  3025. }
  3026. if (argc == 1)
  3027. {
  3028. uint32_t value;
  3029. /* read cp15 control register */
  3030. xscale_get_reg(reg);
  3031. value = buf_get_u32(reg->value, 0, 32);
  3032. command_print(cmd_ctx, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
  3033. }
  3034. else if (argc == 2)
  3035. {
  3036. uint32_t value = strtoul(args[1], NULL, 0);
  3037. /* send CP write request (command 0x41) */
  3038. xscale_send_u32(target, 0x41);
  3039. /* send CP register number */
  3040. xscale_send_u32(target, reg_no);
  3041. /* send CP register value */
  3042. xscale_send_u32(target, value);
  3043. /* execute cpwait to ensure outstanding operations complete */
  3044. xscale_send_u32(target, 0x53);
  3045. }
  3046. else
  3047. {
  3048. command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
  3049. }
  3050. return ERROR_OK;
  3051. }
  3052. static int xscale_register_commands(struct command_context_s *cmd_ctx)
  3053. {
  3054. command_t *xscale_cmd;
  3055. xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
  3056. register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
  3057. register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
  3058. register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
  3059. register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
  3060. register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
  3061. register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
  3062. register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
  3063. register_command(cmd_ctx, xscale_cmd, "vector_table", xscale_handle_vector_table_command, COMMAND_EXEC, "<high|low> <index> <code> set static code for exception handler entry");
  3064. register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable | disable> ['fill' [n]|'wrap']");
  3065. register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
  3066. register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
  3067. register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
  3068. COMMAND_EXEC, "load image from <file> [base address]");
  3069. register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
  3070. armv4_5_register_commands(cmd_ctx);
  3071. return ERROR_OK;
  3072. }
  3073. target_type_t xscale_target =
  3074. {
  3075. .name = "xscale",
  3076. .poll = xscale_poll,
  3077. .arch_state = xscale_arch_state,
  3078. .target_request_data = NULL,
  3079. .halt = xscale_halt,
  3080. .resume = xscale_resume,
  3081. .step = xscale_step,
  3082. .assert_reset = xscale_assert_reset,
  3083. .deassert_reset = xscale_deassert_reset,
  3084. .soft_reset_halt = NULL,
  3085. .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
  3086. .read_memory = xscale_read_memory,
  3087. .write_memory = xscale_write_memory,
  3088. .bulk_write_memory = xscale_bulk_write_memory,
  3089. .checksum_memory = arm7_9_checksum_memory,
  3090. .blank_check_memory = arm7_9_blank_check_memory,
  3091. .run_algorithm = armv4_5_run_algorithm,
  3092. .add_breakpoint = xscale_add_breakpoint,
  3093. .remove_breakpoint = xscale_remove_breakpoint,
  3094. .add_watchpoint = xscale_add_watchpoint,
  3095. .remove_watchpoint = xscale_remove_watchpoint,
  3096. .register_commands = xscale_register_commands,
  3097. .target_create = xscale_target_create,
  3098. .init_target = xscale_init_target,
  3099. .quit = xscale_quit,
  3100. .virt2phys = xscale_virt2phys,
  3101. .mmu = xscale_mmu
  3102. };