  1. /***************************************************************************
  2. * Copyright (C) 2006, 2007 by Dominic Rath *
  3. * Dominic.Rath@gmx.de *
  4. * *
  5. * Copyright (C) 2007,2008 Øyvind Harboe *
  6. * oyvind.harboe@zylin.com *
  7. * *
  8. * Copyright (C) 2009 Michael Schwingen *
  9. * michael@schwingen.org *
  10. * *
  11. * This program is free software; you can redistribute it and/or modify *
  12. * it under the terms of the GNU General Public License as published by *
  13. * the Free Software Foundation; either version 2 of the License, or *
  14. * (at your option) any later version. *
  15. * *
  16. * This program is distributed in the hope that it will be useful, *
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  19. * GNU General Public License for more details. *
  20. * *
  21. * You should have received a copy of the GNU General Public License *
  22. * along with this program; if not, write to the *
  23. * Free Software Foundation, Inc., *
  24. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
  25. ***************************************************************************/
  26. #ifdef HAVE_CONFIG_H
  27. #include "config.h"
  28. #endif
  29. #include "xscale.h"
  30. #include "target_type.h"
  31. #include "arm7_9_common.h"
  32. #include "arm_simulator.h"
  33. #include "arm_disassembler.h"
  34. #include "time_support.h"
  35. #include "image.h"
  36. /*
  37. * Important XScale documents available as of October 2009 include:
  38. *
  39. * Intel XScale® Core Developer’s Manual, January 2004
  40. * Order Number: 273473-002
  41. * This has a chapter detailing debug facilities, and punts some
  42. * details to chip-specific microarchitecture documents.
  43. *
  44. * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
  45. * Document Number: 273539-005
  46. * Less detailed than the developer's manual, but summarizes those
  47. * missing details (for most XScales) and gives LOTS of notes about
  48. * debugger/handler interaction issues. Presents a simpler reset
  49. * and load-handler sequence than the arch doc. (Note, OpenOCD
  50. * doesn't currently support "Hot-Debug" as defined there.)
  51. *
  52. * Chip-specific microarchitecture documents may also be useful.
  53. */
  54. /* forward declarations */
  55. static int xscale_resume(struct target_s *, int current,
  56. uint32_t address, int handle_breakpoints, int debug_execution);
  57. static int xscale_debug_entry(target_t *);
  58. static int xscale_restore_context(target_t *);
  59. static int xscale_get_reg(reg_t *reg);
  60. static int xscale_set_reg(reg_t *reg, uint8_t *buf);
  61. static int xscale_set_breakpoint(struct target_s *, breakpoint_t *);
  62. static int xscale_set_watchpoint(struct target_s *, watchpoint_t *);
  63. static int xscale_unset_breakpoint(struct target_s *, breakpoint_t *);
  64. static int xscale_read_trace(target_t *);
  65. static char *const xscale_reg_list[] =
  66. {
  67. "XSCALE_MAINID", /* 0 */
  68. "XSCALE_CACHETYPE",
  69. "XSCALE_CTRL",
  70. "XSCALE_AUXCTRL",
  71. "XSCALE_TTB",
  72. "XSCALE_DAC",
  73. "XSCALE_FSR",
  74. "XSCALE_FAR",
  75. "XSCALE_PID",
  76. "XSCALE_CPACCESS",
  77. "XSCALE_IBCR0", /* 10 */
  78. "XSCALE_IBCR1",
  79. "XSCALE_DBR0",
  80. "XSCALE_DBR1",
  81. "XSCALE_DBCON",
  82. "XSCALE_TBREG",
  83. "XSCALE_CHKPT0",
  84. "XSCALE_CHKPT1",
  85. "XSCALE_DCSR",
  86. "XSCALE_TX",
  87. "XSCALE_RX", /* 20 */
  88. "XSCALE_TXRXCTRL",
  89. };
  90. static const xscale_reg_t xscale_reg_arch_info[] =
  91. {
  92. {XSCALE_MAINID, NULL},
  93. {XSCALE_CACHETYPE, NULL},
  94. {XSCALE_CTRL, NULL},
  95. {XSCALE_AUXCTRL, NULL},
  96. {XSCALE_TTB, NULL},
  97. {XSCALE_DAC, NULL},
  98. {XSCALE_FSR, NULL},
  99. {XSCALE_FAR, NULL},
  100. {XSCALE_PID, NULL},
  101. {XSCALE_CPACCESS, NULL},
  102. {XSCALE_IBCR0, NULL},
  103. {XSCALE_IBCR1, NULL},
  104. {XSCALE_DBR0, NULL},
  105. {XSCALE_DBR1, NULL},
  106. {XSCALE_DBCON, NULL},
  107. {XSCALE_TBREG, NULL},
  108. {XSCALE_CHKPT0, NULL},
  109. {XSCALE_CHKPT1, NULL},
  110. {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
  111. {-1, NULL}, /* TX accessed via JTAG */
  112. {-1, NULL}, /* RX accessed via JTAG */
  113. {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
  114. };
  115. static int xscale_reg_arch_type = -1;
  116. /* convenience wrapper to access XScale specific registers */
  117. static int xscale_set_reg_u32(reg_t *reg, uint32_t value)
  118. {
  119. uint8_t buf[4];
  120. buf_set_u32(buf, 0, 32, value);
  121. return xscale_set_reg(reg, buf);
  122. }
  123. static int xscale_get_arch_pointers(target_t *target,
  124. armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
  125. {
  126. armv4_5_common_t *armv4_5 = target->arch_info;
  127. xscale_common_t *xscale = armv4_5->arch_info;
  128. if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
  129. {
  130. LOG_ERROR("target isn't an XScale target");
  131. return -1;
  132. }
  133. if (xscale->common_magic != XSCALE_COMMON_MAGIC)
  134. {
  135. LOG_ERROR("target isn't an XScale target");
  136. return -1;
  137. }
  138. *armv4_5_p = armv4_5;
  139. *xscale_p = xscale;
  140. return ERROR_OK;
  141. }
  142. static int xscale_jtag_set_instr(jtag_tap_t *tap, uint32_t new_instr)
  143. {
  144. if (tap == NULL)
  145. return ERROR_FAIL;
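/* only queue an IR scan when the requested instruction differs from the one already loaded */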
  146. if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
  147. {
  148. scan_field_t field;
  149. field.tap = tap;
  150. field.num_bits = tap->ir_length;
  151. field.out_value = calloc(CEIL(field.num_bits, 8), 1);
  152. buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
  153. uint8_t tmp[4];
  154. field.in_value = tmp;
  155. jtag_add_ir_scan(1, &field, jtag_get_end_state());
  156. /* FIX!!!! isn't this check superfluous? verify_ircapture handles this? */
  157. jtag_check_value_mask(&field, tap->expected, tap->expected_mask);
  158. free(field.out_value);
  159. }
  160. return ERROR_OK;
  161. }
  162. static int xscale_read_dcsr(target_t *target)
  163. {
  164. armv4_5_common_t *armv4_5 = target->arch_info;
  165. xscale_common_t *xscale = armv4_5->arch_info;
  166. int retval;
  167. scan_field_t fields[3];
  168. uint8_t field0 = 0x0;
  169. uint8_t field0_check_value = 0x2;
  170. uint8_t field0_check_mask = 0x7;
  171. uint8_t field2 = 0x0;
  172. uint8_t field2_check_value = 0x0;
  173. uint8_t field2_check_mask = 0x1;
  174. jtag_set_end_state(TAP_DRPAUSE);
  175. xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dcsr);
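/* The DCSR data register scanned here is 36 bits: a 3-bit control field
 * (bit 1 = hold_rst, bit 2 = external debug break), the 32-bit DCSR contents,
 * and a trailing 1-bit field. */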
  176. buf_set_u32(&field0, 1, 1, xscale->hold_rst);
  177. buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
  178. fields[0].tap = xscale->jtag_info.tap;
  179. fields[0].num_bits = 3;
  180. fields[0].out_value = &field0;
  181. uint8_t tmp;
  182. fields[0].in_value = &tmp;
  183. fields[1].tap = xscale->jtag_info.tap;
  184. fields[1].num_bits = 32;
  185. fields[1].out_value = NULL;
  186. fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
  187. fields[2].tap = xscale->jtag_info.tap;
  188. fields[2].num_bits = 1;
  189. fields[2].out_value = &field2;
  190. uint8_t tmp2;
  191. fields[2].in_value = &tmp2;
  192. jtag_add_dr_scan(3, fields, jtag_get_end_state());
  193. jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
  194. jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
  195. if ((retval = jtag_execute_queue()) != ERROR_OK)
  196. {
  197. LOG_ERROR("JTAG error while reading DCSR");
  198. return retval;
  199. }
  200. xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
  201. xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
  202. /* write the register with the value we just read
  203. * on this second pass, only the first bit of field0 is guaranteed to be 0
  204. */
  205. field0_check_mask = 0x1;
  206. fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
  207. fields[1].in_value = NULL;
  208. jtag_set_end_state(TAP_IDLE);
  209. jtag_add_dr_scan(3, fields, jtag_get_end_state());
  210. /* DANGER!!! this must be here. It will make sure that the arguments
  211. * to jtag_set_check_value() do not go out of scope! */
  212. return jtag_execute_queue();
  213. }
  214. static void xscale_getbuf(jtag_callback_data_t arg)
  215. {
  216. uint8_t *in = (uint8_t *)arg;
  217. *((uint32_t *)in) = buf_get_u32(in, 0, 32);
  218. }
  219. static int xscale_receive(target_t *target, uint32_t *buffer, int num_words)
  220. {
  221. if (num_words == 0)
  222. return ERROR_INVALID_ARGUMENTS;
  223. int retval = ERROR_OK;
  224. armv4_5_common_t *armv4_5 = target->arch_info;
  225. xscale_common_t *xscale = armv4_5->arch_info;
  226. tap_state_t path[3];
  227. scan_field_t fields[3];
  228. uint8_t *field0 = malloc(num_words * 1);
  229. uint8_t field0_check_value = 0x2;
  230. uint8_t field0_check_mask = 0x6;
  231. uint32_t *field1 = malloc(num_words * 4);
  232. uint8_t field2_check_value = 0x0;
  233. uint8_t field2_check_mask = 0x1;
  234. int words_done = 0;
  235. int words_scheduled = 0;
  236. int i;
  237. path[0] = TAP_DRSELECT;
  238. path[1] = TAP_DRCAPTURE;
  239. path[2] = TAP_DRSHIFT;
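/* each pass walks Select-DR -> Capture-DR -> Shift-DR so DBGTX is re-captured;
 * bit 0 of the 3-bit status field then tells us whether the handler supplied a word */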
  240. fields[0].tap = xscale->jtag_info.tap;
  241. fields[0].num_bits = 3;
  242. fields[0].out_value = NULL;
  243. fields[0].in_value = NULL;
  244. fields[0].check_value = &field0_check_value;
  245. fields[0].check_mask = &field0_check_mask;
  246. fields[1].tap = xscale->jtag_info.tap;
  247. fields[1].num_bits = 32;
  248. fields[1].out_value = NULL;
  249. fields[1].check_value = NULL;
  250. fields[1].check_mask = NULL;
  251. fields[2].tap = xscale->jtag_info.tap;
  252. fields[2].num_bits = 1;
  253. fields[2].out_value = NULL;
  254. fields[2].in_value = NULL;
  255. fields[2].check_value = &field2_check_value;
  256. fields[2].check_mask = &field2_check_mask;
  257. jtag_set_end_state(TAP_IDLE);
  258. xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgtx);
  259. jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
  260. /* repeat until all words have been collected */
  261. int attempts = 0;
  262. while (words_done < num_words)
  263. {
  264. /* schedule reads */
  265. words_scheduled = 0;
  266. for (i = words_done; i < num_words; i++)
  267. {
  268. fields[0].in_value = &field0[i];
  269. jtag_add_pathmove(3, path);
  270. fields[1].in_value = (uint8_t *)(field1 + i);
  271. jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));
  272. jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
  273. words_scheduled++;
  274. }
  275. if ((retval = jtag_execute_queue()) != ERROR_OK)
  276. {
  277. LOG_ERROR("JTAG error while receiving data from debug handler");
  278. break;
  279. }
  280. /* examine results */
  281. for (i = words_done; i < num_words; i++)
  282. {
  283. if (!(field0[i] & 1))
  284. {
  285. /* move backwards if necessary */
  286. int j;
  287. for (j = i; j < num_words - 1; j++)
  288. {
  289. field0[j] = field0[j + 1];
  290. field1[j] = field1[j + 1];
  291. }
  292. words_scheduled--;
  293. }
  294. }
  295. if (words_scheduled == 0)
  296. {
  297. if (attempts++ == 1000)
  298. {
  299. LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
  300. retval = ERROR_TARGET_TIMEOUT;
  301. break;
  302. }
  303. }
  304. words_done += words_scheduled;
  305. }
  306. for (i = 0; i < num_words; i++)
  307. *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
  308. free(field1);
  309. return retval;
  310. }
  311. static int xscale_read_tx(target_t *target, int consume)
  312. {
  313. armv4_5_common_t *armv4_5 = target->arch_info;
  314. xscale_common_t *xscale = armv4_5->arch_info;
  315. tap_state_t path[3];
  316. tap_state_t noconsume_path[6];
  317. int retval;
  318. struct timeval timeout, now;
  319. scan_field_t fields[3];
  320. uint8_t field0_in = 0x0;
  321. uint8_t field0_check_value = 0x2;
  322. uint8_t field0_check_mask = 0x6;
  323. uint8_t field2_check_value = 0x0;
  324. uint8_t field2_check_mask = 0x1;
  325. jtag_set_end_state(TAP_IDLE);
  326. xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgtx);
  327. path[0] = TAP_DRSELECT;
  328. path[1] = TAP_DRCAPTURE;
  329. path[2] = TAP_DRSHIFT;
  330. noconsume_path[0] = TAP_DRSELECT;
  331. noconsume_path[1] = TAP_DRCAPTURE;
  332. noconsume_path[2] = TAP_DREXIT1;
  333. noconsume_path[3] = TAP_DRPAUSE;
  334. noconsume_path[4] = TAP_DREXIT2;
  335. noconsume_path[5] = TAP_DRSHIFT;
  336. fields[0].tap = xscale->jtag_info.tap;
  337. fields[0].num_bits = 3;
  338. fields[0].out_value = NULL;
  339. fields[0].in_value = &field0_in;
  340. fields[1].tap = xscale->jtag_info.tap;
  341. fields[1].num_bits = 32;
  342. fields[1].out_value = NULL;
  343. fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
  344. fields[2].tap = xscale->jtag_info.tap;
  345. fields[2].num_bits = 1;
  346. fields[2].out_value = NULL;
  347. uint8_t tmp;
  348. fields[2].in_value = &tmp;
  349. gettimeofday(&timeout, NULL);
  350. timeval_add_time(&timeout, 1, 0);
  351. for (;;)
  352. {
  353. /* if we want to consume the register content (i.e. clear TX_READY),
  354. * we have to go straight from Capture-DR to Shift-DR
  355. * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
  356. */
  357. if (consume)
  358. jtag_add_pathmove(3, path);
  359. else
  360. {
  361. jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
  362. }
  363. jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
  364. jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
  365. jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
  366. if ((retval = jtag_execute_queue()) != ERROR_OK)
  367. {
  368. LOG_ERROR("JTAG error while reading TX");
  369. return ERROR_TARGET_TIMEOUT;
  370. }
  371. gettimeofday(&now, NULL);
  372. if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
  373. {
  374. LOG_ERROR("time out reading TX register");
  375. return ERROR_TARGET_TIMEOUT;
  376. }
  377. if (!((!(field0_in & 1)) && consume))
  378. {
  379. goto done;
  380. }
  381. if (debug_level >= 3)
  382. {
  383. LOG_DEBUG("waiting 100ms");
  384. alive_sleep(100); /* avoid flooding the logs */
  385. } else
  386. {
  387. keep_alive();
  388. }
  389. }
  390. done:
  391. if (!(field0_in & 1))
  392. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  393. return ERROR_OK;
  394. }
  395. static int xscale_write_rx(target_t *target)
  396. {
  397. armv4_5_common_t *armv4_5 = target->arch_info;
  398. xscale_common_t *xscale = armv4_5->arch_info;
  399. int retval;
  400. struct timeval timeout, now;
  401. scan_field_t fields[3];
  402. uint8_t field0_out = 0x0;
  403. uint8_t field0_in = 0x0;
  404. uint8_t field0_check_value = 0x2;
  405. uint8_t field0_check_mask = 0x6;
  406. uint8_t field2 = 0x0;
  407. uint8_t field2_check_value = 0x0;
  408. uint8_t field2_check_mask = 0x1;
  409. jtag_set_end_state(TAP_IDLE);
  410. xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgrx);
  411. fields[0].tap = xscale->jtag_info.tap;
  412. fields[0].num_bits = 3;
  413. fields[0].out_value = &field0_out;
  414. fields[0].in_value = &field0_in;
  415. fields[1].tap = xscale->jtag_info.tap;
  416. fields[1].num_bits = 32;
  417. fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
  418. fields[1].in_value = NULL;
  419. fields[2].tap = xscale->jtag_info.tap;
  420. fields[2].num_bits = 1;
  421. fields[2].out_value = &field2;
  422. uint8_t tmp;
  423. fields[2].in_value = &tmp;
  424. gettimeofday(&timeout, NULL);
  425. timeval_add_time(&timeout, 1, 0);
  426. /* poll until rx_read is low */
  427. LOG_DEBUG("polling RX");
  428. for (;;)
  429. {
  430. jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
  431. jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
  432. jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
  433. if ((retval = jtag_execute_queue()) != ERROR_OK)
  434. {
  435. LOG_ERROR("JTAG error while writing RX");
  436. return retval;
  437. }
  438. gettimeofday(&now, NULL);
  439. if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
  440. {
  441. LOG_ERROR("time out writing RX register");
  442. return ERROR_TARGET_TIMEOUT;
  443. }
  444. if (!(field0_in & 1))
  445. goto done;
  446. if (debug_level >= 3)
  447. {
  448. LOG_DEBUG("waiting 100ms");
  449. alive_sleep(100); /* avoid flooding the logs */
  450. } else
  451. {
  452. keep_alive();
  453. }
  454. }
  455. done:
  456. /* set rx_valid */
  457. field2 = 0x1;
  458. jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
  459. if ((retval = jtag_execute_queue()) != ERROR_OK)
  460. {
  461. LOG_ERROR("JTAG error while writing RX");
  462. return retval;
  463. }
  464. return ERROR_OK;
  465. }
  466. /* send count elements of size byte to the debug handler */
  467. static int xscale_send(target_t *target, uint8_t *buffer, int count, int size)
  468. {
  469. armv4_5_common_t *armv4_5 = target->arch_info;
  470. xscale_common_t *xscale = armv4_5->arch_info;
  471. uint32_t t[3];
  472. int bits[3];
  473. int retval;
  474. int done_count = 0;
  475. jtag_set_end_state(TAP_IDLE);
  476. xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgrx);
  477. bits[0]=3;
  478. t[0]=0;
  479. bits[1]=32;
  480. t[2]=1;
  481. bits[2]=1;
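/* each word goes out as 3 + 32 + 1 bits: a zeroed status field, the data word,
 * and a final bit driven to 1 (the same trailing bit xscale_write_rx sets to mark RX valid) */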
  482. int endianness = target->endianness;
  483. while (done_count++ < count)
  484. {
  485. switch (size)
  486. {
  487. case 4:
  488. if (endianness == TARGET_LITTLE_ENDIAN)
  489. {
  490. t[1]=le_to_h_u32(buffer);
  491. } else
  492. {
  493. t[1]=be_to_h_u32(buffer);
  494. }
  495. break;
  496. case 2:
  497. if (endianness == TARGET_LITTLE_ENDIAN)
  498. {
  499. t[1]=le_to_h_u16(buffer);
  500. } else
  501. {
  502. t[1]=be_to_h_u16(buffer);
  503. }
  504. break;
  505. case 1:
  506. t[1]=buffer[0];
  507. break;
  508. default:
  509. LOG_ERROR("BUG: size neither 4, 2 nor 1");
  510. exit(-1);
  511. }
  512. jtag_add_dr_out(xscale->jtag_info.tap,
  513. 3,
  514. bits,
  515. t,
  516. jtag_set_end_state(TAP_IDLE));
  517. buffer += size;
  518. }
  519. if ((retval = jtag_execute_queue()) != ERROR_OK)
  520. {
  521. LOG_ERROR("JTAG error while sending data to debug handler");
  522. return retval;
  523. }
  524. return ERROR_OK;
  525. }
  526. static int xscale_send_u32(target_t *target, uint32_t value)
  527. {
  528. armv4_5_common_t *armv4_5 = target->arch_info;
  529. xscale_common_t *xscale = armv4_5->arch_info;
  530. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
  531. return xscale_write_rx(target);
  532. }
  533. static int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
  534. {
  535. armv4_5_common_t *armv4_5 = target->arch_info;
  536. xscale_common_t *xscale = armv4_5->arch_info;
  537. int retval;
  538. scan_field_t fields[3];
  539. uint8_t field0 = 0x0;
  540. uint8_t field0_check_value = 0x2;
  541. uint8_t field0_check_mask = 0x7;
  542. uint8_t field2 = 0x0;
  543. uint8_t field2_check_value = 0x0;
  544. uint8_t field2_check_mask = 0x1;
  545. if (hold_rst != -1)
  546. xscale->hold_rst = hold_rst;
  547. if (ext_dbg_brk != -1)
  548. xscale->external_debug_break = ext_dbg_brk;
  549. jtag_set_end_state(TAP_IDLE);
  550. xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dcsr);
  551. buf_set_u32(&field0, 1, 1, xscale->hold_rst);
  552. buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
  553. fields[0].tap = xscale->jtag_info.tap;
  554. fields[0].num_bits = 3;
  555. fields[0].out_value = &field0;
  556. uint8_t tmp;
  557. fields[0].in_value = &tmp;
  558. fields[1].tap = xscale->jtag_info.tap;
  559. fields[1].num_bits = 32;
  560. fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
  561. fields[1].in_value = NULL;
  562. fields[2].tap = xscale->jtag_info.tap;
  563. fields[2].num_bits = 1;
  564. fields[2].out_value = &field2;
  565. uint8_t tmp2;
  566. fields[2].in_value = &tmp2;
  567. jtag_add_dr_scan(3, fields, jtag_get_end_state());
  568. jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
  569. jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
  570. if ((retval = jtag_execute_queue()) != ERROR_OK)
  571. {
  572. LOG_ERROR("JTAG error while writing DCSR");
  573. return retval;
  574. }
  575. xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
  576. xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
  577. return ERROR_OK;
  578. }
  579. /* parity of a 32-bit word: 0 if the number of set bits is even, 1 if odd */
  580. static unsigned int parity (unsigned int v)
  581. {
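/* fold the word down to a nibble by xor-ing halves, then use 0x6996
 * (bit i = parity of i, for i = 0..15) as a 16-entry lookup table */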
  582. // unsigned int ov = v;
  583. v ^= v >> 16;
  584. v ^= v >> 8;
  585. v ^= v >> 4;
  586. v &= 0xf;
  587. // LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
  588. return (0x6996 >> v) & 1;
  589. }
  590. static int xscale_load_ic(target_t *target, uint32_t va, uint32_t buffer[8])
  591. {
  592. armv4_5_common_t *armv4_5 = target->arch_info;
  593. xscale_common_t *xscale = armv4_5->arch_info;
  594. uint8_t packet[4];
  595. uint8_t cmd;
  596. int word;
  597. scan_field_t fields[2];
  598. LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
  599. /* LDIC into IR */
  600. jtag_set_end_state(TAP_IDLE);
  601. xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.ldic);
  602. /* CMD is b011 to load a cacheline into the Mini ICache.
  603. * Loading into the main ICache is deprecated, and unused.
  604. * It's followed by three zero bits, and 27 address bits.
  605. */
  606. buf_set_u32(&cmd, 0, 6, 0x3);
  607. /* virtual address of desired cache line */
  608. buf_set_u32(packet, 0, 27, va >> 5);
  609. fields[0].tap = xscale->jtag_info.tap;
  610. fields[0].num_bits = 6;
  611. fields[0].out_value = &cmd;
  612. fields[0].in_value = NULL;
  613. fields[1].tap = xscale->jtag_info.tap;
  614. fields[1].num_bits = 27;
  615. fields[1].out_value = packet;
  616. fields[1].in_value = NULL;
  617. jtag_add_dr_scan(2, fields, jtag_get_end_state());
  618. /* rest of packet is a cacheline: 8 instructions, with parity */
  619. fields[0].num_bits = 32;
  620. fields[0].out_value = packet;
  621. fields[1].num_bits = 1;
  622. fields[1].out_value = &cmd;
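/* the fields are reused with swapped roles: fields[0] now carries the
 * 32-bit instruction word and fields[1] the single computed parity bit */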
  623. for (word = 0; word < 8; word++)
  624. {
  625. buf_set_u32(packet, 0, 32, buffer[word]);
  626. uint32_t value;
  627. memcpy(&value, packet, sizeof(uint32_t));
  628. cmd = parity(value);
  629. jtag_add_dr_scan(2, fields, jtag_get_end_state());
  630. }
  631. return jtag_execute_queue();
  632. }
  633. static int xscale_invalidate_ic_line(target_t *target, uint32_t va)
  634. {
  635. armv4_5_common_t *armv4_5 = target->arch_info;
  636. xscale_common_t *xscale = armv4_5->arch_info;
  637. uint8_t packet[4];
  638. uint8_t cmd;
  639. scan_field_t fields[2];
  640. jtag_set_end_state(TAP_IDLE);
  641. xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.ldic); /* LDIC */
  642. /* CMD for invalidate IC line b000, bits [6:4] b000 */
  643. buf_set_u32(&cmd, 0, 6, 0x0);
  644. /* virtual address of desired cache line */
  645. buf_set_u32(packet, 0, 27, va >> 5);
  646. fields[0].tap = xscale->jtag_info.tap;
  647. fields[0].num_bits = 6;
  648. fields[0].out_value = &cmd;
  649. fields[0].in_value = NULL;
  650. fields[1].tap = xscale->jtag_info.tap;
  651. fields[1].num_bits = 27;
  652. fields[1].out_value = packet;
  653. fields[1].in_value = NULL;
  654. jtag_add_dr_scan(2, fields, jtag_get_end_state());
  655. return ERROR_OK;
  656. }
  657. static int xscale_update_vectors(target_t *target)
  658. {
  659. armv4_5_common_t *armv4_5 = target->arch_info;
  660. xscale_common_t *xscale = armv4_5->arch_info;
  661. int i;
  662. int retval;
  663. uint32_t low_reset_branch, high_reset_branch;
  664. for (i = 1; i < 8; i++)
  665. {
  666. /* if there's a static vector specified for this exception, override */
  667. if (xscale->static_high_vectors_set & (1 << i))
  668. {
  669. xscale->high_vectors[i] = xscale->static_high_vectors[i];
  670. }
  671. else
  672. {
  673. retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
  674. if (retval == ERROR_TARGET_TIMEOUT)
  675. return retval;
  676. if (retval != ERROR_OK)
  677. {
  678. /* Some of these reads will fail as part of normal execution */
  679. xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  680. }
  681. }
  682. }
  683. for (i = 1; i < 8; i++)
  684. {
  685. if (xscale->static_low_vectors_set & (1 << i))
  686. {
  687. xscale->low_vectors[i] = xscale->static_low_vectors[i];
  688. }
  689. else
  690. {
  691. retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
  692. if (retval == ERROR_TARGET_TIMEOUT)
  693. return retval;
  694. if (retval != ERROR_OK)
  695. {
  696. /* Some of these reads will fail as part of normal execution */
  697. xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  698. }
  699. }
  700. }
  701. /* calculate branches to debug handler */
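/* each reset vector gets a B instruction to handler_address + 0x20; ARM branch
 * offsets are word offsets relative to PC + 8, hence the -0x8 and the >> 2 */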
  702. low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
  703. high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
  704. xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
  705. xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
  706. /* invalidate and load exception vectors in mini i-cache */
  707. xscale_invalidate_ic_line(target, 0x0);
  708. xscale_invalidate_ic_line(target, 0xffff0000);
  709. xscale_load_ic(target, 0x0, xscale->low_vectors);
  710. xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
  711. return ERROR_OK;
  712. }
  713. static int xscale_arch_state(struct target_s *target)
  714. {
  715. armv4_5_common_t *armv4_5 = target->arch_info;
  716. xscale_common_t *xscale = armv4_5->arch_info;
  717. static const char *state[] =
  718. {
  719. "disabled", "enabled"
  720. };
  721. static const char *arch_dbg_reason[] =
  722. {
  723. "", "\n(processor reset)", "\n(trace buffer full)"
  724. };
  725. if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
  726. {
  727. LOG_ERROR("BUG: called for a non-ARMv4/5 target");
  728. exit(-1);
  729. }
  730. LOG_USER("target halted in %s state due to %s, current mode: %s\n"
  731. "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
  732. "MMU: %s, D-Cache: %s, I-Cache: %s"
  733. "%s",
  734. armv4_5_state_strings[armv4_5->core_state],
  735. Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name ,
  736. armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
  737. buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
  738. buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
  739. state[xscale->armv4_5_mmu.mmu_enabled],
  740. state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
  741. state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
  742. arch_dbg_reason[xscale->arch_debug_reason]);
  743. return ERROR_OK;
  744. }
  745. static int xscale_poll(target_t *target)
  746. {
  747. int retval = ERROR_OK;
  748. armv4_5_common_t *armv4_5 = target->arch_info;
  749. xscale_common_t *xscale = armv4_5->arch_info;
  750. if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
  751. {
  752. enum target_state previous_state = target->state;
  753. if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
  754. {
  755. /* there's data to read from the tx register, we entered debug state */
  756. xscale->handler_running = 1;
  757. target->state = TARGET_HALTED;
  758. /* process debug entry, fetching current mode regs */
  759. retval = xscale_debug_entry(target);
  760. }
  761. else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
  762. {
  763. LOG_USER("error while polling TX register, reset CPU");
  764. /* here we "lie" so GDB won't get stuck and a reset can be performed */
  765. target->state = TARGET_HALTED;
  766. }
  767. /* debug_entry could have overwritten target state (i.e. immediate resume)
  768. * don't signal event handlers in that case
  769. */
  770. if (target->state != TARGET_HALTED)
  771. return ERROR_OK;
  772. /* if target was running, signal that we halted
  773. * otherwise we reentered from debug execution */
  774. if (previous_state == TARGET_RUNNING)
  775. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  776. else
  777. target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
  778. }
  779. return retval;
  780. }
  781. static int xscale_debug_entry(target_t *target)
  782. {
  783. armv4_5_common_t *armv4_5 = target->arch_info;
  784. xscale_common_t *xscale = armv4_5->arch_info;
  785. uint32_t pc;
  786. uint32_t buffer[10];
  787. int i;
  788. int retval;
  789. uint32_t moe;
  790. /* clear external dbg break (will be written on next DCSR read) */
  791. xscale->external_debug_break = 0;
  792. if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
  793. return retval;
  794. /* get r0, pc, r1 to r7 and cpsr */
  795. if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
  796. return retval;
  797. /* move r0 from buffer to register cache */
  798. buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
  799. armv4_5->core_cache->reg_list[0].dirty = 1;
  800. armv4_5->core_cache->reg_list[0].valid = 1;
  801. LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
  802. /* move pc from buffer to register cache */
  803. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
  804. armv4_5->core_cache->reg_list[15].dirty = 1;
  805. armv4_5->core_cache->reg_list[15].valid = 1;
  806. LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
  807. /* move data from buffer to register cache */
  808. for (i = 1; i <= 7; i++)
  809. {
  810. buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
  811. armv4_5->core_cache->reg_list[i].dirty = 1;
  812. armv4_5->core_cache->reg_list[i].valid = 1;
  813. LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
  814. }
  815. buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
  816. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
  817. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
  818. LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
  819. armv4_5->core_mode = buffer[9] & 0x1f;
  820. if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
  821. {
  822. target->state = TARGET_UNKNOWN;
  823. LOG_ERROR("cpsr contains invalid mode value - communication failure");
  824. return ERROR_TARGET_FAILURE;
  825. }
  826. LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
  827. if (buffer[9] & 0x20)
  828. armv4_5->core_state = ARMV4_5_STATE_THUMB;
  829. else
  830. armv4_5->core_state = ARMV4_5_STATE_ARM;
  831. if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
  832. return ERROR_FAIL;
  833. /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
  834. if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
  835. {
  836. xscale_receive(target, buffer, 8);
  837. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
  838. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
  839. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
  840. }
  841. else
  842. {
  843. /* r8 to r14, but no spsr */
  844. xscale_receive(target, buffer, 7);
  845. }
  846. /* move data from buffer to register cache */
  847. for (i = 8; i <= 14; i++)
  848. {
  849. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
  850. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
  851. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
  852. }
  853. /* examine debug reason */
  854. xscale_read_dcsr(target);
  855. moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
  856. /* stored PC (for calculating fixup) */
  857. pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
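/* MOE (method of entry) is the 3-bit field at DCSR[4:2] and encodes why the
 * debug handler was entered; every case below also applies a -4 fixup to the
 * PC value captured by the handler */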
  858. switch (moe)
  859. {
  860. case 0x0: /* Processor reset */
  861. target->debug_reason = DBG_REASON_DBGRQ;
  862. xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
  863. pc -= 4;
  864. break;
  865. case 0x1: /* Instruction breakpoint hit */
  866. target->debug_reason = DBG_REASON_BREAKPOINT;
  867. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  868. pc -= 4;
  869. break;
  870. case 0x2: /* Data breakpoint hit */
  871. target->debug_reason = DBG_REASON_WATCHPOINT;
  872. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  873. pc -= 4;
  874. break;
  875. case 0x3: /* BKPT instruction executed */
  876. target->debug_reason = DBG_REASON_BREAKPOINT;
  877. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  878. pc -= 4;
  879. break;
  880. case 0x4: /* Ext. debug event */
  881. target->debug_reason = DBG_REASON_DBGRQ;
  882. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  883. pc -= 4;
  884. break;
  885. case 0x5: /* Vector trap occurred */
  886. target->debug_reason = DBG_REASON_BREAKPOINT;
  887. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  888. pc -= 4;
  889. break;
  890. case 0x6: /* Trace buffer full break */
  891. target->debug_reason = DBG_REASON_DBGRQ;
  892. xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
  893. pc -= 4;
  894. break;
  895. case 0x7: /* Reserved (may flag Hot-Debug support) */
  896. default:
  897. LOG_ERROR("Method of Entry is 'Reserved'");
  898. exit(-1);
  899. break;
  900. }
  901. /* apply PC fixup */
  902. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
  903. /* on the first debug entry, identify cache type */
  904. if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
  905. {
  906. uint32_t cache_type_reg;
  907. /* read cp15 cache type register */
  908. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
  909. cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
  910. armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
  911. }
  912. /* examine MMU and Cache settings */
  913. /* read cp15 control register */
  914. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  915. xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  916. xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
  917. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
  918. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
  919. /* tracing enabled, read collected trace data */
  920. if (xscale->trace.buffer_enabled)
  921. {
  922. xscale_read_trace(target);
  923. xscale->trace.buffer_fill--;
  924. /* resume if we're still collecting trace data */
  925. if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
  926. && (xscale->trace.buffer_fill > 0))
  927. {
  928. xscale_resume(target, 1, 0x0, 1, 0);
  929. }
  930. else
  931. {
  932. xscale->trace.buffer_enabled = 0;
  933. }
  934. }
  935. return ERROR_OK;
  936. }
  937. static int xscale_halt(target_t *target)
  938. {
  939. armv4_5_common_t *armv4_5 = target->arch_info;
  940. xscale_common_t *xscale = armv4_5->arch_info;
  941. LOG_DEBUG("target->state: %s",
  942. target_state_name(target));
  943. if (target->state == TARGET_HALTED)
  944. {
  945. LOG_DEBUG("target was already halted");
  946. return ERROR_OK;
  947. }
  948. else if (target->state == TARGET_UNKNOWN)
  949. {
  950. /* this must not happen for an XScale target */
  951. LOG_ERROR("target was in unknown state when halt was requested");
  952. return ERROR_TARGET_INVALID;
  953. }
  954. else if (target->state == TARGET_RESET)
  955. {
  956. LOG_DEBUG("target->state == TARGET_RESET");
  957. }
  958. else
  959. {
  960. /* assert external dbg break */
  961. xscale->external_debug_break = 1;
  962. xscale_read_dcsr(target);
  963. target->debug_reason = DBG_REASON_DBGRQ;
  964. }
  965. return ERROR_OK;
  966. }
  967. static int xscale_enable_single_step(struct target_s *target, uint32_t next_pc)
  968. {
  969. armv4_5_common_t *armv4_5 = target->arch_info;
  970. xscale_common_t *xscale= armv4_5->arch_info;
  971. reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
  972. int retval;
  973. if (xscale->ibcr0_used)
  974. {
  975. breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
  976. if (ibcr0_bp)
  977. {
  978. xscale_unset_breakpoint(target, ibcr0_bp);
  979. }
  980. else
  981. {
  982. LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
  983. exit(-1);
  984. }
  985. }
  986. if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
  987. return retval;
  988. return ERROR_OK;
  989. }
  990. static int xscale_disable_single_step(struct target_s *target)
  991. {
  992. armv4_5_common_t *armv4_5 = target->arch_info;
  993. xscale_common_t *xscale= armv4_5->arch_info;
  994. reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
  995. int retval;
  996. if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
  997. return retval;
  998. return ERROR_OK;
  999. }
  1000. static void xscale_enable_watchpoints(struct target_s *target)
  1001. {
  1002. watchpoint_t *watchpoint = target->watchpoints;
  1003. while (watchpoint)
  1004. {
  1005. if (watchpoint->set == 0)
  1006. xscale_set_watchpoint(target, watchpoint);
  1007. watchpoint = watchpoint->next;
  1008. }
  1009. }
  1010. static void xscale_enable_breakpoints(struct target_s *target)
  1011. {
  1012. breakpoint_t *breakpoint = target->breakpoints;
  1013. /* set any pending breakpoints */
  1014. while (breakpoint)
  1015. {
  1016. if (breakpoint->set == 0)
  1017. xscale_set_breakpoint(target, breakpoint);
  1018. breakpoint = breakpoint->next;
  1019. }
  1020. }
  1021. static int xscale_resume(struct target_s *target, int current,
  1022. uint32_t address, int handle_breakpoints, int debug_execution)
  1023. {
  1024. armv4_5_common_t *armv4_5 = target->arch_info;
  1025. xscale_common_t *xscale= armv4_5->arch_info;
  1026. breakpoint_t *breakpoint = target->breakpoints;
  1027. uint32_t current_pc;
  1028. int retval;
  1029. int i;
  1030. LOG_DEBUG("-");
  1031. if (target->state != TARGET_HALTED)
  1032. {
  1033. LOG_WARNING("target not halted");
  1034. return ERROR_TARGET_NOT_HALTED;
  1035. }
  1036. if (!debug_execution)
  1037. {
  1038. target_free_all_working_areas(target);
  1039. }
  1040. /* update vector tables */
  1041. if ((retval = xscale_update_vectors(target)) != ERROR_OK)
  1042. return retval;
  1043. /* current = 1: continue on current pc, otherwise continue at <address> */
  1044. if (!current)
  1045. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
  1046. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1047. /* if we're at the reset vector, we have to simulate the branch */
  1048. if (current_pc == 0x0)
  1049. {
  1050. arm_simulate_step(target, NULL);
  1051. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1052. }
  1053. /* the front-end may request us not to handle breakpoints */
  1054. if (handle_breakpoints)
  1055. {
  1056. if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
  1057. {
  1058. uint32_t next_pc;
  1059. /* there's a breakpoint at the current PC, we have to step over it */
  1060. LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
  1061. xscale_unset_breakpoint(target, breakpoint);
  1062. /* calculate PC of next instruction */
  1063. if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
  1064. {
  1065. uint32_t current_opcode;
  1066. target_read_u32(target, current_pc, &current_opcode);
  1067. LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
  1068. }
  1069. LOG_DEBUG("enable single-step");
  1070. xscale_enable_single_step(target, next_pc);
  1071. /* restore banked registers */
  1072. xscale_restore_context(target);
  1073. /* send resume request (command 0x30 or 0x31)
  1074. * clean the trace buffer if it is to be enabled (0x62) */
  1075. if (xscale->trace.buffer_enabled)
  1076. {
  1077. xscale_send_u32(target, 0x62);
  1078. xscale_send_u32(target, 0x31);
  1079. }
  1080. else
  1081. xscale_send_u32(target, 0x30);
  1082. /* send CPSR */
  1083. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1084. LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1085. for (i = 7; i >= 0; i--)
  1086. {
  1087. /* send register */
  1088. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1089. LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1090. }
  1091. /* send PC */
  1092. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1093. LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1094. /* wait for and process debug entry */
  1095. xscale_debug_entry(target);
  1096. LOG_DEBUG("disable single-step");
  1097. xscale_disable_single_step(target);
  1098. LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
  1099. xscale_set_breakpoint(target, breakpoint);
  1100. }
  1101. }
  1102. /* enable any pending breakpoints and watchpoints */
  1103. xscale_enable_breakpoints(target);
  1104. xscale_enable_watchpoints(target);
  1105. /* restore banked registers */
  1106. xscale_restore_context(target);
  1107. /* send resume request (command 0x30 or 0x31)
  1108. * clean the trace buffer if it is to be enabled (0x62) */
  1109. if (xscale->trace.buffer_enabled)
  1110. {
  1111. xscale_send_u32(target, 0x62);
  1112. xscale_send_u32(target, 0x31);
  1113. }
  1114. else
  1115. xscale_send_u32(target, 0x30);
  1116. /* send CPSR */
  1117. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1118. LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1119. for (i = 7; i >= 0; i--)
  1120. {
  1121. /* send register */
  1122. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1123. LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1124. }
  1125. /* send PC */
  1126. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1127. LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1128. target->debug_reason = DBG_REASON_NOTHALTED;
  1129. if (!debug_execution)
  1130. {
  1131. /* registers are now invalid */
  1132. armv4_5_invalidate_core_regs(target);
  1133. target->state = TARGET_RUNNING;
  1134. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  1135. }
  1136. else
  1137. {
  1138. target->state = TARGET_DEBUG_RUNNING;
  1139. target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
  1140. }
  1141. LOG_DEBUG("target resumed");
  1142. xscale->handler_running = 1;
  1143. return ERROR_OK;
  1144. }
  1145. static int xscale_step_inner(struct target_s *target, int current,
  1146. uint32_t address, int handle_breakpoints)
  1147. {
  1148. armv4_5_common_t *armv4_5 = target->arch_info;
  1149. xscale_common_t *xscale = armv4_5->arch_info;
  1150. uint32_t next_pc;
  1151. int retval;
  1152. int i;
  1153. target->debug_reason = DBG_REASON_SINGLESTEP;
  1154. /* calculate PC of next instruction */
  1155. if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
  1156. {
  1157. uint32_t current_opcode, current_pc;
  1158. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1159. target_read_u32(target, current_pc, &current_opcode);
  1160. LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
  1161. return retval;
  1162. }
  1163. LOG_DEBUG("enable single-step");
  1164. if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
  1165. return retval;
  1166. /* restore banked registers */
  1167. if ((retval = xscale_restore_context(target)) != ERROR_OK)
  1168. return retval;
  1169. /* send resume request (command 0x30 or 0x31)
  1170. * clean the trace buffer if it is to be enabled (0x62) */
  1171. if (xscale->trace.buffer_enabled)
  1172. {
  1173. if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
  1174. return retval;
  1175. if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
  1176. return retval;
  1177. }
  1178. else
  1179. if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
  1180. return retval;
  1181. /* send CPSR */
  1182. if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32))) != ERROR_OK)
  1183. return retval;
  1184. LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1185. for (i = 7; i >= 0; i--)
  1186. {
  1187. /* send register */
  1188. if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
  1189. return retval;
  1190. LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1191. }
  1192. /* send PC */
  1193. if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
  1194. return retval;
  1195. LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1196. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  1197. /* registers are now invalid */
  1198. if ((retval = armv4_5_invalidate_core_regs(target)) != ERROR_OK)
  1199. return retval;
  1200. /* wait for and process debug entry */
  1201. if ((retval = xscale_debug_entry(target)) != ERROR_OK)
  1202. return retval;
  1203. LOG_DEBUG("disable single-step");
  1204. if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
  1205. return retval;
  1206. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  1207. return ERROR_OK;
  1208. }
  1209. static int xscale_step(struct target_s *target, int current,
  1210. uint32_t address, int handle_breakpoints)
  1211. {
  1212. armv4_5_common_t *armv4_5 = target->arch_info;
  1213. breakpoint_t *breakpoint = target->breakpoints;
  1214. uint32_t current_pc;
  1215. int retval;
  1216. if (target->state != TARGET_HALTED)
  1217. {
  1218. LOG_WARNING("target not halted");
  1219. return ERROR_TARGET_NOT_HALTED;
  1220. }
  1221. /* current = 1: continue on current pc, otherwise continue at <address> */
  1222. if (!current)
  1223. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
  1224. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1225. /* if we're at the reset vector, we have to simulate the step */
  1226. if (current_pc == 0x0)
  1227. {
  1228. if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
  1229. return retval;
  1230. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1231. target->debug_reason = DBG_REASON_SINGLESTEP;
  1232. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  1233. return ERROR_OK;
  1234. }
  1235. /* the front-end may request us not to handle breakpoints */
  1236. if (handle_breakpoints)
  1237. if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
  1238. {
  1239. if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
  1240. return retval;
  1241. }
  1242. retval = xscale_step_inner(target, current, address, handle_breakpoints);
  1243. if (breakpoint)
  1244. {
  1245. xscale_set_breakpoint(target, breakpoint);
  1246. }
  1247. LOG_DEBUG("target stepped");
  1248. return ERROR_OK;
  1249. }
  1250. static int xscale_assert_reset(target_t *target)
  1251. {
  1252. armv4_5_common_t *armv4_5 = target->arch_info;
  1253. xscale_common_t *xscale = armv4_5->arch_info;
  1254. LOG_DEBUG("target->state: %s",
  1255. target_state_name(target));
  1256. /* select DCSR instruction (set endstate to R-T-I to ensure we don't
  1257. * end up in T-L-R, which would reset JTAG)
  1258. */
  1259. jtag_set_end_state(TAP_IDLE);
  1260. xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dcsr);
  1261. /* set Hold reset, Halt mode and Trap Reset */
  1262. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
  1263. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
  1264. xscale_write_dcsr(target, 1, 0);
  1265. /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
  1266. xscale_jtag_set_instr(xscale->jtag_info.tap, 0x7f);
  1267. jtag_execute_queue();
  1268. /* assert reset */
  1269. jtag_add_reset(0, 1);
  1270. /* sleep 1ms, to be sure we fulfill any requirements */
  1271. jtag_add_sleep(1000);
  1272. jtag_execute_queue();
  1273. target->state = TARGET_RESET;
  1274. if (target->reset_halt)
  1275. {
  1276. int retval;
  1277. if ((retval = target_halt(target)) != ERROR_OK)
  1278. return retval;
  1279. }
  1280. return ERROR_OK;
  1281. }
  1282. static int xscale_deassert_reset(target_t *target)
  1283. {
  1284. armv4_5_common_t *armv4_5 = target->arch_info;
  1285. xscale_common_t *xscale = armv4_5->arch_info;
  1286. fileio_t debug_handler;
  1287. uint32_t address;
  1288. uint32_t binary_size;
  1289. uint32_t buf_cnt;
  1290. uint32_t i;
  1291. int retval;
  1292. breakpoint_t *breakpoint = target->breakpoints;
  1293. LOG_DEBUG("-");
  1294. xscale->ibcr_available = 2;
  1295. xscale->ibcr0_used = 0;
  1296. xscale->ibcr1_used = 0;
  1297. xscale->dbr_available = 2;
  1298. xscale->dbr0_used = 0;
  1299. xscale->dbr1_used = 0;
  1300. /* mark all hardware breakpoints as unset */
  1301. while (breakpoint)
  1302. {
  1303. if (breakpoint->type == BKPT_HARD)
  1304. {
  1305. breakpoint->set = 0;
  1306. }
  1307. breakpoint = breakpoint->next;
  1308. }
  1309. if (!xscale->handler_installed)
  1310. {
  1311. /* release SRST */
  1312. jtag_add_reset(0, 0);
  1313. /* wait 300ms; 150 and 100ms were not enough */
  1314. jtag_add_sleep(300*1000);
  1315. jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
  1316. jtag_execute_queue();
  1317. /* set Hold reset, Halt mode and Trap Reset */
  1318. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
  1319. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
  1320. xscale_write_dcsr(target, 1, 0);
  1321. /* Load debug handler */
  1322. if (fileio_open(&debug_handler, "xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
  1323. {
  1324. return ERROR_OK;
  1325. }
  1326. if ((binary_size = debug_handler.size) % 4)
  1327. {
  1328. LOG_ERROR("debug_handler.bin: size not a multiple of 4");
  1329. exit(-1);
  1330. }
  1331. if (binary_size > 0x800)
  1332. {
  1333. LOG_ERROR("debug_handler.bin: larger than 2kb");
  1334. exit(-1);
  1335. }
  1336. binary_size = CEIL(binary_size, 32) * 32;
  1337. address = xscale->handler_address;
  1338. while (binary_size > 0)
  1339. {
  1340. uint32_t cache_line[8];
  1341. uint8_t buffer[32];
  1342. if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
  1343. {
  1344. }
  1345. for (i = 0; i < buf_cnt; i += 4)
  1346. {
  1347. /* convert LE buffer to host-endian uint32_t */
  1348. cache_line[i / 4] = le_to_h_u32(&buffer[i]);
  1349. }
  1350. for (; i < 32; i += 4)
  1351. {
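/* pad the partial cache line with 'mov r8, r8' (0xe1a08008), a harmless filler instruction */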
  1352. cache_line[i / 4] = 0xe1a08008;
  1353. }
  1354. /* only load addresses other than the reset vectors */
  1355. if ((address % 0x400) != 0x0)
  1356. {
  1357. xscale_load_ic(target, address, cache_line);
  1358. }
  1359. address += buf_cnt;
  1360. binary_size -= buf_cnt;
  1361. };
  1362. xscale_load_ic(target, 0x0, xscale->low_vectors);
  1363. xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
  1364. jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));
  1365. jtag_add_sleep(100000);
  1366. /* set Hold reset, Halt mode and Trap Reset */
  1367. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
  1368. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
  1369. xscale_write_dcsr(target, 1, 0);
  1370. /* clear Hold reset to let the target run (should enter debug handler) */
  1371. xscale_write_dcsr(target, 0, 1);
  1372. target->state = TARGET_RUNNING;
  1373. if (!target->reset_halt)
  1374. {
  1375. jtag_add_sleep(10000);
  1376. /* we should have entered debug now */
  1377. xscale_debug_entry(target);
  1378. target->state = TARGET_HALTED;
  1379. /* resume the target */
  1380. xscale_resume(target, 1, 0x0, 1, 0);
  1381. }
  1382. fileio_close(&debug_handler);
  1383. }
  1384. else
  1385. {
  1386. jtag_add_reset(0, 0);
  1387. }
  1388. return ERROR_OK;
  1389. }
  1390. static int xscale_read_core_reg(struct target_s *target, int num,
  1391. enum armv4_5_mode mode)
  1392. {
  1393. LOG_ERROR("not implemented");
  1394. return ERROR_OK;
  1395. }
  1396. static int xscale_write_core_reg(struct target_s *target, int num,
  1397. enum armv4_5_mode mode, uint32_t value)
  1398. {
  1399. LOG_ERROR("not implemented");
  1400. return ERROR_OK;
  1401. }
  1402. static int xscale_full_context(target_t *target)
  1403. {
  1404. armv4_5_common_t *armv4_5 = target->arch_info;
  1405. uint32_t *buffer;
  1406. int i, j;
  1407. LOG_DEBUG("-");
  1408. if (target->state != TARGET_HALTED)
  1409. {
  1410. LOG_WARNING("target not halted");
  1411. return ERROR_TARGET_NOT_HALTED;
  1412. }
  1413. buffer = malloc(4 * 8);
  1414. /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
  1415. * we can't enter User mode on an XScale (unpredictable),
  1416. * but User shares registers with SYS
  1417. */
  1418. for (i = 1; i < 7; i++)
  1419. {
  1420. int valid = 1;
  1421. /* check if there are invalid registers in the current mode
  1422. */
  1423. for (j = 0; j <= 16; j++)
  1424. {
  1425. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
  1426. valid = 0;
  1427. }
  1428. if (!valid)
  1429. {
  1430. uint32_t tmp_cpsr;
  1431. /* request banked registers */
  1432. xscale_send_u32(target, 0x0);
  1433. tmp_cpsr = 0x0;
  1434. tmp_cpsr |= armv4_5_number_to_mode(i);
  1435. tmp_cpsr |= 0xc0; /* I/F bits */
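/* command 0x0 asks the debug handler for banked registers; the CPSR value sent
 * next selects which mode's bank, with IRQ/FIQ masked via the I/F bits */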
  1436. /* send CPSR for desired mode */
  1437. xscale_send_u32(target, tmp_cpsr);
  1438. /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
  1439. if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
  1440. {
  1441. xscale_receive(target, buffer, 8);
  1442. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32, buffer[7]);
  1443. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
  1444. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
  1445. }
  1446. else
  1447. {
  1448. xscale_receive(target, buffer, 7);
  1449. }
  1450. /* move data from buffer to register cache */
  1451. for (j = 8; j <= 14; j++)
  1452. {
  1453. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
  1454. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
  1455. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
  1456. }
  1457. }
  1458. }
  1459. free(buffer);
  1460. return ERROR_OK;
  1461. }
  1462. static int xscale_restore_context(target_t *target)
  1463. {
  1464. armv4_5_common_t *armv4_5 = target->arch_info;
  1465. int i, j;
  1466. if (target->state != TARGET_HALTED)
  1467. {
  1468. LOG_WARNING("target not halted");
  1469. return ERROR_TARGET_NOT_HALTED;
  1470. }
  1471. /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
  1472. * we can't enter User mode on an XScale (unpredictable),
  1473. * but User shares registers with SYS
  1474. */
  1475. for (i = 1; i < 7; i++)
  1476. {
  1477. int dirty = 0;
1478. /* check if there are dirty registers in the current mode
1479. */
  1480. for (j = 8; j <= 14; j++)
  1481. {
  1482. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
  1483. dirty = 1;
  1484. }
  1485. /* if not USR/SYS, check if the SPSR needs to be written */
  1486. if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
  1487. {
  1488. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
  1489. dirty = 1;
  1490. }
  1491. if (dirty)
  1492. {
  1493. uint32_t tmp_cpsr;
  1494. /* send banked registers */
  1495. xscale_send_u32(target, 0x1);
  1496. tmp_cpsr = 0x0;
  1497. tmp_cpsr |= armv4_5_number_to_mode(i);
  1498. tmp_cpsr |= 0xc0; /* I/F bits */
  1499. /* send CPSR for desired mode */
  1500. xscale_send_u32(target, tmp_cpsr);
  1501. /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
  1502. for (j = 8; j <= 14; j++)
  1503. {
1504. xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
  1505. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
  1506. }
  1507. if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
  1508. {
1509. xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
  1510. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
  1511. }
  1512. }
  1513. }
  1514. return ERROR_OK;
  1515. }
  1516. static int xscale_read_memory(struct target_s *target, uint32_t address,
  1517. uint32_t size, uint32_t count, uint8_t *buffer)
  1518. {
  1519. armv4_5_common_t *armv4_5 = target->arch_info;
  1520. xscale_common_t *xscale = armv4_5->arch_info;
  1521. uint32_t *buf32;
  1522. uint32_t i;
  1523. int retval;
  1524. LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
  1525. if (target->state != TARGET_HALTED)
  1526. {
  1527. LOG_WARNING("target not halted");
  1528. return ERROR_TARGET_NOT_HALTED;
  1529. }
  1530. /* sanitize arguments */
  1531. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1532. return ERROR_INVALID_ARGUMENTS;
  1533. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1534. return ERROR_TARGET_UNALIGNED_ACCESS;
  1535. /* send memory read request (command 0x1n, n: access size) */
  1536. if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
  1537. return retval;
  1538. /* send base address for read request */
  1539. if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
  1540. return retval;
  1541. /* send number of requested data words */
  1542. if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
  1543. return retval;
  1544. /* receive data from target (count times 32-bit words in host endianness) */
  1545. buf32 = malloc(4 * count);
1546. if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1547. { free(buf32); return retval; } /* don't leak the receive buffer on error */
  1548. /* extract data from host-endian buffer into byte stream */
  1549. for (i = 0; i < count; i++)
  1550. {
  1551. switch (size)
  1552. {
  1553. case 4:
  1554. target_buffer_set_u32(target, buffer, buf32[i]);
  1555. buffer += 4;
  1556. break;
  1557. case 2:
  1558. target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
  1559. buffer += 2;
  1560. break;
  1561. case 1:
  1562. *buffer++ = buf32[i] & 0xff;
  1563. break;
  1564. default:
  1565. LOG_ERROR("should never get here");
  1566. exit(-1);
  1567. }
  1568. }
  1569. free(buf32);
  1570. /* examine DCSR, to see if Sticky Abort (SA) got set */
  1571. if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
  1572. return retval;
  1573. if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
  1574. {
  1575. /* clear SA bit */
  1576. if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
  1577. return retval;
  1578. return ERROR_TARGET_DATA_ABORT;
  1579. }
  1580. return ERROR_OK;
  1581. }
  1582. static int xscale_write_memory(struct target_s *target, uint32_t address,
  1583. uint32_t size, uint32_t count, uint8_t *buffer)
  1584. {
  1585. armv4_5_common_t *armv4_5 = target->arch_info;
  1586. xscale_common_t *xscale = armv4_5->arch_info;
  1587. int retval;
  1588. LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
  1589. if (target->state != TARGET_HALTED)
  1590. {
  1591. LOG_WARNING("target not halted");
  1592. return ERROR_TARGET_NOT_HALTED;
  1593. }
  1594. /* sanitize arguments */
  1595. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1596. return ERROR_INVALID_ARGUMENTS;
  1597. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1598. return ERROR_TARGET_UNALIGNED_ACCESS;
  1599. /* send memory write request (command 0x2n, n: access size) */
  1600. if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
  1601. return retval;
1602. /* send base address for write request */
  1603. if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
  1604. return retval;
1605. /* send number of requested data words to be written */
  1606. if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
  1607. return retval;
1608. /* the per-word transfer loop below is disabled; data is sent to the target in a single burst via xscale_send() */
  1609. #if 0
  1610. for (i = 0; i < count; i++)
  1611. {
  1612. switch (size)
  1613. {
  1614. case 4:
  1615. value = target_buffer_get_u32(target, buffer);
  1616. xscale_send_u32(target, value);
  1617. buffer += 4;
  1618. break;
  1619. case 2:
  1620. value = target_buffer_get_u16(target, buffer);
  1621. xscale_send_u32(target, value);
  1622. buffer += 2;
  1623. break;
  1624. case 1:
  1625. value = *buffer;
  1626. xscale_send_u32(target, value);
  1627. buffer += 1;
  1628. break;
  1629. default:
  1630. LOG_ERROR("should never get here");
  1631. exit(-1);
  1632. }
  1633. }
  1634. #endif
  1635. if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
  1636. return retval;
  1637. /* examine DCSR, to see if Sticky Abort (SA) got set */
  1638. if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
  1639. return retval;
  1640. if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
  1641. {
  1642. /* clear SA bit */
  1643. if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
  1644. return retval;
  1645. return ERROR_TARGET_DATA_ABORT;
  1646. }
  1647. return ERROR_OK;
  1648. }
  1649. static int xscale_bulk_write_memory(target_t *target, uint32_t address,
  1650. uint32_t count, uint8_t *buffer)
  1651. {
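/* no dedicated bulk transfer path; reuse the word-wide debug handler write path */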
  1652. return xscale_write_memory(target, address, 4, count, buffer);
  1653. }
  1654. static uint32_t xscale_get_ttb(target_t *target)
  1655. {
  1656. armv4_5_common_t *armv4_5 = target->arch_info;
  1657. xscale_common_t *xscale = armv4_5->arch_info;
  1658. uint32_t ttb;
  1659. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
  1660. ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
  1661. return ttb;
  1662. }
  1663. static void xscale_disable_mmu_caches(target_t *target, int mmu,
  1664. int d_u_cache, int i_cache)
  1665. {
  1666. armv4_5_common_t *armv4_5 = target->arch_info;
  1667. xscale_common_t *xscale = armv4_5->arch_info;
  1668. uint32_t cp15_control;
  1669. /* read cp15 control register */
  1670. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  1671. cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  1672. if (mmu)
  1673. cp15_control &= ~0x1U;
  1674. if (d_u_cache)
  1675. {
  1676. /* clean DCache */
  1677. xscale_send_u32(target, 0x50);
  1678. xscale_send_u32(target, xscale->cache_clean_address);
  1679. /* invalidate DCache */
  1680. xscale_send_u32(target, 0x51);
  1681. cp15_control &= ~0x4U;
  1682. }
  1683. if (i_cache)
  1684. {
  1685. /* invalidate ICache */
  1686. xscale_send_u32(target, 0x52);
  1687. cp15_control &= ~0x1000U;
  1688. }
  1689. /* write new cp15 control register */
  1690. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
  1691. /* execute cpwait to ensure outstanding operations complete */
  1692. xscale_send_u32(target, 0x53);
  1693. }
  1694. static void xscale_enable_mmu_caches(target_t *target, int mmu,
  1695. int d_u_cache, int i_cache)
  1696. {
  1697. armv4_5_common_t *armv4_5 = target->arch_info;
  1698. xscale_common_t *xscale = armv4_5->arch_info;
  1699. uint32_t cp15_control;
  1700. /* read cp15 control register */
  1701. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  1702. cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  1703. if (mmu)
  1704. cp15_control |= 0x1U;
  1705. if (d_u_cache)
  1706. cp15_control |= 0x4U;
  1707. if (i_cache)
  1708. cp15_control |= 0x1000U;
  1709. /* write new cp15 control register */
  1710. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
  1711. /* execute cpwait to ensure outstanding operations complete */
  1712. xscale_send_u32(target, 0x53);
  1713. }
  1714. static int xscale_set_breakpoint(struct target_s *target,
  1715. breakpoint_t *breakpoint)
  1716. {
  1717. int retval;
  1718. armv4_5_common_t *armv4_5 = target->arch_info;
  1719. xscale_common_t *xscale = armv4_5->arch_info;
  1720. if (target->state != TARGET_HALTED)
  1721. {
  1722. LOG_WARNING("target not halted");
  1723. return ERROR_TARGET_NOT_HALTED;
  1724. }
  1725. if (breakpoint->set)
  1726. {
  1727. LOG_WARNING("breakpoint already set");
  1728. return ERROR_OK;
  1729. }
  1730. if (breakpoint->type == BKPT_HARD)
  1731. {
  1732. uint32_t value = breakpoint->address | 1;
  1733. if (!xscale->ibcr0_used)
  1734. {
  1735. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
  1736. xscale->ibcr0_used = 1;
  1737. breakpoint->set = 1; /* breakpoint set on first breakpoint register */
  1738. }
  1739. else if (!xscale->ibcr1_used)
  1740. {
  1741. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
  1742. xscale->ibcr1_used = 1;
  1743. breakpoint->set = 2; /* breakpoint set on second breakpoint register */
  1744. }
  1745. else
  1746. {
  1747. LOG_ERROR("BUG: no hardware comparator available");
  1748. return ERROR_OK;
  1749. }
  1750. }
  1751. else if (breakpoint->type == BKPT_SOFT)
  1752. {
  1753. if (breakpoint->length == 4)
  1754. {
  1755. /* keep the original instruction in target endianness */
  1756. if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
  1757. {
  1758. return retval;
  1759. }
1760. /* write the ARM breakpoint instruction in target endianness (xscale->arm_bkpt is host endian) */
  1761. if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
  1762. {
  1763. return retval;
  1764. }
  1765. }
  1766. else
  1767. {
  1768. /* keep the original instruction in target endianness */
  1769. if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
  1770. {
  1771. return retval;
  1772. }
1773. /* write the Thumb breakpoint instruction in target endianness (xscale->thumb_bkpt is host endian) */
1774. if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
  1775. {
  1776. return retval;
  1777. }
  1778. }
  1779. breakpoint->set = 1;
  1780. }
  1781. return ERROR_OK;
  1782. }
  1783. static int xscale_add_breakpoint(struct target_s *target,
  1784. breakpoint_t *breakpoint)
  1785. {
  1786. armv4_5_common_t *armv4_5 = target->arch_info;
  1787. xscale_common_t *xscale = armv4_5->arch_info;
  1788. if (target->state != TARGET_HALTED)
  1789. {
  1790. LOG_WARNING("target not halted");
  1791. return ERROR_TARGET_NOT_HALTED;
  1792. }
  1793. if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
  1794. {
  1795. LOG_INFO("no breakpoint unit available for hardware breakpoint");
  1796. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1797. }
  1798. if ((breakpoint->length != 2) && (breakpoint->length != 4))
  1799. {
  1800. LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
  1801. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1802. }
  1803. if (breakpoint->type == BKPT_HARD)
  1804. {
  1805. xscale->ibcr_available--;
  1806. }
  1807. return ERROR_OK;
  1808. }
  1809. static int xscale_unset_breakpoint(struct target_s *target,
  1810. breakpoint_t *breakpoint)
  1811. {
  1812. int retval;
  1813. armv4_5_common_t *armv4_5 = target->arch_info;
  1814. xscale_common_t *xscale = armv4_5->arch_info;
  1815. if (target->state != TARGET_HALTED)
  1816. {
  1817. LOG_WARNING("target not halted");
  1818. return ERROR_TARGET_NOT_HALTED;
  1819. }
  1820. if (!breakpoint->set)
  1821. {
  1822. LOG_WARNING("breakpoint not set");
  1823. return ERROR_OK;
  1824. }
  1825. if (breakpoint->type == BKPT_HARD)
  1826. {
  1827. if (breakpoint->set == 1)
  1828. {
  1829. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
  1830. xscale->ibcr0_used = 0;
  1831. }
  1832. else if (breakpoint->set == 2)
  1833. {
  1834. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
  1835. xscale->ibcr1_used = 0;
  1836. }
  1837. breakpoint->set = 0;
  1838. }
  1839. else
  1840. {
  1841. /* restore original instruction (kept in target endianness) */
  1842. if (breakpoint->length == 4)
  1843. {
  1844. if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
  1845. {
  1846. return retval;
  1847. }
  1848. }
  1849. else
  1850. {
  1851. if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
  1852. {
  1853. return retval;
  1854. }
  1855. }
  1856. breakpoint->set = 0;
  1857. }
  1858. return ERROR_OK;
  1859. }
  1860. static int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  1861. {
  1862. armv4_5_common_t *armv4_5 = target->arch_info;
  1863. xscale_common_t *xscale = armv4_5->arch_info;
  1864. if (target->state != TARGET_HALTED)
  1865. {
  1866. LOG_WARNING("target not halted");
  1867. return ERROR_TARGET_NOT_HALTED;
  1868. }
  1869. if (breakpoint->set)
  1870. {
  1871. xscale_unset_breakpoint(target, breakpoint);
  1872. }
  1873. if (breakpoint->type == BKPT_HARD)
  1874. xscale->ibcr_available++;
  1875. return ERROR_OK;
  1876. }
  1877. static int xscale_set_watchpoint(struct target_s *target,
  1878. watchpoint_t *watchpoint)
  1879. {
  1880. armv4_5_common_t *armv4_5 = target->arch_info;
  1881. xscale_common_t *xscale = armv4_5->arch_info;
  1882. uint8_t enable = 0;
  1883. reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
  1884. uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
  1885. if (target->state != TARGET_HALTED)
  1886. {
  1887. LOG_WARNING("target not halted");
  1888. return ERROR_TARGET_NOT_HALTED;
  1889. }
  1890. xscale_get_reg(dbcon);
  1891. switch (watchpoint->rw)
  1892. {
  1893. case WPT_READ:
  1894. enable = 0x3;
  1895. break;
  1896. case WPT_ACCESS:
  1897. enable = 0x2;
  1898. break;
  1899. case WPT_WRITE:
  1900. enable = 0x1;
  1901. break;
  1902. default:
  1903. LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
  1904. }
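/* DBCON carries a two-bit enable field per data breakpoint register: DBR0 in bits 1:0, DBR1 in bits 3:2 */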
  1905. if (!xscale->dbr0_used)
  1906. {
  1907. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
  1908. dbcon_value |= enable;
  1909. xscale_set_reg_u32(dbcon, dbcon_value);
  1910. watchpoint->set = 1;
  1911. xscale->dbr0_used = 1;
  1912. }
  1913. else if (!xscale->dbr1_used)
  1914. {
  1915. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
  1916. dbcon_value |= enable << 2;
  1917. xscale_set_reg_u32(dbcon, dbcon_value);
  1918. watchpoint->set = 2;
  1919. xscale->dbr1_used = 1;
  1920. }
  1921. else
  1922. {
  1923. LOG_ERROR("BUG: no hardware comparator available");
  1924. return ERROR_OK;
  1925. }
  1926. return ERROR_OK;
  1927. }
  1928. static int xscale_add_watchpoint(struct target_s *target,
  1929. watchpoint_t *watchpoint)
  1930. {
  1931. armv4_5_common_t *armv4_5 = target->arch_info;
  1932. xscale_common_t *xscale = armv4_5->arch_info;
  1933. if (target->state != TARGET_HALTED)
  1934. {
  1935. LOG_WARNING("target not halted");
  1936. return ERROR_TARGET_NOT_HALTED;
  1937. }
  1938. if (xscale->dbr_available < 1)
  1939. {
  1940. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1941. }
  1942. if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
  1943. {
  1944. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1945. }
  1946. xscale->dbr_available--;
  1947. return ERROR_OK;
  1948. }
  1949. static int xscale_unset_watchpoint(struct target_s *target,
  1950. watchpoint_t *watchpoint)
  1951. {
  1952. armv4_5_common_t *armv4_5 = target->arch_info;
  1953. xscale_common_t *xscale = armv4_5->arch_info;
  1954. reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
  1955. uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
  1956. if (target->state != TARGET_HALTED)
  1957. {
  1958. LOG_WARNING("target not halted");
  1959. return ERROR_TARGET_NOT_HALTED;
  1960. }
  1961. if (!watchpoint->set)
  1962. {
  1963. LOG_WARNING("breakpoint not set");
  1964. return ERROR_OK;
  1965. }
  1966. if (watchpoint->set == 1)
  1967. {
  1968. dbcon_value &= ~0x3;
  1969. xscale_set_reg_u32(dbcon, dbcon_value);
  1970. xscale->dbr0_used = 0;
  1971. }
  1972. else if (watchpoint->set == 2)
  1973. {
  1974. dbcon_value &= ~0xc;
  1975. xscale_set_reg_u32(dbcon, dbcon_value);
  1976. xscale->dbr1_used = 0;
  1977. }
  1978. watchpoint->set = 0;
  1979. return ERROR_OK;
  1980. }
  1981. static int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  1982. {
  1983. armv4_5_common_t *armv4_5 = target->arch_info;
  1984. xscale_common_t *xscale = armv4_5->arch_info;
  1985. if (target->state != TARGET_HALTED)
  1986. {
  1987. LOG_WARNING("target not halted");
  1988. return ERROR_TARGET_NOT_HALTED;
  1989. }
  1990. if (watchpoint->set)
  1991. {
  1992. xscale_unset_watchpoint(target, watchpoint);
  1993. }
  1994. xscale->dbr_available++;
  1995. return ERROR_OK;
  1996. }
  1997. static int xscale_get_reg(reg_t *reg)
  1998. {
  1999. xscale_reg_t *arch_info = reg->arch_info;
  2000. target_t *target = arch_info->target;
  2001. armv4_5_common_t *armv4_5 = target->arch_info;
  2002. xscale_common_t *xscale = armv4_5->arch_info;
  2003. /* DCSR, TX and RX are accessible via JTAG */
  2004. if (strcmp(reg->name, "XSCALE_DCSR") == 0)
  2005. {
  2006. return xscale_read_dcsr(arch_info->target);
  2007. }
  2008. else if (strcmp(reg->name, "XSCALE_TX") == 0)
  2009. {
  2010. /* 1 = consume register content */
  2011. return xscale_read_tx(arch_info->target, 1);
  2012. }
  2013. else if (strcmp(reg->name, "XSCALE_RX") == 0)
  2014. {
  2015. /* can't read from RX register (host -> debug handler) */
  2016. return ERROR_OK;
  2017. }
  2018. else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
  2019. {
  2020. /* can't (explicitly) read from TXRXCTRL register */
  2021. return ERROR_OK;
  2022. }
2023. else /* Other DBG registers have to be transferred by the debug handler */
  2024. {
  2025. /* send CP read request (command 0x40) */
  2026. xscale_send_u32(target, 0x40);
  2027. /* send CP register number */
  2028. xscale_send_u32(target, arch_info->dbg_handler_number);
  2029. /* read register value */
  2030. xscale_read_tx(target, 1);
  2031. buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
  2032. reg->dirty = 0;
  2033. reg->valid = 1;
  2034. }
  2035. return ERROR_OK;
  2036. }
  2037. static int xscale_set_reg(reg_t *reg, uint8_t* buf)
  2038. {
  2039. xscale_reg_t *arch_info = reg->arch_info;
  2040. target_t *target = arch_info->target;
  2041. armv4_5_common_t *armv4_5 = target->arch_info;
  2042. xscale_common_t *xscale = armv4_5->arch_info;
  2043. uint32_t value = buf_get_u32(buf, 0, 32);
  2044. /* DCSR, TX and RX are accessible via JTAG */
  2045. if (strcmp(reg->name, "XSCALE_DCSR") == 0)
  2046. {
  2047. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
  2048. return xscale_write_dcsr(arch_info->target, -1, -1);
  2049. }
  2050. else if (strcmp(reg->name, "XSCALE_RX") == 0)
  2051. {
  2052. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
  2053. return xscale_write_rx(arch_info->target);
  2054. }
  2055. else if (strcmp(reg->name, "XSCALE_TX") == 0)
  2056. {
  2057. /* can't write to TX register (debug-handler -> host) */
  2058. return ERROR_OK;
  2059. }
  2060. else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
  2061. {
  2062. /* can't (explicitly) write to TXRXCTRL register */
  2063. return ERROR_OK;
  2064. }
2065. else /* Other DBG registers have to be transferred by the debug handler */
  2066. {
  2067. /* send CP write request (command 0x41) */
  2068. xscale_send_u32(target, 0x41);
  2069. /* send CP register number */
  2070. xscale_send_u32(target, arch_info->dbg_handler_number);
  2071. /* send CP register value */
  2072. xscale_send_u32(target, value);
  2073. buf_set_u32(reg->value, 0, 32, value);
  2074. }
  2075. return ERROR_OK;
  2076. }
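/* write DCSR through the running debug handler (CP write, command 0x41) rather than directly over JTAG */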
  2077. static int xscale_write_dcsr_sw(target_t *target, uint32_t value)
  2078. {
  2079. /* get pointers to arch-specific information */
  2080. armv4_5_common_t *armv4_5 = target->arch_info;
  2081. xscale_common_t *xscale = armv4_5->arch_info;
  2082. reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
  2083. xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
  2084. /* send CP write request (command 0x41) */
  2085. xscale_send_u32(target, 0x41);
  2086. /* send CP register number */
  2087. xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
  2088. /* send CP register value */
  2089. xscale_send_u32(target, value);
  2090. buf_set_u32(dcsr->value, 0, 32, value);
  2091. return ERROR_OK;
  2092. }
  2093. static int xscale_read_trace(target_t *target)
  2094. {
  2095. /* get pointers to arch-specific information */
  2096. armv4_5_common_t *armv4_5 = target->arch_info;
  2097. xscale_common_t *xscale = armv4_5->arch_info;
  2098. xscale_trace_data_t **trace_data_p;
  2099. /* 258 words from debug handler
  2100. * 256 trace buffer entries
  2101. * 2 checkpoint addresses
  2102. */
  2103. uint32_t trace_buffer[258];
  2104. int is_address[256];
  2105. int i, j;
  2106. if (target->state != TARGET_HALTED)
  2107. {
  2108. LOG_WARNING("target must be stopped to read trace data");
  2109. return ERROR_TARGET_NOT_HALTED;
  2110. }
  2111. /* send read trace buffer command (command 0x61) */
  2112. xscale_send_u32(target, 0x61);
  2113. /* receive trace buffer content */
  2114. xscale_receive(target, trace_buffer, 258);
  2115. /* parse buffer backwards to identify address entries */
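/* indirect branch (0x9n) and checkpointed indirect branch (0xdn) messages are each
 * preceded by four entries holding the branch target, one byte per entry */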
  2116. for (i = 255; i >= 0; i--)
  2117. {
  2118. is_address[i] = 0;
  2119. if (((trace_buffer[i] & 0xf0) == 0x90) ||
  2120. ((trace_buffer[i] & 0xf0) == 0xd0))
  2121. {
1122. if (i > 0)
1123. is_address[--i] = 1;
1124. if (i > 0)
1125. is_address[--i] = 1;
1126. if (i > 0)
1127. is_address[--i] = 1;
1128. if (i > 0)
1129. is_address[--i] = 1;
  2130. }
  2131. }
  2132. /* search first non-zero entry */
  2133. for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
  2134. ;
  2135. if (j == 256)
  2136. {
  2137. LOG_DEBUG("no trace data collected");
  2138. return ERROR_XSCALE_NO_TRACE_DATA;
  2139. }
  2140. for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
  2141. ;
  2142. *trace_data_p = malloc(sizeof(xscale_trace_data_t));
  2143. (*trace_data_p)->next = NULL;
  2144. (*trace_data_p)->chkpt0 = trace_buffer[256];
  2145. (*trace_data_p)->chkpt1 = trace_buffer[257];
  2146. (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  2147. (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
  2148. (*trace_data_p)->depth = 256 - j;
  2149. for (i = j; i < 256; i++)
  2150. {
  2151. (*trace_data_p)->entries[i - j].data = trace_buffer[i];
  2152. if (is_address[i])
  2153. (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
  2154. else
  2155. (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
  2156. }
  2157. return ERROR_OK;
  2158. }
  2159. static int xscale_read_instruction(target_t *target,
  2160. arm_instruction_t *instruction)
  2161. {
  2162. /* get pointers to arch-specific information */
  2163. armv4_5_common_t *armv4_5 = target->arch_info;
  2164. xscale_common_t *xscale = armv4_5->arch_info;
  2165. int i;
  2166. int section = -1;
  2167. uint32_t size_read;
  2168. uint32_t opcode;
  2169. int retval;
  2170. if (!xscale->trace.image)
  2171. return ERROR_TRACE_IMAGE_UNAVAILABLE;
  2172. /* search for the section the current instruction belongs to */
  2173. for (i = 0; i < xscale->trace.image->num_sections; i++)
  2174. {
  2175. if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
  2176. (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
  2177. {
  2178. section = i;
  2179. break;
  2180. }
  2181. }
  2182. if (section == -1)
  2183. {
  2184. /* current instruction couldn't be found in the image */
  2185. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2186. }
  2187. if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
  2188. {
  2189. uint8_t buf[4];
  2190. if ((retval = image_read_section(xscale->trace.image, section,
  2191. xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
  2192. 4, buf, &size_read)) != ERROR_OK)
  2193. {
  2194. LOG_ERROR("error while reading instruction: %i", retval);
  2195. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2196. }
  2197. opcode = target_buffer_get_u32(target, buf);
  2198. arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
  2199. }
  2200. else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
  2201. {
  2202. uint8_t buf[2];
  2203. if ((retval = image_read_section(xscale->trace.image, section,
  2204. xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
  2205. 2, buf, &size_read)) != ERROR_OK)
  2206. {
  2207. LOG_ERROR("error while reading instruction: %i", retval);
  2208. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2209. }
  2210. opcode = target_buffer_get_u16(target, buf);
  2211. thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
  2212. }
  2213. else
  2214. {
  2215. LOG_ERROR("BUG: unknown core state encountered");
  2216. exit(-1);
  2217. }
  2218. return ERROR_OK;
  2219. }
  2220. static int xscale_branch_address(xscale_trace_data_t *trace_data,
  2221. int i, uint32_t *target)
  2222. {
1223. /* if there are fewer than four entries prior to the indirect branch message
1224. * we can't extract the address */
  2225. if (i < 4)
  2226. {
  2227. return -1;
  2228. }
  2229. *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
  2230. (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
  2231. return 0;
  2232. }
  2233. static int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
  2234. {
  2235. /* get pointers to arch-specific information */
  2236. armv4_5_common_t *armv4_5 = target->arch_info;
  2237. xscale_common_t *xscale = armv4_5->arch_info;
  2238. int next_pc_ok = 0;
  2239. uint32_t next_pc = 0x0;
  2240. xscale_trace_data_t *trace_data = xscale->trace.data;
  2241. int retval;
  2242. while (trace_data)
  2243. {
  2244. int i, chkpt;
  2245. int rollover;
  2246. int branch;
  2247. int exception;
  2248. xscale->trace.core_state = ARMV4_5_STATE_ARM;
  2249. chkpt = 0;
  2250. rollover = 0;
  2251. for (i = 0; i < trace_data->depth; i++)
  2252. {
  2253. next_pc_ok = 0;
  2254. branch = 0;
  2255. exception = 0;
  2256. if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
  2257. continue;
  2258. switch ((trace_data->entries[i].data & 0xf0) >> 4)
  2259. {
  2260. case 0: /* Exceptions */
  2261. case 1:
  2262. case 2:
  2263. case 3:
  2264. case 4:
  2265. case 5:
  2266. case 6:
  2267. case 7:
  2268. exception = (trace_data->entries[i].data & 0x70) >> 4;
  2269. next_pc_ok = 1;
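/* the exception message's vector number times 4 gives the (low) exception vector address the core branched to */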
  2270. next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
  2271. command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
  2272. break;
  2273. case 8: /* Direct Branch */
  2274. branch = 1;
  2275. break;
  2276. case 9: /* Indirect Branch */
  2277. branch = 1;
  2278. if (xscale_branch_address(trace_data, i, &next_pc) == 0)
  2279. {
  2280. next_pc_ok = 1;
  2281. }
  2282. break;
  2283. case 13: /* Checkpointed Indirect Branch */
  2284. if (xscale_branch_address(trace_data, i, &next_pc) == 0)
  2285. {
  2286. next_pc_ok = 1;
  2287. if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
  2288. || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
  2289. LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
  2290. }
  2291. /* explicit fall-through */
  2292. case 12: /* Checkpointed Direct Branch */
  2293. branch = 1;
  2294. if (chkpt == 0)
  2295. {
  2296. next_pc_ok = 1;
  2297. next_pc = trace_data->chkpt0;
  2298. chkpt++;
  2299. }
  2300. else if (chkpt == 1)
  2301. {
  2302. next_pc_ok = 1;
  2303. next_pc = trace_data->chkpt0;
  2304. chkpt++;
  2305. }
  2306. else
  2307. {
  2308. LOG_WARNING("more than two checkpointed branches encountered");
  2309. }
  2310. break;
  2311. case 15: /* Roll-over */
  2312. rollover++;
  2313. continue;
  2314. default: /* Reserved */
  2315. command_print(cmd_ctx, "--- reserved trace message ---");
  2316. LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
  2317. return ERROR_OK;
  2318. }
  2319. if (xscale->trace.pc_ok)
  2320. {
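/* the low nibble of a trace message counts instructions executed since the previous message;
 * each roll-over message stands for another 16 instructions */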
  2321. int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
  2322. arm_instruction_t instruction;
  2323. if ((exception == 6) || (exception == 7))
  2324. {
  2325. /* IRQ or FIQ exception, no instruction executed */
  2326. executed -= 1;
  2327. }
  2328. while (executed-- >= 0)
  2329. {
  2330. if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
  2331. {
  2332. /* can't continue tracing with no image available */
  2333. if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
  2334. {
  2335. return retval;
  2336. }
  2337. else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
  2338. {
  2339. /* TODO: handle incomplete images */
  2340. }
  2341. }
  2342. /* a precise abort on a load to the PC is included in the incremental
  2343. * word count, other instructions causing data aborts are not included
  2344. */
  2345. if ((executed == 0) && (exception == 4)
  2346. && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
  2347. {
  2348. if ((instruction.type == ARM_LDM)
  2349. && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
  2350. {
  2351. executed--;
  2352. }
  2353. else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
  2354. && (instruction.info.load_store.Rd != 15))
  2355. {
  2356. executed--;
  2357. }
  2358. }
  2359. /* only the last instruction executed
  2360. * (the one that caused the control flow change)
  2361. * could be a taken branch
  2362. */
  2363. if (((executed == -1) && (branch == 1)) &&
  2364. (((instruction.type == ARM_B) ||
  2365. (instruction.type == ARM_BL) ||
  2366. (instruction.type == ARM_BLX)) &&
  2367. (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
  2368. {
  2369. xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
  2370. }
  2371. else
  2372. {
  2373. xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
  2374. }
  2375. command_print(cmd_ctx, "%s", instruction.text);
  2376. }
  2377. rollover = 0;
  2378. }
  2379. if (next_pc_ok)
  2380. {
  2381. xscale->trace.current_pc = next_pc;
  2382. xscale->trace.pc_ok = 1;
  2383. }
  2384. }
  2385. for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
  2386. {
  2387. arm_instruction_t instruction;
  2388. if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
  2389. {
  2390. /* can't continue tracing with no image available */
  2391. if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
  2392. {
  2393. return retval;
  2394. }
  2395. else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
  2396. {
  2397. /* TODO: handle incomplete images */
  2398. }
  2399. }
  2400. command_print(cmd_ctx, "%s", instruction.text);
  2401. }
  2402. trace_data = trace_data->next;
  2403. }
  2404. return ERROR_OK;
  2405. }
  2406. static void xscale_build_reg_cache(target_t *target)
  2407. {
  2408. /* get pointers to arch-specific information */
  2409. armv4_5_common_t *armv4_5 = target->arch_info;
  2410. xscale_common_t *xscale = armv4_5->arch_info;
  2411. reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
  2412. xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
  2413. int i;
  2414. int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
  2415. (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
  2416. armv4_5->core_cache = (*cache_p);
  2417. /* register a register arch-type for XScale dbg registers only once */
  2418. if (xscale_reg_arch_type == -1)
  2419. xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
  2420. (*cache_p)->next = malloc(sizeof(reg_cache_t));
  2421. cache_p = &(*cache_p)->next;
  2422. /* fill in values for the xscale reg cache */
  2423. (*cache_p)->name = "XScale registers";
  2424. (*cache_p)->next = NULL;
  2425. (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
  2426. (*cache_p)->num_regs = num_regs;
  2427. for (i = 0; i < num_regs; i++)
  2428. {
  2429. (*cache_p)->reg_list[i].name = xscale_reg_list[i];
  2430. (*cache_p)->reg_list[i].value = calloc(4, 1);
  2431. (*cache_p)->reg_list[i].dirty = 0;
  2432. (*cache_p)->reg_list[i].valid = 0;
  2433. (*cache_p)->reg_list[i].size = 32;
  2434. (*cache_p)->reg_list[i].bitfield_desc = NULL;
  2435. (*cache_p)->reg_list[i].num_bitfields = 0;
  2436. (*cache_p)->reg_list[i].arch_info = &arch_info[i];
  2437. (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
  2438. arch_info[i] = xscale_reg_arch_info[i];
  2439. arch_info[i].target = target;
  2440. }
  2441. xscale->reg_cache = (*cache_p);
  2442. }
  2443. static int xscale_init_target(struct command_context_s *cmd_ctx,
  2444. struct target_s *target)
  2445. {
  2446. return ERROR_OK;
  2447. }
  2448. static int xscale_quit(void)
  2449. {
  2450. jtag_add_runtest(100, TAP_RESET);
  2451. return ERROR_OK;
  2452. }
  2453. static int xscale_init_arch_info(target_t *target,
  2454. xscale_common_t *xscale, jtag_tap_t *tap, const char *variant)
  2455. {
  2456. armv4_5_common_t *armv4_5;
  2457. uint32_t high_reset_branch, low_reset_branch;
  2458. int i;
  2459. armv4_5 = &xscale->armv4_5_common;
2460. /* store architecture specific data (none so far) */
  2461. xscale->arch_info = NULL;
  2462. xscale->common_magic = XSCALE_COMMON_MAGIC;
  2463. /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
  2464. xscale->variant = strdup(variant);
  2465. /* prepare JTAG information for the new target */
  2466. xscale->jtag_info.tap = tap;
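/* JTAG instruction opcodes used to reach the debug registers (DBGRX, DBGTX, SELDCSR, LDIC) */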
  2467. xscale->jtag_info.dbgrx = 0x02;
  2468. xscale->jtag_info.dbgtx = 0x10;
  2469. xscale->jtag_info.dcsr = 0x09;
  2470. xscale->jtag_info.ldic = 0x07;
  2471. if ((strcmp(xscale->variant, "pxa250") == 0) ||
  2472. (strcmp(xscale->variant, "pxa255") == 0) ||
  2473. (strcmp(xscale->variant, "pxa26x") == 0))
  2474. {
  2475. xscale->jtag_info.ir_length = 5;
  2476. }
  2477. else if ((strcmp(xscale->variant, "pxa27x") == 0) ||
  2478. (strcmp(xscale->variant, "ixp42x") == 0) ||
  2479. (strcmp(xscale->variant, "ixp45x") == 0) ||
  2480. (strcmp(xscale->variant, "ixp46x") == 0))
  2481. {
  2482. xscale->jtag_info.ir_length = 7;
  2483. }
  2484. /* the debug handler isn't installed (and thus not running) at this time */
  2485. xscale->handler_installed = 0;
  2486. xscale->handler_running = 0;
  2487. xscale->handler_address = 0xfe000800;
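/* default debug handler location; adjustable via the 'xscale debug_handler' command */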
  2488. /* clear the vectors we keep locally for reference */
  2489. memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
  2490. memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
  2491. /* no user-specified vectors have been configured yet */
  2492. xscale->static_low_vectors_set = 0x0;
  2493. xscale->static_high_vectors_set = 0x0;
  2494. /* calculate branches to debug handler */
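/* ARM B-instruction offset encoding: (target - vector address - 8) >> 2 */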
  2495. low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
  2496. high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
  2497. xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
  2498. xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
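/* all other vectors default to a branch-to-self (24-bit offset 0xfffffe, i.e. -2 words) */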
  2499. for (i = 1; i <= 7; i++)
  2500. {
  2501. xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  2502. xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  2503. }
  2504. /* 64kB aligned region used for DCache cleaning */
  2505. xscale->cache_clean_address = 0xfffe0000;
  2506. xscale->hold_rst = 0;
  2507. xscale->external_debug_break = 0;
  2508. xscale->ibcr_available = 2;
  2509. xscale->ibcr0_used = 0;
  2510. xscale->ibcr1_used = 0;
  2511. xscale->dbr_available = 2;
  2512. xscale->dbr0_used = 0;
  2513. xscale->dbr1_used = 0;
  2514. xscale->arm_bkpt = ARMV5_BKPT(0x0);
  2515. xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
  2516. xscale->vector_catch = 0x1;
  2517. xscale->trace.capture_status = TRACE_IDLE;
  2518. xscale->trace.data = NULL;
  2519. xscale->trace.image = NULL;
  2520. xscale->trace.buffer_enabled = 0;
  2521. xscale->trace.buffer_fill = 0;
  2522. /* prepare ARMv4/5 specific information */
  2523. armv4_5->arch_info = xscale;
  2524. armv4_5->read_core_reg = xscale_read_core_reg;
  2525. armv4_5->write_core_reg = xscale_write_core_reg;
  2526. armv4_5->full_context = xscale_full_context;
  2527. armv4_5_init_arch_info(target, armv4_5);
  2528. xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
  2529. xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
  2530. xscale->armv4_5_mmu.read_memory = xscale_read_memory;
  2531. xscale->armv4_5_mmu.write_memory = xscale_write_memory;
  2532. xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
  2533. xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
  2534. xscale->armv4_5_mmu.has_tiny_pages = 1;
  2535. xscale->armv4_5_mmu.mmu_enabled = 0;
  2536. return ERROR_OK;
  2537. }
2538. /* target xscale <endianness> <startup_mode> <chain_pos> <variant> */
  2539. static int xscale_target_create(struct target_s *target, Jim_Interp *interp)
  2540. {
  2541. xscale_common_t *xscale = calloc(1,sizeof(xscale_common_t));
  2542. xscale_init_arch_info(target, xscale, target->tap, target->variant);
  2543. xscale_build_reg_cache(target);
  2544. return ERROR_OK;
  2545. }
  2546. static int
  2547. xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx,
  2548. char *cmd, char **args, int argc)
  2549. {
  2550. target_t *target = NULL;
  2551. armv4_5_common_t *armv4_5;
  2552. xscale_common_t *xscale;
  2553. uint32_t handler_address;
  2554. if (argc < 2)
  2555. {
  2556. LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
  2557. return ERROR_OK;
  2558. }
  2559. if ((target = get_target(args[0])) == NULL)
  2560. {
  2561. LOG_ERROR("target '%s' not defined", args[0]);
  2562. return ERROR_FAIL;
  2563. }
  2564. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2565. {
  2566. return ERROR_FAIL;
  2567. }
  2568. handler_address = strtoul(args[1], NULL, 0);
  2569. if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
  2570. ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
  2571. {
  2572. xscale->handler_address = handler_address;
  2573. }
  2574. else
  2575. {
  2576. LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
  2577. return ERROR_FAIL;
  2578. }
  2579. return ERROR_OK;
  2580. }
  2581. static int
  2582. xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx,
  2583. char *cmd, char **args, int argc)
  2584. {
  2585. target_t *target = NULL;
  2586. armv4_5_common_t *armv4_5;
  2587. xscale_common_t *xscale;
  2588. uint32_t cache_clean_address;
  2589. if (argc < 2)
  2590. {
  2591. return ERROR_COMMAND_SYNTAX_ERROR;
  2592. }
  2593. target = get_target(args[0]);
  2594. if (target == NULL)
  2595. {
  2596. LOG_ERROR("target '%s' not defined", args[0]);
  2597. return ERROR_FAIL;
  2598. }
  2599. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2600. {
  2601. return ERROR_FAIL;
  2602. }
  2603. cache_clean_address = strtoul(args[1], NULL, 0);
  2604. if (cache_clean_address & 0xffff)
  2605. {
  2606. LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
  2607. }
  2608. else
  2609. {
  2610. xscale->cache_clean_address = cache_clean_address;
  2611. }
  2612. return ERROR_OK;
  2613. }
  2614. static int
  2615. xscale_handle_cache_info_command(struct command_context_s *cmd_ctx,
  2616. char *cmd, char **args, int argc)
  2617. {
  2618. target_t *target = get_current_target(cmd_ctx);
  2619. armv4_5_common_t *armv4_5;
  2620. xscale_common_t *xscale;
  2621. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2622. {
  2623. return ERROR_OK;
  2624. }
  2625. return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
  2626. }
  2627. static int xscale_virt2phys(struct target_s *target,
  2628. uint32_t virtual, uint32_t *physical)
  2629. {
  2630. armv4_5_common_t *armv4_5;
  2631. xscale_common_t *xscale;
  2632. int retval;
  2633. int type;
  2634. uint32_t cb;
  2635. int domain;
  2636. uint32_t ap;
  2637. if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
  2638. {
  2639. return retval;
  2640. }
  2641. uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
  2642. if (type == -1)
  2643. {
  2644. return ret;
  2645. }
  2646. *physical = ret;
  2647. return ERROR_OK;
  2648. }
  2649. static int xscale_mmu(struct target_s *target, int *enabled)
  2650. {
  2651. armv4_5_common_t *armv4_5 = target->arch_info;
  2652. xscale_common_t *xscale = armv4_5->arch_info;
  2653. if (target->state != TARGET_HALTED)
  2654. {
  2655. LOG_ERROR("Target not halted");
  2656. return ERROR_TARGET_INVALID;
  2657. }
  2658. *enabled = xscale->armv4_5_mmu.mmu_enabled;
  2659. return ERROR_OK;
  2660. }
  2661. static int xscale_handle_mmu_command(command_context_t *cmd_ctx,
  2662. char *cmd, char **args, int argc)
  2663. {
  2664. target_t *target = get_current_target(cmd_ctx);
  2665. armv4_5_common_t *armv4_5;
  2666. xscale_common_t *xscale;
  2667. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2668. {
  2669. return ERROR_OK;
  2670. }
  2671. if (target->state != TARGET_HALTED)
  2672. {
  2673. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2674. return ERROR_OK;
  2675. }
  2676. if (argc >= 1)
  2677. {
  2678. if (strcmp("enable", args[0]) == 0)
  2679. {
  2680. xscale_enable_mmu_caches(target, 1, 0, 0);
  2681. xscale->armv4_5_mmu.mmu_enabled = 1;
  2682. }
  2683. else if (strcmp("disable", args[0]) == 0)
  2684. {
  2685. xscale_disable_mmu_caches(target, 1, 0, 0);
  2686. xscale->armv4_5_mmu.mmu_enabled = 0;
  2687. }
  2688. }
  2689. command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
  2690. return ERROR_OK;
  2691. }
  2692. static int xscale_handle_idcache_command(command_context_t *cmd_ctx,
  2693. char *cmd, char **args, int argc)
  2694. {
  2695. target_t *target = get_current_target(cmd_ctx);
  2696. armv4_5_common_t *armv4_5;
  2697. xscale_common_t *xscale;
  2698. int icache = 0, dcache = 0;
  2699. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2700. {
  2701. return ERROR_OK;
  2702. }
  2703. if (target->state != TARGET_HALTED)
  2704. {
  2705. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2706. return ERROR_OK;
  2707. }
  2708. if (strcmp(cmd, "icache") == 0)
  2709. icache = 1;
  2710. else if (strcmp(cmd, "dcache") == 0)
  2711. dcache = 1;
  2712. if (argc >= 1)
  2713. {
  2714. if (strcmp("enable", args[0]) == 0)
  2715. {
  2716. xscale_enable_mmu_caches(target, 0, dcache, icache);
  2717. if (icache)
  2718. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
  2719. else if (dcache)
  2720. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
  2721. }
  2722. else if (strcmp("disable", args[0]) == 0)
  2723. {
  2724. xscale_disable_mmu_caches(target, 0, dcache, icache);
  2725. if (icache)
  2726. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
  2727. else if (dcache)
  2728. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
  2729. }
  2730. }
  2731. if (icache)
  2732. command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
  2733. if (dcache)
  2734. command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
  2735. return ERROR_OK;
  2736. }
  2737. static int xscale_handle_vector_catch_command(command_context_t *cmd_ctx,
  2738. char *cmd, char **args, int argc)
  2739. {
  2740. target_t *target = get_current_target(cmd_ctx);
  2741. armv4_5_common_t *armv4_5;
  2742. xscale_common_t *xscale;
  2743. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2744. {
  2745. return ERROR_OK;
  2746. }
  2747. if (argc < 1)
  2748. {
  2749. command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
  2750. }
  2751. else
  2752. {
  2753. xscale->vector_catch = strtoul(args[0], NULL, 0);
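/* the catch mask maps to the eight per-vector trap-enable bits in DCSR[23:16] */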
  2754. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
  2755. xscale_write_dcsr(target, -1, -1);
  2756. }
  2757. command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
  2758. return ERROR_OK;
  2759. }
  2760. static int xscale_handle_vector_table_command(command_context_t *cmd_ctx,
  2761. char *cmd, char **args, int argc)
  2762. {
  2763. target_t *target = get_current_target(cmd_ctx);
  2764. armv4_5_common_t *armv4_5;
  2765. xscale_common_t *xscale;
  2766. int err = 0;
  2767. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2768. {
  2769. return ERROR_OK;
  2770. }
  2771. if (argc == 0) /* print current settings */
  2772. {
  2773. int idx;
  2774. command_print(cmd_ctx, "active user-set static vectors:");
  2775. for (idx = 1; idx < 8; idx++)
  2776. if (xscale->static_low_vectors_set & (1 << idx))
  2777. command_print(cmd_ctx, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
  2778. for (idx = 1; idx < 8; idx++)
  2779. if (xscale->static_high_vectors_set & (1 << idx))
  2780. command_print(cmd_ctx, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
  2781. return ERROR_OK;
  2782. }
  2783. if (argc != 3)
  2784. err = 1;
  2785. else
  2786. {
  2787. int idx;
  2788. uint32_t vec;
  2789. idx = strtoul(args[1], NULL, 0);
  2790. vec = strtoul(args[2], NULL, 0);
  2791. if (idx < 1 || idx >= 8)
  2792. err = 1;
  2793. if (!err && strcmp(args[0], "low") == 0)
  2794. {
  2795. xscale->static_low_vectors_set |= (1<<idx);
  2796. xscale->static_low_vectors[idx] = vec;
  2797. }
  2798. else if (!err && (strcmp(args[0], "high") == 0))
  2799. {
  2800. xscale->static_high_vectors_set |= (1<<idx);
  2801. xscale->static_high_vectors[idx] = vec;
  2802. }
  2803. else
  2804. err = 1;
  2805. }
  2806. if (err)
  2807. command_print(cmd_ctx, "usage: xscale vector_table <high|low> <index> <code>");
  2808. return ERROR_OK;
  2809. }
  2810. static int
  2811. xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx,
  2812. char *cmd, char **args, int argc)
  2813. {
  2814. target_t *target = get_current_target(cmd_ctx);
  2815. armv4_5_common_t *armv4_5;
  2816. xscale_common_t *xscale;
  2817. uint32_t dcsr_value;
  2818. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2819. {
  2820. return ERROR_OK;
  2821. }
  2822. if (target->state != TARGET_HALTED)
  2823. {
  2824. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2825. return ERROR_OK;
  2826. }
  2827. if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
  2828. {
  2829. xscale_trace_data_t *td, *next_td;
  2830. xscale->trace.buffer_enabled = 1;
  2831. /* free old trace data */
  2832. td = xscale->trace.data;
  2833. while (td)
  2834. {
  2835. next_td = td->next;
  2836. if (td->entries)
  2837. free(td->entries);
  2838. free(td);
  2839. td = next_td;
  2840. }
  2841. xscale->trace.data = NULL;
  2842. }
  2843. else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
  2844. {
  2845. xscale->trace.buffer_enabled = 0;
  2846. }
  2847. if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
  2848. {
  2849. if (argc >= 3)
  2850. xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
  2851. else
  2852. xscale->trace.buffer_fill = 1;
  2853. }
  2854. else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
  2855. {
  2856. xscale->trace.buffer_fill = -1;
  2857. }
  2858. if (xscale->trace.buffer_enabled)
  2859. {
2860. /* if we enable the trace buffer while the target is halted
2861. * we know the address of the first instruction to be traced */
  2862. xscale->trace.pc_ok = 1;
  2863. xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  2864. }
  2865. else
  2866. {
  2867. /* otherwise the address is unknown, and we have no known good PC */
  2868. xscale->trace.pc_ok = 0;
  2869. }
  2870. command_print(cmd_ctx, "trace buffer %s (%s)",
  2871. (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
  2872. (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
  2873. dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
  2874. if (xscale->trace.buffer_fill >= 0)
  2875. xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
  2876. else
  2877. xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
  2878. return ERROR_OK;
  2879. }
  2880. static int
  2881. xscale_handle_trace_image_command(struct command_context_s *cmd_ctx,
  2882. char *cmd, char **args, int argc)
  2883. {
  2884. target_t *target;
  2885. armv4_5_common_t *armv4_5;
  2886. xscale_common_t *xscale;
  2887. if (argc < 1)
  2888. {
  2889. command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
  2890. return ERROR_OK;
  2891. }
  2892. target = get_current_target(cmd_ctx);
  2893. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2894. {
  2895. return ERROR_OK;
  2896. }
  2897. if (xscale->trace.image)
  2898. {
  2899. image_close(xscale->trace.image);
  2900. free(xscale->trace.image);
  2901. command_print(cmd_ctx, "previously loaded image found and closed");
  2902. }
  2903. xscale->trace.image = malloc(sizeof(image_t));
  2904. xscale->trace.image->base_address_set = 0;
  2905. xscale->trace.image->start_address_set = 0;
  2906. /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
  2907. if (argc >= 2)
  2908. {
  2909. xscale->trace.image->base_address_set = 1;
  2910. xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
  2911. }
  2912. else
  2913. {
  2914. xscale->trace.image->base_address_set = 0;
  2915. }
  2916. if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
  2917. {
  2918. free(xscale->trace.image);
  2919. xscale->trace.image = NULL;
  2920. return ERROR_OK;
  2921. }
  2922. return ERROR_OK;
  2923. }
  2924. static int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx,
  2925. char *cmd, char **args, int argc)
  2926. {
  2927. target_t *target = get_current_target(cmd_ctx);
  2928. armv4_5_common_t *armv4_5;
  2929. xscale_common_t *xscale;
  2930. xscale_trace_data_t *trace_data;
  2931. fileio_t file;
  2932. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2933. {
  2934. return ERROR_OK;
  2935. }
  2936. if (target->state != TARGET_HALTED)
  2937. {
  2938. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2939. return ERROR_OK;
  2940. }
  2941. if (argc < 1)
  2942. {
  2943. command_print(cmd_ctx, "usage: xscale dump_trace <file>");
  2944. return ERROR_OK;
  2945. }
  2946. trace_data = xscale->trace.data;
  2947. if (!trace_data)
  2948. {
  2949. command_print(cmd_ctx, "no trace data collected");
  2950. return ERROR_OK;
  2951. }
  2952. if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
  2953. {
  2954. return ERROR_OK;
  2955. }
  2956. while (trace_data)
  2957. {
  2958. int i;
  2959. fileio_write_u32(&file, trace_data->chkpt0);
  2960. fileio_write_u32(&file, trace_data->chkpt1);
  2961. fileio_write_u32(&file, trace_data->last_instruction);
  2962. fileio_write_u32(&file, trace_data->depth);
  2963. for (i = 0; i < trace_data->depth; i++)
  2964. fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
  2965. trace_data = trace_data->next;
  2966. }
  2967. fileio_close(&file);
  2968. return ERROR_OK;
  2969. }
  2970. static int
  2971. xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx,
  2972. char *cmd, char **args, int argc)
  2973. {
  2974. target_t *target = get_current_target(cmd_ctx);
  2975. armv4_5_common_t *armv4_5;
  2976. xscale_common_t *xscale;
  2977. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2978. {
  2979. return ERROR_OK;
  2980. }
  2981. xscale_analyze_trace(target, cmd_ctx);
  2982. return ERROR_OK;
  2983. }
  2984. static int xscale_handle_cp15(command_context_t *cmd_ctx,
  2985. char *cmd, char **args, int argc)
  2986. {
  2987. target_t *target = get_current_target(cmd_ctx);
  2988. armv4_5_common_t *armv4_5;
  2989. xscale_common_t *xscale;
  2990. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2991. {
  2992. return ERROR_OK;
  2993. }
  2994. if (target->state != TARGET_HALTED)
  2995. {
  2996. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2997. return ERROR_OK;
  2998. }
  2999. uint32_t reg_no = 0;
  3000. reg_t *reg = NULL;
  3001. if (argc > 0)
  3002. {
  3003. reg_no = strtoul(args[0], NULL, 0);
3004. /* translate from xscale cp15 register number to openocd register */
  3005. switch (reg_no)
  3006. {
  3007. case 0:
  3008. reg_no = XSCALE_MAINID;
  3009. break;
  3010. case 1:
  3011. reg_no = XSCALE_CTRL;
  3012. break;
  3013. case 2:
  3014. reg_no = XSCALE_TTB;
  3015. break;
  3016. case 3:
  3017. reg_no = XSCALE_DAC;
  3018. break;
  3019. case 5:
  3020. reg_no = XSCALE_FSR;
  3021. break;
  3022. case 6:
  3023. reg_no = XSCALE_FAR;
  3024. break;
  3025. case 13:
  3026. reg_no = XSCALE_PID;
  3027. break;
  3028. case 15:
  3029. reg_no = XSCALE_CPACCESS;
  3030. break;
  3031. default:
  3032. command_print(cmd_ctx, "invalid register number");
  3033. return ERROR_INVALID_ARGUMENTS;
  3034. }
  3035. reg = &xscale->reg_cache->reg_list[reg_no];
  3036. }
  3037. if (argc == 1)
  3038. {
  3039. uint32_t value;
  3040. /* read cp15 control register */
  3041. xscale_get_reg(reg);
  3042. value = buf_get_u32(reg->value, 0, 32);
  3043. command_print(cmd_ctx, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
  3044. }
  3045. else if (argc == 2)
  3046. {
  3047. uint32_t value = strtoul(args[1], NULL, 0);
  3048. /* send CP write request (command 0x41) */
  3049. xscale_send_u32(target, 0x41);
  3050. /* send CP register number */
  3051. xscale_send_u32(target, reg_no);
  3052. /* send CP register value */
  3053. xscale_send_u32(target, value);
  3054. /* execute cpwait to ensure outstanding operations complete */
  3055. xscale_send_u32(target, 0x53);
  3056. }
  3057. else
  3058. {
  3059. command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
  3060. }
  3061. return ERROR_OK;
  3062. }
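
/* Register the 'xscale' command group and its subcommands with the
 * command context, then pull in the generic ARMv4/5 commands. */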
static int xscale_register_commands(struct command_context_s *cmd_ctx)
{
	command_t *xscale_cmd;

	xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");

	register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
	register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);

	register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
	register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
	register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
	register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");

	register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be caught");
	register_command(cmd_ctx, xscale_cmd, "vector_table", xscale_handle_vector_table_command, COMMAND_EXEC, "<high|low> <index> <code> set static code for exception handler entry");

	register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable | disable> ['fill' [n]|'wrap']");
	register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
	register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
	register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
			COMMAND_EXEC, "load image from <file> [base address]");

	register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");

	armv4_5_register_commands(cmd_ctx);

	return ERROR_OK;
}
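
/* Hook the XScale-specific implementations (and the shared ARM7/9 and
 * ARMv4/5 helpers) into the generic target interface. */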
target_type_t xscale_target =
{
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = NULL,

	.get_gdb_reg_list = armv4_5_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.write_memory = xscale_write_memory,
	.bulk_write_memory = xscale_bulk_write_memory,
	.checksum_memory = arm7_9_checksum_memory,
	.blank_check_memory = arm7_9_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.register_commands = xscale_register_commands,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,
	.quit = xscale_quit,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};