/***************************************************************************
 *   Copyright (C) 2006, 2007 by Dominic Rath                              *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2007,2008 Øyvind Harboe                                 *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.              *
 ***************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "xscale.h"
#include "target_type.h"
#include "arm7_9_common.h"
#include "arm_simulator.h"
#include "arm_disassembler.h"
#include "time_support.h"
#include "image.h"

/* cli handling */
int xscale_register_commands(struct command_context_s *cmd_ctx);

/* forward declarations */
int xscale_target_create(struct target_s *target, Jim_Interp *interp);
int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target);
int xscale_quit(void);

int xscale_arch_state(struct target_s *target);
int xscale_poll(target_t *target);
int xscale_halt(target_t *target);
int xscale_resume(struct target_s *target, int current, uint32_t address, int handle_breakpoints, int debug_execution);
int xscale_step(struct target_s *target, int current, uint32_t address, int handle_breakpoints);
int xscale_debug_entry(target_t *target);
int xscale_restore_context(target_t *target);

int xscale_assert_reset(target_t *target);
int xscale_deassert_reset(target_t *target);
int xscale_soft_reset_halt(struct target_s *target);

int xscale_set_reg_u32(reg_t *reg, uint32_t value);

int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode);
int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, uint32_t value);

int xscale_read_memory(struct target_s *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
int xscale_write_memory(struct target_s *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
int xscale_bulk_write_memory(target_t *target, uint32_t address, uint32_t count, uint8_t *buffer);

int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);

int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint);

void xscale_enable_watchpoints(struct target_s *target);
void xscale_enable_breakpoints(struct target_s *target);

static int xscale_virt2phys(struct target_s *target, uint32_t virtual, uint32_t *physical);
static int xscale_mmu(struct target_s *target, int *enabled);

int xscale_read_trace(target_t *target);
target_type_t xscale_target =
{
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = xscale_soft_reset_halt,

	.get_gdb_reg_list = armv4_5_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.write_memory = xscale_write_memory,
	.bulk_write_memory = xscale_bulk_write_memory,
	.checksum_memory = arm7_9_checksum_memory,
	.blank_check_memory = arm7_9_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.register_commands = xscale_register_commands,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,
	.quit = xscale_quit,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};
char* xscale_reg_list[] =
{
	"XSCALE_MAINID",	/* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0",		/* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX",		/* 20 */
	"XSCALE_TXRXCTRL",
};
xscale_reg_t xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL},	/* DCSR accessed via JTAG or SW */
	{-1, NULL},		/* TX accessed via JTAG */
	{-1, NULL},		/* RX accessed via JTAG */
	{-1, NULL},		/* TXRXCTRL implicit access via JTAG */
};

int xscale_reg_arch_type = -1;
int xscale_get_reg(reg_t *reg);
int xscale_set_reg(reg_t *reg, uint8_t *buf);

int xscale_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
	{
		LOG_ERROR("target isn't an XScale target");
		return -1;
	}

	if (xscale->common_magic != XSCALE_COMMON_MAGIC)
	{
		LOG_ERROR("target isn't an XScale target");
		return -1;
	}

	*armv4_5_p = armv4_5;
	*xscale_p = xscale;
	return ERROR_OK;
}

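/* Load a new instruction into the XScale TAP's instruction register.
 * The IR scan is only queued when the TAP does not already hold the requested
 * instruction, and the captured IR bits are checked against the TAP's
 * expected capture pattern. */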
int xscale_jtag_set_instr(jtag_tap_t *tap, uint32_t new_instr)
{
	if (tap == NULL)
		return ERROR_FAIL;

	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
	{
		scan_field_t field;

		field.tap = tap;
		field.num_bits = tap->ir_length;
		field.out_value = calloc(CEIL(field.num_bits, 8), 1);
		buf_set_u32(field.out_value, 0, field.num_bits, new_instr);

		uint8_t tmp[4];
		field.in_value = tmp;

		jtag_add_ir_scan(1, &field, jtag_get_end_state());

		/* FIX!!!! isn't this check superfluous? verify_ircapture handles this? */
		jtag_check_value_mask(&field, tap->expected, tap->expected_mask);

		free(field.out_value);
	}

	return ERROR_OK;
}

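/* Read the Debug Control and Status Register (DCSR) over JTAG.
 * Two DR scans are queued: the first captures the current DCSR value into the
 * register cache, the second writes that value straight back so the register
 * contents are preserved, with the hold_rst / external_debug_break bits sent
 * in the 3-bit control field of each scan. */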
int xscale_read_dcsr(target_t *target)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	int retval;
	scan_field_t fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_DRPAUSE);
	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dcsr);

	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	fields[0].tap = xscale->jtag_info.tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = xscale->jtag_info.tap;
	fields[1].num_bits = 32;
	fields[1].out_value = NULL;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;

	fields[2].tap = xscale->jtag_info.tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_set_end_state(TAP_IDLE);

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}

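/* JTAG callback: convert the 32 bits captured into the scan buffer into a
 * host-native uint32_t, in place. */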
static void xscale_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;
	*((uint32_t *)in) = buf_get_u32(in, 0, 32);
}

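/* Collect num_words 32-bit words from the debug handler's TX register.
 * Each DR scan also returns a 3-bit status field; a word is only accepted when
 * its TX-ready bit is set, so not-yet-ready words are dropped and re-read until
 * all requested words have arrived or the retry limit is hit. */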
int xscale_receive(target_t *target, uint32_t *buffer, int num_words)
{
	if (num_words == 0)
		return ERROR_INVALID_ARGUMENTS;

	int retval = ERROR_OK;
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	tap_state_t path[3];
	scan_field_t fields[3];
	uint8_t *field0 = malloc(num_words * 1);
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint32_t *field1 = malloc(num_words * 4);
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;
	int words_done = 0;
	int words_scheduled = 0;
	int i;

	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	fields[0].tap = xscale->jtag_info.tap;
	fields[0].num_bits = 3;
	fields[0].out_value = NULL;
	fields[0].in_value = NULL;
	fields[0].check_value = &field0_check_value;
	fields[0].check_mask = &field0_check_mask;

	fields[1].tap = xscale->jtag_info.tap;
	fields[1].num_bits = 32;
	fields[1].out_value = NULL;
	fields[1].check_value = NULL;
	fields[1].check_mask = NULL;

	fields[2].tap = xscale->jtag_info.tap;
	fields[2].num_bits = 1;
	fields[2].out_value = NULL;
	fields[2].in_value = NULL;
	fields[2].check_value = &field2_check_value;
	fields[2].check_mask = &field2_check_mask;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgtx);
	jtag_add_runtest(1, jtag_get_end_state()); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */

	/* repeat until all words have been collected */
	int attempts = 0;
	while (words_done < num_words)
	{
		/* schedule reads */
		words_scheduled = 0;
		for (i = words_done; i < num_words; i++)
		{
			fields[0].in_value = &field0[i];

			jtag_add_pathmove(3, path);

			fields[1].in_value = (uint8_t *)(field1 + i);

			jtag_add_dr_scan_check(3, fields, jtag_set_end_state(TAP_IDLE));

			jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));

			words_scheduled++;
		}

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while receiving data from debug handler");
			break;
		}

		/* examine results */
		for (i = words_done; i < num_words; i++)
		{
			if (!(field0[i] & 1))
			{
				/* this word wasn't ready yet; move backwards if necessary */
				int j;
				for (j = i; j < num_words - 1; j++)
				{
					field0[j] = field0[j + 1];
					field1[j] = field1[j + 1];
				}
				words_scheduled--;
			}
		}

		if (words_scheduled == 0)
		{
			if (attempts++ == 1000)
			{
				LOG_ERROR("Failed to receive data from debug handler after 1000 attempts");
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}
		}

		words_done += words_scheduled;
	}

	for (i = 0; i < num_words; i++)
		*(buffer++) = buf_get_u32((uint8_t *)&field1[i], 0, 32);

	free(field0);
	free(field1);

	return retval;
}

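/* Read the debug handler's TX register once, waiting up to about one second
 * for the TX-ready bit. With 'consume' set the read goes straight from
 * Capture-DR to Shift-DR and clears TX_READY; otherwise the state machine
 * detours through Pause-DR so the handler still sees the data as unread. */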
int xscale_read_tx(target_t *target, int consume)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	tap_state_t path[3];
	tap_state_t noconsume_path[6];
	int retval;
	struct timeval timeout, now;
	scan_field_t fields[3];
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgtx);

	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	fields[0].tap = xscale->jtag_info.tap;
	fields[0].num_bits = 3;
	fields[0].out_value = NULL;
	fields[0].in_value = &field0_in;

	fields[1].tap = xscale->jtag_info.tap;
	fields[1].num_bits = 32;
	fields[1].out_value = NULL;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;

	fields[2].tap = xscale->jtag_info.tap;
	fields[2].num_bits = 1;
	fields[2].out_value = NULL;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(sizeof(noconsume_path) / sizeof(*noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}

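/* Push the cached RX register value to the debug handler. The RX register is
 * polled until the handler has read the previous word (rx_read low), then the
 * new word is scanned in again with the rx_valid flag set. */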
int xscale_write_rx(target_t *target)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	int retval;
	struct timeval timeout, now;
	scan_field_t fields[3];
	uint8_t field0_out = 0x0;
	uint8_t field0_in = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x6;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgrx);

	fields[0].tap = xscale->jtag_info.tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].in_value = &field0_in;

	fields[1].tap = xscale->jtag_info.tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
	fields[1].in_value = NULL;

	fields[2].tap = xscale->jtag_info.tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp;
	fields[2].in_value = &tmp;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));

		jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
		jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}
		if (!(field0_in & 1))
			goto done;
		if (debug_level >= 3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(3, fields, jtag_set_end_state(TAP_IDLE));
	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}

/* send count elements of size bytes each to the debug handler */
int xscale_send(target_t *target, uint8_t *buffer, int count, int size)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	uint32_t t[3];
	int bits[3];
	int retval;
	int done_count = 0;

	jtag_set_end_state(TAP_IDLE);

	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgrx);

	bits[0] = 3;
	t[0] = 0;
	bits[1] = 32;
	t[2] = 1;
	bits[2] = 1;
	int endianness = target->endianness;
	while (done_count++ < count)
	{
		switch (size)
		{
		case 4:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1] = le_to_h_u32(buffer);
			} else
			{
				t[1] = be_to_h_u32(buffer);
			}
			break;
		case 2:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1] = le_to_h_u16(buffer);
			} else
			{
				t[1] = be_to_h_u16(buffer);
			}
			break;
		case 1:
			t[1] = buffer[0];
			break;
		default:
			LOG_ERROR("BUG: size neither 4, 2 nor 1");
			exit(-1);
		}
		jtag_add_dr_out(xscale->jtag_info.tap,
				3,
				bits,
				t,
				jtag_set_end_state(TAP_IDLE));
		buffer += size;
	}

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}

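/* Convenience wrapper: place a single 32-bit word in the cached RX register
 * and hand it to the debug handler via xscale_write_rx(). */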
int xscale_send_u32(target_t *target, uint32_t value)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
	return xscale_write_rx(target);
}

int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	int retval;
	scan_field_t fields[3];
	uint8_t field0 = 0x0;
	uint8_t field0_check_value = 0x2;
	uint8_t field0_check_mask = 0x7;
	uint8_t field2 = 0x0;
	uint8_t field2_check_value = 0x0;
	uint8_t field2_check_mask = 0x1;

	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dcsr);

	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	fields[0].tap = xscale->jtag_info.tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	uint8_t tmp;
	fields[0].in_value = &tmp;

	fields[1].tap = xscale->jtag_info.tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	fields[2].tap = xscale->jtag_info.tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	uint8_t tmp2;
	fields[2].in_value = &tmp2;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
	jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}

/* parity of a 32-bit word: returns 0 if the number of set bits is even, 1 if odd
 * (the word is folded down to a nibble, which indexes the 0x6996 lookup pattern) */
unsigned int parity (unsigned int v)
{
	unsigned int ov = v;
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v &= 0xf;
	LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
	return (0x6996 >> v) & 1;
}

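/* Fill one cache line via the LDIC JTAG function: a 6-bit command (b011 for the
 * mini IC, b010 for the main IC) plus the 27-bit virtual line address are
 * shifted first, followed by the eight 32-bit words of the line, each tagged
 * with its parity bit. */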
int xscale_load_ic(target_t *target, int mini, uint32_t va, uint32_t buffer[8])
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	uint8_t packet[4];
	uint8_t cmd;
	int word;
	scan_field_t fields[2];

	LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.ldic); /* LDIC */

	/* CMD is b010 for Main IC and b011 for Mini IC */
	if (mini)
		buf_set_u32(&cmd, 0, 3, 0x3);
	else
		buf_set_u32(&cmd, 0, 3, 0x2);

	buf_set_u32(&cmd, 3, 3, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	fields[0].tap = xscale->jtag_info.tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;
	fields[0].in_value = NULL;

	fields[1].tap = xscale->jtag_info.tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	fields[0].num_bits = 32;
	fields[0].out_value = packet;

	fields[1].num_bits = 1;
	fields[1].out_value = &cmd;

	for (word = 0; word < 8; word++)
	{
		buf_set_u32(packet, 0, 32, buffer[word]);

		uint32_t value;
		memcpy(&value, packet, sizeof(uint32_t));
		cmd = parity(value);

		jtag_add_dr_scan(2, fields, jtag_get_end_state());
	}

	jtag_execute_queue();

	return ERROR_OK;
}

int xscale_invalidate_ic_line(target_t *target, uint32_t va)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	uint8_t packet[4];
	uint8_t cmd;
	scan_field_t fields[2];

	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.ldic); /* LDIC */

	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	fields[0].tap = xscale->jtag_info.tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;
	fields[0].in_value = NULL;

	fields[1].tap = xscale->jtag_info.tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;
	fields[1].in_value = NULL;

	jtag_add_dr_scan(2, fields, jtag_get_end_state());

	return ERROR_OK;
}

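/* Rebuild the low (0x0) and high (0xffff0000) exception vector tables in the
 * mini instruction cache. Vectors 1-7 come from either user-configured static
 * vectors or the values currently found in target memory; vector 0 is replaced
 * with a branch into the debug handler so that a reset enters debug state. */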
int xscale_update_vectors(target_t *target)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	int i;
	int retval;

	uint32_t low_reset_branch, high_reset_branch;

	for (i = 1; i < 8; i++)
	{
		/* if there's a static vector specified for this exception, override */
		if (xscale->static_high_vectors_set & (1 << i))
		{
			xscale->high_vectors[i] = xscale->static_high_vectors[i];
		}
		else
		{
			retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK)
			{
				/* Some of these reads will fail as part of normal execution */
				xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	for (i = 1; i < 8; i++)
	{
		if (xscale->static_low_vectors_set & (1 << i))
		{
			xscale->low_vectors[i] = xscale->static_low_vectors[i];
		}
		else
		{
			retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval != ERROR_OK)
			{
				/* Some of these reads will fail as part of normal execution */
				xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	/* calculate branches to debug handler */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* invalidate and load exception vectors in mini i-cache */
	xscale_invalidate_ic_line(target, 0x0);
	xscale_invalidate_ic_line(target, 0xffff0000);
	xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
	xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);

	return ERROR_OK;
}

int xscale_arch_state(struct target_s *target)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	char *state[] =
	{
		"disabled", "enabled"
	};

	char *arch_dbg_reason[] =
	{
		"", "\n(processor reset)", "\n(trace buffer full)"
	};

	if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
	{
		LOG_ERROR("BUG: called for a non-ARMv4/5 target");
		exit(-1);
	}

	LOG_USER("target halted in %s state due to %s, current mode: %s\n"
			"cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "\n"
			"MMU: %s, D-Cache: %s, I-Cache: %s"
			"%s",
			armv4_5_state_strings[armv4_5->core_state],
			Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name,
			armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
			buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
			buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
			state[xscale->armv4_5_mmu.mmu_enabled],
			state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
			state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
			arch_dbg_reason[xscale->arch_debug_reason]);

	return ERROR_OK;
}

int xscale_poll(target_t *target)
{
	int retval = ERROR_OK;
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{
			/* there's data to read from the tx register, we entered debug state */
			xscale->handler_running = 1;

			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be performed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}

int xscale_debug_entry(target_t *target)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	uint32_t pc;
	uint32_t buffer[10];
	int i;
	int retval;
	uint32_t moe;

	/* clear external dbg break (will be written on next DCSR read) */
	xscale->external_debug_break = 0;
	if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
		return retval;

	/* get r0, pc, r1 to r7 and cpsr */
	if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
		return retval;

	/* move r0 from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_cache->reg_list[0].valid = 1;
	LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);

	/* move pc from buffer to register cache */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
	armv4_5->core_cache->reg_list[15].dirty = 1;
	armv4_5->core_cache->reg_list[15].valid = 1;
	LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);

	/* move data from buffer to register cache */
	for (i = 1; i <= 7; i++)
	{
		buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
		armv4_5->core_cache->reg_list[i].dirty = 1;
		armv4_5->core_cache->reg_list[i].valid = 1;
		LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
	}

	buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
	armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
	armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
	LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);

	armv4_5->core_mode = buffer[9] & 0x1f;
	if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
	{
		target->state = TARGET_UNKNOWN;
		LOG_ERROR("cpsr contains invalid mode value - communication failure");
		return ERROR_TARGET_FAILURE;
	}
	LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);

	if (buffer[9] & 0x20)
		armv4_5->core_state = ARMV4_5_STATE_THUMB;
	else
		armv4_5->core_state = ARMV4_5_STATE_ARM;

	if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
		return ERROR_FAIL;

	/* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
	if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
	{
		xscale_receive(target, buffer, 8);
		buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
	}
	else
	{
		/* r8 to r14, but no spsr */
		xscale_receive(target, buffer, 7);
	}

	/* move data from buffer to register cache */
	for (i = 8; i <= 14; i++)
	{
		buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
	}

	/* examine debug reason */
	xscale_read_dcsr(target);
	moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);

	/* stored PC (for calculating fixup) */
	pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	switch (moe)
	{
		case 0x0: /* Processor reset */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
			pc -= 4;
			break;
		case 0x1: /* Instruction breakpoint hit */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x2: /* Data breakpoint hit */
			target->debug_reason = DBG_REASON_WATCHPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x3: /* BKPT instruction executed */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x4: /* Ext. debug event */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x5: /* Vector trap occurred */
			target->debug_reason = DBG_REASON_BREAKPOINT;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
			pc -= 4;
			break;
		case 0x6: /* Trace buffer full break */
			target->debug_reason = DBG_REASON_DBGRQ;
			xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
			pc -= 4;
			break;
		case 0x7: /* Reserved */
		default:
			LOG_ERROR("Method of Entry is 'Reserved'");
			exit(-1);
			break;
	}

	/* apply PC fixup */
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);

	/* on the first debug entry, identify cache type */
	if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* read cp15 cache type register */
		xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
		cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);

		armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
	}

	/* examine MMU and Cache settings */
	/* read cp15 control register */
	xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
	xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
	xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
	xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;

	/* tracing enabled, read collected trace data */
	if (xscale->trace.buffer_enabled)
	{
		xscale_read_trace(target);
		xscale->trace.buffer_fill--;

		/* resume if we're still collecting trace data */
		if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
			&& (xscale->trace.buffer_fill > 0))
		{
			xscale_resume(target, 1, 0x0, 1, 0);
		}
		else
		{
			xscale->trace.buffer_enabled = 0;
		}
	}

	return ERROR_OK;
}

int xscale_halt(target_t *target)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	LOG_DEBUG("target->state: %s",
		Jim_Nvp_value2name_simple(nvp_target_state, target->state)->name);

	if (target->state == TARGET_HALTED)
	{
		LOG_DEBUG("target was already halted");
		return ERROR_OK;
	}
	else if (target->state == TARGET_UNKNOWN)
	{
		/* this must not happen for a xscale target */
		LOG_ERROR("target was in unknown state when halt was requested");
		return ERROR_TARGET_INVALID;
	}
	else if (target->state == TARGET_RESET)
	{
		LOG_DEBUG("target->state == TARGET_RESET");
	}
	else
	{
		/* assert external dbg break */
		xscale->external_debug_break = 1;
		xscale_read_dcsr(target);

		target->debug_reason = DBG_REASON_DBGRQ;
	}

	return ERROR_OK;
}

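/* Single stepping is emulated with the IBCR0 hardware breakpoint register:
 * the address of the next instruction is programmed into IBCR0 with its enable
 * bit set, so execution stops again after one instruction. Any breakpoint
 * currently occupying IBCR0 is temporarily removed first. */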
int xscale_enable_single_step(struct target_s *target, uint32_t next_pc)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
	int retval;

	if (xscale->ibcr0_used)
	{
		breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);

		if (ibcr0_bp)
		{
			xscale_unset_breakpoint(target, ibcr0_bp);
		}
		else
		{
			LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
			exit(-1);
		}
	}

	if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
		return retval;

	return ERROR_OK;
}

int xscale_disable_single_step(struct target_s *target)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
	int retval;

	if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
		return retval;

	return ERROR_OK;
}

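/* Resume execution: the register cache is written back to the target and the
 * debug handler is asked to continue (command 0x30, or 0x62 followed by 0x31
 * when trace collection is to be restarted). When breakpoint handling is
 * requested and a breakpoint sits on the current PC, it is stepped over first
 * using the single-step mechanism above. */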
int xscale_resume(struct target_s *target, int current, uint32_t address, int handle_breakpoints, int debug_execution)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	breakpoint_t *breakpoint = target->breakpoints;
	uint32_t current_pc;
	int retval;
	int i;

	LOG_DEBUG("-");

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
	{
		target_free_all_working_areas(target);
	}

	/* update vector tables */
	if ((retval = xscale_update_vectors(target)) != ERROR_OK)
		return retval;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	/* if we're at the reset vector, we have to simulate the branch */
	if (current_pc == 0x0)
	{
		arm_simulate_step(target, NULL);
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
		{
			uint32_t next_pc;

			/* there's a breakpoint at the current PC, we have to step over it */
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_unset_breakpoint(target, breakpoint);

			/* calculate PC of next instruction */
			if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
			{
				uint32_t current_opcode;
				target_read_u32(target, current_pc, &current_opcode);
				LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
			}

			LOG_DEBUG("enable single-step");
			xscale_enable_single_step(target, next_pc);

			/* restore banked registers */
			xscale_restore_context(target);

			/* send resume request (command 0x30 or 0x31)
			 * clean the trace buffer if it is to be enabled (0x62) */
			if (xscale->trace.buffer_enabled)
			{
				xscale_send_u32(target, 0x62);
				xscale_send_u32(target, 0x31);
			}
			else
				xscale_send_u32(target, 0x30);

			/* send CPSR */
			xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
			LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));

			for (i = 7; i >= 0; i--)
			{
				/* send register */
				xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
				LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
			}

			/* send PC */
			xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
			LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

			/* wait for and process debug entry */
			xscale_debug_entry(target);

			LOG_DEBUG("disable single-step");
			xscale_disable_single_step(target);

			LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
			xscale_set_breakpoint(target, breakpoint);
		}
	}

	/* enable any pending breakpoints and watchpoints */
	xscale_enable_breakpoints(target);
	xscale_enable_watchpoints(target);

	/* restore banked registers */
	xscale_restore_context(target);

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		xscale_send_u32(target, 0x62);
		xscale_send_u32(target, 0x31);
	}
	else
		xscale_send_u32(target, 0x30);

	/* send CPSR */
	xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));

	for (i = 7; i >= 0; i--)
	{
		/* send register */
		xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution)
	{
		/* registers are now invalid */
		armv4_5_invalidate_core_regs(target);
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	}

	LOG_DEBUG("target resumed");

	xscale->handler_running = 1;

	return ERROR_OK;
}

static int xscale_step_inner(struct target_s *target, int current, uint32_t address, int handle_breakpoints)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	uint32_t next_pc;
	int retval;
	int i;

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* calculate PC of next instruction */
	if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
	{
		uint32_t current_opcode, current_pc;
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

		target_read_u32(target, current_pc, &current_opcode);
		LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
		return retval;
	}

	LOG_DEBUG("enable single-step");
	if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
		return retval;

	/* restore banked registers */
	if ((retval = xscale_restore_context(target)) != ERROR_OK)
		return retval;

	/* send resume request (command 0x30 or 0x31)
	 * clean the trace buffer if it is to be enabled (0x62) */
	if (xscale->trace.buffer_enabled)
	{
		if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
			return retval;
		if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
			return retval;
	}
	else
		if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
			return retval;

	/* send CPSR */
	if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32))) != ERROR_OK)
		return retval;
	LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32 "", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));

	for (i = 7; i >= 0; i--)
	{
		/* send register */
		if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
			return retval;
		LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
	}

	/* send PC */
	if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))) != ERROR_OK)
		return retval;
	LOG_DEBUG("writing PC with value 0x%8.8" PRIx32, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* registers are now invalid */
	if ((retval = armv4_5_invalidate_core_regs(target)) != ERROR_OK)
		return retval;

	/* wait for and process debug entry */
	if ((retval = xscale_debug_entry(target)) != ERROR_OK)
		return retval;

	LOG_DEBUG("disable single-step");
	if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	return ERROR_OK;
}

int xscale_step(struct target_s *target, int current, uint32_t address, int handle_breakpoints)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	breakpoint_t *breakpoint = target->breakpoints;

	uint32_t current_pc;
	int retval;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);

	current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

	/* if we're at the reset vector, we have to simulate the step */
	if (current_pc == 0x0)
	{
		if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
			return retval;
		current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

		target->debug_reason = DBG_REASON_SINGLESTEP;
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);

		return ERROR_OK;
	}

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
		if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
		{
			if ((retval = xscale_unset_breakpoint(target, breakpoint)) != ERROR_OK)
				return retval;
		}

	retval = xscale_step_inner(target, current, address, handle_breakpoints);

	if (breakpoint)
	{
		xscale_set_breakpoint(target, breakpoint);
	}

	LOG_DEBUG("target stepped");

	return ERROR_OK;
}

int xscale_assert_reset(target_t *target)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	LOG_DEBUG("target->state: %s",
		Jim_Nvp_value2name_simple(nvp_target_state, target->state)->name);

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	jtag_set_end_state(TAP_IDLE);
	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dcsr);

	/* set Hold reset, Halt mode and Trap Reset */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(xscale->jtag_info.tap, 0x7f);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}

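/* Release reset. If the on-chip debug handler has not been installed yet, it
 * is loaded from xscale/debug_handler.bin into the mini instruction cache
 * (2 KiB maximum, in 32-byte cache lines), the exception vectors are set up,
 * and the core is released with "hold reset" cleared so it enters the handler. */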
int xscale_deassert_reset(target_t *target)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	fileio_t debug_handler;
	uint32_t address;
	uint32_t binary_size;

	uint32_t buf_cnt;
	uint32_t i;
	int retval;

	breakpoint_t *breakpoint = target->breakpoints;

	LOG_DEBUG("-");

	xscale->ibcr_available = 2;
	xscale->ibcr0_used = 0;
	xscale->ibcr1_used = 0;

	xscale->dbr_available = 2;
	xscale->dbr0_used = 0;
	xscale->dbr1_used = 0;

	/* mark all hardware breakpoints as unset */
	while (breakpoint)
	{
		if (breakpoint->type == BKPT_HARD)
		{
			breakpoint->set = 0;
		}
		breakpoint = breakpoint->next;
	}

	if (!xscale->handler_installed)
	{
		/* release SRST */
		jtag_add_reset(0, 0);

		/* wait 300ms; 150 and 100ms were not enough */
		jtag_add_sleep(300*1000);

		jtag_add_runtest(2030, jtag_set_end_state(TAP_IDLE));
		jtag_execute_queue();

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* Load debug handler */
		if (fileio_open(&debug_handler, "xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
		{
			return ERROR_OK;
		}

		if ((binary_size = debug_handler.size) % 4)
		{
			LOG_ERROR("debug_handler.bin: size not a multiple of 4");
			exit(-1);
		}

		if (binary_size > 0x800)
		{
			LOG_ERROR("debug_handler.bin: larger than 2kb");
			exit(-1);
		}

		binary_size = CEIL(binary_size, 32) * 32;

		address = xscale->handler_address;
		while (binary_size > 0)
		{
			uint32_t cache_line[8];
			uint8_t buffer[32];

			if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
			{
			}

			for (i = 0; i < buf_cnt; i += 4)
			{
				/* convert LE buffer to host-endian uint32_t */
				cache_line[i / 4] = le_to_h_u32(&buffer[i]);
			}

			for (; buf_cnt < 32; buf_cnt += 4)
			{
				cache_line[buf_cnt / 4] = 0xe1a08008;
			}

			/* only load addresses other than the reset vectors */
			if ((address % 0x400) != 0x0)
			{
				xscale_load_ic(target, 1, address, cache_line);
			}

			address += buf_cnt;
			binary_size -= buf_cnt;
		}

		xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
		xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);

		jtag_add_runtest(30, jtag_set_end_state(TAP_IDLE));

		jtag_add_sleep(100000);

		/* set Hold reset, Halt mode and Trap Reset */
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
		buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
		xscale_write_dcsr(target, 1, 0);

		/* clear Hold reset to let the target run (should enter debug handler) */
		xscale_write_dcsr(target, 0, 1);
		target->state = TARGET_RUNNING;

		if (!target->reset_halt)
		{
			jtag_add_sleep(10000);

			/* we should have entered debug now */
			xscale_debug_entry(target);
			target->state = TARGET_HALTED;

			/* resume the target */
			xscale_resume(target, 1, 0x0, 1, 0);
		}

		fileio_close(&debug_handler);
	}
	else
	{
		jtag_add_reset(0, 0);
	}

	return ERROR_OK;
}

int xscale_soft_reset_halt(struct target_s *target)
{
	return ERROR_OK;
}

int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
{
	return ERROR_OK;
}

int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, uint32_t value)
{
	return ERROR_OK;
}

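/* Fetch a complete register context: for every privileged mode that still has
 * invalid registers in the cache, ask the debug handler for the banked
 * registers (command 0x0 followed by a CPSR value selecting the mode) and store
 * the returned r8-r14 and, where present, the SPSR in the register cache. */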
  1403. int xscale_full_context(target_t *target)
  1404. {
  1405. armv4_5_common_t *armv4_5 = target->arch_info;
  1406. uint32_t *buffer;
  1407. int i, j;
  1408. LOG_DEBUG("-");
  1409. if (target->state != TARGET_HALTED)
  1410. {
  1411. LOG_WARNING("target not halted");
  1412. return ERROR_TARGET_NOT_HALTED;
  1413. }
  1414. buffer = malloc(4 * 8);
  1415. /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
  1416. * we can't enter User mode on an XScale (unpredictable),
  1417. * but User shares registers with SYS
  1418. */
  1419. for (i = 1; i < 7; i++)
  1420. {
  1421. int valid = 1;
  1422. /* check if there are invalid registers in the current mode
  1423. */
  1424. for (j = 0; j <= 16; j++)
  1425. {
  1426. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
  1427. valid = 0;
  1428. }
  1429. if (!valid)
  1430. {
  1431. uint32_t tmp_cpsr;
  1432. /* request banked registers */
  1433. xscale_send_u32(target, 0x0);
  1434. tmp_cpsr = 0x0;
  1435. tmp_cpsr |= armv4_5_number_to_mode(i);
  1436. tmp_cpsr |= 0xc0; /* I/F bits */
  1437. /* send CPSR for desired mode */
  1438. xscale_send_u32(target, tmp_cpsr);
  1439. /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
  1440. if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
  1441. {
  1442. xscale_receive(target, buffer, 8);
  1443. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
  1444. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
  1445. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
  1446. }
  1447. else
  1448. {
  1449. xscale_receive(target, buffer, 7);
  1450. }
  1451. /* move data from buffer to register cache */
  1452. for (j = 8; j <= 14; j++)
  1453. {
  1454. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
  1455. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
  1456. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
  1457. }
  1458. }
  1459. }
  1460. free(buffer);
  1461. return ERROR_OK;
  1462. }
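/* Write dirty banked registers back to the target.  For each processor mode
 * with dirty cache entries, the debug handler is sent command 0x1 and the
 * CPSR of that mode (IRQ/FIQ masked), followed by r8-r14 and, outside
 * USR/SYS, the SPSR.
 */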
  1463. int xscale_restore_context(target_t *target)
  1464. {
  1465. armv4_5_common_t *armv4_5 = target->arch_info;
  1466. int i, j;
  1467. LOG_DEBUG("-");
  1468. if (target->state != TARGET_HALTED)
  1469. {
  1470. LOG_WARNING("target not halted");
  1471. return ERROR_TARGET_NOT_HALTED;
  1472. }
  1473. /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
  1474. * we can't enter User mode on an XScale (unpredictable),
  1475. * but User shares registers with SYS
  1476. */
  1477. for (i = 1; i < 7; i++)
  1478. {
  1479. int dirty = 0;
  1480. /* check if there are invalid registers in the current mode
  1481. */
  1482. for (j = 8; j <= 14; j++)
  1483. {
  1484. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
  1485. dirty = 1;
  1486. }
  1487. /* if not USR/SYS, check if the SPSR needs to be written */
  1488. if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
  1489. {
  1490. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
  1491. dirty = 1;
  1492. }
  1493. if (dirty)
  1494. {
  1495. uint32_t tmp_cpsr;
  1496. /* send banked registers */
  1497. xscale_send_u32(target, 0x1);
  1498. tmp_cpsr = 0x0;
  1499. tmp_cpsr |= armv4_5_number_to_mode(i);
  1500. tmp_cpsr |= 0xc0; /* I/F bits */
  1501. /* send CPSR for desired mode */
  1502. xscale_send_u32(target, tmp_cpsr);
  1503. /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
  1504. for (j = 8; j <= 14; j++)
  1505. {
1506. xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32));
  1507. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
  1508. }
  1509. if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
  1510. {
1511. xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 0, 32));
  1512. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
  1513. }
  1514. }
  1515. }
  1516. return ERROR_OK;
  1517. }
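/* Debug-handler assisted memory read: command 0x1n (n = access size), the
 * base address and the word count are sent, the handler streams back one
 * 32-bit word per element, and DCSR is checked afterwards for a Sticky Abort.
 */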
  1518. int xscale_read_memory(struct target_s *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
  1519. {
  1520. armv4_5_common_t *armv4_5 = target->arch_info;
  1521. xscale_common_t *xscale = armv4_5->arch_info;
  1522. uint32_t *buf32;
  1523. uint32_t i;
  1524. int retval;
  1525. LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
  1526. if (target->state != TARGET_HALTED)
  1527. {
  1528. LOG_WARNING("target not halted");
  1529. return ERROR_TARGET_NOT_HALTED;
  1530. }
  1531. /* sanitize arguments */
  1532. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1533. return ERROR_INVALID_ARGUMENTS;
  1534. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1535. return ERROR_TARGET_UNALIGNED_ACCESS;
  1536. /* send memory read request (command 0x1n, n: access size) */
  1537. if ((retval=xscale_send_u32(target, 0x10 | size))!=ERROR_OK)
  1538. return retval;
  1539. /* send base address for read request */
  1540. if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
  1541. return retval;
  1542. /* send number of requested data words */
  1543. if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
  1544. return retval;
  1545. /* receive data from target (count times 32-bit words in host endianness) */
  1546. buf32 = malloc(4 * count);
  1547. if ((retval=xscale_receive(target, buf32, count))!=ERROR_OK)
1548. { free(buf32); return retval; }
  1549. /* extract data from host-endian buffer into byte stream */
  1550. for (i = 0; i < count; i++)
  1551. {
  1552. switch (size)
  1553. {
  1554. case 4:
  1555. target_buffer_set_u32(target, buffer, buf32[i]);
  1556. buffer += 4;
  1557. break;
  1558. case 2:
  1559. target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
  1560. buffer += 2;
  1561. break;
  1562. case 1:
  1563. *buffer++ = buf32[i] & 0xff;
  1564. break;
  1565. default:
  1566. LOG_ERROR("should never get here");
  1567. exit(-1);
  1568. }
  1569. }
  1570. free(buf32);
  1571. /* examine DCSR, to see if Sticky Abort (SA) got set */
  1572. if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
  1573. return retval;
  1574. if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
  1575. {
  1576. /* clear SA bit */
  1577. if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
  1578. return retval;
  1579. return ERROR_TARGET_DATA_ABORT;
  1580. }
  1581. return ERROR_OK;
  1582. }
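/* Debug-handler assisted memory write: command 0x2n (n = access size), the
 * base address and the word count are sent, followed by the data itself;
 * DCSR is checked afterwards for a Sticky Abort.
 */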
  1583. int xscale_write_memory(struct target_s *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
  1584. {
  1585. armv4_5_common_t *armv4_5 = target->arch_info;
  1586. xscale_common_t *xscale = armv4_5->arch_info;
  1587. int retval;
  1588. LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
  1589. if (target->state != TARGET_HALTED)
  1590. {
  1591. LOG_WARNING("target not halted");
  1592. return ERROR_TARGET_NOT_HALTED;
  1593. }
  1594. /* sanitize arguments */
  1595. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1596. return ERROR_INVALID_ARGUMENTS;
  1597. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1598. return ERROR_TARGET_UNALIGNED_ACCESS;
  1599. /* send memory write request (command 0x2n, n: access size) */
  1600. if ((retval=xscale_send_u32(target, 0x20 | size))!=ERROR_OK)
  1601. return retval;
1602. /* send base address for write request */
  1603. if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
  1604. return retval;
1605. /* send number of requested data words to be written */
  1606. if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
  1607. return retval;
1608. /* send data from the (target endian) byte stream */
  1609. #if 0
  1610. for (i = 0; i < count; i++)
  1611. {
  1612. switch (size)
  1613. {
  1614. case 4:
  1615. value = target_buffer_get_u32(target, buffer);
  1616. xscale_send_u32(target, value);
  1617. buffer += 4;
  1618. break;
  1619. case 2:
  1620. value = target_buffer_get_u16(target, buffer);
  1621. xscale_send_u32(target, value);
  1622. buffer += 2;
  1623. break;
  1624. case 1:
  1625. value = *buffer;
  1626. xscale_send_u32(target, value);
  1627. buffer += 1;
  1628. break;
  1629. default:
  1630. LOG_ERROR("should never get here");
  1631. exit(-1);
  1632. }
  1633. }
  1634. #endif
  1635. if ((retval=xscale_send(target, buffer, count, size))!=ERROR_OK)
  1636. return retval;
  1637. /* examine DCSR, to see if Sticky Abort (SA) got set */
  1638. if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
  1639. return retval;
  1640. if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
  1641. {
  1642. /* clear SA bit */
  1643. if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
  1644. return retval;
  1645. return ERROR_TARGET_DATA_ABORT;
  1646. }
  1647. return ERROR_OK;
  1648. }
  1649. int xscale_bulk_write_memory(target_t *target, uint32_t address, uint32_t count, uint8_t *buffer)
  1650. {
  1651. return xscale_write_memory(target, address, 4, count, buffer);
  1652. }
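/* Return the translation table base, read via the XSCALE_TTB entry of the
 * debug-handler register cache.
 */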
  1653. uint32_t xscale_get_ttb(target_t *target)
  1654. {
  1655. armv4_5_common_t *armv4_5 = target->arch_info;
  1656. xscale_common_t *xscale = armv4_5->arch_info;
  1657. uint32_t ttb;
  1658. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
  1659. ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
  1660. return ttb;
  1661. }
  1662. void xscale_disable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
  1663. {
  1664. armv4_5_common_t *armv4_5 = target->arch_info;
  1665. xscale_common_t *xscale = armv4_5->arch_info;
  1666. uint32_t cp15_control;
  1667. /* read cp15 control register */
  1668. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  1669. cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  1670. if (mmu)
  1671. cp15_control &= ~0x1U;
  1672. if (d_u_cache)
  1673. {
  1674. /* clean DCache */
  1675. xscale_send_u32(target, 0x50);
  1676. xscale_send_u32(target, xscale->cache_clean_address);
  1677. /* invalidate DCache */
  1678. xscale_send_u32(target, 0x51);
  1679. cp15_control &= ~0x4U;
  1680. }
  1681. if (i_cache)
  1682. {
  1683. /* invalidate ICache */
  1684. xscale_send_u32(target, 0x52);
  1685. cp15_control &= ~0x1000U;
  1686. }
  1687. /* write new cp15 control register */
  1688. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
  1689. /* execute cpwait to ensure outstanding operations complete */
  1690. xscale_send_u32(target, 0x53);
  1691. }
  1692. void xscale_enable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
  1693. {
  1694. armv4_5_common_t *armv4_5 = target->arch_info;
  1695. xscale_common_t *xscale = armv4_5->arch_info;
  1696. uint32_t cp15_control;
  1697. /* read cp15 control register */
  1698. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  1699. cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  1700. if (mmu)
  1701. cp15_control |= 0x1U;
  1702. if (d_u_cache)
  1703. cp15_control |= 0x4U;
  1704. if (i_cache)
  1705. cp15_control |= 0x1000U;
  1706. /* write new cp15 control register */
  1707. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
  1708. /* execute cpwait to ensure outstanding operations complete */
  1709. xscale_send_u32(target, 0x53);
  1710. }
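/* Hardware breakpoints are programmed into IBCR0/IBCR1, writing the
 * breakpoint address with bit 0 set as enable; software breakpoints save the
 * original opcode and replace it with an ARM or Thumb BKPT instruction.
 */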
  1711. int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  1712. {
  1713. int retval;
  1714. armv4_5_common_t *armv4_5 = target->arch_info;
  1715. xscale_common_t *xscale = armv4_5->arch_info;
  1716. if (target->state != TARGET_HALTED)
  1717. {
  1718. LOG_WARNING("target not halted");
  1719. return ERROR_TARGET_NOT_HALTED;
  1720. }
  1721. if (breakpoint->set)
  1722. {
  1723. LOG_WARNING("breakpoint already set");
  1724. return ERROR_OK;
  1725. }
  1726. if (breakpoint->type == BKPT_HARD)
  1727. {
  1728. uint32_t value = breakpoint->address | 1;
  1729. if (!xscale->ibcr0_used)
  1730. {
  1731. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
  1732. xscale->ibcr0_used = 1;
  1733. breakpoint->set = 1; /* breakpoint set on first breakpoint register */
  1734. }
  1735. else if (!xscale->ibcr1_used)
  1736. {
  1737. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
  1738. xscale->ibcr1_used = 1;
  1739. breakpoint->set = 2; /* breakpoint set on second breakpoint register */
  1740. }
  1741. else
  1742. {
  1743. LOG_ERROR("BUG: no hardware comparator available");
  1744. return ERROR_OK;
  1745. }
  1746. }
  1747. else if (breakpoint->type == BKPT_SOFT)
  1748. {
  1749. if (breakpoint->length == 4)
  1750. {
  1751. /* keep the original instruction in target endianness */
  1752. if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
  1753. {
  1754. return retval;
  1755. }
1756. /* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
  1757. if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
  1758. {
  1759. return retval;
  1760. }
  1761. }
  1762. else
  1763. {
  1764. /* keep the original instruction in target endianness */
  1765. if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
  1766. {
  1767. return retval;
  1768. }
1769. /* write the bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
1770. if ((retval = target_write_u16(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
  1771. {
  1772. return retval;
  1773. }
  1774. }
  1775. breakpoint->set = 1;
  1776. }
  1777. return ERROR_OK;
  1778. }
  1779. int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  1780. {
  1781. armv4_5_common_t *armv4_5 = target->arch_info;
  1782. xscale_common_t *xscale = armv4_5->arch_info;
  1783. if (target->state != TARGET_HALTED)
  1784. {
  1785. LOG_WARNING("target not halted");
  1786. return ERROR_TARGET_NOT_HALTED;
  1787. }
  1788. if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
  1789. {
  1790. LOG_INFO("no breakpoint unit available for hardware breakpoint");
  1791. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1792. }
  1793. if ((breakpoint->length != 2) && (breakpoint->length != 4))
  1794. {
  1795. LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
  1796. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1797. }
  1798. if (breakpoint->type == BKPT_HARD)
  1799. {
  1800. xscale->ibcr_available--;
  1801. }
  1802. return ERROR_OK;
  1803. }
  1804. int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  1805. {
  1806. int retval;
  1807. armv4_5_common_t *armv4_5 = target->arch_info;
  1808. xscale_common_t *xscale = armv4_5->arch_info;
  1809. if (target->state != TARGET_HALTED)
  1810. {
  1811. LOG_WARNING("target not halted");
  1812. return ERROR_TARGET_NOT_HALTED;
  1813. }
  1814. if (!breakpoint->set)
  1815. {
  1816. LOG_WARNING("breakpoint not set");
  1817. return ERROR_OK;
  1818. }
  1819. if (breakpoint->type == BKPT_HARD)
  1820. {
  1821. if (breakpoint->set == 1)
  1822. {
  1823. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
  1824. xscale->ibcr0_used = 0;
  1825. }
  1826. else if (breakpoint->set == 2)
  1827. {
  1828. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
  1829. xscale->ibcr1_used = 0;
  1830. }
  1831. breakpoint->set = 0;
  1832. }
  1833. else
  1834. {
  1835. /* restore original instruction (kept in target endianness) */
  1836. if (breakpoint->length == 4)
  1837. {
  1838. if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
  1839. {
  1840. return retval;
  1841. }
  1842. }
  1843. else
  1844. {
  1845. if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
  1846. {
  1847. return retval;
  1848. }
  1849. }
  1850. breakpoint->set = 0;
  1851. }
  1852. return ERROR_OK;
  1853. }
  1854. int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  1855. {
  1856. armv4_5_common_t *armv4_5 = target->arch_info;
  1857. xscale_common_t *xscale = armv4_5->arch_info;
  1858. if (target->state != TARGET_HALTED)
  1859. {
  1860. LOG_WARNING("target not halted");
  1861. return ERROR_TARGET_NOT_HALTED;
  1862. }
  1863. if (breakpoint->set)
  1864. {
  1865. xscale_unset_breakpoint(target, breakpoint);
  1866. }
  1867. if (breakpoint->type == BKPT_HARD)
  1868. xscale->ibcr_available++;
  1869. return ERROR_OK;
  1870. }
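/* Watchpoints use DBR0/DBR1 together with DBCON: a two-bit enable field per
 * comparator, set to 0x3 for read, 0x2 for any access and 0x1 for write
 * watchpoints.
 */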
  1871. int xscale_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  1872. {
  1873. armv4_5_common_t *armv4_5 = target->arch_info;
  1874. xscale_common_t *xscale = armv4_5->arch_info;
  1875. uint8_t enable=0;
  1876. reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
  1877. uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
  1878. if (target->state != TARGET_HALTED)
  1879. {
  1880. LOG_WARNING("target not halted");
  1881. return ERROR_TARGET_NOT_HALTED;
  1882. }
  1883. xscale_get_reg(dbcon);
  1884. switch (watchpoint->rw)
  1885. {
  1886. case WPT_READ:
  1887. enable = 0x3;
  1888. break;
  1889. case WPT_ACCESS:
  1890. enable = 0x2;
  1891. break;
  1892. case WPT_WRITE:
  1893. enable = 0x1;
  1894. break;
  1895. default:
  1896. LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
  1897. }
  1898. if (!xscale->dbr0_used)
  1899. {
  1900. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
  1901. dbcon_value |= enable;
  1902. xscale_set_reg_u32(dbcon, dbcon_value);
  1903. watchpoint->set = 1;
  1904. xscale->dbr0_used = 1;
  1905. }
  1906. else if (!xscale->dbr1_used)
  1907. {
  1908. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
  1909. dbcon_value |= enable << 2;
  1910. xscale_set_reg_u32(dbcon, dbcon_value);
  1911. watchpoint->set = 2;
  1912. xscale->dbr1_used = 1;
  1913. }
  1914. else
  1915. {
  1916. LOG_ERROR("BUG: no hardware comparator available");
  1917. return ERROR_OK;
  1918. }
  1919. return ERROR_OK;
  1920. }
  1921. int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  1922. {
  1923. armv4_5_common_t *armv4_5 = target->arch_info;
  1924. xscale_common_t *xscale = armv4_5->arch_info;
  1925. if (target->state != TARGET_HALTED)
  1926. {
  1927. LOG_WARNING("target not halted");
  1928. return ERROR_TARGET_NOT_HALTED;
  1929. }
  1930. if (xscale->dbr_available < 1)
  1931. {
  1932. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1933. }
  1934. if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
  1935. {
  1936. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1937. }
  1938. xscale->dbr_available--;
  1939. return ERROR_OK;
  1940. }
  1941. int xscale_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  1942. {
  1943. armv4_5_common_t *armv4_5 = target->arch_info;
  1944. xscale_common_t *xscale = armv4_5->arch_info;
  1945. reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
  1946. uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
  1947. if (target->state != TARGET_HALTED)
  1948. {
  1949. LOG_WARNING("target not halted");
  1950. return ERROR_TARGET_NOT_HALTED;
  1951. }
  1952. if (!watchpoint->set)
  1953. {
  1954. LOG_WARNING("breakpoint not set");
  1955. return ERROR_OK;
  1956. }
  1957. if (watchpoint->set == 1)
  1958. {
  1959. dbcon_value &= ~0x3;
  1960. xscale_set_reg_u32(dbcon, dbcon_value);
  1961. xscale->dbr0_used = 0;
  1962. }
  1963. else if (watchpoint->set == 2)
  1964. {
  1965. dbcon_value &= ~0xc;
  1966. xscale_set_reg_u32(dbcon, dbcon_value);
  1967. xscale->dbr1_used = 0;
  1968. }
  1969. watchpoint->set = 0;
  1970. return ERROR_OK;
  1971. }
  1972. int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  1973. {
  1974. armv4_5_common_t *armv4_5 = target->arch_info;
  1975. xscale_common_t *xscale = armv4_5->arch_info;
  1976. if (target->state != TARGET_HALTED)
  1977. {
  1978. LOG_WARNING("target not halted");
  1979. return ERROR_TARGET_NOT_HALTED;
  1980. }
  1981. if (watchpoint->set)
  1982. {
  1983. xscale_unset_watchpoint(target, watchpoint);
  1984. }
  1985. xscale->dbr_available++;
  1986. return ERROR_OK;
  1987. }
  1988. void xscale_enable_watchpoints(struct target_s *target)
  1989. {
  1990. watchpoint_t *watchpoint = target->watchpoints;
  1991. while (watchpoint)
  1992. {
  1993. if (watchpoint->set == 0)
  1994. xscale_set_watchpoint(target, watchpoint);
  1995. watchpoint = watchpoint->next;
  1996. }
  1997. }
  1998. void xscale_enable_breakpoints(struct target_s *target)
  1999. {
  2000. breakpoint_t *breakpoint = target->breakpoints;
  2001. /* set any pending breakpoints */
  2002. while (breakpoint)
  2003. {
  2004. if (breakpoint->set == 0)
  2005. xscale_set_breakpoint(target, breakpoint);
  2006. breakpoint = breakpoint->next;
  2007. }
  2008. }
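/* Read an XScale debug register.  DCSR, TX and RX are reachable directly
 * over JTAG; all other registers are fetched through the debug handler
 * (command 0x40 plus the handler's register number, result returned via TX).
 */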
  2009. int xscale_get_reg(reg_t *reg)
  2010. {
  2011. xscale_reg_t *arch_info = reg->arch_info;
  2012. target_t *target = arch_info->target;
  2013. armv4_5_common_t *armv4_5 = target->arch_info;
  2014. xscale_common_t *xscale = armv4_5->arch_info;
  2015. /* DCSR, TX and RX are accessible via JTAG */
  2016. if (strcmp(reg->name, "XSCALE_DCSR") == 0)
  2017. {
  2018. return xscale_read_dcsr(arch_info->target);
  2019. }
  2020. else if (strcmp(reg->name, "XSCALE_TX") == 0)
  2021. {
  2022. /* 1 = consume register content */
  2023. return xscale_read_tx(arch_info->target, 1);
  2024. }
  2025. else if (strcmp(reg->name, "XSCALE_RX") == 0)
  2026. {
  2027. /* can't read from RX register (host -> debug handler) */
  2028. return ERROR_OK;
  2029. }
  2030. else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
  2031. {
  2032. /* can't (explicitly) read from TXRXCTRL register */
  2033. return ERROR_OK;
  2034. }
2035. else /* Other DBG registers have to be transferred by the debug handler */
  2036. {
  2037. /* send CP read request (command 0x40) */
  2038. xscale_send_u32(target, 0x40);
  2039. /* send CP register number */
  2040. xscale_send_u32(target, arch_info->dbg_handler_number);
  2041. /* read register value */
  2042. xscale_read_tx(target, 1);
  2043. buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
  2044. reg->dirty = 0;
  2045. reg->valid = 1;
  2046. }
  2047. return ERROR_OK;
  2048. }
  2049. int xscale_set_reg(reg_t *reg, uint8_t* buf)
  2050. {
  2051. xscale_reg_t *arch_info = reg->arch_info;
  2052. target_t *target = arch_info->target;
  2053. armv4_5_common_t *armv4_5 = target->arch_info;
  2054. xscale_common_t *xscale = armv4_5->arch_info;
  2055. uint32_t value = buf_get_u32(buf, 0, 32);
  2056. /* DCSR, TX and RX are accessible via JTAG */
  2057. if (strcmp(reg->name, "XSCALE_DCSR") == 0)
  2058. {
  2059. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
  2060. return xscale_write_dcsr(arch_info->target, -1, -1);
  2061. }
  2062. else if (strcmp(reg->name, "XSCALE_RX") == 0)
  2063. {
  2064. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
  2065. return xscale_write_rx(arch_info->target);
  2066. }
  2067. else if (strcmp(reg->name, "XSCALE_TX") == 0)
  2068. {
  2069. /* can't write to TX register (debug-handler -> host) */
  2070. return ERROR_OK;
  2071. }
  2072. else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
  2073. {
  2074. /* can't (explicitly) write to TXRXCTRL register */
  2075. return ERROR_OK;
  2076. }
2077. else /* Other DBG registers have to be transferred by the debug handler */
  2078. {
  2079. /* send CP write request (command 0x41) */
  2080. xscale_send_u32(target, 0x41);
  2081. /* send CP register number */
  2082. xscale_send_u32(target, arch_info->dbg_handler_number);
  2083. /* send CP register value */
  2084. xscale_send_u32(target, value);
  2085. buf_set_u32(reg->value, 0, 32, value);
  2086. }
  2087. return ERROR_OK;
  2088. }
  2089. /* convenience wrapper to access XScale specific registers */
  2090. int xscale_set_reg_u32(reg_t *reg, uint32_t value)
  2091. {
  2092. uint8_t buf[4];
  2093. buf_set_u32(buf, 0, 32, value);
  2094. return xscale_set_reg(reg, buf);
  2095. }
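/* Write DCSR through the debug handler (command 0x41) rather than directly
 * over JTAG, and update the cached register value.
 */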
  2096. int xscale_write_dcsr_sw(target_t *target, uint32_t value)
  2097. {
  2098. /* get pointers to arch-specific information */
  2099. armv4_5_common_t *armv4_5 = target->arch_info;
  2100. xscale_common_t *xscale = armv4_5->arch_info;
  2101. reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
  2102. xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
  2103. /* send CP write request (command 0x41) */
  2104. xscale_send_u32(target, 0x41);
  2105. /* send CP register number */
  2106. xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
  2107. /* send CP register value */
  2108. xscale_send_u32(target, value);
  2109. buf_set_u32(dcsr->value, 0, 32, value);
  2110. return ERROR_OK;
  2111. }
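/* Read the trace buffer through the debug handler (command 0x61): 256 trace
 * entries plus the two checkpoint registers.  The buffer is scanned backwards
 * to tag the four address bytes preceding each indirect branch message, and
 * the result is appended to the target's trace data list.
 */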
  2112. int xscale_read_trace(target_t *target)
  2113. {
  2114. /* get pointers to arch-specific information */
  2115. armv4_5_common_t *armv4_5 = target->arch_info;
  2116. xscale_common_t *xscale = armv4_5->arch_info;
  2117. xscale_trace_data_t **trace_data_p;
  2118. /* 258 words from debug handler
  2119. * 256 trace buffer entries
  2120. * 2 checkpoint addresses
  2121. */
  2122. uint32_t trace_buffer[258];
  2123. int is_address[256];
  2124. int i, j;
  2125. if (target->state != TARGET_HALTED)
  2126. {
  2127. LOG_WARNING("target must be stopped to read trace data");
  2128. return ERROR_TARGET_NOT_HALTED;
  2129. }
  2130. /* send read trace buffer command (command 0x61) */
  2131. xscale_send_u32(target, 0x61);
  2132. /* receive trace buffer content */
  2133. xscale_receive(target, trace_buffer, 258);
  2134. /* parse buffer backwards to identify address entries */
  2135. for (i = 255; i >= 0; i--)
  2136. {
  2137. is_address[i] = 0;
  2138. if (((trace_buffer[i] & 0xf0) == 0x90) ||
  2139. ((trace_buffer[i] & 0xf0) == 0xd0))
  2140. {
  2141. if (i >= 3)
  2142. is_address[--i] = 1;
  2143. if (i >= 2)
  2144. is_address[--i] = 1;
  2145. if (i >= 1)
  2146. is_address[--i] = 1;
  2147. if (i >= 0)
  2148. is_address[--i] = 1;
  2149. }
  2150. }
  2151. /* search first non-zero entry */
  2152. for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
  2153. ;
  2154. if (j == 256)
  2155. {
  2156. LOG_DEBUG("no trace data collected");
  2157. return ERROR_XSCALE_NO_TRACE_DATA;
  2158. }
  2159. for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
  2160. ;
  2161. *trace_data_p = malloc(sizeof(xscale_trace_data_t));
  2162. (*trace_data_p)->next = NULL;
  2163. (*trace_data_p)->chkpt0 = trace_buffer[256];
  2164. (*trace_data_p)->chkpt1 = trace_buffer[257];
  2165. (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  2166. (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
  2167. (*trace_data_p)->depth = 256 - j;
  2168. for (i = j; i < 256; i++)
  2169. {
  2170. (*trace_data_p)->entries[i - j].data = trace_buffer[i];
  2171. if (is_address[i])
  2172. (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
  2173. else
  2174. (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
  2175. }
  2176. return ERROR_OK;
  2177. }
  2178. int xscale_read_instruction(target_t *target, arm_instruction_t *instruction)
  2179. {
  2180. /* get pointers to arch-specific information */
  2181. armv4_5_common_t *armv4_5 = target->arch_info;
  2182. xscale_common_t *xscale = armv4_5->arch_info;
  2183. int i;
  2184. int section = -1;
  2185. uint32_t size_read;
  2186. uint32_t opcode;
  2187. int retval;
  2188. if (!xscale->trace.image)
  2189. return ERROR_TRACE_IMAGE_UNAVAILABLE;
  2190. /* search for the section the current instruction belongs to */
  2191. for (i = 0; i < xscale->trace.image->num_sections; i++)
  2192. {
  2193. if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
  2194. (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
  2195. {
  2196. section = i;
  2197. break;
  2198. }
  2199. }
  2200. if (section == -1)
  2201. {
  2202. /* current instruction couldn't be found in the image */
  2203. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2204. }
  2205. if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
  2206. {
  2207. uint8_t buf[4];
  2208. if ((retval = image_read_section(xscale->trace.image, section,
  2209. xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
  2210. 4, buf, &size_read)) != ERROR_OK)
  2211. {
  2212. LOG_ERROR("error while reading instruction: %i", retval);
  2213. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2214. }
  2215. opcode = target_buffer_get_u32(target, buf);
  2216. arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
  2217. }
  2218. else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
  2219. {
  2220. uint8_t buf[2];
  2221. if ((retval = image_read_section(xscale->trace.image, section,
  2222. xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
  2223. 2, buf, &size_read)) != ERROR_OK)
  2224. {
  2225. LOG_ERROR("error while reading instruction: %i", retval);
  2226. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2227. }
  2228. opcode = target_buffer_get_u16(target, buf);
  2229. thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
  2230. }
  2231. else
  2232. {
  2233. LOG_ERROR("BUG: unknown core state encountered");
  2234. exit(-1);
  2235. }
  2236. return ERROR_OK;
  2237. }
  2238. int xscale_branch_address(xscale_trace_data_t *trace_data, int i, uint32_t *target)
  2239. {
2240. /* if there are fewer than four entries prior to the indirect branch message
  2241. * we can't extract the address */
  2242. if (i < 4)
  2243. {
  2244. return -1;
  2245. }
  2246. *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
  2247. (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
  2248. return 0;
  2249. }
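/* Decode the collected trace data: each message nibble identifies an
 * exception (0-7), a direct/indirect branch (8/9), a checkpointed branch
 * (12/13) or a count roll-over (15).  The instruction counts are replayed
 * against the loaded trace image to reconstruct and print the executed
 * instructions.
 */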
  2250. int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
  2251. {
  2252. /* get pointers to arch-specific information */
  2253. armv4_5_common_t *armv4_5 = target->arch_info;
  2254. xscale_common_t *xscale = armv4_5->arch_info;
  2255. int next_pc_ok = 0;
  2256. uint32_t next_pc = 0x0;
  2257. xscale_trace_data_t *trace_data = xscale->trace.data;
  2258. int retval;
  2259. while (trace_data)
  2260. {
  2261. int i, chkpt;
  2262. int rollover;
  2263. int branch;
  2264. int exception;
  2265. xscale->trace.core_state = ARMV4_5_STATE_ARM;
  2266. chkpt = 0;
  2267. rollover = 0;
  2268. for (i = 0; i < trace_data->depth; i++)
  2269. {
  2270. next_pc_ok = 0;
  2271. branch = 0;
  2272. exception = 0;
  2273. if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
  2274. continue;
  2275. switch ((trace_data->entries[i].data & 0xf0) >> 4)
  2276. {
  2277. case 0: /* Exceptions */
  2278. case 1:
  2279. case 2:
  2280. case 3:
  2281. case 4:
  2282. case 5:
  2283. case 6:
  2284. case 7:
  2285. exception = (trace_data->entries[i].data & 0x70) >> 4;
  2286. next_pc_ok = 1;
  2287. next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
  2288. command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
  2289. break;
  2290. case 8: /* Direct Branch */
  2291. branch = 1;
  2292. break;
  2293. case 9: /* Indirect Branch */
  2294. branch = 1;
  2295. if (xscale_branch_address(trace_data, i, &next_pc) == 0)
  2296. {
  2297. next_pc_ok = 1;
  2298. }
  2299. break;
  2300. case 13: /* Checkpointed Indirect Branch */
  2301. if (xscale_branch_address(trace_data, i, &next_pc) == 0)
  2302. {
  2303. next_pc_ok = 1;
  2304. if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
  2305. || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
  2306. LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
  2307. }
  2308. /* explicit fall-through */
  2309. case 12: /* Checkpointed Direct Branch */
  2310. branch = 1;
  2311. if (chkpt == 0)
  2312. {
  2313. next_pc_ok = 1;
  2314. next_pc = trace_data->chkpt0;
  2315. chkpt++;
  2316. }
  2317. else if (chkpt == 1)
  2318. {
  2319. next_pc_ok = 1;
2320. next_pc = trace_data->chkpt1;
  2321. chkpt++;
  2322. }
  2323. else
  2324. {
  2325. LOG_WARNING("more than two checkpointed branches encountered");
  2326. }
  2327. break;
  2328. case 15: /* Roll-over */
  2329. rollover++;
  2330. continue;
  2331. default: /* Reserved */
  2332. command_print(cmd_ctx, "--- reserved trace message ---");
  2333. LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
  2334. return ERROR_OK;
  2335. }
  2336. if (xscale->trace.pc_ok)
  2337. {
  2338. int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
  2339. arm_instruction_t instruction;
  2340. if ((exception == 6) || (exception == 7))
  2341. {
  2342. /* IRQ or FIQ exception, no instruction executed */
  2343. executed -= 1;
  2344. }
  2345. while (executed-- >= 0)
  2346. {
  2347. if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
  2348. {
  2349. /* can't continue tracing with no image available */
  2350. if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
  2351. {
  2352. return retval;
  2353. }
  2354. else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
  2355. {
  2356. /* TODO: handle incomplete images */
  2357. }
  2358. }
  2359. /* a precise abort on a load to the PC is included in the incremental
  2360. * word count, other instructions causing data aborts are not included
  2361. */
  2362. if ((executed == 0) && (exception == 4)
  2363. && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
  2364. {
  2365. if ((instruction.type == ARM_LDM)
  2366. && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
  2367. {
  2368. executed--;
  2369. }
  2370. else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
  2371. && (instruction.info.load_store.Rd != 15))
  2372. {
  2373. executed--;
  2374. }
  2375. }
  2376. /* only the last instruction executed
  2377. * (the one that caused the control flow change)
  2378. * could be a taken branch
  2379. */
  2380. if (((executed == -1) && (branch == 1)) &&
  2381. (((instruction.type == ARM_B) ||
  2382. (instruction.type == ARM_BL) ||
  2383. (instruction.type == ARM_BLX)) &&
  2384. (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
  2385. {
  2386. xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
  2387. }
  2388. else
  2389. {
  2390. xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
  2391. }
  2392. command_print(cmd_ctx, "%s", instruction.text);
  2393. }
  2394. rollover = 0;
  2395. }
  2396. if (next_pc_ok)
  2397. {
  2398. xscale->trace.current_pc = next_pc;
  2399. xscale->trace.pc_ok = 1;
  2400. }
  2401. }
  2402. for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
  2403. {
  2404. arm_instruction_t instruction;
  2405. if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
  2406. {
  2407. /* can't continue tracing with no image available */
  2408. if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
  2409. {
  2410. return retval;
  2411. }
  2412. else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
  2413. {
  2414. /* TODO: handle incomplete images */
  2415. }
  2416. }
  2417. command_print(cmd_ctx, "%s", instruction.text);
  2418. }
  2419. trace_data = trace_data->next;
  2420. }
  2421. return ERROR_OK;
  2422. }
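/* Build the register cache: the standard ARMv4/5 core cache is created
 * first, then an "XScale registers" cache for the debug-handler accessible
 * registers is chained behind it.
 */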
  2423. void xscale_build_reg_cache(target_t *target)
  2424. {
  2425. /* get pointers to arch-specific information */
  2426. armv4_5_common_t *armv4_5 = target->arch_info;
  2427. xscale_common_t *xscale = armv4_5->arch_info;
  2428. reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
  2429. xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
  2430. int i;
  2431. int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);
  2432. (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
  2433. armv4_5->core_cache = (*cache_p);
  2434. /* register a register arch-type for XScale dbg registers only once */
  2435. if (xscale_reg_arch_type == -1)
  2436. xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);
  2437. (*cache_p)->next = malloc(sizeof(reg_cache_t));
  2438. cache_p = &(*cache_p)->next;
  2439. /* fill in values for the xscale reg cache */
  2440. (*cache_p)->name = "XScale registers";
  2441. (*cache_p)->next = NULL;
  2442. (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
  2443. (*cache_p)->num_regs = num_regs;
  2444. for (i = 0; i < num_regs; i++)
  2445. {
  2446. (*cache_p)->reg_list[i].name = xscale_reg_list[i];
  2447. (*cache_p)->reg_list[i].value = calloc(4, 1);
  2448. (*cache_p)->reg_list[i].dirty = 0;
  2449. (*cache_p)->reg_list[i].valid = 0;
  2450. (*cache_p)->reg_list[i].size = 32;
  2451. (*cache_p)->reg_list[i].bitfield_desc = NULL;
  2452. (*cache_p)->reg_list[i].num_bitfields = 0;
  2453. (*cache_p)->reg_list[i].arch_info = &arch_info[i];
  2454. (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
  2455. arch_info[i] = xscale_reg_arch_info[i];
  2456. arch_info[i].target = target;
  2457. }
  2458. xscale->reg_cache = (*cache_p);
  2459. }
  2460. int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target)
  2461. {
  2462. return ERROR_OK;
  2463. }
  2464. int xscale_quit(void)
  2465. {
  2466. return ERROR_OK;
  2467. }
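/* Initialize the XScale-specific target state: JTAG instruction codes,
 * the default debug handler address and the local vector stubs branching to
 * it, the available breakpoint/watchpoint comparators, and the ARMv4/5
 * MMU/cache callbacks.
 */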
  2468. int xscale_init_arch_info(target_t *target, xscale_common_t *xscale, jtag_tap_t *tap, const char *variant)
  2469. {
  2470. armv4_5_common_t *armv4_5;
  2471. uint32_t high_reset_branch, low_reset_branch;
  2472. int i;
  2473. armv4_5 = &xscale->armv4_5_common;
2474. /* store architecture specific data (none so far) */
  2475. xscale->arch_info = NULL;
  2476. xscale->common_magic = XSCALE_COMMON_MAGIC;
  2477. /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
  2478. xscale->variant = strdup(variant);
  2479. /* prepare JTAG information for the new target */
  2480. xscale->jtag_info.tap = tap;
  2481. xscale->jtag_info.dbgrx = 0x02;
  2482. xscale->jtag_info.dbgtx = 0x10;
  2483. xscale->jtag_info.dcsr = 0x09;
  2484. xscale->jtag_info.ldic = 0x07;
  2485. if ((strcmp(xscale->variant, "pxa250") == 0) ||
  2486. (strcmp(xscale->variant, "pxa255") == 0) ||
  2487. (strcmp(xscale->variant, "pxa26x") == 0))
  2488. {
  2489. xscale->jtag_info.ir_length = 5;
  2490. }
  2491. else if ((strcmp(xscale->variant, "pxa27x") == 0) ||
  2492. (strcmp(xscale->variant, "ixp42x") == 0) ||
  2493. (strcmp(xscale->variant, "ixp45x") == 0) ||
  2494. (strcmp(xscale->variant, "ixp46x") == 0))
  2495. {
  2496. xscale->jtag_info.ir_length = 7;
  2497. }
  2498. /* the debug handler isn't installed (and thus not running) at this time */
  2499. xscale->handler_installed = 0;
  2500. xscale->handler_running = 0;
  2501. xscale->handler_address = 0xfe000800;
  2502. /* clear the vectors we keep locally for reference */
  2503. memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
  2504. memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
  2505. /* no user-specified vectors have been configured yet */
  2506. xscale->static_low_vectors_set = 0x0;
  2507. xscale->static_high_vectors_set = 0x0;
  2508. /* calculate branches to debug handler */
  2509. low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
  2510. high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
  2511. xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
  2512. xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
  2513. for (i = 1; i <= 7; i++)
  2514. {
  2515. xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  2516. xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  2517. }
  2518. /* 64kB aligned region used for DCache cleaning */
  2519. xscale->cache_clean_address = 0xfffe0000;
  2520. xscale->hold_rst = 0;
  2521. xscale->external_debug_break = 0;
  2522. xscale->ibcr_available = 2;
  2523. xscale->ibcr0_used = 0;
  2524. xscale->ibcr1_used = 0;
  2525. xscale->dbr_available = 2;
  2526. xscale->dbr0_used = 0;
  2527. xscale->dbr1_used = 0;
  2528. xscale->arm_bkpt = ARMV5_BKPT(0x0);
  2529. xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
  2530. xscale->vector_catch = 0x1;
  2531. xscale->trace.capture_status = TRACE_IDLE;
  2532. xscale->trace.data = NULL;
  2533. xscale->trace.image = NULL;
  2534. xscale->trace.buffer_enabled = 0;
  2535. xscale->trace.buffer_fill = 0;
  2536. /* prepare ARMv4/5 specific information */
  2537. armv4_5->arch_info = xscale;
  2538. armv4_5->read_core_reg = xscale_read_core_reg;
  2539. armv4_5->write_core_reg = xscale_write_core_reg;
  2540. armv4_5->full_context = xscale_full_context;
  2541. armv4_5_init_arch_info(target, armv4_5);
  2542. xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
  2543. xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
  2544. xscale->armv4_5_mmu.read_memory = xscale_read_memory;
  2545. xscale->armv4_5_mmu.write_memory = xscale_write_memory;
  2546. xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
  2547. xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
  2548. xscale->armv4_5_mmu.has_tiny_pages = 1;
  2549. xscale->armv4_5_mmu.mmu_enabled = 0;
  2550. return ERROR_OK;
  2551. }
2552. /* target xscale <endianness> <startup_mode> <chain_pos> <variant> */
  2553. int xscale_target_create(struct target_s *target, Jim_Interp *interp)
  2554. {
  2555. xscale_common_t *xscale = calloc(1,sizeof(xscale_common_t));
  2556. xscale_init_arch_info(target, xscale, target->tap, target->variant);
  2557. xscale_build_reg_cache(target);
  2558. return ERROR_OK;
  2559. }
  2560. int xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2561. {
  2562. target_t *target = NULL;
  2563. armv4_5_common_t *armv4_5;
  2564. xscale_common_t *xscale;
  2565. uint32_t handler_address;
  2566. if (argc < 2)
  2567. {
  2568. LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
  2569. return ERROR_OK;
  2570. }
  2571. if ((target = get_target(args[0])) == NULL)
  2572. {
  2573. LOG_ERROR("target '%s' not defined", args[0]);
  2574. return ERROR_FAIL;
  2575. }
  2576. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2577. {
  2578. return ERROR_FAIL;
  2579. }
  2580. handler_address = strtoul(args[1], NULL, 0);
  2581. if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
  2582. ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
  2583. {
  2584. xscale->handler_address = handler_address;
  2585. }
  2586. else
  2587. {
  2588. LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
  2589. return ERROR_FAIL;
  2590. }
  2591. return ERROR_OK;
  2592. }
  2593. int xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2594. {
  2595. target_t *target = NULL;
  2596. armv4_5_common_t *armv4_5;
  2597. xscale_common_t *xscale;
  2598. uint32_t cache_clean_address;
  2599. if (argc < 2)
  2600. {
  2601. return ERROR_COMMAND_SYNTAX_ERROR;
  2602. }
  2603. target = get_target(args[0]);
  2604. if (target == NULL)
  2605. {
  2606. LOG_ERROR("target '%s' not defined", args[0]);
  2607. return ERROR_FAIL;
  2608. }
  2609. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2610. {
  2611. return ERROR_FAIL;
  2612. }
  2613. cache_clean_address = strtoul(args[1], NULL, 0);
  2614. if (cache_clean_address & 0xffff)
  2615. {
  2616. LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
  2617. }
  2618. else
  2619. {
  2620. xscale->cache_clean_address = cache_clean_address;
  2621. }
  2622. return ERROR_OK;
  2623. }
  2624. int xscale_handle_cache_info_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2625. {
  2626. target_t *target = get_current_target(cmd_ctx);
  2627. armv4_5_common_t *armv4_5;
  2628. xscale_common_t *xscale;
  2629. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2630. {
  2631. return ERROR_OK;
  2632. }
  2633. return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
  2634. }
  2635. static int xscale_virt2phys(struct target_s *target, uint32_t virtual, uint32_t *physical)
  2636. {
  2637. armv4_5_common_t *armv4_5;
  2638. xscale_common_t *xscale;
  2639. int retval;
  2640. int type;
  2641. uint32_t cb;
  2642. int domain;
  2643. uint32_t ap;
  2644. if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
  2645. {
  2646. return retval;
  2647. }
  2648. uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
  2649. if (type == -1)
  2650. {
  2651. return ret;
  2652. }
  2653. *physical = ret;
  2654. return ERROR_OK;
  2655. }
  2656. static int xscale_mmu(struct target_s *target, int *enabled)
  2657. {
  2658. armv4_5_common_t *armv4_5 = target->arch_info;
  2659. xscale_common_t *xscale = armv4_5->arch_info;
  2660. if (target->state != TARGET_HALTED)
  2661. {
  2662. LOG_ERROR("Target not halted");
  2663. return ERROR_TARGET_INVALID;
  2664. }
  2665. *enabled = xscale->armv4_5_mmu.mmu_enabled;
  2666. return ERROR_OK;
  2667. }
  2668. int xscale_handle_mmu_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
  2669. {
  2670. target_t *target = get_current_target(cmd_ctx);
  2671. armv4_5_common_t *armv4_5;
  2672. xscale_common_t *xscale;
  2673. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2674. {
  2675. return ERROR_OK;
  2676. }
  2677. if (target->state != TARGET_HALTED)
  2678. {
  2679. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2680. return ERROR_OK;
  2681. }
  2682. if (argc >= 1)
  2683. {
  2684. if (strcmp("enable", args[0]) == 0)
  2685. {
  2686. xscale_enable_mmu_caches(target, 1, 0, 0);
  2687. xscale->armv4_5_mmu.mmu_enabled = 1;
  2688. }
  2689. else if (strcmp("disable", args[0]) == 0)
  2690. {
  2691. xscale_disable_mmu_caches(target, 1, 0, 0);
  2692. xscale->armv4_5_mmu.mmu_enabled = 0;
  2693. }
  2694. }
  2695. command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
  2696. return ERROR_OK;
  2697. }
  2698. int xscale_handle_idcache_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
  2699. {
  2700. target_t *target = get_current_target(cmd_ctx);
  2701. armv4_5_common_t *armv4_5;
  2702. xscale_common_t *xscale;
  2703. int icache = 0, dcache = 0;
  2704. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2705. {
  2706. return ERROR_OK;
  2707. }
  2708. if (target->state != TARGET_HALTED)
  2709. {
  2710. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2711. return ERROR_OK;
  2712. }
  2713. if (strcmp(cmd, "icache") == 0)
  2714. icache = 1;
  2715. else if (strcmp(cmd, "dcache") == 0)
  2716. dcache = 1;
  2717. if (argc >= 1)
  2718. {
  2719. if (strcmp("enable", args[0]) == 0)
  2720. {
  2721. xscale_enable_mmu_caches(target, 0, dcache, icache);
  2722. if (icache)
  2723. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
  2724. else if (dcache)
  2725. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
  2726. }
  2727. else if (strcmp("disable", args[0]) == 0)
  2728. {
  2729. xscale_disable_mmu_caches(target, 0, dcache, icache);
  2730. if (icache)
  2731. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
  2732. else if (dcache)
  2733. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
  2734. }
  2735. }
  2736. if (icache)
  2737. command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
  2738. if (dcache)
  2739. command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
  2740. return ERROR_OK;
  2741. }
  2742. int xscale_handle_vector_catch_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
  2743. {
  2744. target_t *target = get_current_target(cmd_ctx);
  2745. armv4_5_common_t *armv4_5;
  2746. xscale_common_t *xscale;
  2747. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2748. {
  2749. return ERROR_OK;
  2750. }
  2751. if (argc < 1)
  2752. {
  2753. command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
  2754. }
  2755. else
  2756. {
  2757. xscale->vector_catch = strtoul(args[0], NULL, 0);
  2758. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
  2759. xscale_write_dcsr(target, -1, -1);
  2760. }
  2761. command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
  2762. return ERROR_OK;
  2763. }
  2764. int xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2765. {
  2766. target_t *target = get_current_target(cmd_ctx);
  2767. armv4_5_common_t *armv4_5;
  2768. xscale_common_t *xscale;
  2769. uint32_t dcsr_value;
  2770. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2771. {
  2772. return ERROR_OK;
  2773. }
  2774. if (target->state != TARGET_HALTED)
  2775. {
  2776. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2777. return ERROR_OK;
  2778. }
  2779. if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
  2780. {
  2781. xscale_trace_data_t *td, *next_td;
  2782. xscale->trace.buffer_enabled = 1;
  2783. /* free old trace data */
  2784. td = xscale->trace.data;
  2785. while (td)
  2786. {
  2787. next_td = td->next;
  2788. if (td->entries)
  2789. free(td->entries);
  2790. free(td);
  2791. td = next_td;
  2792. }
  2793. xscale->trace.data = NULL;
  2794. }
  2795. else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
  2796. {
  2797. xscale->trace.buffer_enabled = 0;
  2798. }
  2799. if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
  2800. {
  2801. if (argc >= 3)
  2802. xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
  2803. else
  2804. xscale->trace.buffer_fill = 1;
  2805. }
  2806. else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
  2807. {
  2808. xscale->trace.buffer_fill = -1;
  2809. }
  2810. if (xscale->trace.buffer_enabled)
  2811. {
  2812. /* if we enable the trace buffer in fill-once
  2813. * mode we know the address of the first instruction */
  2814. xscale->trace.pc_ok = 1;
  2815. xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  2816. }
  2817. else
  2818. {
  2819. /* otherwise the address is unknown, and we have no known good PC */
  2820. xscale->trace.pc_ok = 0;
  2821. }
  2822. command_print(cmd_ctx, "trace buffer %s (%s)",
  2823. (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
  2824. (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
  2825. dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
  2826. if (xscale->trace.buffer_fill >= 0)
  2827. xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
  2828. else
  2829. xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
  2830. return ERROR_OK;
  2831. }
  2832. int xscale_handle_trace_image_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2833. {
  2834. target_t *target;
  2835. armv4_5_common_t *armv4_5;
  2836. xscale_common_t *xscale;
  2837. if (argc < 1)
  2838. {
  2839. command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
  2840. return ERROR_OK;
  2841. }
  2842. target = get_current_target(cmd_ctx);
  2843. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2844. {
  2845. return ERROR_OK;
  2846. }
  2847. if (xscale->trace.image)
  2848. {
  2849. image_close(xscale->trace.image);
  2850. free(xscale->trace.image);
  2851. command_print(cmd_ctx, "previously loaded image found and closed");
  2852. }
  2853. xscale->trace.image = malloc(sizeof(image_t));
  2854. xscale->trace.image->base_address_set = 0;
  2855. xscale->trace.image->start_address_set = 0;
  2856. /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
  2857. if (argc >= 2)
  2858. {
  2859. xscale->trace.image->base_address_set = 1;
  2860. xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
  2861. }
  2862. else
  2863. {
  2864. xscale->trace.image->base_address_set = 0;
  2865. }
  2866. if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
  2867. {
  2868. free(xscale->trace.image);
  2869. xscale->trace.image = NULL;
  2870. return ERROR_OK;
  2871. }
  2872. return ERROR_OK;
  2873. }
  2874. int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2875. {
  2876. target_t *target = get_current_target(cmd_ctx);
  2877. armv4_5_common_t *armv4_5;
  2878. xscale_common_t *xscale;
  2879. xscale_trace_data_t *trace_data;
  2880. fileio_t file;
  2881. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2882. {
  2883. return ERROR_OK;
  2884. }
  2885. if (target->state != TARGET_HALTED)
  2886. {
  2887. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2888. return ERROR_OK;
  2889. }
  2890. if (argc < 1)
  2891. {
  2892. command_print(cmd_ctx, "usage: xscale dump_trace <file>");
  2893. return ERROR_OK;
  2894. }
  2895. trace_data = xscale->trace.data;
  2896. if (!trace_data)
  2897. {
  2898. command_print(cmd_ctx, "no trace data collected");
  2899. return ERROR_OK;
  2900. }
  2901. if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
  2902. {
  2903. return ERROR_OK;
  2904. }
  2905. while (trace_data)
  2906. {
  2907. int i;
  2908. fileio_write_u32(&file, trace_data->chkpt0);
  2909. fileio_write_u32(&file, trace_data->chkpt1);
  2910. fileio_write_u32(&file, trace_data->last_instruction);
  2911. fileio_write_u32(&file, trace_data->depth);
  2912. for (i = 0; i < trace_data->depth; i++)
  2913. fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
  2914. trace_data = trace_data->next;
  2915. }
  2916. fileio_close(&file);
  2917. return ERROR_OK;
  2918. }
  2919. int xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2920. {
  2921. target_t *target = get_current_target(cmd_ctx);
  2922. armv4_5_common_t *armv4_5;
  2923. xscale_common_t *xscale;
  2924. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2925. {
  2926. return ERROR_OK;
  2927. }
  2928. xscale_analyze_trace(target, cmd_ctx);
  2929. return ERROR_OK;
  2930. }
  2931. int xscale_handle_cp15(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
  2932. {
  2933. target_t *target = get_current_target(cmd_ctx);
  2934. armv4_5_common_t *armv4_5;
  2935. xscale_common_t *xscale;
  2936. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2937. {
  2938. return ERROR_OK;
  2939. }
  2940. if (target->state != TARGET_HALTED)
  2941. {
  2942. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2943. return ERROR_OK;
  2944. }
  2945. uint32_t reg_no = 0;
  2946. reg_t *reg = NULL;
  2947. if (argc > 0)
  2948. {
  2949. reg_no = strtoul(args[0], NULL, 0);
  2950. /*translate from xscale cp15 register no to openocd register*/
  2951. switch (reg_no)
  2952. {
  2953. case 0:
  2954. reg_no = XSCALE_MAINID;
  2955. break;
  2956. case 1:
  2957. reg_no = XSCALE_CTRL;
  2958. break;
  2959. case 2:
  2960. reg_no = XSCALE_TTB;
  2961. break;
  2962. case 3:
  2963. reg_no = XSCALE_DAC;
  2964. break;
  2965. case 5:
  2966. reg_no = XSCALE_FSR;
  2967. break;
  2968. case 6:
  2969. reg_no = XSCALE_FAR;
  2970. break;
  2971. case 13:
  2972. reg_no = XSCALE_PID;
  2973. break;
  2974. case 15:
  2975. reg_no = XSCALE_CPACCESS;
  2976. break;
  2977. default:
  2978. command_print(cmd_ctx, "invalid register number");
  2979. return ERROR_INVALID_ARGUMENTS;
  2980. }
  2981. reg = &xscale->reg_cache->reg_list[reg_no];
  2982. }
  2983. if (argc == 1)
  2984. {
  2985. uint32_t value;
2986. /* read the selected cp15 register */
  2987. xscale_get_reg(reg);
  2988. value = buf_get_u32(reg->value, 0, 32);
  2989. command_print(cmd_ctx, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
  2990. }
  2991. else if (argc == 2)
  2992. {
  2993. uint32_t value = strtoul(args[1], NULL, 0);
  2994. /* send CP write request (command 0x41) */
  2995. xscale_send_u32(target, 0x41);
  2996. /* send CP register number */
  2997. xscale_send_u32(target, reg_no);
  2998. /* send CP register value */
  2999. xscale_send_u32(target, value);
  3000. /* execute cpwait to ensure outstanding operations complete */
  3001. xscale_send_u32(target, 0x53);
  3002. }
  3003. else
  3004. {
  3005. command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
  3006. }
  3007. return ERROR_OK;
  3008. }
  3009. int xscale_register_commands(struct command_context_s *cmd_ctx)
  3010. {
  3011. command_t *xscale_cmd;
  3012. xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
  3013. register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
  3014. register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
  3015. register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
  3016. register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
  3017. register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
  3018. register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
  3019. register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
  3020. register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable|disable> ['fill' [n]|'wrap']");
  3021. register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
  3022. register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
  3023. register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
  3024. COMMAND_EXEC, "load image from <file> [base address]");
  3025. register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
  3026. armv4_5_register_commands(cmd_ctx);
  3027. return ERROR_OK;
  3028. }