You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

3724 lines
97 KiB

  1. /***************************************************************************
  2. * Copyright (C) 2006, 2007 by Dominic Rath *
  3. * Dominic.Rath@gmx.de *
  4. * *
  5. * Copyright (C) 2007,2008 Øyvind Harboe *
  6. * oyvind.harboe@zylin.com *
  7. * *
  8. * This program is free software; you can redistribute it and/or modify *
  9. * it under the terms of the GNU General Public License as published by *
  10. * the Free Software Foundation; either version 2 of the License, or *
  11. * (at your option) any later version. *
  12. * *
  13. * This program is distributed in the hope that it will be useful, *
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  16. * GNU General Public License for more details. *
  17. * *
  18. * You should have received a copy of the GNU General Public License *
  19. * along with this program; if not, write to the *
  20. * Free Software Foundation, Inc., *
  21. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
  22. ***************************************************************************/
  23. #ifdef HAVE_CONFIG_H
  24. #include "config.h"
  25. #endif
  26. #include "replacements.h"
  27. #include "xscale.h"
  28. #include "arm7_9_common.h"
  29. #include "register.h"
  30. #include "target.h"
  31. #include "armv4_5.h"
  32. #include "arm_simulator.h"
  33. #include "arm_disassembler.h"
  34. #include "log.h"
  35. #include "jtag.h"
  36. #include "binarybuffer.h"
  37. #include "time_support.h"
  38. #include "breakpoints.h"
  39. #include "fileio.h"
  40. #include <stdlib.h>
  41. #include <string.h>
  42. #include <sys/types.h>
  43. #include <unistd.h>
  44. #include <errno.h>
/* cli handling */
int xscale_register_commands(struct command_context_s *cmd_ctx);

/* forward declarations */

/* target lifecycle */
int xscale_target_create(struct target_s *target, Jim_Interp *interp);
int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target);
int xscale_quit(void);

/* state reporting and run control */
int xscale_arch_state(struct target_s *target);
int xscale_poll(target_t *target);
int xscale_halt(target_t *target);
int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution);
int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints);
int xscale_debug_entry(target_t *target);
int xscale_restore_context(target_t *target);

/* reset handling */
int xscale_assert_reset(target_t *target);
int xscale_deassert_reset(target_t *target);
int xscale_soft_reset_halt(struct target_s *target);

/* register access */
int xscale_set_reg_u32(reg_t *reg, u32 value);
int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode);
int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value);

/* memory access */
int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer);

/* breakpoints and watchpoints */
int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
void xscale_enable_watchpoints(struct target_s *target);
void xscale_enable_breakpoints(struct target_s *target);

/* MMU / address translation */
static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical);
static int xscale_mmu(struct target_s *target, int *enabled);

/* trace buffer */
int xscale_read_trace(target_t *target);
/* Operations vtable hooking the XScale driver into the generic target
 * layer.  Generic ARMv4/5 and ARM7/9 helpers are reused where XScale
 * needs no special handling (gdb reg list, checksum/blank-check,
 * algorithm execution). */
target_type_t xscale_target =
{
	.name = "xscale",

	.poll = xscale_poll,
	.arch_state = xscale_arch_state,

	.target_request_data = NULL,

	.halt = xscale_halt,
	.resume = xscale_resume,
	.step = xscale_step,

	.assert_reset = xscale_assert_reset,
	.deassert_reset = xscale_deassert_reset,
	.soft_reset_halt = xscale_soft_reset_halt,

	.get_gdb_reg_list = armv4_5_get_gdb_reg_list,

	.read_memory = xscale_read_memory,
	.write_memory = xscale_write_memory,
	.bulk_write_memory = xscale_bulk_write_memory,
	.checksum_memory = arm7_9_checksum_memory,
	.blank_check_memory = arm7_9_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = xscale_add_breakpoint,
	.remove_breakpoint = xscale_remove_breakpoint,
	.add_watchpoint = xscale_add_watchpoint,
	.remove_watchpoint = xscale_remove_watchpoint,

	.register_commands = xscale_register_commands,
	.target_create = xscale_target_create,
	.init_target = xscale_init_target,
	.quit = xscale_quit,

	.virt2phys = xscale_virt2phys,
	.mmu = xscale_mmu
};
/* Names of the XScale-specific registers.  Array order must match the
 * XSCALE_* register index constants (XSCALE_MAINID == 0 etc.) used to
 * index the register cache throughout this file. */
char* xscale_reg_list[] =
{
	"XSCALE_MAINID", /* 0 */
	"XSCALE_CACHETYPE",
	"XSCALE_CTRL",
	"XSCALE_AUXCTRL",
	"XSCALE_TTB",
	"XSCALE_DAC",
	"XSCALE_FSR",
	"XSCALE_FAR",
	"XSCALE_PID",
	"XSCALE_CPACCESS",
	"XSCALE_IBCR0", /* 10 */
	"XSCALE_IBCR1",
	"XSCALE_DBR0",
	"XSCALE_DBR1",
	"XSCALE_DBCON",
	"XSCALE_TBREG",
	"XSCALE_CHKPT0",
	"XSCALE_CHKPT1",
	"XSCALE_DCSR",
	"XSCALE_TX",
	"XSCALE_RX", /* 20 */
	"XSCALE_TXRXCTRL",
};
/* Per-register backing info, parallel to xscale_reg_list above.  The
 * first member is the debug-handler register number used to access the
 * register on the target; -1 marks registers that are not reachable that
 * way and are instead accessed directly over JTAG. */
xscale_reg_t xscale_reg_arch_info[] =
{
	{XSCALE_MAINID, NULL},
	{XSCALE_CACHETYPE, NULL},
	{XSCALE_CTRL, NULL},
	{XSCALE_AUXCTRL, NULL},
	{XSCALE_TTB, NULL},
	{XSCALE_DAC, NULL},
	{XSCALE_FSR, NULL},
	{XSCALE_FAR, NULL},
	{XSCALE_PID, NULL},
	{XSCALE_CPACCESS, NULL},
	{XSCALE_IBCR0, NULL},
	{XSCALE_IBCR1, NULL},
	{XSCALE_DBR0, NULL},
	{XSCALE_DBR1, NULL},
	{XSCALE_DBCON, NULL},
	{XSCALE_TBREG, NULL},
	{XSCALE_CHKPT0, NULL},
	{XSCALE_CHKPT1, NULL},
	{XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
	{-1, NULL}, /* TX accessed via JTAG */
	{-1, NULL}, /* RX accessed via JTAG */
	{-1, NULL}, /* TXRXCTRL implicit access via JTAG */
};
/* Register-type id handed out by the generic register layer; -1 until
 * registered -- NOTE(review): the registration site is not visible in
 * this chunk, presumably the reg cache build code. */
int xscale_reg_arch_type = -1;

int xscale_get_reg(reg_t *reg);
int xscale_set_reg(reg_t *reg, u8 *buf);
  161. int xscale_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
  162. {
  163. armv4_5_common_t *armv4_5 = target->arch_info;
  164. xscale_common_t *xscale = armv4_5->arch_info;
  165. if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
  166. {
  167. LOG_ERROR("target isn't an XScale target");
  168. return -1;
  169. }
  170. if (xscale->common_magic != XSCALE_COMMON_MAGIC)
  171. {
  172. LOG_ERROR("target isn't an XScale target");
  173. return -1;
  174. }
  175. *armv4_5_p = armv4_5;
  176. *xscale_p = xscale;
  177. return ERROR_OK;
  178. }
/* Queue an IR scan loading new_instr into the given TAP's instruction
 * register.  The scan is skipped when the TAP already holds that
 * instruction, avoiding a redundant state walk.
 *
 * Returns ERROR_FAIL for a NULL tap, ERROR_OK otherwise (the scan itself
 * is only queued here, not executed). */
int xscale_jtag_set_instr(jtag_tap_t *tap, u32 new_instr)
{
	if (tap==NULL)
		return ERROR_FAIL;

	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
	{
		scan_field_t field;

		field.tap = tap;
		field.num_bits = tap->ir_length;
		/* heap buffer sized to hold ir_length bits for buf_set_u32() */
		field.out_value = calloc(CEIL(field.num_bits, 8), 1);
		buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
		field.out_mask = NULL;
		field.in_value = NULL;
		/* verify the value captured from the IR against the TAP's
		 * expected pattern */
		jtag_set_check_value(&field, tap->expected, tap->expected_mask, NULL);

		jtag_add_ir_scan(1, &field, -1);

		/* NOTE(review): freeing before jtag_execute_queue() assumes
		 * jtag_add_ir_scan() copies out_value into the queue -- confirm
		 * against the JTAG layer's ownership rules */
		free(field.out_value);
	}

	return ERROR_OK;
}
/* Read the Debug Control and Status Register over JTAG and cache it in
 * reg_list[XSCALE_DCSR].
 *
 * The DR scan is 36 bits: 3 control/status bits (field0), the 32-bit
 * DCSR value (field1), and one trailing bit (field2).  field0 also
 * carries the hold_rst and external_debug_break control bits, so after
 * reading, the just-read DCSR value is written back in a second scan to
 * re-assert those bits.
 *
 * Returns ERROR_OK, or the error from jtag_execute_queue(). */
int xscale_read_dcsr(target_t *target)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	int retval;
	scan_field_t fields[3];
	u8 field0 = 0x0;
	u8 field0_check_value = 0x2;
	u8 field0_check_mask = 0x7;
	u8 field2 = 0x0;
	u8 field2_check_value = 0x0;
	u8 field2_check_mask = 0x1;

	jtag_add_end_state(TAP_DRPAUSE);
	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dcsr);

	/* control bits driven out with every DCSR access */
	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	fields[0].tap = xscale->jtag_info.tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	fields[0].out_mask = NULL;
	fields[0].in_value = NULL;
	jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);

	/* capture the 32-bit DCSR value straight into the register cache */
	fields[1].tap = xscale->jtag_info.tap;
	fields[1].num_bits = 32;
	fields[1].out_value = NULL;
	fields[1].out_mask = NULL;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_handler = NULL;
	fields[1].in_handler_priv = NULL;
	fields[1].in_check_value = NULL;
	fields[1].in_check_mask = NULL;

	fields[2].tap = xscale->jtag_info.tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	fields[2].out_mask = NULL;
	fields[2].in_value = NULL;
	jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);

	jtag_add_dr_scan(3, fields, -1);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while reading DCSR");
		return retval;
	}

	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	/* write the register with the value we just read
	 * on this second pass, only the first bit of field0 is guaranteed to be 0)
	 */
	field0_check_mask = 0x1;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].in_value = NULL;

	jtag_add_end_state(TAP_IDLE);

	jtag_add_dr_scan(3, fields, -1);

	/* DANGER!!! this must be here. It will make sure that the arguments
	 * to jtag_set_check_value() does not go out of scope! */
	return jtag_execute_queue();
}
  255. int xscale_receive(target_t *target, u32 *buffer, int num_words)
  256. {
  257. if (num_words==0)
  258. return ERROR_INVALID_ARGUMENTS;
  259. int retval=ERROR_OK;
  260. armv4_5_common_t *armv4_5 = target->arch_info;
  261. xscale_common_t *xscale = armv4_5->arch_info;
  262. tap_state_t path[3];
  263. scan_field_t fields[3];
  264. u8 *field0 = malloc(num_words * 1);
  265. u8 field0_check_value = 0x2;
  266. u8 field0_check_mask = 0x6;
  267. u32 *field1 = malloc(num_words * 4);
  268. u8 field2_check_value = 0x0;
  269. u8 field2_check_mask = 0x1;
  270. int words_done = 0;
  271. int words_scheduled = 0;
  272. int i;
  273. path[0] = TAP_DRSELECT;
  274. path[1] = TAP_DRCAPTURE;
  275. path[2] = TAP_DRSHIFT;
  276. fields[0].tap = xscale->jtag_info.tap;
  277. fields[0].num_bits = 3;
  278. fields[0].out_value = NULL;
  279. fields[0].out_mask = NULL;
  280. fields[0].in_value = NULL;
  281. jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
  282. fields[1].tap = xscale->jtag_info.tap;
  283. fields[1].num_bits = 32;
  284. fields[1].out_value = NULL;
  285. fields[1].out_mask = NULL;
  286. fields[1].in_value = NULL;
  287. fields[1].in_handler = NULL;
  288. fields[1].in_handler_priv = NULL;
  289. fields[1].in_check_value = NULL;
  290. fields[1].in_check_mask = NULL;
  291. fields[2].tap = xscale->jtag_info.tap;
  292. fields[2].num_bits = 1;
  293. fields[2].out_value = NULL;
  294. fields[2].out_mask = NULL;
  295. fields[2].in_value = NULL;
  296. jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
  297. jtag_add_end_state(TAP_IDLE);
  298. xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgtx);
  299. jtag_add_runtest(1, -1); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
  300. /* repeat until all words have been collected */
  301. int attempts=0;
  302. while (words_done < num_words)
  303. {
  304. /* schedule reads */
  305. words_scheduled = 0;
  306. for (i = words_done; i < num_words; i++)
  307. {
  308. fields[0].in_value = &field0[i];
  309. fields[1].in_handler = buf_to_u32_handler;
  310. fields[1].in_handler_priv = (u8*)&field1[i];
  311. jtag_add_pathmove(3, path);
  312. jtag_add_dr_scan(3, fields, TAP_IDLE);
  313. words_scheduled++;
  314. }
  315. if ((retval = jtag_execute_queue()) != ERROR_OK)
  316. {
  317. LOG_ERROR("JTAG error while receiving data from debug handler");
  318. break;
  319. }
  320. /* examine results */
  321. for (i = words_done; i < num_words; i++)
  322. {
  323. if (!(field0[0] & 1))
  324. {
  325. /* move backwards if necessary */
  326. int j;
  327. for (j = i; j < num_words - 1; j++)
  328. {
  329. field0[j] = field0[j+1];
  330. field1[j] = field1[j+1];
  331. }
  332. words_scheduled--;
  333. }
  334. }
  335. if (words_scheduled==0)
  336. {
  337. if (attempts++==1000)
  338. {
  339. LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
  340. retval=ERROR_TARGET_TIMEOUT;
  341. break;
  342. }
  343. }
  344. words_done += words_scheduled;
  345. }
  346. for (i = 0; i < num_words; i++)
  347. *(buffer++) = buf_get_u32((u8*)&field1[i], 0, 32);
  348. free(field1);
  349. return retval;
  350. }
/* Read the debug handler's TX register, polling until its ready bit is
 * set or one second has elapsed.
 *
 * consume: non-zero clears TX_READY on the target (Capture-DR is exited
 * straight through Shift-DR); zero peeks without consuming (detour via
 * Exit1-DR/Pause-DR so the capture is not committed).
 *
 * The 32-bit value lands in reg_list[XSCALE_TX].value.  Returns
 * ERROR_OK; ERROR_TARGET_RESOURCE_NOT_AVAILABLE when peeking found no
 * data; ERROR_TARGET_TIMEOUT on JTAG error or timeout. */
int xscale_read_tx(target_t *target, int consume)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	tap_state_t path[3];
	tap_state_t noconsume_path[6];

	int retval;
	struct timeval timeout, now;

	scan_field_t fields[3];
	u8 field0_in = 0x0;
	u8 field0_check_value = 0x2;
	u8 field0_check_mask = 0x6;
	u8 field2_check_value = 0x0;
	u8 field2_check_mask = 0x1;

	jtag_add_end_state(TAP_IDLE);

	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgtx);

	/* consuming path: commit the capture by shifting */
	path[0] = TAP_DRSELECT;
	path[1] = TAP_DRCAPTURE;
	path[2] = TAP_DRSHIFT;

	/* non-consuming path: leave via Exit1/Pause before shifting */
	noconsume_path[0] = TAP_DRSELECT;
	noconsume_path[1] = TAP_DRCAPTURE;
	noconsume_path[2] = TAP_DREXIT1;
	noconsume_path[3] = TAP_DRPAUSE;
	noconsume_path[4] = TAP_DREXIT2;
	noconsume_path[5] = TAP_DRSHIFT;

	fields[0].tap = xscale->jtag_info.tap;
	fields[0].num_bits = 3;
	fields[0].out_value = NULL;
	fields[0].out_mask = NULL;
	/* bit 0 of field0_in is the TX ready flag */
	fields[0].in_value = &field0_in;
	jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);

	fields[1].tap = xscale->jtag_info.tap;
	fields[1].num_bits = 32;
	fields[1].out_value = NULL;
	fields[1].out_mask = NULL;
	fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
	fields[1].in_handler = NULL;
	fields[1].in_handler_priv = NULL;
	fields[1].in_check_value = NULL;
	fields[1].in_check_mask = NULL;

	fields[2].tap = xscale->jtag_info.tap;
	fields[2].num_bits = 1;
	fields[2].out_value = NULL;
	fields[2].out_mask = NULL;
	fields[2].in_value = NULL;
	jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	for (;;)
	{
		/* if we want to consume the register content (i.e. clear TX_READY),
		 * we have to go straight from Capture-DR to Shift-DR
		 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
		 */
		if (consume)
			jtag_add_pathmove(3, path);
		else
		{
			jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
		}

		jtag_add_dr_scan(3, fields, TAP_IDLE);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while reading TX");
			return ERROR_TARGET_TIMEOUT;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out reading TX register");
			return ERROR_TARGET_TIMEOUT;
		}

		/* only keep polling when consuming and no data was ready;
		 * a peek returns immediately either way */
		if (!((!(field0_in & 1)) && consume))
		{
			goto done;
		}

		if (debug_level>=3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	if (!(field0_in & 1))
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	return ERROR_OK;
}
/* Write the cached RX register value (reg_list[XSCALE_RX].value) to the
 * debug handler through the DBGRX JTAG register.
 *
 * Polls until the handler has drained the previous word (RX ready bit,
 * bit 0 of field0_in, reads low) or one second elapses, then performs a
 * final scan with the rx_valid flag (field2) set to hand the word over.
 *
 * Returns ERROR_OK, ERROR_TARGET_TIMEOUT, or a JTAG error. */
int xscale_write_rx(target_t *target)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	int retval;
	struct timeval timeout, now;

	scan_field_t fields[3];
	u8 field0_out = 0x0;
	u8 field0_in = 0x0;
	u8 field0_check_value = 0x2;
	u8 field0_check_mask = 0x6;
	u8 field2 = 0x0;
	u8 field2_check_value = 0x0;
	u8 field2_check_mask = 0x1;

	jtag_add_end_state(TAP_IDLE);

	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgrx);

	fields[0].tap = xscale->jtag_info.tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0_out;
	fields[0].out_mask = NULL;
	/* bit 0 of field0_in mirrors whether RX still holds unread data */
	fields[0].in_value = &field0_in;
	jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);

	fields[1].tap = xscale->jtag_info.tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
	fields[1].out_mask = NULL;
	fields[1].in_value = NULL;
	fields[1].in_handler = NULL;
	fields[1].in_handler_priv = NULL;
	fields[1].in_check_value = NULL;
	fields[1].in_check_mask = NULL;

	/* field2 stays 0 while polling; set to 1 only for the final
	 * committing scan below */
	fields[2].tap = xscale->jtag_info.tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	fields[2].out_mask = NULL;
	fields[2].in_value = NULL;
	jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, 1, 0);

	/* poll until rx_read is low */
	LOG_DEBUG("polling RX");
	for (;;)
	{
		jtag_add_dr_scan(3, fields, TAP_IDLE);

		if ((retval = jtag_execute_queue()) != ERROR_OK)
		{
			LOG_ERROR("JTAG error while writing RX");
			return retval;
		}

		gettimeofday(&now, NULL);
		if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
		{
			LOG_ERROR("time out writing RX register");
			return ERROR_TARGET_TIMEOUT;
		}

		if (!(field0_in & 1))
			goto done;

		if (debug_level>=3)
		{
			LOG_DEBUG("waiting 100ms");
			alive_sleep(100); /* avoid flooding the logs */
		} else
		{
			keep_alive();
		}
	}
done:

	/* set rx_valid */
	field2 = 0x1;
	jtag_add_dr_scan(3, fields, TAP_IDLE);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing RX");
		return retval;
	}

	return ERROR_OK;
}
/* Send count elements of size bytes each to the debug handler through
 * the DBGRX JTAG register, converting each element from target
 * endianness.
 *
 * All scans are queued and flushed with a single jtag_execute_queue()
 * for throughput; per-word RX handshaking is not polled here --
 * NOTE(review): assumes the debug handler drains RX fast enough to keep
 * up; contrast with the polled path in xscale_write_rx().
 *
 * size must be 4, 2 or 1; anything else is a caller bug and aborts.
 * Returns ERROR_OK or a JTAG error. */
int xscale_send(target_t *target, u8 *buffer, int count, int size)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	u32 t[3];
	int bits[3];

	int retval;
	int done_count = 0;

	jtag_add_end_state(TAP_IDLE);

	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dbgrx);

	bits[0]=3;
	t[0]=0;        /* field0: control bits, all zero */
	bits[1]=32;    /* field1: data word, filled per element below */
	t[2]=1;        /* field2: rx_valid flag */
	bits[2]=1;

	int endianness = target->endianness;
	while (done_count++ < count)
	{
		switch (size)
		{
		case 4:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u32(buffer);
			} else
			{
				t[1]=be_to_h_u32(buffer);
			}
			break;
		case 2:
			if (endianness == TARGET_LITTLE_ENDIAN)
			{
				t[1]=le_to_h_u16(buffer);
			} else
			{
				t[1]=be_to_h_u16(buffer);
			}
			break;
		case 1:
			t[1]=buffer[0];
			break;
		default:
			LOG_ERROR("BUG: size neither 4, 2 nor 1");
			exit(-1);
		}

		jtag_add_dr_out(xscale->jtag_info.tap,
				3,
				bits,
				t,
				TAP_IDLE);
		buffer += size;
	}

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while sending data to debug handler");
		return retval;
	}

	return ERROR_OK;
}
  578. int xscale_send_u32(target_t *target, u32 value)
  579. {
  580. armv4_5_common_t *armv4_5 = target->arch_info;
  581. xscale_common_t *xscale = armv4_5->arch_info;
  582. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
  583. return xscale_write_rx(target);
  584. }
/* Write the cached DCSR value (reg_list[XSCALE_DCSR].value) to the
 * target over JTAG.
 *
 * hold_rst / ext_dbg_brk update the corresponding sticky control bits
 * carried in the 3-bit field0 of the SELDCSR scan; pass -1 to leave
 * either unchanged.
 *
 * Returns ERROR_OK or a JTAG error. */
int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	int retval;
	scan_field_t fields[3];
	u8 field0 = 0x0;
	u8 field0_check_value = 0x2;
	u8 field0_check_mask = 0x7;
	u8 field2 = 0x0;
	u8 field2_check_value = 0x0;
	u8 field2_check_mask = 0x1;

	/* -1 means "keep current setting" */
	if (hold_rst != -1)
		xscale->hold_rst = hold_rst;

	if (ext_dbg_brk != -1)
		xscale->external_debug_break = ext_dbg_brk;

	jtag_add_end_state(TAP_IDLE);
	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dcsr);

	buf_set_u32(&field0, 1, 1, xscale->hold_rst);
	buf_set_u32(&field0, 2, 1, xscale->external_debug_break);

	fields[0].tap = xscale->jtag_info.tap;
	fields[0].num_bits = 3;
	fields[0].out_value = &field0;
	fields[0].out_mask = NULL;
	fields[0].in_value = NULL;
	jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);

	fields[1].tap = xscale->jtag_info.tap;
	fields[1].num_bits = 32;
	fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
	fields[1].out_mask = NULL;
	fields[1].in_value = NULL;
	fields[1].in_handler = NULL;
	fields[1].in_handler_priv = NULL;
	fields[1].in_check_value = NULL;
	fields[1].in_check_mask = NULL;

	fields[2].tap = xscale->jtag_info.tap;
	fields[2].num_bits = 1;
	fields[2].out_value = &field2;
	fields[2].out_mask = NULL;
	fields[2].in_value = NULL;
	jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);

	jtag_add_dr_scan(3, fields, -1);

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("JTAG error while writing DCSR");
		return retval;
	}

	/* the cache now matches the hardware */
	xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
	xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;

	return ERROR_OK;
}
  636. /* parity of the number of bits 0 if even; 1 if odd. for 32 bit words */
  637. unsigned int parity (unsigned int v)
  638. {
  639. unsigned int ov = v;
  640. v ^= v >> 16;
  641. v ^= v >> 8;
  642. v ^= v >> 4;
  643. v &= 0xf;
  644. LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
  645. return (0x6996 >> v) & 1;
  646. }
  647. int xscale_load_ic(target_t *target, int mini, u32 va, u32 buffer[8])
  648. {
  649. armv4_5_common_t *armv4_5 = target->arch_info;
  650. xscale_common_t *xscale = armv4_5->arch_info;
  651. u8 packet[4];
  652. u8 cmd;
  653. int word;
  654. scan_field_t fields[2];
  655. LOG_DEBUG("loading miniIC at 0x%8.8x", va);
  656. jtag_add_end_state(TAP_IDLE);
  657. xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.ldic); /* LDIC */
  658. /* CMD is b010 for Main IC and b011 for Mini IC */
  659. if (mini)
  660. buf_set_u32(&cmd, 0, 3, 0x3);
  661. else
  662. buf_set_u32(&cmd, 0, 3, 0x2);
  663. buf_set_u32(&cmd, 3, 3, 0x0);
  664. /* virtual address of desired cache line */
  665. buf_set_u32(packet, 0, 27, va >> 5);
  666. fields[0].tap = xscale->jtag_info.tap;
  667. fields[0].num_bits = 6;
  668. fields[0].out_value = &cmd;
  669. fields[0].out_mask = NULL;
  670. fields[0].in_value = NULL;
  671. fields[0].in_check_value = NULL;
  672. fields[0].in_check_mask = NULL;
  673. fields[0].in_handler = NULL;
  674. fields[0].in_handler_priv = NULL;
  675. fields[1].tap = xscale->jtag_info.tap;
  676. fields[1].num_bits = 27;
  677. fields[1].out_value = packet;
  678. fields[1].out_mask = NULL;
  679. fields[1].in_value = NULL;
  680. fields[1].in_check_value = NULL;
  681. fields[1].in_check_mask = NULL;
  682. fields[1].in_handler = NULL;
  683. fields[1].in_handler_priv = NULL;
  684. jtag_add_dr_scan(2, fields, -1);
  685. fields[0].num_bits = 32;
  686. fields[0].out_value = packet;
  687. fields[1].num_bits = 1;
  688. fields[1].out_value = &cmd;
  689. for (word = 0; word < 8; word++)
  690. {
  691. buf_set_u32(packet, 0, 32, buffer[word]);
  692. cmd = parity(*((u32*)packet));
  693. jtag_add_dr_scan(2, fields, -1);
  694. }
  695. jtag_execute_queue();
  696. return ERROR_OK;
  697. }
/* Invalidate the instruction-cache line containing virtual address va,
 * using the LDIC JTAG function with the "invalidate IC line" command.
 *
 * The scan is only queued here; it is flushed by a later
 * jtag_execute_queue() call (e.g. from xscale_load_ic()).
 * Always returns ERROR_OK. */
int xscale_invalidate_ic_line(target_t *target, u32 va)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	u8 packet[4];
	u8 cmd;
	scan_field_t fields[2];

	jtag_add_end_state(TAP_IDLE);
	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.ldic); /* LDIC */

	/* CMD for invalidate IC line b000, bits [6:4] b000 */
	buf_set_u32(&cmd, 0, 6, 0x0);

	/* virtual address of desired cache line */
	buf_set_u32(packet, 0, 27, va >> 5);

	fields[0].tap = xscale->jtag_info.tap;
	fields[0].num_bits = 6;
	fields[0].out_value = &cmd;
	fields[0].out_mask = NULL;
	fields[0].in_value = NULL;
	fields[0].in_check_value = NULL;
	fields[0].in_check_mask = NULL;
	fields[0].in_handler = NULL;
	fields[0].in_handler_priv = NULL;

	fields[1].tap = xscale->jtag_info.tap;
	fields[1].num_bits = 27;
	fields[1].out_value = packet;
	fields[1].out_mask = NULL;
	fields[1].in_value = NULL;
	fields[1].in_check_value = NULL;
	fields[1].in_check_mask = NULL;
	fields[1].in_handler = NULL;
	fields[1].in_handler_priv = NULL;

	jtag_add_dr_scan(2, fields, -1);

	return ERROR_OK;
}
/* Rebuild both exception-vector tables (low at 0x0, high at 0xffff0000)
 * in the mini instruction cache.
 *
 * Vectors 1..7 come either from user-configured static overrides or from
 * target memory; vector 0 (reset) is always replaced by a branch into
 * the debug handler so that a reset traps into debug mode.
 *
 * Returns ERROR_OK, or ERROR_TARGET_TIMEOUT if a vector read timed out
 * (other read failures fall back to a self-branch and are tolerated). */
int xscale_update_vectors(target_t *target)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;
	int i;
	int retval;

	u32 low_reset_branch, high_reset_branch;

	for (i = 1; i < 8; i++)
	{
		/* if there's a static vector specified for this exception, override */
		if (xscale->static_high_vectors_set & (1 << i))
		{
			xscale->high_vectors[i] = xscale->static_high_vectors[i];
		}
		else
		{
			retval=target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval!=ERROR_OK)
			{
				/* Some of these reads will fail as part of normal execution */
				/* fall back to a branch-to-self instruction */
				xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	for (i = 1; i < 8; i++)
	{
		if (xscale->static_low_vectors_set & (1 << i))
		{
			xscale->low_vectors[i] = xscale->static_low_vectors[i];
		}
		else
		{
			retval=target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
			if (retval == ERROR_TARGET_TIMEOUT)
				return retval;
			if (retval!=ERROR_OK)
			{
				/* Some of these reads will fail as part of normal execution */
				xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
			}
		}
	}

	/* calculate branches to debug handler */
	/* B-instruction offset: (dest - vector_addr - 8) >> 2, where the
	 * handler entry is at handler_address + 0x20 */
	low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
	high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

	xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
	xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

	/* invalidate and load exception vectors in mini i-cache */
	xscale_invalidate_ic_line(target, 0x0);
	xscale_invalidate_ic_line(target, 0xffff0000);

	xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
	xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);

	return ERROR_OK;
}
/* Print a human-readable summary of the halted target's state: core
 * state/mode, CPSR and PC, MMU/cache enables and the XScale-specific
 * debug reason.  Called by the generic target layer after a halt.
 *
 * NOTE(review): indexing arch_dbg_reason assumes
 * xscale->arch_debug_reason is always 0..2 -- confirm at its
 * assignment sites. */
int xscale_arch_state(struct target_s *target)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	char *state[] =
	{
		"disabled", "enabled"
	};

	char *arch_dbg_reason[] =
	{
		"", "\n(processor reset)", "\n(trace buffer full)"
	};

	if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
	{
		LOG_ERROR("BUG: called for a non-ARMv4/5 target");
		exit(-1);
	}

	LOG_USER("target halted in %s state due to %s, current mode: %s\n"
			"cpsr: 0x%8.8x pc: 0x%8.8x\n"
			"MMU: %s, D-Cache: %s, I-Cache: %s"
			"%s",
			 armv4_5_state_strings[armv4_5->core_state],
			 Jim_Nvp_value2name_simple( nvp_target_debug_reason, target->debug_reason )->name ,
			 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
			 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
			 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
			 state[xscale->armv4_5_mmu.mmu_enabled],
			 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
			 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
			 arch_dbg_reason[xscale->arch_debug_reason]);

	return ERROR_OK;
}
/* Poll a running target for entry into debug state.
 *
 * A successful peek of the TX register (without consuming) means the
 * debug handler is running and the core has halted; debug entry is then
 * processed and the appropriate HALTED/DEBUG_HALTED event is fired.
 * Any TX error other than "no data yet" marks the target halted anyway
 * so GDB is not wedged and a reset remains possible. */
int xscale_poll(target_t *target)
{
	int retval=ERROR_OK;
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
	{
		enum target_state previous_state = target->state;
		/* consume==0: peek only, leave TX_READY intact for debug entry */
		if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
		{
			/* there's data to read from the tx register, we entered debug state */
			xscale->handler_running = 1;

			target->state = TARGET_HALTED;

			/* process debug entry, fetching current mode regs */
			retval = xscale_debug_entry(target);
		}
		else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
		{
			LOG_USER("error while polling TX register, reset CPU");
			/* here we "lie" so GDB won't get stuck and a reset can be perfomed */
			target->state = TARGET_HALTED;
		}

		/* debug_entry could have overwritten target state (i.e. immediate resume)
		 * don't signal event handlers in that case
		 */
		if (target->state != TARGET_HALTED)
			return ERROR_OK;

		/* if target was running, signal that we halted
		 * otherwise we reentered from debug execution */
		if (previous_state == TARGET_RUNNING)
			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		else
			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
	}

	return retval;
}
  856. int xscale_debug_entry(target_t *target)
  857. {
  858. armv4_5_common_t *armv4_5 = target->arch_info;
  859. xscale_common_t *xscale = armv4_5->arch_info;
  860. u32 pc;
  861. u32 buffer[10];
  862. int i;
  863. int retval;
  864. u32 moe;
  865. /* clear external dbg break (will be written on next DCSR read) */
  866. xscale->external_debug_break = 0;
  867. if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
  868. return retval;
  869. /* get r0, pc, r1 to r7 and cpsr */
  870. if ((retval=xscale_receive(target, buffer, 10))!=ERROR_OK)
  871. return retval;
  872. /* move r0 from buffer to register cache */
  873. buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
  874. armv4_5->core_cache->reg_list[15].dirty = 1;
  875. armv4_5->core_cache->reg_list[15].valid = 1;
  876. LOG_DEBUG("r0: 0x%8.8x", buffer[0]);
  877. /* move pc from buffer to register cache */
  878. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
  879. armv4_5->core_cache->reg_list[15].dirty = 1;
  880. armv4_5->core_cache->reg_list[15].valid = 1;
  881. LOG_DEBUG("pc: 0x%8.8x", buffer[1]);
  882. /* move data from buffer to register cache */
  883. for (i = 1; i <= 7; i++)
  884. {
  885. buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
  886. armv4_5->core_cache->reg_list[i].dirty = 1;
  887. armv4_5->core_cache->reg_list[i].valid = 1;
  888. LOG_DEBUG("r%i: 0x%8.8x", i, buffer[i + 1]);
  889. }
  890. buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
  891. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
  892. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
  893. LOG_DEBUG("cpsr: 0x%8.8x", buffer[9]);
  894. armv4_5->core_mode = buffer[9] & 0x1f;
  895. if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
  896. {
  897. target->state = TARGET_UNKNOWN;
  898. LOG_ERROR("cpsr contains invalid mode value - communication failure");
  899. return ERROR_TARGET_FAILURE;
  900. }
  901. LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
  902. if (buffer[9] & 0x20)
  903. armv4_5->core_state = ARMV4_5_STATE_THUMB;
  904. else
  905. armv4_5->core_state = ARMV4_5_STATE_ARM;
  906. if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
  907. return ERROR_FAIL;
  908. /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
  909. if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
  910. {
  911. xscale_receive(target, buffer, 8);
  912. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
  913. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
  914. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
  915. }
  916. else
  917. {
  918. /* r8 to r14, but no spsr */
  919. xscale_receive(target, buffer, 7);
  920. }
  921. /* move data from buffer to register cache */
  922. for (i = 8; i <= 14; i++)
  923. {
  924. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
  925. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
  926. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
  927. }
  928. /* examine debug reason */
  929. xscale_read_dcsr(target);
  930. moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
  931. /* stored PC (for calculating fixup) */
  932. pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  933. switch (moe)
  934. {
  935. case 0x0: /* Processor reset */
  936. target->debug_reason = DBG_REASON_DBGRQ;
  937. xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
  938. pc -= 4;
  939. break;
  940. case 0x1: /* Instruction breakpoint hit */
  941. target->debug_reason = DBG_REASON_BREAKPOINT;
  942. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  943. pc -= 4;
  944. break;
  945. case 0x2: /* Data breakpoint hit */
  946. target->debug_reason = DBG_REASON_WATCHPOINT;
  947. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  948. pc -= 4;
  949. break;
  950. case 0x3: /* BKPT instruction executed */
  951. target->debug_reason = DBG_REASON_BREAKPOINT;
  952. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  953. pc -= 4;
  954. break;
  955. case 0x4: /* Ext. debug event */
  956. target->debug_reason = DBG_REASON_DBGRQ;
  957. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  958. pc -= 4;
  959. break;
  960. case 0x5: /* Vector trap occured */
  961. target->debug_reason = DBG_REASON_BREAKPOINT;
  962. xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
  963. pc -= 4;
  964. break;
  965. case 0x6: /* Trace buffer full break */
  966. target->debug_reason = DBG_REASON_DBGRQ;
  967. xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
  968. pc -= 4;
  969. break;
  970. case 0x7: /* Reserved */
  971. default:
  972. LOG_ERROR("Method of Entry is 'Reserved'");
  973. exit(-1);
  974. break;
  975. }
  976. /* apply PC fixup */
  977. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
  978. /* on the first debug entry, identify cache type */
  979. if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
  980. {
  981. u32 cache_type_reg;
  982. /* read cp15 cache type register */
  983. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
  984. cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
  985. armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
  986. }
  987. /* examine MMU and Cache settings */
  988. /* read cp15 control register */
  989. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  990. xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  991. xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
  992. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
  993. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
  994. /* tracing enabled, read collected trace data */
  995. if (xscale->trace.buffer_enabled)
  996. {
  997. xscale_read_trace(target);
  998. xscale->trace.buffer_fill--;
  999. /* resume if we're still collecting trace data */
  1000. if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
  1001. && (xscale->trace.buffer_fill > 0))
  1002. {
  1003. xscale_resume(target, 1, 0x0, 1, 0);
  1004. }
  1005. else
  1006. {
  1007. xscale->trace.buffer_enabled = 0;
  1008. }
  1009. }
  1010. return ERROR_OK;
  1011. }
  1012. int xscale_halt(target_t *target)
  1013. {
  1014. armv4_5_common_t *armv4_5 = target->arch_info;
  1015. xscale_common_t *xscale = armv4_5->arch_info;
  1016. LOG_DEBUG("target->state: %s",
  1017. Jim_Nvp_value2name_simple( nvp_target_state, target->state )->name);
  1018. if (target->state == TARGET_HALTED)
  1019. {
  1020. LOG_DEBUG("target was already halted");
  1021. return ERROR_OK;
  1022. }
  1023. else if (target->state == TARGET_UNKNOWN)
  1024. {
  1025. /* this must not happen for a xscale target */
  1026. LOG_ERROR("target was in unknown state when halt was requested");
  1027. return ERROR_TARGET_INVALID;
  1028. }
  1029. else if (target->state == TARGET_RESET)
  1030. {
  1031. LOG_DEBUG("target->state == TARGET_RESET");
  1032. }
  1033. else
  1034. {
  1035. /* assert external dbg break */
  1036. xscale->external_debug_break = 1;
  1037. xscale_read_dcsr(target);
  1038. target->debug_reason = DBG_REASON_DBGRQ;
  1039. }
  1040. return ERROR_OK;
  1041. }
  1042. int xscale_enable_single_step(struct target_s *target, u32 next_pc)
  1043. {
  1044. armv4_5_common_t *armv4_5 = target->arch_info;
  1045. xscale_common_t *xscale= armv4_5->arch_info;
  1046. reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
  1047. int retval;
  1048. if (xscale->ibcr0_used)
  1049. {
  1050. breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
  1051. if (ibcr0_bp)
  1052. {
  1053. xscale_unset_breakpoint(target, ibcr0_bp);
  1054. }
  1055. else
  1056. {
  1057. LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
  1058. exit(-1);
  1059. }
  1060. }
  1061. if ((retval=xscale_set_reg_u32(ibcr0, next_pc | 0x1))!=ERROR_OK)
  1062. return retval;
  1063. return ERROR_OK;
  1064. }
  1065. int xscale_disable_single_step(struct target_s *target)
  1066. {
  1067. armv4_5_common_t *armv4_5 = target->arch_info;
  1068. xscale_common_t *xscale= armv4_5->arch_info;
  1069. reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
  1070. int retval;
  1071. if ((retval=xscale_set_reg_u32(ibcr0, 0x0))!=ERROR_OK)
  1072. return retval;
  1073. return ERROR_OK;
  1074. }
  1075. int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
  1076. {
  1077. armv4_5_common_t *armv4_5 = target->arch_info;
  1078. xscale_common_t *xscale= armv4_5->arch_info;
  1079. breakpoint_t *breakpoint = target->breakpoints;
  1080. u32 current_pc;
  1081. int retval;
  1082. int i;
  1083. LOG_DEBUG("-");
  1084. if (target->state != TARGET_HALTED)
  1085. {
  1086. LOG_WARNING("target not halted");
  1087. return ERROR_TARGET_NOT_HALTED;
  1088. }
  1089. if (!debug_execution)
  1090. {
  1091. target_free_all_working_areas(target);
  1092. }
  1093. /* update vector tables */
  1094. if ((retval=xscale_update_vectors(target))!=ERROR_OK)
  1095. return retval;
  1096. /* current = 1: continue on current pc, otherwise continue at <address> */
  1097. if (!current)
  1098. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
  1099. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1100. /* if we're at the reset vector, we have to simulate the branch */
  1101. if (current_pc == 0x0)
  1102. {
  1103. arm_simulate_step(target, NULL);
  1104. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1105. }
  1106. /* the front-end may request us not to handle breakpoints */
  1107. if (handle_breakpoints)
  1108. {
  1109. if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
  1110. {
  1111. u32 next_pc;
  1112. /* there's a breakpoint at the current PC, we have to step over it */
  1113. LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
  1114. xscale_unset_breakpoint(target, breakpoint);
  1115. /* calculate PC of next instruction */
  1116. if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
  1117. {
  1118. u32 current_opcode;
  1119. target_read_u32(target, current_pc, &current_opcode);
  1120. LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
  1121. }
  1122. LOG_DEBUG("enable single-step");
  1123. xscale_enable_single_step(target, next_pc);
  1124. /* restore banked registers */
  1125. xscale_restore_context(target);
  1126. /* send resume request (command 0x30 or 0x31)
  1127. * clean the trace buffer if it is to be enabled (0x62) */
  1128. if (xscale->trace.buffer_enabled)
  1129. {
  1130. xscale_send_u32(target, 0x62);
  1131. xscale_send_u32(target, 0x31);
  1132. }
  1133. else
  1134. xscale_send_u32(target, 0x30);
  1135. /* send CPSR */
  1136. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1137. LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1138. for (i = 7; i >= 0; i--)
  1139. {
  1140. /* send register */
  1141. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1142. LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1143. }
  1144. /* send PC */
  1145. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1146. LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1147. /* wait for and process debug entry */
  1148. xscale_debug_entry(target);
  1149. LOG_DEBUG("disable single-step");
  1150. xscale_disable_single_step(target);
  1151. LOG_DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
  1152. xscale_set_breakpoint(target, breakpoint);
  1153. }
  1154. }
  1155. /* enable any pending breakpoints and watchpoints */
  1156. xscale_enable_breakpoints(target);
  1157. xscale_enable_watchpoints(target);
  1158. /* restore banked registers */
  1159. xscale_restore_context(target);
  1160. /* send resume request (command 0x30 or 0x31)
  1161. * clean the trace buffer if it is to be enabled (0x62) */
  1162. if (xscale->trace.buffer_enabled)
  1163. {
  1164. xscale_send_u32(target, 0x62);
  1165. xscale_send_u32(target, 0x31);
  1166. }
  1167. else
  1168. xscale_send_u32(target, 0x30);
  1169. /* send CPSR */
  1170. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1171. LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1172. for (i = 7; i >= 0; i--)
  1173. {
  1174. /* send register */
  1175. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1176. LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1177. }
  1178. /* send PC */
  1179. xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1180. LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1181. target->debug_reason = DBG_REASON_NOTHALTED;
  1182. if (!debug_execution)
  1183. {
  1184. /* registers are now invalid */
  1185. armv4_5_invalidate_core_regs(target);
  1186. target->state = TARGET_RUNNING;
  1187. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  1188. }
  1189. else
  1190. {
  1191. target->state = TARGET_DEBUG_RUNNING;
  1192. target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
  1193. }
  1194. LOG_DEBUG("target resumed");
  1195. xscale->handler_running = 1;
  1196. return ERROR_OK;
  1197. }
  1198. static int xscale_step_inner(struct target_s *target, int current, u32 address, int handle_breakpoints)
  1199. {
  1200. armv4_5_common_t *armv4_5 = target->arch_info;
  1201. xscale_common_t *xscale = armv4_5->arch_info;
  1202. u32 current_pc, next_pc;
  1203. int retval;
  1204. int i;
  1205. target->debug_reason = DBG_REASON_SINGLESTEP;
  1206. /* calculate PC of next instruction */
  1207. if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
  1208. {
  1209. u32 current_opcode;
  1210. target_read_u32(target, current_pc, &current_opcode);
  1211. LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
  1212. return retval;
  1213. }
  1214. LOG_DEBUG("enable single-step");
  1215. if ((retval=xscale_enable_single_step(target, next_pc))!=ERROR_OK)
  1216. return retval;
  1217. /* restore banked registers */
  1218. if ((retval=xscale_restore_context(target))!=ERROR_OK)
  1219. return retval;
  1220. /* send resume request (command 0x30 or 0x31)
  1221. * clean the trace buffer if it is to be enabled (0x62) */
  1222. if (xscale->trace.buffer_enabled)
  1223. {
  1224. if ((retval=xscale_send_u32(target, 0x62))!=ERROR_OK)
  1225. return retval;
  1226. if ((retval=xscale_send_u32(target, 0x31))!=ERROR_OK)
  1227. return retval;
  1228. }
  1229. else
  1230. if ((retval=xscale_send_u32(target, 0x30))!=ERROR_OK)
  1231. return retval;
  1232. /* send CPSR */
  1233. if ((retval=xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32)))!=ERROR_OK)
  1234. return retval;
  1235. LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1236. for (i = 7; i >= 0; i--)
  1237. {
  1238. /* send register */
  1239. if ((retval=xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32)))!=ERROR_OK)
  1240. return retval;
  1241. LOG_DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
  1242. }
  1243. /* send PC */
  1244. if ((retval=xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32)))!=ERROR_OK)
  1245. return retval;
  1246. LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1247. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  1248. /* registers are now invalid */
  1249. if ((retval=armv4_5_invalidate_core_regs(target))!=ERROR_OK)
  1250. return retval;
  1251. /* wait for and process debug entry */
  1252. if ((retval=xscale_debug_entry(target))!=ERROR_OK)
  1253. return retval;
  1254. LOG_DEBUG("disable single-step");
  1255. if ((retval=xscale_disable_single_step(target))!=ERROR_OK)
  1256. return retval;
  1257. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  1258. return ERROR_OK;
  1259. }
  1260. int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
  1261. {
  1262. armv4_5_common_t *armv4_5 = target->arch_info;
  1263. breakpoint_t *breakpoint = target->breakpoints;
  1264. u32 current_pc;
  1265. int retval;
  1266. if (target->state != TARGET_HALTED)
  1267. {
  1268. LOG_WARNING("target not halted");
  1269. return ERROR_TARGET_NOT_HALTED;
  1270. }
  1271. /* current = 1: continue on current pc, otherwise continue at <address> */
  1272. if (!current)
  1273. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
  1274. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1275. /* if we're at the reset vector, we have to simulate the step */
  1276. if (current_pc == 0x0)
  1277. {
  1278. if ((retval=arm_simulate_step(target, NULL))!=ERROR_OK)
  1279. return retval;
  1280. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1281. target->debug_reason = DBG_REASON_SINGLESTEP;
  1282. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  1283. return ERROR_OK;
  1284. }
  1285. /* the front-end may request us not to handle breakpoints */
  1286. if (handle_breakpoints)
  1287. if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
  1288. {
  1289. if ((retval=xscale_unset_breakpoint(target, breakpoint))!=ERROR_OK)
  1290. return retval;
  1291. }
  1292. retval = xscale_step_inner(target, current, address, handle_breakpoints);
  1293. if (breakpoint)
  1294. {
  1295. xscale_set_breakpoint(target, breakpoint);
  1296. }
  1297. LOG_DEBUG("target stepped");
  1298. return ERROR_OK;
  1299. }
/* Assert SRST on the target while configuring the debug unit so the core
 * traps into the debug handler when reset is released.
 * NOTE: the JTAG operations below are queued; their relative order is
 * significant and must not be changed. */
int xscale_assert_reset(target_t *target)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	LOG_DEBUG("target->state: %s",
		Jim_Nvp_value2name_simple( nvp_target_state, target->state )->name);

	/* select DCSR instruction (set endstate to R-T-I to ensure we don't
	 * end up in T-L-R, which would reset JTAG
	 */
	jtag_add_end_state(TAP_IDLE);
	xscale_jtag_set_instr(xscale->jtag_info.tap, xscale->jtag_info.dcsr);

	/* set Hold reset, Halt mode and Trap Reset */
	/* (DCSR bits 30 and 16 — presumably hold-reset and trap-reset;
	 * TODO(review): confirm bit meanings against the XScale manual) */
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
	buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
	xscale_write_dcsr(target, 1, 0);

	/* select BYPASS, because having DCSR selected caused problems on the PXA27x */
	xscale_jtag_set_instr(xscale->jtag_info.tap, 0x7f);
	jtag_execute_queue();

	/* assert reset */
	jtag_add_reset(0, 1);

	/* sleep 1ms, to be sure we fulfill any requirements */
	jtag_add_sleep(1000);
	jtag_execute_queue();

	target->state = TARGET_RESET;

	/* honor a reset-halt request immediately */
	if (target->reset_halt)
	{
		int retval;
		if ((retval = target_halt(target))!=ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
  1332. int xscale_deassert_reset(target_t *target)
  1333. {
  1334. armv4_5_common_t *armv4_5 = target->arch_info;
  1335. xscale_common_t *xscale = armv4_5->arch_info;
  1336. fileio_t debug_handler;
  1337. u32 address;
  1338. u32 binary_size;
  1339. u32 buf_cnt;
  1340. int i;
  1341. int retval;
  1342. breakpoint_t *breakpoint = target->breakpoints;
  1343. LOG_DEBUG("-");
  1344. xscale->ibcr_available = 2;
  1345. xscale->ibcr0_used = 0;
  1346. xscale->ibcr1_used = 0;
  1347. xscale->dbr_available = 2;
  1348. xscale->dbr0_used = 0;
  1349. xscale->dbr1_used = 0;
  1350. /* mark all hardware breakpoints as unset */
  1351. while (breakpoint)
  1352. {
  1353. if (breakpoint->type == BKPT_HARD)
  1354. {
  1355. breakpoint->set = 0;
  1356. }
  1357. breakpoint = breakpoint->next;
  1358. }
  1359. if (!xscale->handler_installed)
  1360. {
  1361. /* release SRST */
  1362. jtag_add_reset(0, 0);
  1363. /* wait 300ms; 150 and 100ms were not enough */
  1364. jtag_add_sleep(300*1000);
  1365. jtag_add_runtest(2030, TAP_IDLE);
  1366. jtag_execute_queue();
  1367. /* set Hold reset, Halt mode and Trap Reset */
  1368. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
  1369. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
  1370. xscale_write_dcsr(target, 1, 0);
  1371. /* Load debug handler */
  1372. if (fileio_open(&debug_handler, "xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
  1373. {
  1374. return ERROR_OK;
  1375. }
  1376. if ((binary_size = debug_handler.size) % 4)
  1377. {
  1378. LOG_ERROR("debug_handler.bin: size not a multiple of 4");
  1379. exit(-1);
  1380. }
  1381. if (binary_size > 0x800)
  1382. {
  1383. LOG_ERROR("debug_handler.bin: larger than 2kb");
  1384. exit(-1);
  1385. }
  1386. binary_size = CEIL(binary_size, 32) * 32;
  1387. address = xscale->handler_address;
  1388. while (binary_size > 0)
  1389. {
  1390. u32 cache_line[8];
  1391. u8 buffer[32];
  1392. if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
  1393. {
  1394. }
  1395. for (i = 0; i < buf_cnt; i += 4)
  1396. {
  1397. /* convert LE buffer to host-endian u32 */
  1398. cache_line[i / 4] = le_to_h_u32(&buffer[i]);
  1399. }
  1400. for (; buf_cnt < 32; buf_cnt += 4)
  1401. {
  1402. cache_line[buf_cnt / 4] = 0xe1a08008;
  1403. }
  1404. /* only load addresses other than the reset vectors */
  1405. if ((address % 0x400) != 0x0)
  1406. {
  1407. xscale_load_ic(target, 1, address, cache_line);
  1408. }
  1409. address += buf_cnt;
  1410. binary_size -= buf_cnt;
  1411. };
  1412. xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
  1413. xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
  1414. jtag_add_runtest(30, TAP_IDLE);
  1415. jtag_add_sleep(100000);
  1416. /* set Hold reset, Halt mode and Trap Reset */
  1417. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
  1418. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
  1419. xscale_write_dcsr(target, 1, 0);
  1420. /* clear Hold reset to let the target run (should enter debug handler) */
  1421. xscale_write_dcsr(target, 0, 1);
  1422. target->state = TARGET_RUNNING;
  1423. if (!target->reset_halt)
  1424. {
  1425. jtag_add_sleep(10000);
  1426. /* we should have entered debug now */
  1427. xscale_debug_entry(target);
  1428. target->state = TARGET_HALTED;
  1429. /* resume the target */
  1430. xscale_resume(target, 1, 0x0, 1, 0);
  1431. }
  1432. fileio_close(&debug_handler);
  1433. }
  1434. else
  1435. {
  1436. jtag_add_reset(0, 0);
  1437. }
  1438. return ERROR_OK;
  1439. }
/* Soft-reset-halt is a no-op stub on XScale; reset handling is done in
 * xscale_assert_reset()/xscale_deassert_reset() via the debug handler.
 * Always reports success. */
int xscale_soft_reset_halt(struct target_s *target)
{
	return ERROR_OK;
}
/* Per-register read hook: a no-op stub here. Core registers appear to be
 * transferred in bulk by xscale_debug_entry()/xscale_full_context()
 * instead — NOTE(review): confirm no caller relies on this doing work. */
int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
{
	return ERROR_OK;
}
/* Per-register write hook: a no-op stub here. Dirty registers appear to
 * be flushed in bulk by xscale_restore_context() instead —
 * NOTE(review): confirm no caller relies on this doing work. */
int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
{
	return ERROR_OK;
}
  1452. int xscale_full_context(target_t *target)
  1453. {
  1454. armv4_5_common_t *armv4_5 = target->arch_info;
  1455. u32 *buffer;
  1456. int i, j;
  1457. LOG_DEBUG("-");
  1458. if (target->state != TARGET_HALTED)
  1459. {
  1460. LOG_WARNING("target not halted");
  1461. return ERROR_TARGET_NOT_HALTED;
  1462. }
  1463. buffer = malloc(4 * 8);
  1464. /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
  1465. * we can't enter User mode on an XScale (unpredictable),
  1466. * but User shares registers with SYS
  1467. */
  1468. for(i = 1; i < 7; i++)
  1469. {
  1470. int valid = 1;
  1471. /* check if there are invalid registers in the current mode
  1472. */
  1473. for (j = 0; j <= 16; j++)
  1474. {
  1475. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
  1476. valid = 0;
  1477. }
  1478. if (!valid)
  1479. {
  1480. u32 tmp_cpsr;
  1481. /* request banked registers */
  1482. xscale_send_u32(target, 0x0);
  1483. tmp_cpsr = 0x0;
  1484. tmp_cpsr |= armv4_5_number_to_mode(i);
  1485. tmp_cpsr |= 0xc0; /* I/F bits */
  1486. /* send CPSR for desired mode */
  1487. xscale_send_u32(target, tmp_cpsr);
  1488. /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
  1489. if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
  1490. {
  1491. xscale_receive(target, buffer, 8);
  1492. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
  1493. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
  1494. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
  1495. }
  1496. else
  1497. {
  1498. xscale_receive(target, buffer, 7);
  1499. }
  1500. /* move data from buffer to register cache */
  1501. for (j = 8; j <= 14; j++)
  1502. {
  1503. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
  1504. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
  1505. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
  1506. }
  1507. }
  1508. }
  1509. free(buffer);
  1510. return ERROR_OK;
  1511. }
  1512. int xscale_restore_context(target_t *target)
  1513. {
  1514. armv4_5_common_t *armv4_5 = target->arch_info;
  1515. int i, j;
  1516. LOG_DEBUG("-");
  1517. if (target->state != TARGET_HALTED)
  1518. {
  1519. LOG_WARNING("target not halted");
  1520. return ERROR_TARGET_NOT_HALTED;
  1521. }
  1522. /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
  1523. * we can't enter User mode on an XScale (unpredictable),
  1524. * but User shares registers with SYS
  1525. */
  1526. for(i = 1; i < 7; i++)
  1527. {
  1528. int dirty = 0;
  1529. /* check if there are invalid registers in the current mode
  1530. */
  1531. for (j = 8; j <= 14; j++)
  1532. {
  1533. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
  1534. dirty = 1;
  1535. }
  1536. /* if not USR/SYS, check if the SPSR needs to be written */
  1537. if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
  1538. {
  1539. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
  1540. dirty = 1;
  1541. }
  1542. if (dirty)
  1543. {
  1544. u32 tmp_cpsr;
  1545. /* send banked registers */
  1546. xscale_send_u32(target, 0x1);
  1547. tmp_cpsr = 0x0;
  1548. tmp_cpsr |= armv4_5_number_to_mode(i);
  1549. tmp_cpsr |= 0xc0; /* I/F bits */
  1550. /* send CPSR for desired mode */
  1551. xscale_send_u32(target, tmp_cpsr);
  1552. /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
  1553. for (j = 8; j <= 14; j++)
  1554. {
  1555. xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, j).value, 0, 32));
  1556. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
  1557. }
  1558. if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
  1559. {
  1560. xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32));
  1561. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
  1562. }
  1563. }
  1564. }
  1565. return ERROR_OK;
  1566. }
  1567. int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
  1568. {
  1569. armv4_5_common_t *armv4_5 = target->arch_info;
  1570. xscale_common_t *xscale = armv4_5->arch_info;
  1571. u32 *buf32;
  1572. int i;
  1573. int retval;
  1574. LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
  1575. if (target->state != TARGET_HALTED)
  1576. {
  1577. LOG_WARNING("target not halted");
  1578. return ERROR_TARGET_NOT_HALTED;
  1579. }
  1580. /* sanitize arguments */
  1581. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1582. return ERROR_INVALID_ARGUMENTS;
  1583. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1584. return ERROR_TARGET_UNALIGNED_ACCESS;
  1585. /* send memory read request (command 0x1n, n: access size) */
  1586. if ((retval=xscale_send_u32(target, 0x10 | size))!=ERROR_OK)
  1587. return retval;
  1588. /* send base address for read request */
  1589. if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
  1590. return retval;
  1591. /* send number of requested data words */
  1592. if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
  1593. return retval;
  1594. /* receive data from target (count times 32-bit words in host endianness) */
  1595. buf32 = malloc(4 * count);
  1596. if ((retval=xscale_receive(target, buf32, count))!=ERROR_OK)
  1597. return retval;
  1598. /* extract data from host-endian buffer into byte stream */
  1599. for (i = 0; i < count; i++)
  1600. {
  1601. switch (size)
  1602. {
  1603. case 4:
  1604. target_buffer_set_u32(target, buffer, buf32[i]);
  1605. buffer += 4;
  1606. break;
  1607. case 2:
  1608. target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
  1609. buffer += 2;
  1610. break;
  1611. case 1:
  1612. *buffer++ = buf32[i] & 0xff;
  1613. break;
  1614. default:
  1615. LOG_ERROR("should never get here");
  1616. exit(-1);
  1617. }
  1618. }
  1619. free(buf32);
  1620. /* examine DCSR, to see if Sticky Abort (SA) got set */
  1621. if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
  1622. return retval;
  1623. if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
  1624. {
  1625. /* clear SA bit */
  1626. if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
  1627. return retval;
  1628. return ERROR_TARGET_DATA_ABORT;
  1629. }
  1630. return ERROR_OK;
  1631. }
  1632. int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
  1633. {
  1634. armv4_5_common_t *armv4_5 = target->arch_info;
  1635. xscale_common_t *xscale = armv4_5->arch_info;
  1636. int retval;
  1637. LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
  1638. if (target->state != TARGET_HALTED)
  1639. {
  1640. LOG_WARNING("target not halted");
  1641. return ERROR_TARGET_NOT_HALTED;
  1642. }
  1643. /* sanitize arguments */
  1644. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1645. return ERROR_INVALID_ARGUMENTS;
  1646. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1647. return ERROR_TARGET_UNALIGNED_ACCESS;
  1648. /* send memory write request (command 0x2n, n: access size) */
  1649. if ((retval=xscale_send_u32(target, 0x20 | size))!=ERROR_OK)
  1650. return retval;
  1651. /* send base address for read request */
  1652. if ((retval=xscale_send_u32(target, address))!=ERROR_OK)
  1653. return retval;
  1654. /* send number of requested data words to be written*/
  1655. if ((retval=xscale_send_u32(target, count))!=ERROR_OK)
  1656. return retval;
  1657. /* extract data from host-endian buffer into byte stream */
  1658. #if 0
  1659. for (i = 0; i < count; i++)
  1660. {
  1661. switch (size)
  1662. {
  1663. case 4:
  1664. value = target_buffer_get_u32(target, buffer);
  1665. xscale_send_u32(target, value);
  1666. buffer += 4;
  1667. break;
  1668. case 2:
  1669. value = target_buffer_get_u16(target, buffer);
  1670. xscale_send_u32(target, value);
  1671. buffer += 2;
  1672. break;
  1673. case 1:
  1674. value = *buffer;
  1675. xscale_send_u32(target, value);
  1676. buffer += 1;
  1677. break;
  1678. default:
  1679. LOG_ERROR("should never get here");
  1680. exit(-1);
  1681. }
  1682. }
  1683. #endif
  1684. if ((retval=xscale_send(target, buffer, count, size))!=ERROR_OK)
  1685. return retval;
  1686. /* examine DCSR, to see if Sticky Abort (SA) got set */
  1687. if ((retval=xscale_read_dcsr(target))!=ERROR_OK)
  1688. return retval;
  1689. if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
  1690. {
  1691. /* clear SA bit */
  1692. if ((retval=xscale_send_u32(target, 0x60))!=ERROR_OK)
  1693. return retval;
  1694. return ERROR_TARGET_DATA_ABORT;
  1695. }
  1696. return ERROR_OK;
  1697. }
  1698. int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
  1699. {
  1700. return xscale_write_memory(target, address, 4, count, buffer);
  1701. }
  1702. u32 xscale_get_ttb(target_t *target)
  1703. {
  1704. armv4_5_common_t *armv4_5 = target->arch_info;
  1705. xscale_common_t *xscale = armv4_5->arch_info;
  1706. u32 ttb;
  1707. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
  1708. ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
  1709. return ttb;
  1710. }
  1711. void xscale_disable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
  1712. {
  1713. armv4_5_common_t *armv4_5 = target->arch_info;
  1714. xscale_common_t *xscale = armv4_5->arch_info;
  1715. u32 cp15_control;
  1716. /* read cp15 control register */
  1717. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  1718. cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  1719. if (mmu)
  1720. cp15_control &= ~0x1U;
  1721. if (d_u_cache)
  1722. {
  1723. /* clean DCache */
  1724. xscale_send_u32(target, 0x50);
  1725. xscale_send_u32(target, xscale->cache_clean_address);
  1726. /* invalidate DCache */
  1727. xscale_send_u32(target, 0x51);
  1728. cp15_control &= ~0x4U;
  1729. }
  1730. if (i_cache)
  1731. {
  1732. /* invalidate ICache */
  1733. xscale_send_u32(target, 0x52);
  1734. cp15_control &= ~0x1000U;
  1735. }
  1736. /* write new cp15 control register */
  1737. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
  1738. /* execute cpwait to ensure outstanding operations complete */
  1739. xscale_send_u32(target, 0x53);
  1740. }
  1741. void xscale_enable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
  1742. {
  1743. armv4_5_common_t *armv4_5 = target->arch_info;
  1744. xscale_common_t *xscale = armv4_5->arch_info;
  1745. u32 cp15_control;
  1746. /* read cp15 control register */
  1747. xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
  1748. cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
  1749. if (mmu)
  1750. cp15_control |= 0x1U;
  1751. if (d_u_cache)
  1752. cp15_control |= 0x4U;
  1753. if (i_cache)
  1754. cp15_control |= 0x1000U;
  1755. /* write new cp15 control register */
  1756. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
  1757. /* execute cpwait to ensure outstanding operations complete */
  1758. xscale_send_u32(target, 0x53);
  1759. }
  1760. int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  1761. {
  1762. int retval;
  1763. armv4_5_common_t *armv4_5 = target->arch_info;
  1764. xscale_common_t *xscale = armv4_5->arch_info;
  1765. if (target->state != TARGET_HALTED)
  1766. {
  1767. LOG_WARNING("target not halted");
  1768. return ERROR_TARGET_NOT_HALTED;
  1769. }
  1770. if (breakpoint->set)
  1771. {
  1772. LOG_WARNING("breakpoint already set");
  1773. return ERROR_OK;
  1774. }
  1775. if (breakpoint->type == BKPT_HARD)
  1776. {
  1777. u32 value = breakpoint->address | 1;
  1778. if (!xscale->ibcr0_used)
  1779. {
  1780. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
  1781. xscale->ibcr0_used = 1;
  1782. breakpoint->set = 1; /* breakpoint set on first breakpoint register */
  1783. }
  1784. else if (!xscale->ibcr1_used)
  1785. {
  1786. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
  1787. xscale->ibcr1_used = 1;
  1788. breakpoint->set = 2; /* breakpoint set on second breakpoint register */
  1789. }
  1790. else
  1791. {
  1792. LOG_ERROR("BUG: no hardware comparator available");
  1793. return ERROR_OK;
  1794. }
  1795. }
  1796. else if (breakpoint->type == BKPT_SOFT)
  1797. {
  1798. if (breakpoint->length == 4)
  1799. {
  1800. /* keep the original instruction in target endianness */
  1801. if((retval = target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
  1802. {
  1803. return retval;
  1804. }
  1805. /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
  1806. if((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
  1807. {
  1808. return retval;
  1809. }
  1810. }
  1811. else
  1812. {
  1813. /* keep the original instruction in target endianness */
  1814. if((retval = target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
  1815. {
  1816. return retval;
  1817. }
  1818. /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
  1819. if((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
  1820. {
  1821. return retval;
  1822. }
  1823. }
  1824. breakpoint->set = 1;
  1825. }
  1826. return ERROR_OK;
  1827. }
  1828. int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  1829. {
  1830. armv4_5_common_t *armv4_5 = target->arch_info;
  1831. xscale_common_t *xscale = armv4_5->arch_info;
  1832. if (target->state != TARGET_HALTED)
  1833. {
  1834. LOG_WARNING("target not halted");
  1835. return ERROR_TARGET_NOT_HALTED;
  1836. }
  1837. if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
  1838. {
  1839. LOG_INFO("no breakpoint unit available for hardware breakpoint");
  1840. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1841. }
  1842. if ((breakpoint->length != 2) && (breakpoint->length != 4))
  1843. {
  1844. LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
  1845. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1846. }
  1847. if (breakpoint->type == BKPT_HARD)
  1848. {
  1849. xscale->ibcr_available--;
  1850. }
  1851. return ERROR_OK;
  1852. }
  1853. int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  1854. {
  1855. int retval;
  1856. armv4_5_common_t *armv4_5 = target->arch_info;
  1857. xscale_common_t *xscale = armv4_5->arch_info;
  1858. if (target->state != TARGET_HALTED)
  1859. {
  1860. LOG_WARNING("target not halted");
  1861. return ERROR_TARGET_NOT_HALTED;
  1862. }
  1863. if (!breakpoint->set)
  1864. {
  1865. LOG_WARNING("breakpoint not set");
  1866. return ERROR_OK;
  1867. }
  1868. if (breakpoint->type == BKPT_HARD)
  1869. {
  1870. if (breakpoint->set == 1)
  1871. {
  1872. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
  1873. xscale->ibcr0_used = 0;
  1874. }
  1875. else if (breakpoint->set == 2)
  1876. {
  1877. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
  1878. xscale->ibcr1_used = 0;
  1879. }
  1880. breakpoint->set = 0;
  1881. }
  1882. else
  1883. {
  1884. /* restore original instruction (kept in target endianness) */
  1885. if (breakpoint->length == 4)
  1886. {
  1887. if((retval = target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
  1888. {
  1889. return retval;
  1890. }
  1891. }
  1892. else
  1893. {
  1894. if((retval = target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
  1895. {
  1896. return retval;
  1897. }
  1898. }
  1899. breakpoint->set = 0;
  1900. }
  1901. return ERROR_OK;
  1902. }
  1903. int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  1904. {
  1905. armv4_5_common_t *armv4_5 = target->arch_info;
  1906. xscale_common_t *xscale = armv4_5->arch_info;
  1907. if (target->state != TARGET_HALTED)
  1908. {
  1909. LOG_WARNING("target not halted");
  1910. return ERROR_TARGET_NOT_HALTED;
  1911. }
  1912. if (breakpoint->set)
  1913. {
  1914. xscale_unset_breakpoint(target, breakpoint);
  1915. }
  1916. if (breakpoint->type == BKPT_HARD)
  1917. xscale->ibcr_available++;
  1918. return ERROR_OK;
  1919. }
  1920. int xscale_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  1921. {
  1922. armv4_5_common_t *armv4_5 = target->arch_info;
  1923. xscale_common_t *xscale = armv4_5->arch_info;
  1924. u8 enable=0;
  1925. reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
  1926. u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
  1927. if (target->state != TARGET_HALTED)
  1928. {
  1929. LOG_WARNING("target not halted");
  1930. return ERROR_TARGET_NOT_HALTED;
  1931. }
  1932. xscale_get_reg(dbcon);
  1933. switch (watchpoint->rw)
  1934. {
  1935. case WPT_READ:
  1936. enable = 0x3;
  1937. break;
  1938. case WPT_ACCESS:
  1939. enable = 0x2;
  1940. break;
  1941. case WPT_WRITE:
  1942. enable = 0x1;
  1943. break;
  1944. default:
  1945. LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
  1946. }
  1947. if (!xscale->dbr0_used)
  1948. {
  1949. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
  1950. dbcon_value |= enable;
  1951. xscale_set_reg_u32(dbcon, dbcon_value);
  1952. watchpoint->set = 1;
  1953. xscale->dbr0_used = 1;
  1954. }
  1955. else if (!xscale->dbr1_used)
  1956. {
  1957. xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
  1958. dbcon_value |= enable << 2;
  1959. xscale_set_reg_u32(dbcon, dbcon_value);
  1960. watchpoint->set = 2;
  1961. xscale->dbr1_used = 1;
  1962. }
  1963. else
  1964. {
  1965. LOG_ERROR("BUG: no hardware comparator available");
  1966. return ERROR_OK;
  1967. }
  1968. return ERROR_OK;
  1969. }
  1970. int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  1971. {
  1972. armv4_5_common_t *armv4_5 = target->arch_info;
  1973. xscale_common_t *xscale = armv4_5->arch_info;
  1974. if (target->state != TARGET_HALTED)
  1975. {
  1976. LOG_WARNING("target not halted");
  1977. return ERROR_TARGET_NOT_HALTED;
  1978. }
  1979. if (xscale->dbr_available < 1)
  1980. {
  1981. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1982. }
  1983. if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
  1984. {
  1985. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1986. }
  1987. xscale->dbr_available--;
  1988. return ERROR_OK;
  1989. }
  1990. int xscale_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  1991. {
  1992. armv4_5_common_t *armv4_5 = target->arch_info;
  1993. xscale_common_t *xscale = armv4_5->arch_info;
  1994. reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
  1995. u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
  1996. if (target->state != TARGET_HALTED)
  1997. {
  1998. LOG_WARNING("target not halted");
  1999. return ERROR_TARGET_NOT_HALTED;
  2000. }
  2001. if (!watchpoint->set)
  2002. {
  2003. LOG_WARNING("breakpoint not set");
  2004. return ERROR_OK;
  2005. }
  2006. if (watchpoint->set == 1)
  2007. {
  2008. dbcon_value &= ~0x3;
  2009. xscale_set_reg_u32(dbcon, dbcon_value);
  2010. xscale->dbr0_used = 0;
  2011. }
  2012. else if (watchpoint->set == 2)
  2013. {
  2014. dbcon_value &= ~0xc;
  2015. xscale_set_reg_u32(dbcon, dbcon_value);
  2016. xscale->dbr1_used = 0;
  2017. }
  2018. watchpoint->set = 0;
  2019. return ERROR_OK;
  2020. }
  2021. int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  2022. {
  2023. armv4_5_common_t *armv4_5 = target->arch_info;
  2024. xscale_common_t *xscale = armv4_5->arch_info;
  2025. if (target->state != TARGET_HALTED)
  2026. {
  2027. LOG_WARNING("target not halted");
  2028. return ERROR_TARGET_NOT_HALTED;
  2029. }
  2030. if (watchpoint->set)
  2031. {
  2032. xscale_unset_watchpoint(target, watchpoint);
  2033. }
  2034. xscale->dbr_available++;
  2035. return ERROR_OK;
  2036. }
  2037. void xscale_enable_watchpoints(struct target_s *target)
  2038. {
  2039. watchpoint_t *watchpoint = target->watchpoints;
  2040. while (watchpoint)
  2041. {
  2042. if (watchpoint->set == 0)
  2043. xscale_set_watchpoint(target, watchpoint);
  2044. watchpoint = watchpoint->next;
  2045. }
  2046. }
  2047. void xscale_enable_breakpoints(struct target_s *target)
  2048. {
  2049. breakpoint_t *breakpoint = target->breakpoints;
  2050. /* set any pending breakpoints */
  2051. while (breakpoint)
  2052. {
  2053. if (breakpoint->set == 0)
  2054. xscale_set_breakpoint(target, breakpoint);
  2055. breakpoint = breakpoint->next;
  2056. }
  2057. }
  2058. int xscale_get_reg(reg_t *reg)
  2059. {
  2060. xscale_reg_t *arch_info = reg->arch_info;
  2061. target_t *target = arch_info->target;
  2062. armv4_5_common_t *armv4_5 = target->arch_info;
  2063. xscale_common_t *xscale = armv4_5->arch_info;
  2064. /* DCSR, TX and RX are accessible via JTAG */
  2065. if (strcmp(reg->name, "XSCALE_DCSR") == 0)
  2066. {
  2067. return xscale_read_dcsr(arch_info->target);
  2068. }
  2069. else if (strcmp(reg->name, "XSCALE_TX") == 0)
  2070. {
  2071. /* 1 = consume register content */
  2072. return xscale_read_tx(arch_info->target, 1);
  2073. }
  2074. else if (strcmp(reg->name, "XSCALE_RX") == 0)
  2075. {
  2076. /* can't read from RX register (host -> debug handler) */
  2077. return ERROR_OK;
  2078. }
  2079. else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
  2080. {
  2081. /* can't (explicitly) read from TXRXCTRL register */
  2082. return ERROR_OK;
  2083. }
  2084. else /* Other DBG registers have to be transfered by the debug handler */
  2085. {
  2086. /* send CP read request (command 0x40) */
  2087. xscale_send_u32(target, 0x40);
  2088. /* send CP register number */
  2089. xscale_send_u32(target, arch_info->dbg_handler_number);
  2090. /* read register value */
  2091. xscale_read_tx(target, 1);
  2092. buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
  2093. reg->dirty = 0;
  2094. reg->valid = 1;
  2095. }
  2096. return ERROR_OK;
  2097. }
  2098. int xscale_set_reg(reg_t *reg, u8* buf)
  2099. {
  2100. xscale_reg_t *arch_info = reg->arch_info;
  2101. target_t *target = arch_info->target;
  2102. armv4_5_common_t *armv4_5 = target->arch_info;
  2103. xscale_common_t *xscale = armv4_5->arch_info;
  2104. u32 value = buf_get_u32(buf, 0, 32);
  2105. /* DCSR, TX and RX are accessible via JTAG */
  2106. if (strcmp(reg->name, "XSCALE_DCSR") == 0)
  2107. {
  2108. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
  2109. return xscale_write_dcsr(arch_info->target, -1, -1);
  2110. }
  2111. else if (strcmp(reg->name, "XSCALE_RX") == 0)
  2112. {
  2113. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
  2114. return xscale_write_rx(arch_info->target);
  2115. }
  2116. else if (strcmp(reg->name, "XSCALE_TX") == 0)
  2117. {
  2118. /* can't write to TX register (debug-handler -> host) */
  2119. return ERROR_OK;
  2120. }
  2121. else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
  2122. {
  2123. /* can't (explicitly) write to TXRXCTRL register */
  2124. return ERROR_OK;
  2125. }
  2126. else /* Other DBG registers have to be transfered by the debug handler */
  2127. {
  2128. /* send CP write request (command 0x41) */
  2129. xscale_send_u32(target, 0x41);
  2130. /* send CP register number */
  2131. xscale_send_u32(target, arch_info->dbg_handler_number);
  2132. /* send CP register value */
  2133. xscale_send_u32(target, value);
  2134. buf_set_u32(reg->value, 0, 32, value);
  2135. }
  2136. return ERROR_OK;
  2137. }
  2138. /* convenience wrapper to access XScale specific registers */
  2139. int xscale_set_reg_u32(reg_t *reg, u32 value)
  2140. {
  2141. u8 buf[4];
  2142. buf_set_u32(buf, 0, 32, value);
  2143. return xscale_set_reg(reg, buf);
  2144. }
  2145. int xscale_write_dcsr_sw(target_t *target, u32 value)
  2146. {
  2147. /* get pointers to arch-specific information */
  2148. armv4_5_common_t *armv4_5 = target->arch_info;
  2149. xscale_common_t *xscale = armv4_5->arch_info;
  2150. reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
  2151. xscale_reg_t *dcsr_arch_info = dcsr->arch_info;
  2152. /* send CP write request (command 0x41) */
  2153. xscale_send_u32(target, 0x41);
  2154. /* send CP register number */
  2155. xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
  2156. /* send CP register value */
  2157. xscale_send_u32(target, value);
  2158. buf_set_u32(dcsr->value, 0, 32, value);
  2159. return ERROR_OK;
  2160. }
  2161. int xscale_read_trace(target_t *target)
  2162. {
  2163. /* get pointers to arch-specific information */
  2164. armv4_5_common_t *armv4_5 = target->arch_info;
  2165. xscale_common_t *xscale = armv4_5->arch_info;
  2166. xscale_trace_data_t **trace_data_p;
  2167. /* 258 words from debug handler
  2168. * 256 trace buffer entries
  2169. * 2 checkpoint addresses
  2170. */
  2171. u32 trace_buffer[258];
  2172. int is_address[256];
  2173. int i, j;
  2174. if (target->state != TARGET_HALTED)
  2175. {
  2176. LOG_WARNING("target must be stopped to read trace data");
  2177. return ERROR_TARGET_NOT_HALTED;
  2178. }
  2179. /* send read trace buffer command (command 0x61) */
  2180. xscale_send_u32(target, 0x61);
  2181. /* receive trace buffer content */
  2182. xscale_receive(target, trace_buffer, 258);
  2183. /* parse buffer backwards to identify address entries */
  2184. for (i = 255; i >= 0; i--)
  2185. {
  2186. is_address[i] = 0;
  2187. if (((trace_buffer[i] & 0xf0) == 0x90) ||
  2188. ((trace_buffer[i] & 0xf0) == 0xd0))
  2189. {
  2190. if (i >= 3)
  2191. is_address[--i] = 1;
  2192. if (i >= 2)
  2193. is_address[--i] = 1;
  2194. if (i >= 1)
  2195. is_address[--i] = 1;
  2196. if (i >= 0)
  2197. is_address[--i] = 1;
  2198. }
  2199. }
  2200. /* search first non-zero entry */
  2201. for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
  2202. ;
  2203. if (j == 256)
  2204. {
  2205. LOG_DEBUG("no trace data collected");
  2206. return ERROR_XSCALE_NO_TRACE_DATA;
  2207. }
  2208. for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
  2209. ;
  2210. *trace_data_p = malloc(sizeof(xscale_trace_data_t));
  2211. (*trace_data_p)->next = NULL;
  2212. (*trace_data_p)->chkpt0 = trace_buffer[256];
  2213. (*trace_data_p)->chkpt1 = trace_buffer[257];
  2214. (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  2215. (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));
  2216. (*trace_data_p)->depth = 256 - j;
  2217. for (i = j; i < 256; i++)
  2218. {
  2219. (*trace_data_p)->entries[i - j].data = trace_buffer[i];
  2220. if (is_address[i])
  2221. (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
  2222. else
  2223. (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
  2224. }
  2225. return ERROR_OK;
  2226. }
  2227. int xscale_read_instruction(target_t *target, arm_instruction_t *instruction)
  2228. {
  2229. /* get pointers to arch-specific information */
  2230. armv4_5_common_t *armv4_5 = target->arch_info;
  2231. xscale_common_t *xscale = armv4_5->arch_info;
  2232. int i;
  2233. int section = -1;
  2234. u32 size_read;
  2235. u32 opcode;
  2236. int retval;
  2237. if (!xscale->trace.image)
  2238. return ERROR_TRACE_IMAGE_UNAVAILABLE;
  2239. /* search for the section the current instruction belongs to */
  2240. for (i = 0; i < xscale->trace.image->num_sections; i++)
  2241. {
  2242. if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
  2243. (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
  2244. {
  2245. section = i;
  2246. break;
  2247. }
  2248. }
  2249. if (section == -1)
  2250. {
  2251. /* current instruction couldn't be found in the image */
  2252. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2253. }
  2254. if (xscale->trace.core_state == ARMV4_5_STATE_ARM)
  2255. {
  2256. u8 buf[4];
  2257. if ((retval = image_read_section(xscale->trace.image, section,
  2258. xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
  2259. 4, buf, &size_read)) != ERROR_OK)
  2260. {
  2261. LOG_ERROR("error while reading instruction: %i", retval);
  2262. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2263. }
  2264. opcode = target_buffer_get_u32(target, buf);
  2265. arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
  2266. }
  2267. else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)
  2268. {
  2269. u8 buf[2];
  2270. if ((retval = image_read_section(xscale->trace.image, section,
  2271. xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
  2272. 2, buf, &size_read)) != ERROR_OK)
  2273. {
  2274. LOG_ERROR("error while reading instruction: %i", retval);
  2275. return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
  2276. }
  2277. opcode = target_buffer_get_u16(target, buf);
  2278. thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
  2279. }
  2280. else
  2281. {
  2282. LOG_ERROR("BUG: unknown core state encountered");
  2283. exit(-1);
  2284. }
  2285. return ERROR_OK;
  2286. }
  2287. int xscale_branch_address(xscale_trace_data_t *trace_data, int i, u32 *target)
  2288. {
  2289. /* if there are less than four entries prior to the indirect branch message
  2290. * we can't extract the address */
  2291. if (i < 4)
  2292. {
  2293. return -1;
  2294. }
  2295. *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
  2296. (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
  2297. return 0;
  2298. }
  2299. int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)
  2300. {
  2301. /* get pointers to arch-specific information */
  2302. armv4_5_common_t *armv4_5 = target->arch_info;
  2303. xscale_common_t *xscale = armv4_5->arch_info;
  2304. int next_pc_ok = 0;
  2305. u32 next_pc = 0x0;
  2306. xscale_trace_data_t *trace_data = xscale->trace.data;
  2307. int retval;
  2308. while (trace_data)
  2309. {
  2310. int i, chkpt;
  2311. int rollover;
  2312. int branch;
  2313. int exception;
  2314. xscale->trace.core_state = ARMV4_5_STATE_ARM;
  2315. chkpt = 0;
  2316. rollover = 0;
  2317. for (i = 0; i < trace_data->depth; i++)
  2318. {
  2319. next_pc_ok = 0;
  2320. branch = 0;
  2321. exception = 0;
  2322. if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
  2323. continue;
  2324. switch ((trace_data->entries[i].data & 0xf0) >> 4)
  2325. {
  2326. case 0: /* Exceptions */
  2327. case 1:
  2328. case 2:
  2329. case 3:
  2330. case 4:
  2331. case 5:
  2332. case 6:
  2333. case 7:
  2334. exception = (trace_data->entries[i].data & 0x70) >> 4;
  2335. next_pc_ok = 1;
  2336. next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
  2337. command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
  2338. break;
  2339. case 8: /* Direct Branch */
  2340. branch = 1;
  2341. break;
  2342. case 9: /* Indirect Branch */
  2343. branch = 1;
  2344. if (xscale_branch_address(trace_data, i, &next_pc) == 0)
  2345. {
  2346. next_pc_ok = 1;
  2347. }
  2348. break;
  2349. case 13: /* Checkpointed Indirect Branch */
  2350. if (xscale_branch_address(trace_data, i, &next_pc) == 0)
  2351. {
  2352. next_pc_ok = 1;
  2353. if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
  2354. || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
  2355. LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
  2356. }
  2357. /* explicit fall-through */
  2358. case 12: /* Checkpointed Direct Branch */
  2359. branch = 1;
  2360. if (chkpt == 0)
  2361. {
  2362. next_pc_ok = 1;
  2363. next_pc = trace_data->chkpt0;
  2364. chkpt++;
  2365. }
  2366. else if (chkpt == 1)
  2367. {
  2368. next_pc_ok = 1;
  2369. next_pc = trace_data->chkpt0;
  2370. chkpt++;
  2371. }
  2372. else
  2373. {
  2374. LOG_WARNING("more than two checkpointed branches encountered");
  2375. }
  2376. break;
  2377. case 15: /* Roll-over */
  2378. rollover++;
  2379. continue;
  2380. default: /* Reserved */
  2381. command_print(cmd_ctx, "--- reserved trace message ---");
  2382. LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
  2383. return ERROR_OK;
  2384. }
  2385. if (xscale->trace.pc_ok)
  2386. {
  2387. int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
  2388. arm_instruction_t instruction;
  2389. if ((exception == 6) || (exception == 7))
  2390. {
  2391. /* IRQ or FIQ exception, no instruction executed */
  2392. executed -= 1;
  2393. }
  2394. while (executed-- >= 0)
  2395. {
  2396. if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
  2397. {
  2398. /* can't continue tracing with no image available */
  2399. if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
  2400. {
  2401. return retval;
  2402. }
  2403. else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
  2404. {
  2405. /* TODO: handle incomplete images */
  2406. }
  2407. }
  2408. /* a precise abort on a load to the PC is included in the incremental
  2409. * word count, other instructions causing data aborts are not included
  2410. */
  2411. if ((executed == 0) && (exception == 4)
  2412. && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
  2413. {
  2414. if ((instruction.type == ARM_LDM)
  2415. && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
  2416. {
  2417. executed--;
  2418. }
  2419. else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
  2420. && (instruction.info.load_store.Rd != 15))
  2421. {
  2422. executed--;
  2423. }
  2424. }
  2425. /* only the last instruction executed
  2426. * (the one that caused the control flow change)
  2427. * could be a taken branch
  2428. */
  2429. if (((executed == -1) && (branch == 1)) &&
  2430. (((instruction.type == ARM_B) ||
  2431. (instruction.type == ARM_BL) ||
  2432. (instruction.type == ARM_BLX)) &&
  2433. (instruction.info.b_bl_bx_blx.target_address != -1)))
  2434. {
  2435. xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
  2436. }
  2437. else
  2438. {
  2439. xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;
  2440. }
  2441. command_print(cmd_ctx, "%s", instruction.text);
  2442. }
  2443. rollover = 0;
  2444. }
  2445. if (next_pc_ok)
  2446. {
  2447. xscale->trace.current_pc = next_pc;
  2448. xscale->trace.pc_ok = 1;
  2449. }
  2450. }
  2451. for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)
  2452. {
  2453. arm_instruction_t instruction;
  2454. if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
  2455. {
  2456. /* can't continue tracing with no image available */
  2457. if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
  2458. {
  2459. return retval;
  2460. }
  2461. else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
  2462. {
  2463. /* TODO: handle incomplete images */
  2464. }
  2465. }
  2466. command_print(cmd_ctx, "%s", instruction.text);
  2467. }
  2468. trace_data = trace_data->next;
  2469. }
  2470. return ERROR_OK;
  2471. }
/* Build the register cache for an XScale target: first the standard
 * ARMv4/5 core register cache, then a second chained cache holding the
 * XScale debug registers.  Each debug register references a per-target
 * copy of the static xscale_reg_arch_info entry so its arch_info->target
 * points back at this target. */
void xscale_build_reg_cache(target_t *target)
{
	/* get pointers to arch-specific information */
	armv4_5_common_t *armv4_5 = target->arch_info;
	xscale_common_t *xscale = armv4_5->arch_info;

	reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
	/* per-target copy of the static register description table */
	xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));
	int i;
	int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);

	(*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
	armv4_5->core_cache = (*cache_p);

	/* register a register arch-type for XScale dbg registers only once */
	if (xscale_reg_arch_type == -1)
		xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);

	/* chain the XScale cache behind the core cache */
	(*cache_p)->next = malloc(sizeof(reg_cache_t));
	cache_p = &(*cache_p)->next;

	/* fill in values for the xscale reg cache */
	(*cache_p)->name = "XScale registers";
	(*cache_p)->next = NULL;
	(*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));
	(*cache_p)->num_regs = num_regs;

	for (i = 0; i < num_regs; i++)
	{
		(*cache_p)->reg_list[i].name = xscale_reg_list[i];
		(*cache_p)->reg_list[i].value = calloc(4, 1);	/* 32 bit, zero-initialized */
		(*cache_p)->reg_list[i].dirty = 0;
		(*cache_p)->reg_list[i].valid = 0;
		(*cache_p)->reg_list[i].size = 32;
		(*cache_p)->reg_list[i].bitfield_desc = NULL;
		(*cache_p)->reg_list[i].num_bitfields = 0;
		(*cache_p)->reg_list[i].arch_info = &arch_info[i];
		(*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;
		arch_info[i] = xscale_reg_arch_info[i];
		arch_info[i].target = target;
	}

	xscale->reg_cache = (*cache_p);
}
/* target_type init_target hook; no XScale-specific work is done here. */
int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target)
{
	return ERROR_OK;
}
/* target_type quit hook; no XScale-specific cleanup is required. */
int xscale_quit(void)
{
	return ERROR_OK;
}
  2517. int xscale_init_arch_info(target_t *target, xscale_common_t *xscale, jtag_tap_t *tap, const char *variant)
  2518. {
  2519. armv4_5_common_t *armv4_5;
  2520. u32 high_reset_branch, low_reset_branch;
  2521. int i;
  2522. armv4_5 = &xscale->armv4_5_common;
  2523. /* store architecture specfic data (none so far) */
  2524. xscale->arch_info = NULL;
  2525. xscale->common_magic = XSCALE_COMMON_MAGIC;
  2526. /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */
  2527. xscale->variant = strdup(variant);
  2528. /* prepare JTAG information for the new target */
  2529. xscale->jtag_info.tap = tap;
  2530. xscale->jtag_info.dbgrx = 0x02;
  2531. xscale->jtag_info.dbgtx = 0x10;
  2532. xscale->jtag_info.dcsr = 0x09;
  2533. xscale->jtag_info.ldic = 0x07;
  2534. if ((strcmp(xscale->variant, "pxa250") == 0) ||
  2535. (strcmp(xscale->variant, "pxa255") == 0) ||
  2536. (strcmp(xscale->variant, "pxa26x") == 0))
  2537. {
  2538. xscale->jtag_info.ir_length = 5;
  2539. }
  2540. else if ((strcmp(xscale->variant, "pxa27x") == 0) ||
  2541. (strcmp(xscale->variant, "ixp42x") == 0) ||
  2542. (strcmp(xscale->variant, "ixp45x") == 0) ||
  2543. (strcmp(xscale->variant, "ixp46x") == 0))
  2544. {
  2545. xscale->jtag_info.ir_length = 7;
  2546. }
  2547. /* the debug handler isn't installed (and thus not running) at this time */
  2548. xscale->handler_installed = 0;
  2549. xscale->handler_running = 0;
  2550. xscale->handler_address = 0xfe000800;
  2551. /* clear the vectors we keep locally for reference */
  2552. memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
  2553. memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
  2554. /* no user-specified vectors have been configured yet */
  2555. xscale->static_low_vectors_set = 0x0;
  2556. xscale->static_high_vectors_set = 0x0;
  2557. /* calculate branches to debug handler */
  2558. low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
  2559. high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
  2560. xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
  2561. xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
  2562. for (i = 1; i <= 7; i++)
  2563. {
  2564. xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  2565. xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
  2566. }
  2567. /* 64kB aligned region used for DCache cleaning */
  2568. xscale->cache_clean_address = 0xfffe0000;
  2569. xscale->hold_rst = 0;
  2570. xscale->external_debug_break = 0;
  2571. xscale->ibcr_available = 2;
  2572. xscale->ibcr0_used = 0;
  2573. xscale->ibcr1_used = 0;
  2574. xscale->dbr_available = 2;
  2575. xscale->dbr0_used = 0;
  2576. xscale->dbr1_used = 0;
  2577. xscale->arm_bkpt = ARMV5_BKPT(0x0);
  2578. xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
  2579. xscale->vector_catch = 0x1;
  2580. xscale->trace.capture_status = TRACE_IDLE;
  2581. xscale->trace.data = NULL;
  2582. xscale->trace.image = NULL;
  2583. xscale->trace.buffer_enabled = 0;
  2584. xscale->trace.buffer_fill = 0;
  2585. /* prepare ARMv4/5 specific information */
  2586. armv4_5->arch_info = xscale;
  2587. armv4_5->read_core_reg = xscale_read_core_reg;
  2588. armv4_5->write_core_reg = xscale_write_core_reg;
  2589. armv4_5->full_context = xscale_full_context;
  2590. armv4_5_init_arch_info(target, armv4_5);
  2591. xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
  2592. xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
  2593. xscale->armv4_5_mmu.read_memory = xscale_read_memory;
  2594. xscale->armv4_5_mmu.write_memory = xscale_write_memory;
  2595. xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
  2596. xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
  2597. xscale->armv4_5_mmu.has_tiny_pages = 1;
  2598. xscale->armv4_5_mmu.mmu_enabled = 0;
  2599. return ERROR_OK;
  2600. }
  2601. /* target xscale <endianess> <startup_mode> <chain_pos> <variant> */
  2602. int xscale_target_create(struct target_s *target, Jim_Interp *interp)
  2603. {
  2604. xscale_common_t *xscale = calloc(1,sizeof(xscale_common_t));
  2605. xscale_init_arch_info(target, xscale, target->tap, target->variant);
  2606. xscale_build_reg_cache(target);
  2607. return ERROR_OK;
  2608. }
  2609. int xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2610. {
  2611. target_t *target = NULL;
  2612. armv4_5_common_t *armv4_5;
  2613. xscale_common_t *xscale;
  2614. u32 handler_address;
  2615. if (argc < 2)
  2616. {
  2617. LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
  2618. return ERROR_OK;
  2619. }
  2620. if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
  2621. {
  2622. LOG_ERROR("no target '%s' configured", args[0]);
  2623. return ERROR_FAIL;
  2624. }
  2625. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2626. {
  2627. return ERROR_FAIL;
  2628. }
  2629. handler_address = strtoul(args[1], NULL, 0);
  2630. if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
  2631. ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
  2632. {
  2633. xscale->handler_address = handler_address;
  2634. }
  2635. else
  2636. {
  2637. LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
  2638. return ERROR_FAIL;
  2639. }
  2640. return ERROR_OK;
  2641. }
  2642. int xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2643. {
  2644. target_t *target = NULL;
  2645. armv4_5_common_t *armv4_5;
  2646. xscale_common_t *xscale;
  2647. u32 cache_clean_address;
  2648. if (argc < 2)
  2649. {
  2650. return ERROR_COMMAND_SYNTAX_ERROR;
  2651. }
  2652. if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)
  2653. {
  2654. LOG_ERROR("no target '%s' configured", args[0]);
  2655. return ERROR_FAIL;
  2656. }
  2657. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2658. {
  2659. return ERROR_FAIL;
  2660. }
  2661. cache_clean_address = strtoul(args[1], NULL, 0);
  2662. if (cache_clean_address & 0xffff)
  2663. {
  2664. LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
  2665. }
  2666. else
  2667. {
  2668. xscale->cache_clean_address = cache_clean_address;
  2669. }
  2670. return ERROR_OK;
  2671. }
  2672. int xscale_handle_cache_info_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2673. {
  2674. target_t *target = get_current_target(cmd_ctx);
  2675. armv4_5_common_t *armv4_5;
  2676. xscale_common_t *xscale;
  2677. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2678. {
  2679. return ERROR_OK;
  2680. }
  2681. return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
  2682. }
  2683. static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical)
  2684. {
  2685. armv4_5_common_t *armv4_5;
  2686. xscale_common_t *xscale;
  2687. int retval;
  2688. int type;
  2689. u32 cb;
  2690. int domain;
  2691. u32 ap;
  2692. if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)
  2693. {
  2694. return retval;
  2695. }
  2696. u32 ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
  2697. if (type == -1)
  2698. {
  2699. return ret;
  2700. }
  2701. *physical = ret;
  2702. return ERROR_OK;
  2703. }
  2704. static int xscale_mmu(struct target_s *target, int *enabled)
  2705. {
  2706. armv4_5_common_t *armv4_5 = target->arch_info;
  2707. xscale_common_t *xscale = armv4_5->arch_info;
  2708. if (target->state != TARGET_HALTED)
  2709. {
  2710. LOG_ERROR("Target not halted");
  2711. return ERROR_TARGET_INVALID;
  2712. }
  2713. *enabled = xscale->armv4_5_mmu.mmu_enabled;
  2714. return ERROR_OK;
  2715. }
  2716. int xscale_handle_mmu_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
  2717. {
  2718. target_t *target = get_current_target(cmd_ctx);
  2719. armv4_5_common_t *armv4_5;
  2720. xscale_common_t *xscale;
  2721. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2722. {
  2723. return ERROR_OK;
  2724. }
  2725. if (target->state != TARGET_HALTED)
  2726. {
  2727. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2728. return ERROR_OK;
  2729. }
  2730. if (argc >= 1)
  2731. {
  2732. if (strcmp("enable", args[0]) == 0)
  2733. {
  2734. xscale_enable_mmu_caches(target, 1, 0, 0);
  2735. xscale->armv4_5_mmu.mmu_enabled = 1;
  2736. }
  2737. else if (strcmp("disable", args[0]) == 0)
  2738. {
  2739. xscale_disable_mmu_caches(target, 1, 0, 0);
  2740. xscale->armv4_5_mmu.mmu_enabled = 0;
  2741. }
  2742. }
  2743. command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
  2744. return ERROR_OK;
  2745. }
  2746. int xscale_handle_idcache_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
  2747. {
  2748. target_t *target = get_current_target(cmd_ctx);
  2749. armv4_5_common_t *armv4_5;
  2750. xscale_common_t *xscale;
  2751. int icache = 0, dcache = 0;
  2752. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2753. {
  2754. return ERROR_OK;
  2755. }
  2756. if (target->state != TARGET_HALTED)
  2757. {
  2758. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2759. return ERROR_OK;
  2760. }
  2761. if (strcmp(cmd, "icache") == 0)
  2762. icache = 1;
  2763. else if (strcmp(cmd, "dcache") == 0)
  2764. dcache = 1;
  2765. if (argc >= 1)
  2766. {
  2767. if (strcmp("enable", args[0]) == 0)
  2768. {
  2769. xscale_enable_mmu_caches(target, 0, dcache, icache);
  2770. if (icache)
  2771. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;
  2772. else if (dcache)
  2773. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;
  2774. }
  2775. else if (strcmp("disable", args[0]) == 0)
  2776. {
  2777. xscale_disable_mmu_caches(target, 0, dcache, icache);
  2778. if (icache)
  2779. xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;
  2780. else if (dcache)
  2781. xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;
  2782. }
  2783. }
  2784. if (icache)
  2785. command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");
  2786. if (dcache)
  2787. command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
  2788. return ERROR_OK;
  2789. }
  2790. int xscale_handle_vector_catch_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
  2791. {
  2792. target_t *target = get_current_target(cmd_ctx);
  2793. armv4_5_common_t *armv4_5;
  2794. xscale_common_t *xscale;
  2795. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2796. {
  2797. return ERROR_OK;
  2798. }
  2799. if (argc < 1)
  2800. {
  2801. command_print(cmd_ctx, "usage: xscale vector_catch [mask]");
  2802. }
  2803. else
  2804. {
  2805. xscale->vector_catch = strtoul(args[0], NULL, 0);
  2806. buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
  2807. xscale_write_dcsr(target, -1, -1);
  2808. }
  2809. command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
  2810. return ERROR_OK;
  2811. }
/* "xscale trace_buffer <enable|disable> ['fill' [n]|'wrap']"
 *
 * Enables or disables trace collection and selects fill-once vs.
 * wrap-around mode, then programs the mode bits into DCSR.
 * trace.buffer_fill > 0 encodes fill-once (with iteration count),
 * -1 encodes wrap mode.  Target must be halted. */
int xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
{
	target_t *target = get_current_target(cmd_ctx);
	armv4_5_common_t *armv4_5;
	xscale_common_t *xscale;
	u32 dcsr_value;
	if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
	{
		return ERROR_OK;
	}
	if (target->state != TARGET_HALTED)
	{
		command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
		return ERROR_OK;
	}
	if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
	{
		xscale_trace_data_t *td, *next_td;
		xscale->trace.buffer_enabled = 1;
		/* free the previously collected trace data (singly linked list) */
		td = xscale->trace.data;
		while (td)
		{
			next_td = td->next;
			if (td->entries)
				free(td->entries);
			free(td);
			td = next_td;
		}
		xscale->trace.data = NULL;
	}
	else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
	{
		xscale->trace.buffer_enabled = 0;
	}
	if ((argc >= 2) && (strcmp("fill", args[1]) == 0))
	{
		/* optional third argument: number of fill iterations (default 1) */
		if (argc >= 3)
			xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);
		else
			xscale->trace.buffer_fill = 1;
	}
	else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))
	{
		/* wrap-around mode is encoded as a negative fill count */
		xscale->trace.buffer_fill = -1;
	}
	if (xscale->trace.buffer_enabled)
	{
		/* if we enable the trace buffer in fill-once
		 * mode we know the address of the first instruction
		 * (NOTE(review): pc_ok is set here regardless of fill/wrap mode -
		 * confirm that is intended) */
		xscale->trace.pc_ok = 1;
		xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	}
	else
	{
		/* otherwise the address is unknown, and we have no known good PC */
		xscale->trace.pc_ok = 0;
	}
	command_print(cmd_ctx, "trace buffer %s (%s)",
	(xscale->trace.buffer_enabled) ? "enabled" : "disabled",
	(xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
	/* clear DCSR[1:0], then set bit 1 when in fill-once mode */
	dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
	if (xscale->trace.buffer_fill >= 0)
		xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
	else
		xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
	return ERROR_OK;
}
  2880. int xscale_handle_trace_image_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2881. {
  2882. target_t *target;
  2883. armv4_5_common_t *armv4_5;
  2884. xscale_common_t *xscale;
  2885. if (argc < 1)
  2886. {
  2887. command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");
  2888. return ERROR_OK;
  2889. }
  2890. target = get_current_target(cmd_ctx);
  2891. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2892. {
  2893. return ERROR_OK;
  2894. }
  2895. if (xscale->trace.image)
  2896. {
  2897. image_close(xscale->trace.image);
  2898. free(xscale->trace.image);
  2899. command_print(cmd_ctx, "previously loaded image found and closed");
  2900. }
  2901. xscale->trace.image = malloc(sizeof(image_t));
  2902. xscale->trace.image->base_address_set = 0;
  2903. xscale->trace.image->start_address_set = 0;
  2904. /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
  2905. if (argc >= 2)
  2906. {
  2907. xscale->trace.image->base_address_set = 1;
  2908. xscale->trace.image->base_address = strtoul(args[1], NULL, 0);
  2909. }
  2910. else
  2911. {
  2912. xscale->trace.image->base_address_set = 0;
  2913. }
  2914. if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)
  2915. {
  2916. free(xscale->trace.image);
  2917. xscale->trace.image = NULL;
  2918. return ERROR_OK;
  2919. }
  2920. return ERROR_OK;
  2921. }
  2922. int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2923. {
  2924. target_t *target = get_current_target(cmd_ctx);
  2925. armv4_5_common_t *armv4_5;
  2926. xscale_common_t *xscale;
  2927. xscale_trace_data_t *trace_data;
  2928. fileio_t file;
  2929. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2930. {
  2931. return ERROR_OK;
  2932. }
  2933. if (target->state != TARGET_HALTED)
  2934. {
  2935. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2936. return ERROR_OK;
  2937. }
  2938. if (argc < 1)
  2939. {
  2940. command_print(cmd_ctx, "usage: xscale dump_trace <file>");
  2941. return ERROR_OK;
  2942. }
  2943. trace_data = xscale->trace.data;
  2944. if (!trace_data)
  2945. {
  2946. command_print(cmd_ctx, "no trace data collected");
  2947. return ERROR_OK;
  2948. }
  2949. if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
  2950. {
  2951. return ERROR_OK;
  2952. }
  2953. while (trace_data)
  2954. {
  2955. int i;
  2956. fileio_write_u32(&file, trace_data->chkpt0);
  2957. fileio_write_u32(&file, trace_data->chkpt1);
  2958. fileio_write_u32(&file, trace_data->last_instruction);
  2959. fileio_write_u32(&file, trace_data->depth);
  2960. for (i = 0; i < trace_data->depth; i++)
  2961. fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
  2962. trace_data = trace_data->next;
  2963. }
  2964. fileio_close(&file);
  2965. return ERROR_OK;
  2966. }
  2967. int xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2968. {
  2969. target_t *target = get_current_target(cmd_ctx);
  2970. armv4_5_common_t *armv4_5;
  2971. xscale_common_t *xscale;
  2972. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2973. {
  2974. return ERROR_OK;
  2975. }
  2976. xscale_analyze_trace(target, cmd_ctx);
  2977. return ERROR_OK;
  2978. }
  2979. int xscale_handle_cp15(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
  2980. {
  2981. target_t *target = get_current_target(cmd_ctx);
  2982. armv4_5_common_t *armv4_5;
  2983. xscale_common_t *xscale;
  2984. if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
  2985. {
  2986. return ERROR_OK;
  2987. }
  2988. if (target->state != TARGET_HALTED)
  2989. {
  2990. command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
  2991. return ERROR_OK;
  2992. }
  2993. u32 reg_no = 0;
  2994. reg_t *reg = NULL;
  2995. if(argc > 0)
  2996. {
  2997. reg_no = strtoul(args[0], NULL, 0);
  2998. /*translate from xscale cp15 register no to openocd register*/
  2999. switch(reg_no)
  3000. {
  3001. case 0:
  3002. reg_no = XSCALE_MAINID;
  3003. break;
  3004. case 1:
  3005. reg_no = XSCALE_CTRL;
  3006. break;
  3007. case 2:
  3008. reg_no = XSCALE_TTB;
  3009. break;
  3010. case 3:
  3011. reg_no = XSCALE_DAC;
  3012. break;
  3013. case 5:
  3014. reg_no = XSCALE_FSR;
  3015. break;
  3016. case 6:
  3017. reg_no = XSCALE_FAR;
  3018. break;
  3019. case 13:
  3020. reg_no = XSCALE_PID;
  3021. break;
  3022. case 15:
  3023. reg_no = XSCALE_CPACCESS;
  3024. break;
  3025. default:
  3026. command_print(cmd_ctx, "invalid register number");
  3027. return ERROR_INVALID_ARGUMENTS;
  3028. }
  3029. reg = &xscale->reg_cache->reg_list[reg_no];
  3030. }
  3031. if(argc == 1)
  3032. {
  3033. u32 value;
  3034. /* read cp15 control register */
  3035. xscale_get_reg(reg);
  3036. value = buf_get_u32(reg->value, 0, 32);
  3037. command_print(cmd_ctx, "%s (/%i): 0x%x", reg->name, reg->size, value);
  3038. }
  3039. else if(argc == 2)
  3040. {
  3041. u32 value = strtoul(args[1], NULL, 0);
  3042. /* send CP write request (command 0x41) */
  3043. xscale_send_u32(target, 0x41);
  3044. /* send CP register number */
  3045. xscale_send_u32(target, reg_no);
  3046. /* send CP register value */
  3047. xscale_send_u32(target, value);
  3048. /* execute cpwait to ensure outstanding operations complete */
  3049. xscale_send_u32(target, 0x53);
  3050. }
  3051. else
  3052. {
  3053. command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
  3054. }
  3055. return ERROR_OK;
  3056. }
/* Register the "xscale" command group and all of its subcommands, then
 * the common ARMv4/5 commands.  Called once at startup. */
int xscale_register_commands(struct command_context_s *cmd_ctx)
{
	command_t *xscale_cmd;

	/* parent node; subcommands below hang off this */
	xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");

	register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
	register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
	register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
	register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
	/* icache and dcache share one handler; it dispatches on the command name */
	register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
	register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
	register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_vector_catch_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
	register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable|disable> ['fill' [n]|'wrap']");
	register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
	register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
	register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
	COMMAND_EXEC, "load image from <file> [base address]");
	register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");

	/* also expose the generic ARMv4/5 commands (reg, disassemble, ...) */
	armv4_5_register_commands(cmd_ctx);

	return ERROR_OK;
}