  1. /***************************************************************************
  2. * Copyright (C) 2005 by Dominic Rath *
  3. * Dominic.Rath@gmx.de *
  4. * *
  5. * Copyright (C) 2007,2008 Øyvind Harboe *
  6. * oyvind.harboe@zylin.com *
  7. * *
  8. * Copyright (C) 2008 by Spencer Oliver *
  9. * spen@spen-soft.co.uk *
  10. * *
  11. * Copyright (C) 2008 by Hongtao Zheng *
  12. * hontor@126.com *
  13. * *
  14. * This program is free software; you can redistribute it and/or modify *
  15. * it under the terms of the GNU General Public License as published by *
  16. * the Free Software Foundation; either version 2 of the License, or *
  17. * (at your option) any later version. *
  18. * *
  19. * This program is distributed in the hope that it will be useful, *
  20. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  21. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  22. * GNU General Public License for more details. *
  23. * *
  24. * You should have received a copy of the GNU General Public License *
  25. * along with this program; if not, write to the *
  26. * Free Software Foundation, Inc., *
  27. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
  28. ***************************************************************************/
  29. #ifdef HAVE_CONFIG_H
  30. #include "config.h"
  31. #endif
  32. #include "embeddedice.h"
  33. #include "target_request.h"
  34. #include "arm7_9_common.h"
  35. #include "time_support.h"
  36. #include "arm_simulator.h"
  37. int arm7_9_debug_entry(target_t *target);
  38. int arm7_9_enable_sw_bkpts(struct target_s *target);
  39. /* command handler forward declarations */
  40. int handle_arm7_9_write_xpsr_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  41. int handle_arm7_9_write_xpsr_im8_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  42. int handle_arm7_9_read_core_reg_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  43. int handle_arm7_9_write_core_reg_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  44. int handle_arm7_9_dbgrq_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  45. int handle_arm7_9_fast_memory_access_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  46. int handle_arm7_9_dcc_downloads_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  47. int handle_arm7_9_etm_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  48. /**
  49. * Clear watchpoints for an ARM7/9 target.
  50. *
  51. * @param arm7_9 Pointer to the common struct for an ARM7/9 target
  52. * @return JTAG error status after executing queue
  53. */
  54. static int arm7_9_clear_watchpoints(arm7_9_common_t *arm7_9)
  55. {
  56. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], 0x0);
  57. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE], 0x0);
  58. arm7_9->sw_breakpoints_added = 0;
  59. arm7_9->wp0_used = 0;
  60. arm7_9->wp1_used = arm7_9->wp1_used_default;
  61. arm7_9->wp_available = arm7_9->wp_available_max;
  62. return jtag_execute_queue();
  63. }
  64. /**
  65. * Assign a watchpoint to one of the two available hardware comparators in an
  66. * ARM7 or ARM9 target.
  67. *
  68. * @param arm7_9 Pointer to the common struct for an ARM7/9 target
  69. * @param breakpoint Pointer to the breakpoint to be used as a watchpoint
  70. */
  71. static void arm7_9_assign_wp(arm7_9_common_t *arm7_9, breakpoint_t *breakpoint)
  72. {
  73. if (!arm7_9->wp0_used)
  74. {
  75. arm7_9->wp0_used = 1;
  76. breakpoint->set = 1;
  77. arm7_9->wp_available--;
  78. }
  79. else if (!arm7_9->wp1_used)
  80. {
  81. arm7_9->wp1_used = 1;
  82. breakpoint->set = 2;
  83. arm7_9->wp_available--;
  84. }
  85. else
  86. {
  87. LOG_ERROR("BUG: no hardware comparator available");
  88. }
  89. }
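/* Editorial note (added in this write-up, not in the original source): the
 * wp0_used/wp1_used fields double as a small state encoding used throughout
 * this file: 0 = comparator free, 1 = used for a hardware breakpoint,
 * 2 = used for a watchpoint, 3 = reserved for the software-breakpoint
 * comparator set up in arm7_9_set_software_breakpoints() below. For hardware
 * breakpoints, breakpoint->set records which comparator (1 or 2) is used. */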
  90. /**
  91. * Set up an ARM7/9 target's EmbeddedICE registers for software breakpoints.
  92. *
  93. * @param arm7_9 Pointer to common struct for ARM7/9 targets
  94. * @return Error codes if there is a problem finding a watchpoint or the result
  95. * of executing the JTAG queue
  96. */
  97. static int arm7_9_set_software_breakpoints(arm7_9_common_t *arm7_9)
  98. {
  99. if (arm7_9->sw_breakpoints_added)
  100. {
  101. return ERROR_OK;
  102. }
  103. if (arm7_9->wp_available < 1)
  104. {
  105. LOG_WARNING("can't enable sw breakpoints with no watchpoint unit available");
  106. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  107. }
  108. arm7_9->wp_available--;
  109. /* pick a breakpoint unit */
  110. if (!arm7_9->wp0_used)
  111. {
  112. arm7_9->sw_breakpoints_added=1;
  113. arm7_9->wp0_used = 3;
  114. } else if (!arm7_9->wp1_used)
  115. {
  116. arm7_9->sw_breakpoints_added=2;
  117. arm7_9->wp1_used = 3;
  118. }
  119. else
  120. {
  121. LOG_ERROR("BUG: both watchpoints used, but wp_available >= 1");
  122. return ERROR_FAIL;
  123. }
  124. if (arm7_9->sw_breakpoints_added==1)
  125. {
  126. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_VALUE], arm7_9->arm_bkpt);
  127. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK], 0x0);
  128. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK], 0xffffffffu);
  129. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK], ~EICE_W_CTRL_nOPC & 0xff);
  130. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], EICE_W_CTRL_ENABLE);
  131. }
  132. else if (arm7_9->sw_breakpoints_added==2)
  133. {
  134. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_DATA_VALUE], arm7_9->arm_bkpt);
  135. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_DATA_MASK], 0x0);
  136. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_MASK], 0xffffffffu);
  137. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_MASK], ~EICE_W_CTRL_nOPC & 0xff);
  138. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE], EICE_W_CTRL_ENABLE);
  139. }
  140. else
  141. {
  142. LOG_ERROR("BUG: both watchpoints used, but wp_available >= 1");
  143. return ERROR_FAIL;
  144. }
  145. return jtag_execute_queue();
  146. }
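/* Editorial note (added in this write-up, not in the original source):
 * software breakpoints on ARM7/9 reserve a single EmbeddedICE comparator and
 * program it to match the breakpoint bit pattern (arm7_9->arm_bkpt) on the
 * data bus while ignoring the address entirely (address mask 0xffffffff).
 * One comparator therefore services any number of software breakpoints; the
 * breakpoint words themselves are written into memory by
 * arm7_9_set_breakpoint() below. */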
  147. /**
  148. * Set up the common pieces for an ARM7/9 target after reset or on startup.
  149. *
  150. * @param target Pointer to an ARM7/9 target to setup
  151. * @return Result of clearing the watchpoints on the target
  152. */
  153. int arm7_9_setup(target_t *target)
  154. {
  155. armv4_5_common_t *armv4_5 = target->arch_info;
  156. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  157. return arm7_9_clear_watchpoints(arm7_9);
  158. }
  159. /**
  160. * Retrieves the architecture information pointers for ARMv4/5 and ARM7/9
  161. * targets. A return of ERROR_OK signifies that the target is a valid target
  162. * and that the pointers have been set properly.
  163. *
  164. * @param target Pointer to the target device to get the pointers from
  165. * @param armv4_5_p Pointer to be filled in with the common struct for ARMV4/5
  166. * targets
  167. * @param arm7_9_p Pointer to be filled in with the common struct for ARM7/9
  168. * targets
  169. * @return ERROR_OK if successful
  170. */
  171. int arm7_9_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, arm7_9_common_t **arm7_9_p)
  172. {
  173. armv4_5_common_t *armv4_5 = target->arch_info;
  174. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  175. if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
  176. {
  177. return -1;
  178. }
  179. if (arm7_9->common_magic != ARM7_9_COMMON_MAGIC)
  180. {
  181. return -1;
  182. }
  183. *armv4_5_p = armv4_5;
  184. *arm7_9_p = arm7_9;
  185. return ERROR_OK;
  186. }
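/* Illustrative sketch (added in this write-up, not part of the original
 * file): the command handlers declared at the top of this file typically
 * validate the selected target with arm7_9_get_arch_pointers() before
 * touching any EmbeddedICE state, roughly like:
 *
 *     target_t *target = get_current_target(cmd_ctx);
 *     armv4_5_common_t *armv4_5;
 *     arm7_9_common_t *arm7_9;
 *     if (arm7_9_get_arch_pointers(target, &armv4_5, &arm7_9) != ERROR_OK)
 *     {
 *         command_print(cmd_ctx, "current target isn't an ARM7/ARM9 target");
 *         return ERROR_OK;
 *     }
 *
 * get_current_target() and command_print() are OpenOCD helpers assumed to be
 * available to the handlers; the exact message wording may differ. */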
  187. /**
  188. * Set either a hardware or software breakpoint on an ARM7/9 target. The
  189. * breakpoint is set up even if it is already set. Some actions, e.g. reset,
  190. * might have erased the values in Embedded ICE.
  191. *
  192. * @param target Pointer to the target device to set the breakpoints on
  193. * @param breakpoint Pointer to the breakpoint to be set
  194. * @return For hardware breakpoints, this is the result of executing the JTAG
  195. * queue. For software breakpoints, this will be the status of the
  196. * required memory reads and writes
  197. */
  198. int arm7_9_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  199. {
  200. armv4_5_common_t *armv4_5 = target->arch_info;
  201. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  202. int retval=ERROR_OK;
  203. if (target->state != TARGET_HALTED)
  204. {
  205. LOG_WARNING("target not halted");
  206. return ERROR_TARGET_NOT_HALTED;
  207. }
  208. if (breakpoint->type == BKPT_HARD)
  209. {
  210. /* either an ARM (4 byte) or Thumb (2 byte) breakpoint */
  211. u32 mask = (breakpoint->length == 4) ? 0x3u : 0x1u;
  212. /* reassign a hw breakpoint */
  213. if (breakpoint->set==0)
  214. {
  215. arm7_9_assign_wp(arm7_9, breakpoint);
  216. }
  217. if (breakpoint->set==1)
  218. {
  219. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_VALUE], breakpoint->address);
  220. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK], mask);
  221. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK], 0xffffffffu);
  222. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK], ~EICE_W_CTRL_nOPC & 0xff);
  223. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], EICE_W_CTRL_ENABLE);
  224. }
  225. else if (breakpoint->set==2)
  226. {
  227. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_VALUE], breakpoint->address);
  228. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_MASK], mask);
  229. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_DATA_MASK], 0xffffffffu);
  230. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_MASK], ~EICE_W_CTRL_nOPC & 0xff);
  231. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE], EICE_W_CTRL_ENABLE);
  232. }
  233. else
  234. {
  235. LOG_ERROR("BUG: no hardware comparator available");
  236. return ERROR_OK;
  237. }
  238. retval=jtag_execute_queue();
  239. }
  240. else if (breakpoint->type == BKPT_SOFT)
  241. {
  242. if ((retval=arm7_9_set_software_breakpoints(arm7_9))!=ERROR_OK)
  243. return retval;
  244. /* did we already set this breakpoint? */
  245. if (breakpoint->set)
  246. return ERROR_OK;
  247. if (breakpoint->length == 4)
  248. {
  249. u32 verify = 0xffffffff;
  250. /* keep the original instruction in target endianness */
  251. if ((retval = target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
  252. {
  253. return retval;
  254. }
  255. /* write the breakpoint instruction in target endianness (arm7_9->arm_bkpt is host endian) */
  256. if ((retval = target_write_u32(target, breakpoint->address, arm7_9->arm_bkpt)) != ERROR_OK)
  257. {
  258. return retval;
  259. }
  260. if ((retval = target_read_u32(target, breakpoint->address, &verify)) != ERROR_OK)
  261. {
  262. return retval;
  263. }
  264. if (verify != arm7_9->arm_bkpt)
  265. {
  266. LOG_ERROR("Unable to set 32 bit software breakpoint at address %08x - check that memory is read/writable", breakpoint->address);
  267. return ERROR_OK;
  268. }
  269. }
  270. else
  271. {
  272. u16 verify = 0xffff;
  273. /* keep the original instruction in target endianness */
  274. if ((retval = target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
  275. {
  276. return retval;
  277. }
  278. /* write the breakpoint instruction in target endianness (arm7_9->thumb_bkpt is host endian) */
  279. if ((retval = target_write_u16(target, breakpoint->address, arm7_9->thumb_bkpt)) != ERROR_OK)
  280. {
  281. return retval;
  282. }
  283. if ((retval = target_read_u16(target, breakpoint->address, &verify)) != ERROR_OK)
  284. {
  285. return retval;
  286. }
  287. if (verify != arm7_9->thumb_bkpt)
  288. {
  289. LOG_ERROR("Unable to set thumb software breakpoint at address %08x - check that memory is read/writable", breakpoint->address);
  290. return ERROR_OK;
  291. }
  292. }
  293. breakpoint->set = 1;
  294. }
  295. return retval;
  296. }
  297. /**
  298. * Unsets an existing breakpoint on an ARM7/9 target. If it is a hardware
  299. * breakpoint, the watchpoint used will be freed and the Embedded ICE registers
  300. * will be updated. Otherwise, the software breakpoint will be restored to its
  301. * original instruction if it hasn't already been modified.
  302. *
  303. * @param target Pointer to ARM7/9 target to unset the breakpoint from
  304. * @param breakpoint Pointer to breakpoint to be unset
  305. * @return For hardware breakpoints, this is the result of executing the JTAG
  306. * queue. For software breakpoints, this will be the status of the
  307. * required memory reads and writes
  308. */
  309. int arm7_9_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  310. {
  311. int retval = ERROR_OK;
  312. armv4_5_common_t *armv4_5 = target->arch_info;
  313. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  314. if (!breakpoint->set)
  315. {
  316. LOG_WARNING("breakpoint not set");
  317. return ERROR_OK;
  318. }
  319. if (breakpoint->type == BKPT_HARD)
  320. {
  321. if (breakpoint->set == 1)
  322. {
  323. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], 0x0);
  324. arm7_9->wp0_used = 0;
  325. arm7_9->wp_available++;
  326. }
  327. else if (breakpoint->set == 2)
  328. {
  329. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE], 0x0);
  330. arm7_9->wp1_used = 0;
  331. arm7_9->wp_available++;
  332. }
  333. retval = jtag_execute_queue();
  334. breakpoint->set = 0;
  335. }
  336. else
  337. {
  338. /* restore original instruction (kept in target endianness) */
  339. if (breakpoint->length == 4)
  340. {
  341. u32 current_instr;
  342. /* check that the user program has not modified the breakpoint instruction */
  343. if ((retval = target->type->read_memory(target, breakpoint->address, 4, 1, (u8*)&current_instr)) != ERROR_OK)
  344. {
  345. return retval;
  346. }
  347. if (current_instr==arm7_9->arm_bkpt)
  348. if ((retval = target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
  349. {
  350. return retval;
  351. }
  352. }
  353. else
  354. {
  355. u16 current_instr;
  356. /* check that the user program has not modified the breakpoint instruction */
  357. if ((retval = target->type->read_memory(target, breakpoint->address, 2, 1, (u8*)&current_instr)) != ERROR_OK)
  358. {
  359. return retval;
  360. }
  361. if (current_instr==arm7_9->thumb_bkpt)
  362. if ((retval = target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
  363. {
  364. return retval;
  365. }
  366. }
  367. breakpoint->set = 0;
  368. }
  369. return retval;
  370. }
  371. /**
  372. * Add a breakpoint to an ARM7/9 target. This makes sure that there are no
  373. * dangling breakpoints and that the desired breakpoint can be added.
  374. *
  375. * @param target Pointer to the target ARM7/9 device to add a breakpoint to
  376. * @param breakpoint Pointer to the breakpoint to be added
  377. * @return An error status if there is a problem adding the breakpoint or the
  378. * result of setting the breakpoint
  379. */
  380. int arm7_9_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  381. {
  382. armv4_5_common_t *armv4_5 = target->arch_info;
  383. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  384. if (target->state != TARGET_HALTED)
  385. {
  386. LOG_WARNING("target not halted");
  387. return ERROR_TARGET_NOT_HALTED;
  388. }
  389. if (arm7_9->breakpoint_count==0)
  390. {
  391. /* make sure we don't have any dangling breakpoints. This is vital upon
  392. * GDB connect/disconnect
  393. */
  394. arm7_9_clear_watchpoints(arm7_9);
  395. }
  396. if ((breakpoint->type == BKPT_HARD) && (arm7_9->wp_available < 1))
  397. {
  398. LOG_INFO("no watchpoint unit available for hardware breakpoint");
  399. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  400. }
  401. if ((breakpoint->length != 2) && (breakpoint->length != 4))
  402. {
  403. LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
  404. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  405. }
  406. if (breakpoint->type == BKPT_HARD)
  407. {
  408. arm7_9_assign_wp(arm7_9, breakpoint);
  409. }
  410. arm7_9->breakpoint_count++;
  411. return arm7_9_set_breakpoint(target, breakpoint);
  412. }
  413. /**
  414. * Removes a breakpoint from an ARM7/9 target. This will make sure there are no
  415. * dangling breakpoints and updates available watchpoints if it is a hardware
  416. * breakpoint.
  417. *
  418. * @param target Pointer to the target to have a breakpoint removed
  419. * @param breakpoint Pointer to the breakpoint to be removed
  420. * @return Error status if there was a problem unsetting the breakpoint or the
  421. * watchpoints could not be cleared
  422. */
  423. int arm7_9_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  424. {
  425. int retval = ERROR_OK;
  426. armv4_5_common_t *armv4_5 = target->arch_info;
  427. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  428. if((retval = arm7_9_unset_breakpoint(target, breakpoint)) != ERROR_OK)
  429. {
  430. return retval;
  431. }
  432. if (breakpoint->type == BKPT_HARD)
  433. arm7_9->wp_available++;
  434. arm7_9->breakpoint_count--;
  435. if (arm7_9->breakpoint_count==0)
  436. {
  437. /* make sure we don't have any dangling breakpoints */
  438. if((retval = arm7_9_clear_watchpoints(arm7_9)) != ERROR_OK)
  439. {
  440. return retval;
  441. }
  442. }
  443. return ERROR_OK;
  444. }
  445. /**
  446. * Sets a watchpoint for an ARM7/9 target in one of the watchpoint units. It is
  447. * considered a bug to call this function when there are no available watchpoint
  448. * units.
  449. *
  450. * @param target Pointer to an ARM7/9 target to set a watchpoint on
  451. * @param watchpoint Pointer to the watchpoint to be set
  452. * @return Error status if watchpoint set fails or the result of executing the
  453. * JTAG queue
  454. */
  455. int arm7_9_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  456. {
  457. int retval = ERROR_OK;
  458. armv4_5_common_t *armv4_5 = target->arch_info;
  459. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  460. int rw_mask = 1;
  461. u32 mask;
  462. mask = watchpoint->length - 1;
  463. if (target->state != TARGET_HALTED)
  464. {
  465. LOG_WARNING("target not halted");
  466. return ERROR_TARGET_NOT_HALTED;
  467. }
  468. if (watchpoint->rw == WPT_ACCESS)
  469. rw_mask = 0;
  470. else
  471. rw_mask = 1;
  472. if (!arm7_9->wp0_used)
  473. {
  474. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_VALUE], watchpoint->address);
  475. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK], mask);
  476. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK], watchpoint->mask);
  477. if( watchpoint->mask != 0xffffffffu )
  478. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_VALUE], watchpoint->value);
  479. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK], 0xff & ~EICE_W_CTRL_nOPC & ~rw_mask);
  480. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], EICE_W_CTRL_ENABLE | EICE_W_CTRL_nOPC | (watchpoint->rw & 1));
  481. if((retval = jtag_execute_queue()) != ERROR_OK)
  482. {
  483. return retval;
  484. }
  485. watchpoint->set = 1;
  486. arm7_9->wp0_used = 2;
  487. }
  488. else if (!arm7_9->wp1_used)
  489. {
  490. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_VALUE], watchpoint->address);
  491. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_MASK], mask);
  492. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_DATA_MASK], watchpoint->mask);
  493. if( watchpoint->mask != 0xffffffffu )
  494. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_DATA_VALUE], watchpoint->value);
  495. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_MASK], 0xff & ~EICE_W_CTRL_nOPC & ~rw_mask);
  496. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE], EICE_W_CTRL_ENABLE | EICE_W_CTRL_nOPC | (watchpoint->rw & 1));
  497. if((retval = jtag_execute_queue()) != ERROR_OK)
  498. {
  499. return retval;
  500. }
  501. watchpoint->set = 2;
  502. arm7_9->wp1_used = 2;
  503. }
  504. else
  505. {
  506. LOG_ERROR("BUG: no hardware comparator available");
  507. return ERROR_OK;
  508. }
  509. return ERROR_OK;
  510. }
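/* Editorial note (added in this write-up, not in the original source): in the
 * EmbeddedICE mask registers, a bit set to 1 marks the corresponding
 * comparator bit as "don't care". The control mask above therefore always
 * requires nOPC to match (data access rather than opcode fetch) and, unless
 * WPT_ACCESS was requested (rw_mask == 0), also requires the read/write bit
 * to match the direction encoded in (watchpoint->rw & 1). */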
  511. /**
  512. * Unset an existing watchpoint and clear the used watchpoint unit.
  513. *
  514. * @param target Pointer to the target to have the watchpoint removed
  515. * @param watchpoint Pointer to the watchpoint to be removed
  516. * @return Error status while trying to unset the watchpoint or the result of
  517. * executing the JTAG queue
  518. */
  519. int arm7_9_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  520. {
  521. int retval = ERROR_OK;
  522. armv4_5_common_t *armv4_5 = target->arch_info;
  523. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  524. if (target->state != TARGET_HALTED)
  525. {
  526. LOG_WARNING("target not halted");
  527. return ERROR_TARGET_NOT_HALTED;
  528. }
  529. if (!watchpoint->set)
  530. {
  531. LOG_WARNING("watchpoint not set");
  532. return ERROR_OK;
  533. }
  534. if (watchpoint->set == 1)
  535. {
  536. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], 0x0);
  537. if((retval = jtag_execute_queue()) != ERROR_OK)
  538. {
  539. return retval;
  540. }
  541. arm7_9->wp0_used = 0;
  542. }
  543. else if (watchpoint->set == 2)
  544. {
  545. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE], 0x0);
  546. if((retval = jtag_execute_queue()) != ERROR_OK)
  547. {
  548. return retval;
  549. }
  550. arm7_9->wp1_used = 0;
  551. }
  552. watchpoint->set = 0;
  553. return ERROR_OK;
  554. }
  555. /**
  556. * Add a watchpoint to an ARM7/9 target. If there are no watchpoint units
  557. * available, an error response is returned.
  558. *
  559. * @param target Pointer to the ARM7/9 target to add a watchpoint to
  560. * @param watchpoint Pointer to the watchpoint to be added
  561. * @return Error status while trying to add the watchpoint
  562. */
  563. int arm7_9_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  564. {
  565. armv4_5_common_t *armv4_5 = target->arch_info;
  566. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  567. if (target->state != TARGET_HALTED)
  568. {
  569. LOG_WARNING("target not halted");
  570. return ERROR_TARGET_NOT_HALTED;
  571. }
  572. if (arm7_9->wp_available < 1)
  573. {
  574. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  575. }
  576. if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
  577. {
  578. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  579. }
  580. arm7_9->wp_available--;
  581. return ERROR_OK;
  582. }
  583. /**
  584. * Remove a watchpoint from an ARM7/9 target. The watchpoint will be unset and
  585. * the watchpoint unit it occupied will be made available again.
  586. *
  587. * @param target Pointer to the target to remove a watchpoint from
  588. * @param watchpoint Pointer to the watchpoint to be removed
  589. * @return Result of trying to unset the watchpoint
  590. */
  591. int arm7_9_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  592. {
  593. int retval = ERROR_OK;
  594. armv4_5_common_t *armv4_5 = target->arch_info;
  595. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  596. if (watchpoint->set)
  597. {
  598. if((retval = arm7_9_unset_watchpoint(target, watchpoint)) != ERROR_OK)
  599. {
  600. return retval;
  601. }
  602. }
  603. arm7_9->wp_available++;
  604. return ERROR_OK;
  605. }
  606. /**
  607. * Restarts the target by sending a RESTART instruction and moving the JTAG
  608. * state to IDLE. This includes a timeout waiting for DBGACK and SYSCOMP to be
  609. * asserted by the processor.
  610. *
  611. * @param target Pointer to target to issue commands to
  612. * @return Error status if there is a timeout or a problem while executing the
  613. * JTAG queue
  614. */
  615. int arm7_9_execute_sys_speed(struct target_s *target)
  616. {
  617. int retval;
  618. armv4_5_common_t *armv4_5 = target->arch_info;
  619. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  620. arm_jtag_t *jtag_info = &arm7_9->jtag_info;
  621. reg_t *dbg_stat = &arm7_9->eice_cache->reg_list[EICE_DBG_STAT];
  622. /* set RESTART instruction */
  623. jtag_add_end_state(TAP_IDLE);
  624. if (arm7_9->need_bypass_before_restart) {
  625. arm7_9->need_bypass_before_restart = 0;
  626. arm_jtag_set_instr(jtag_info, 0xf, NULL);
  627. }
  628. arm_jtag_set_instr(jtag_info, 0x4, NULL);
  629. long long then=timeval_ms();
  630. int timeout;
  631. while (!(timeout=((timeval_ms()-then)>1000)))
  632. {
  633. /* read debug status register */
  634. embeddedice_read_reg(dbg_stat);
  635. if ((retval = jtag_execute_queue()) != ERROR_OK)
  636. return retval;
  637. if ((buf_get_u32(dbg_stat->value, EICE_DBG_STATUS_DBGACK, 1))
  638. && (buf_get_u32(dbg_stat->value, EICE_DBG_STATUS_SYSCOMP, 1)))
  639. break;
  640. if (debug_level>=3)
  641. {
  642. alive_sleep(100);
  643. } else
  644. {
  645. keep_alive();
  646. }
  647. }
  648. if (timeout)
  649. {
  650. LOG_ERROR("timeout waiting for SYSCOMP & DBGACK, last DBG_STATUS: %x", buf_get_u32(dbg_stat->value, 0, dbg_stat->size));
  651. return ERROR_TARGET_TIMEOUT;
  652. }
  653. return ERROR_OK;
  654. }
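/* Editorial note (added in this write-up, not in the original source; based
 * on the documented ARM7/9 debug behaviour): RESTART releases the core from
 * debug state so the previously scanned instruction performs one real,
 * system-speed memory access; the core then re-enters debug state, which
 * EmbeddedICE reports by raising DBGACK together with SYSCOMP ("system speed
 * access complete"). The loop above polls DBG_STAT for that condition for up
 * to one second, calling keep_alive()/alive_sleep() so the debugger
 * connection is not starved while waiting. */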
  655. /**
  656. * Restarts the target by sending a RESTART instruction and moving the JTAG
  657. * state to IDLE. Instead of polling, it queues a check that DBGACK and
  658. * SYSCOMP are set, which is evaluated on the next JTAG queue execution.
  659. *
  660. * @param target Pointer to the target to issue commands to
  661. * @return Always ERROR_OK
  662. */
  663. int arm7_9_execute_fast_sys_speed(struct target_s *target)
  664. {
  665. static int set=0;
  666. static u8 check_value[4], check_mask[4];
  667. armv4_5_common_t *armv4_5 = target->arch_info;
  668. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  669. arm_jtag_t *jtag_info = &arm7_9->jtag_info;
  670. reg_t *dbg_stat = &arm7_9->eice_cache->reg_list[EICE_DBG_STAT];
  671. /* set RESTART instruction */
  672. jtag_add_end_state(TAP_IDLE);
  673. if (arm7_9->need_bypass_before_restart) {
  674. arm7_9->need_bypass_before_restart = 0;
  675. arm_jtag_set_instr(jtag_info, 0xf, NULL);
  676. }
  677. arm_jtag_set_instr(jtag_info, 0x4, NULL);
  678. if (!set)
  679. {
  680. /* check for DBGACK and SYSCOMP set (others don't care) */
  681. /* NB! These buffers must remain valid until after the next
  682. * jtag_execute_queue(); we evaluate the values upon first execution here
  683. * in lieu of setting them up during early setup.
  684. */
  685. buf_set_u32(check_value, 0, 32, 0x9);
  686. buf_set_u32(check_mask, 0, 32, 0x9);
  687. set=1;
  688. }
  689. /* read debug status register */
  690. embeddedice_read_reg_w_check(dbg_stat, check_value, check_mask);
  691. return ERROR_OK;
  692. }
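/* Editorial note (added in this write-up, not in the original source): unlike
 * arm7_9_execute_sys_speed() above, this variant only queues a read of
 * DBG_STAT with an expected value/mask of 0x9, i.e. DBGACK (bit 0) and
 * SYSCOMP (bit 3) set. The check is evaluated by a later
 * jtag_execute_queue() call, which lets the fast (DCC) download paths avoid
 * a polling loop for every access. */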
  693. int arm7_9_target_request_data(target_t *target, u32 size, u8 *buffer)
  694. {
  695. armv4_5_common_t *armv4_5 = target->arch_info;
  696. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  697. arm_jtag_t *jtag_info = &arm7_9->jtag_info;
  698. u32 *data;
  699. int retval = ERROR_OK;
  700. u32 i;
  701. data = malloc(size * (sizeof(u32)));
  702. retval = embeddedice_receive(jtag_info, data, size);
  703. for (i = 0; i < size; i++)
  704. {
  705. h_u32_to_le(buffer + (i * 4), data[i]);
  706. }
  707. free(data);
  708. return retval;
  709. }
  710. int arm7_9_handle_target_request(void *priv)
  711. {
  712. int retval = ERROR_OK;
  713. target_t *target = priv;
  714. if (!target->type->examined)
  715. return ERROR_OK;
  716. armv4_5_common_t *armv4_5 = target->arch_info;
  717. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  718. arm_jtag_t *jtag_info = &arm7_9->jtag_info;
  719. reg_t *dcc_control = &arm7_9->eice_cache->reg_list[EICE_COMMS_CTRL];
  720. if (!target->dbg_msg_enabled)
  721. return ERROR_OK;
  722. if (target->state == TARGET_RUNNING)
  723. {
  724. /* read DCC control register */
  725. embeddedice_read_reg(dcc_control);
  726. if ((retval = jtag_execute_queue()) != ERROR_OK)
  727. {
  728. return retval;
  729. }
  730. /* check W bit */
  731. if (buf_get_u32(dcc_control->value, 1, 1) == 1)
  732. {
  733. u32 request;
  734. if ((retval = embeddedice_receive(jtag_info, &request, 1)) != ERROR_OK)
  735. {
  736. return retval;
  737. }
  738. if ((retval = target_request(target, request)) != ERROR_OK)
  739. {
  740. return retval;
  741. }
  742. }
  743. }
  744. return ERROR_OK;
  745. }
  746. int arm7_9_poll(target_t *target)
  747. {
  748. int retval;
  749. armv4_5_common_t *armv4_5 = target->arch_info;
  750. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  751. reg_t *dbg_stat = &arm7_9->eice_cache->reg_list[EICE_DBG_STAT];
  752. /* read debug status register */
  753. embeddedice_read_reg(dbg_stat);
  754. if ((retval = jtag_execute_queue()) != ERROR_OK)
  755. {
  756. return retval;
  757. }
  758. if (buf_get_u32(dbg_stat->value, EICE_DBG_STATUS_DBGACK, 1))
  759. {
  760. /* LOG_DEBUG("DBGACK set, dbg_state->value: 0x%x", buf_get_u32(dbg_stat->value, 0, 32));*/
  761. if (target->state == TARGET_UNKNOWN)
  762. {
  763. target->state = TARGET_RUNNING;
  764. LOG_WARNING("DBGACK set while target was in unknown state. Reset or initialize target.");
  765. }
  766. if ((target->state == TARGET_RUNNING) || (target->state == TARGET_RESET))
  767. {
  768. int check_pc=0;
  769. if (target->state == TARGET_RESET)
  770. {
  771. if (target->reset_halt)
  772. {
  773. if ((jtag_reset_config & RESET_SRST_PULLS_TRST)==0)
  774. {
  775. check_pc = 1;
  776. }
  777. }
  778. }
  779. target->state = TARGET_HALTED;
  780. if ((retval = arm7_9_debug_entry(target)) != ERROR_OK)
  781. return retval;
  782. if (check_pc)
  783. {
  784. reg_t *reg = register_get_by_name(target->reg_cache, "pc", 1);
  785. u32 t=*((u32 *)reg->value);
  786. if (t!=0)
  787. {
  788. LOG_ERROR("PC was not 0. Does this target need srst_pulls_trst?");
  789. }
  790. }
  791. if ((retval = target_call_event_callbacks(target, TARGET_EVENT_HALTED)) != ERROR_OK)
  792. {
  793. return retval;
  794. }
  795. }
  796. if (target->state == TARGET_DEBUG_RUNNING)
  797. {
  798. target->state = TARGET_HALTED;
  799. if ((retval = arm7_9_debug_entry(target)) != ERROR_OK)
  800. return retval;
  801. if ((retval = target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED)) != ERROR_OK)
  802. {
  803. return retval;
  804. }
  805. }
  806. if (target->state != TARGET_HALTED)
  807. {
  808. LOG_WARNING("DBGACK set, but the target did not end up in the halted stated %d", target->state);
  809. }
  810. }
  811. else
  812. {
  813. if (target->state != TARGET_DEBUG_RUNNING)
  814. target->state = TARGET_RUNNING;
  815. }
  816. return ERROR_OK;
  817. }
  818. /*
  819. Some -S targets (ARM966E-S in the STR912 isn't affected, ARM926EJ-S
  820. in the LPC3180 and AT91SAM9260 is affected) completely stop the JTAG clock
  821. while the core is held in reset (SRST). It isn't possible to program the halt
  822. condition once reset has been asserted, hence a hook that allows the target to set
  823. up its reset-halt condition prior to asserting reset.
  824. */
  825. int arm7_9_assert_reset(target_t *target)
  826. {
  827. armv4_5_common_t *armv4_5 = target->arch_info;
  828. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  829. LOG_DEBUG("target->state: %s",
  830. Jim_Nvp_value2name_simple( nvp_target_state,target->state)->name);
  831. if (!(jtag_reset_config & RESET_HAS_SRST))
  832. {
  833. LOG_ERROR("Can't assert SRST");
  834. return ERROR_FAIL;
  835. }
  836. if (target->reset_halt)
  837. {
  838. /*
  839. * Some targets do not support communication while SRST is asserted. We need to
  840. * set up the reset vector catch here.
  841. *
  842. * If TRST is asserted, then these settings will be reset anyway, so setting them
  843. * here is harmless.
  844. */
  845. if (arm7_9->has_vector_catch)
  846. {
  847. /* program vector catch register to catch reset vector */
  848. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_VEC_CATCH], 0x1);
  849. }
  850. else
  851. {
  852. /* program watchpoint unit to match on reset vector address */
  853. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_VALUE], 0x0);
  854. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK], 0x3);
  855. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK], 0xffffffff);
  856. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], EICE_W_CTRL_ENABLE);
  857. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK], ~EICE_W_CTRL_nOPC & 0xff);
  858. }
  859. }
  860. /* here we should issue a srst only, but we may have to assert trst as well */
  861. if (jtag_reset_config & RESET_SRST_PULLS_TRST)
  862. {
  863. jtag_add_reset(1, 1);
  864. } else
  865. {
  866. jtag_add_reset(0, 1);
  867. }
  868. target->state = TARGET_RESET;
  869. jtag_add_sleep(50000);
  870. armv4_5_invalidate_core_regs(target);
  871. if ((target->reset_halt)&&((jtag_reset_config & RESET_SRST_PULLS_TRST)==0))
  872. {
  873. /* debug entry was already prepared above (vector catch or watchpoint on the reset vector) */
  874. target->debug_reason = DBG_REASON_DBGRQ;
  875. }
  876. return ERROR_OK;
  877. }
  878. int arm7_9_deassert_reset(target_t *target)
  879. {
  880. int retval=ERROR_OK;
  881. LOG_DEBUG("target->state: %s",
  882. Jim_Nvp_value2name_simple( nvp_target_state,target->state)->name);
  883. /* deassert reset lines */
  884. jtag_add_reset(0, 0);
  885. if (target->reset_halt&&(jtag_reset_config & RESET_SRST_PULLS_TRST)!=0)
  886. {
  887. LOG_WARNING("srst pulls trst - can not reset into halted mode. Issuing halt after reset.");
  888. /* set up embedded ice registers again */
  889. if ((retval=target->type->examine(target))!=ERROR_OK)
  890. return retval;
  891. if ((retval=target_poll(target))!=ERROR_OK)
  892. {
  893. return retval;
  894. }
  895. if ((retval=target_halt(target))!=ERROR_OK)
  896. {
  897. return retval;
  898. }
  899. }
  900. return retval;
  901. }
  902. int arm7_9_clear_halt(target_t *target)
  903. {
  904. armv4_5_common_t *armv4_5 = target->arch_info;
  905. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  906. reg_t *dbg_ctrl = &arm7_9->eice_cache->reg_list[EICE_DBG_CTRL];
  907. /* we used DBGRQ only if we didn't come out of reset */
  908. if (!arm7_9->debug_entry_from_reset && arm7_9->use_dbgrq)
  909. {
  910. /* program EmbeddedICE Debug Control Register to deassert DBGRQ
  911. */
  912. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGRQ, 1, 0);
  913. embeddedice_store_reg(dbg_ctrl);
  914. }
  915. else
  916. {
  917. if (arm7_9->debug_entry_from_reset && arm7_9->has_vector_catch)
  918. {
  919. /* if we came out of reset, and vector catch is supported, we used
  920. * vector catch to enter debug state
  921. * restore the register in that case
  922. */
  923. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_VEC_CATCH]);
  924. }
  925. else
  926. {
  927. /* restore registers if watchpoint unit 0 was in use
  928. */
  929. if (arm7_9->wp0_used)
  930. {
  931. if (arm7_9->debug_entry_from_reset)
  932. {
  933. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_VALUE]);
  934. }
  935. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK]);
  936. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK]);
  937. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK]);
  938. }
  939. /* control value always has to be restored, as it was either disabled,
  940. * or enabled with possibly different bits
  941. */
  942. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE]);
  943. }
  944. }
  945. return ERROR_OK;
  946. }
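/* Editorial note (added in this write-up, not in the original source):
 * arm7_9_clear_halt() undoes whatever was programmed to force the halt: it
 * deasserts DBGRQ when that was used, restores the vector catch register
 * after a reset-vector catch, or restores the cached watchpoint-0 registers
 * when that comparator had been programmed to trap execution (see
 * arm7_9_assert_reset() above and arm7_9_halt() below). */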
  947. int arm7_9_soft_reset_halt(struct target_s *target)
  948. {
  949. armv4_5_common_t *armv4_5 = target->arch_info;
  950. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  951. reg_t *dbg_stat = &arm7_9->eice_cache->reg_list[EICE_DBG_STAT];
  952. reg_t *dbg_ctrl = &arm7_9->eice_cache->reg_list[EICE_DBG_CTRL];
  953. int i;
  954. int retval;
  955. if ((retval=target_halt(target))!=ERROR_OK)
  956. return retval;
  957. long long then=timeval_ms();
  958. int timeout;
  959. while (!(timeout=((timeval_ms()-then)>1000)))
  960. {
  961. if (buf_get_u32(dbg_stat->value, EICE_DBG_STATUS_DBGACK, 1) != 0)
  962. break;
  963. embeddedice_read_reg(dbg_stat);
  964. if ((retval=jtag_execute_queue())!=ERROR_OK)
  965. return retval;
  966. if (debug_level>=3)
  967. {
  968. alive_sleep(100);
  969. } else
  970. {
  971. keep_alive();
  972. }
  973. }
  974. if (timeout)
  975. {
  976. LOG_ERROR("Failed to halt CPU after 1 sec");
  977. return ERROR_TARGET_TIMEOUT;
  978. }
  979. target->state = TARGET_HALTED;
  980. /* program EmbeddedICE Debug Control Register to assert DBGACK and INTDIS
  981. * ensure that DBGRQ is cleared
  982. */
  983. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGACK, 1, 1);
  984. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGRQ, 1, 0);
  985. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_INTDIS, 1, 1);
  986. embeddedice_store_reg(dbg_ctrl);
  987. if ((retval = arm7_9_clear_halt(target)) != ERROR_OK)
  988. {
  989. return retval;
  990. }
  991. /* if the target is in Thumb state, change to ARM state */
  992. if (buf_get_u32(dbg_stat->value, EICE_DBG_STATUS_ITBIT, 1))
  993. {
  994. u32 r0_thumb, pc_thumb;
  995. LOG_DEBUG("target entered debug from Thumb state, changing to ARM");
  996. /* Entered debug from Thumb mode */
  997. armv4_5->core_state = ARMV4_5_STATE_THUMB;
  998. arm7_9->change_to_arm(target, &r0_thumb, &pc_thumb);
  999. }
  1000. /* all register content is now invalid */
  1001. if ((retval = armv4_5_invalidate_core_regs(target)) != ERROR_OK)
  1002. {
  1003. return retval;
  1004. }
  1005. /* SVC, ARM state, IRQ and FIQ disabled */
  1006. buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8, 0xd3);
  1007. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
  1008. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
  1009. /* start fetching from 0x0 */
  1010. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, 0x0);
  1011. armv4_5->core_cache->reg_list[15].dirty = 1;
  1012. armv4_5->core_cache->reg_list[15].valid = 1;
  1013. armv4_5->core_mode = ARMV4_5_MODE_SVC;
  1014. armv4_5->core_state = ARMV4_5_STATE_ARM;
  1015. if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
  1016. return ERROR_FAIL;
  1017. /* reset registers */
  1018. for (i = 0; i <= 14; i++)
  1019. {
  1020. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, 0xffffffff);
  1021. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 1;
  1022. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
  1023. }
  1024. if ((retval = target_call_event_callbacks(target, TARGET_EVENT_HALTED)) != ERROR_OK)
  1025. {
  1026. return retval;
  1027. }
  1028. return ERROR_OK;
  1029. }
  1030. int arm7_9_halt(target_t *target)
  1031. {
  1032. if (target->state==TARGET_RESET)
  1033. {
  1034. LOG_ERROR("BUG: arm7/9 does not support halt during reset. This is handled in arm7_9_assert_reset()");
  1035. return ERROR_OK;
  1036. }
  1037. armv4_5_common_t *armv4_5 = target->arch_info;
  1038. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1039. reg_t *dbg_ctrl = &arm7_9->eice_cache->reg_list[EICE_DBG_CTRL];
  1040. LOG_DEBUG("target->state: %s",
  1041. Jim_Nvp_value2name_simple( nvp_target_state,target->state)->name);
  1042. if (target->state == TARGET_HALTED)
  1043. {
  1044. LOG_DEBUG("target was already halted");
  1045. return ERROR_OK;
  1046. }
  1047. if (target->state == TARGET_UNKNOWN)
  1048. {
  1049. LOG_WARNING("target was in unknown state when halt was requested");
  1050. }
  1051. if (arm7_9->use_dbgrq)
  1052. {
  1053. /* program EmbeddedICE Debug Control Register to assert DBGRQ
  1054. */
  1055. if (arm7_9->set_special_dbgrq) {
  1056. arm7_9->set_special_dbgrq(target);
  1057. } else {
  1058. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGRQ, 1, 1);
  1059. embeddedice_store_reg(dbg_ctrl);
  1060. }
  1061. }
  1062. else
  1063. {
  1064. /* program watchpoint unit to match on any address
  1065. */
  1066. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK], 0xffffffff);
  1067. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK], 0xffffffff);
  1068. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], EICE_W_CTRL_ENABLE);
  1069. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK], ~EICE_W_CTRL_nOPC & 0xff);
  1070. }
  1071. target->debug_reason = DBG_REASON_DBGRQ;
  1072. return ERROR_OK;
  1073. }
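/* Editorial note (added in this write-up, not in the original source): when
 * DBGRQ is not used, halting is implemented by programming watchpoint unit 0
 * to match any address and any data on an opcode fetch, so the next
 * instruction fetch forces the core into debug state; the clobbered
 * comparator registers are restored later by arm7_9_clear_halt(). */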
  1074. int arm7_9_debug_entry(target_t *target)
  1075. {
  1076. int i;
  1077. u32 context[16];
  1078. u32* context_p[16];
  1079. u32 r0_thumb, pc_thumb;
  1080. u32 cpsr;
  1081. int retval;
  1082. /* get pointers to arch-specific information */
  1083. armv4_5_common_t *armv4_5 = target->arch_info;
  1084. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1085. reg_t *dbg_stat = &arm7_9->eice_cache->reg_list[EICE_DBG_STAT];
  1086. reg_t *dbg_ctrl = &arm7_9->eice_cache->reg_list[EICE_DBG_CTRL];
  1087. #ifdef _DEBUG_ARM7_9_
  1088. LOG_DEBUG("-");
  1089. #endif
  1090. if (arm7_9->pre_debug_entry)
  1091. arm7_9->pre_debug_entry(target);
  1092. /* program EmbeddedICE Debug Control Register to assert DBGACK and INTDIS
  1093. * ensure that DBGRQ is cleared
  1094. */
  1095. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGACK, 1, 1);
  1096. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGRQ, 1, 0);
  1097. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_INTDIS, 1, 1);
  1098. embeddedice_store_reg(dbg_ctrl);
  1099. if ((retval = arm7_9_clear_halt(target)) != ERROR_OK)
  1100. {
  1101. return retval;
  1102. }
  1103. if ((retval = jtag_execute_queue()) != ERROR_OK)
  1104. {
  1105. return retval;
  1106. }
  1107. if ((retval = arm7_9->examine_debug_reason(target)) != ERROR_OK)
  1108. return retval;
  1109. if (target->state != TARGET_HALTED)
  1110. {
  1111. LOG_WARNING("target not halted");
  1112. return ERROR_TARGET_NOT_HALTED;
  1113. }
  1114. /* if the target is in Thumb state, change to ARM state */
  1115. if (buf_get_u32(dbg_stat->value, EICE_DBG_STATUS_ITBIT, 1))
  1116. {
  1117. LOG_DEBUG("target entered debug from Thumb state");
  1118. /* Entered debug from Thumb mode */
  1119. armv4_5->core_state = ARMV4_5_STATE_THUMB;
  1120. arm7_9->change_to_arm(target, &r0_thumb, &pc_thumb);
  1121. LOG_DEBUG("r0_thumb: 0x%8.8x, pc_thumb: 0x%8.8x", r0_thumb, pc_thumb);
  1122. }
  1123. else
  1124. {
  1125. LOG_DEBUG("target entered debug from ARM state");
  1126. /* Entered debug from ARM mode */
  1127. armv4_5->core_state = ARMV4_5_STATE_ARM;
  1128. }
  1129. for (i = 0; i < 16; i++)
  1130. context_p[i] = &context[i];
  1131. /* save core registers (r0 - r15 of current core mode) */
  1132. arm7_9->read_core_regs(target, 0xffff, context_p);
  1133. arm7_9->read_xpsr(target, &cpsr, 0);
  1134. if ((retval = jtag_execute_queue()) != ERROR_OK)
  1135. return retval;
  1136. /* if the core has been executing in Thumb state, set the T bit */
  1137. if (armv4_5->core_state == ARMV4_5_STATE_THUMB)
  1138. cpsr |= 0x20;
  1139. buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, cpsr);
  1140. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 0;
  1141. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
  1142. armv4_5->core_mode = cpsr & 0x1f;
  1143. if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
  1144. {
  1145. target->state = TARGET_UNKNOWN;
  1146. LOG_ERROR("cpsr contains invalid mode value - communication failure");
  1147. return ERROR_TARGET_FAILURE;
  1148. }
  1149. LOG_DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
  1150. if (armv4_5->core_state == ARMV4_5_STATE_THUMB)
  1151. {
  1152. LOG_DEBUG("thumb state, applying fixups");
  1153. context[0] = r0_thumb;
  1154. context[15] = pc_thumb;
  1155. } else if (armv4_5->core_state == ARMV4_5_STATE_ARM)
  1156. {
  1157. /* adjust the PC value stored by STM: the pipeline leaves it 3 instructions (0xC bytes) ahead */
  1158. context[15] -= 3 * 4;
  1159. }
  1160. if ((target->debug_reason == DBG_REASON_BREAKPOINT)
  1161. || (target->debug_reason == DBG_REASON_SINGLESTEP)
  1162. || (target->debug_reason == DBG_REASON_WATCHPOINT)
  1163. || (target->debug_reason == DBG_REASON_WPTANDBKPT)
  1164. || ((target->debug_reason == DBG_REASON_DBGRQ) && (arm7_9->use_dbgrq == 0)))
  1165. context[15] -= 3 * ((armv4_5->core_state == ARMV4_5_STATE_ARM) ? 4 : 2);
  1166. else if (target->debug_reason == DBG_REASON_DBGRQ)
  1167. context[15] -= arm7_9->dbgreq_adjust_pc * ((armv4_5->core_state == ARMV4_5_STATE_ARM) ? 4 : 2);
  1168. else
  1169. {
  1170. LOG_ERROR("unknown debug reason: %i", target->debug_reason);
  1171. }
  1172. if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
  1173. return ERROR_FAIL;
  1174. for (i=0; i<=15; i++)
  1175. {
  1176. LOG_DEBUG("r%i: 0x%8.8x", i, context[i]);
  1177. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, context[i]);
  1178. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
  1179. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
  1180. }
  1181. LOG_DEBUG("entered debug state at PC 0x%x", context[15]);
  1182. if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
  1183. return ERROR_FAIL;
  1184. /* exceptions other than USR & SYS have a saved program status register */
  1185. if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
  1186. {
  1187. u32 spsr;
  1188. arm7_9->read_xpsr(target, &spsr, 1);
  1189. if ((retval = jtag_execute_queue()) != ERROR_OK)
  1190. {
  1191. return retval;
  1192. }
  1193. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, spsr);
  1194. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
  1195. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
  1196. }
  1197. /* r0 and r15 (pc) have to be restored later */
  1198. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 0).dirty = ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 0).valid;
  1199. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 15).dirty = ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 15).valid;
  1200. if ((retval = jtag_execute_queue()) != ERROR_OK)
  1201. return retval;
  1202. if (arm7_9->post_debug_entry)
  1203. arm7_9->post_debug_entry(target);
  1204. return ERROR_OK;
  1205. }
  1206. int arm7_9_full_context(target_t *target)
  1207. {
  1208. int i;
  1209. int retval;
  1210. armv4_5_common_t *armv4_5 = target->arch_info;
  1211. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1212. LOG_DEBUG("-");
  1213. if (target->state != TARGET_HALTED)
  1214. {
  1215. LOG_WARNING("target not halted");
  1216. return ERROR_TARGET_NOT_HALTED;
  1217. }
  1218. if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
  1219. return ERROR_FAIL;
  1220. /* iterate through processor modes (User, FIQ, IRQ, SVC, ABT, UND)
  1221. * SYS shares registers with User, so we don't touch SYS
  1222. */
  1223. for (i = 0; i < 6; i++)
  1224. {
  1225. u32 mask = 0;
  1226. u32* reg_p[16];
  1227. int j;
  1228. int valid = 1;
  1229. /* check if there are invalid registers in the current mode
  1230. */
  1231. for (j = 0; j <= 16; j++)
  1232. {
  1233. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
  1234. valid = 0;
  1235. }
  1236. if (!valid)
  1237. {
  1238. u32 tmp_cpsr;
  1239. /* change processor mode (and mask T bit) */
  1240. tmp_cpsr = buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & 0xE0;
  1241. tmp_cpsr |= armv4_5_number_to_mode(i);
  1242. tmp_cpsr &= ~0x20;
  1243. arm7_9->write_xpsr_im8(target, tmp_cpsr & 0xff, 0, 0);
  1244. for (j = 0; j < 15; j++)
  1245. {
  1246. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
  1247. {
  1248. reg_p[j] = (u32*)ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value;
  1249. mask |= 1 << j;
  1250. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
  1251. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
  1252. }
  1253. }
  1254. /* if only the PSR is invalid, mask is all zeroes */
  1255. if (mask)
  1256. arm7_9->read_core_regs(target, mask, reg_p);
  1257. /* check if the PSR has to be read */
  1258. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid == 0)
  1259. {
  1260. arm7_9->read_xpsr(target, (u32*)ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 1);
  1261. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
  1262. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
  1263. }
  1264. }
  1265. }
  1266. /* restore processor mode (mask T bit) */
  1267. arm7_9->write_xpsr_im8(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & ~0x20, 0, 0);
  1268. if ((retval = jtag_execute_queue()) != ERROR_OK)
  1269. {
  1270. return retval;
  1271. }
  1272. return ERROR_OK;
  1273. }
  1274. int arm7_9_restore_context(target_t *target)
  1275. {
  1276. armv4_5_common_t *armv4_5 = target->arch_info;
  1277. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1278. reg_t *reg;
  1279. armv4_5_core_reg_t *reg_arch_info;
  1280. enum armv4_5_mode current_mode = armv4_5->core_mode;
  1281. int i, j;
  1282. int dirty;
  1283. int mode_change;
  1284. LOG_DEBUG("-");
  1285. if (target->state != TARGET_HALTED)
  1286. {
  1287. LOG_WARNING("target not halted");
  1288. return ERROR_TARGET_NOT_HALTED;
  1289. }
  1290. if (arm7_9->pre_restore_context)
  1291. arm7_9->pre_restore_context(target);
  1292. if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
  1293. return ERROR_FAIL;
  1294. /* iterate through processor modes (User, FIQ, IRQ, SVC, ABT, UND)
  1295. * SYS shares registers with User, so we don't touch SYS
  1296. */
  1297. for (i = 0; i < 6; i++)
  1298. {
  1299. LOG_DEBUG("examining %s mode", armv4_5_mode_strings[i]);
  1300. dirty = 0;
  1301. mode_change = 0;
  1302. /* check if there are dirty registers in the current mode
  1303. */
  1304. for (j = 0; j <= 16; j++)
  1305. {
  1306. reg = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j);
  1307. reg_arch_info = reg->arch_info;
  1308. if (reg->dirty == 1)
  1309. {
  1310. if (reg->valid == 1)
  1311. {
  1312. dirty = 1;
  1313. LOG_DEBUG("examining dirty reg: %s", reg->name);
  1314. if ((reg_arch_info->mode != ARMV4_5_MODE_ANY)
  1315. && (reg_arch_info->mode != current_mode)
  1316. && !((reg_arch_info->mode == ARMV4_5_MODE_USR) && (armv4_5->core_mode == ARMV4_5_MODE_SYS))
  1317. && !((reg_arch_info->mode == ARMV4_5_MODE_SYS) && (armv4_5->core_mode == ARMV4_5_MODE_USR)))
  1318. {
  1319. mode_change = 1;
  1320. LOG_DEBUG("require mode change");
  1321. }
  1322. }
  1323. else
  1324. {
  1325. LOG_ERROR("BUG: dirty register '%s', but no valid data", reg->name);
  1326. }
  1327. }
  1328. }
  1329. if (dirty)
  1330. {
  1331. u32 mask = 0x0;
  1332. int num_regs = 0;
  1333. u32 regs[16];
  1334. if (mode_change)
  1335. {
  1336. u32 tmp_cpsr;
  1337. /* change processor mode (mask T bit) */
  1338. tmp_cpsr = buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & 0xE0;
  1339. tmp_cpsr |= armv4_5_number_to_mode(i);
  1340. tmp_cpsr &= ~0x20;
  1341. arm7_9->write_xpsr_im8(target, tmp_cpsr & 0xff, 0, 0);
  1342. current_mode = armv4_5_number_to_mode(i);
  1343. }
  1344. for (j = 0; j <= 14; j++)
  1345. {
  1346. reg = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j);
  1347. reg_arch_info = reg->arch_info;
  1348. if (reg->dirty == 1)
  1349. {
  1350. regs[j] = buf_get_u32(reg->value, 0, 32);
  1351. mask |= 1 << j;
  1352. num_regs++;
  1353. reg->dirty = 0;
  1354. reg->valid = 1;
  1355. LOG_DEBUG("writing register %i of mode %s with value 0x%8.8x", j, armv4_5_mode_strings[i], regs[j]);
  1356. }
  1357. }
  1358. if (mask)
  1359. {
  1360. arm7_9->write_core_regs(target, mask, regs);
  1361. }
  1362. reg = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16);
  1363. reg_arch_info = reg->arch_info;
  1364. if ((reg->dirty) && (reg_arch_info->mode != ARMV4_5_MODE_ANY))
  1365. {
  1366. LOG_DEBUG("writing SPSR of mode %i with value 0x%8.8x", i, buf_get_u32(reg->value, 0, 32));
  1367. arm7_9->write_xpsr(target, buf_get_u32(reg->value, 0, 32), 1);
  1368. }
  1369. }
  1370. }
  1371. if ((armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty == 0) && (armv4_5->core_mode != current_mode))
  1372. {
  1373. /* restore processor mode (mask T bit) */
  1374. u32 tmp_cpsr;
  1375. tmp_cpsr = buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & 0xE0;
  1376. tmp_cpsr |= armv4_5_number_to_mode(i);
  1377. tmp_cpsr &= ~0x20;
  1378. LOG_DEBUG("writing lower 8 bit of cpsr with value 0x%2.2x", tmp_cpsr);
  1379. arm7_9->write_xpsr_im8(target, tmp_cpsr & 0xff, 0, 0);
  1380. }
  1381. else if (armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty == 1)
  1382. {
  1383. /* CPSR has been changed, full restore necessary (mask T bit) */
  1384. LOG_DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1385. arm7_9->write_xpsr(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32) & ~0x20, 0);
  1386. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 0;
  1387. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
  1388. }
  1389. /* restore PC */
  1390. LOG_DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1391. arm7_9->write_pc(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1392. armv4_5->core_cache->reg_list[15].dirty = 0;
  1393. if (arm7_9->post_restore_context)
  1394. arm7_9->post_restore_context(target);
  1395. return ERROR_OK;
  1396. }
  1397. int arm7_9_restart_core(struct target_s *target)
  1398. {
  1399. armv4_5_common_t *armv4_5 = target->arch_info;
  1400. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1401. arm_jtag_t *jtag_info = &arm7_9->jtag_info;
  1402. /* set RESTART instruction */
  1403. jtag_add_end_state(TAP_IDLE);
  1404. if (arm7_9->need_bypass_before_restart) {
  1405. arm7_9->need_bypass_before_restart = 0;
  1406. arm_jtag_set_instr(jtag_info, 0xf, NULL);
  1407. }
  1408. arm_jtag_set_instr(jtag_info, 0x4, NULL);
  1409. jtag_add_runtest(1, TAP_IDLE);
  1410. return jtag_execute_queue();
  1411. }
  1412. void arm7_9_enable_watchpoints(struct target_s *target)
  1413. {
  1414. watchpoint_t *watchpoint = target->watchpoints;
  1415. while (watchpoint)
  1416. {
  1417. if (watchpoint->set == 0)
  1418. arm7_9_set_watchpoint(target, watchpoint);
  1419. watchpoint = watchpoint->next;
  1420. }
  1421. }
  1422. void arm7_9_enable_breakpoints(struct target_s *target)
  1423. {
  1424. breakpoint_t *breakpoint = target->breakpoints;
  1425. /* set any pending breakpoints */
  1426. while (breakpoint)
  1427. {
  1428. arm7_9_set_breakpoint(target, breakpoint);
  1429. breakpoint = breakpoint->next;
  1430. }
  1431. }
  1432. int arm7_9_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
  1433. {
  1434. armv4_5_common_t *armv4_5 = target->arch_info;
  1435. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1436. breakpoint_t *breakpoint = target->breakpoints;
  1437. reg_t *dbg_ctrl = &arm7_9->eice_cache->reg_list[EICE_DBG_CTRL];
  1438. int err, retval = ERROR_OK;
  1439. LOG_DEBUG("-");
  1440. if (target->state != TARGET_HALTED)
  1441. {
  1442. LOG_WARNING("target not halted");
  1443. return ERROR_TARGET_NOT_HALTED;
  1444. }
  1445. if (!debug_execution)
  1446. {
  1447. target_free_all_working_areas(target);
  1448. }
  1449. /* current = 1: continue on current pc, otherwise continue at <address> */
  1450. if (!current)
  1451. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
  1452. u32 current_pc;
  1453. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1454. /* the front-end may request us not to handle breakpoints */
  1455. if (handle_breakpoints)
  1456. {
  1457. if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
  1458. {
  1459. LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
  1460. if ((retval = arm7_9_unset_breakpoint(target, breakpoint)) != ERROR_OK)
  1461. {
  1462. return retval;
  1463. }
  1464. /* calculate PC of next instruction */
  1465. u32 next_pc;
  1466. if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
  1467. {
  1468. u32 current_opcode;
  1469. target_read_u32(target, current_pc, &current_opcode);
  1470. LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
  1471. return retval;
  1472. }
  1473. LOG_DEBUG("enable single-step");
  1474. arm7_9->enable_single_step(target, next_pc);
  1475. target->debug_reason = DBG_REASON_SINGLESTEP;
  1476. if ((retval = arm7_9_restore_context(target)) != ERROR_OK)
  1477. {
  1478. return retval;
  1479. }
  1480. if (armv4_5->core_state == ARMV4_5_STATE_ARM)
  1481. arm7_9->branch_resume(target);
  1482. else if (armv4_5->core_state == ARMV4_5_STATE_THUMB)
  1483. {
  1484. arm7_9->branch_resume_thumb(target);
  1485. }
  1486. else
  1487. {
  1488. LOG_ERROR("unhandled core state");
  1489. return ERROR_FAIL;
  1490. }
  1491. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGACK, 1, 0);
  1492. embeddedice_write_reg(dbg_ctrl, buf_get_u32(dbg_ctrl->value, 0, dbg_ctrl->size));
  1493. err = arm7_9_execute_sys_speed(target);
  1494. LOG_DEBUG("disable single-step");
  1495. arm7_9->disable_single_step(target);
  1496. if (err != ERROR_OK)
  1497. {
  1498. if ((retval = arm7_9_set_breakpoint(target, breakpoint)) != ERROR_OK)
  1499. {
  1500. return retval;
  1501. }
  1502. target->state = TARGET_UNKNOWN;
  1503. return err;
  1504. }
  1505. arm7_9_debug_entry(target);
  1506. LOG_DEBUG("new PC after step: 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1507. LOG_DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
  1508. if ((retval = arm7_9_set_breakpoint(target, breakpoint)) != ERROR_OK)
  1509. {
  1510. return retval;
  1511. }
  1512. }
  1513. }
  1514. /* enable any pending breakpoints and watchpoints */
  1515. arm7_9_enable_breakpoints(target);
  1516. arm7_9_enable_watchpoints(target);
  1517. if ((retval = arm7_9_restore_context(target)) != ERROR_OK)
  1518. {
  1519. return retval;
  1520. }
  1521. if (armv4_5->core_state == ARMV4_5_STATE_ARM)
  1522. {
  1523. arm7_9->branch_resume(target);
  1524. }
  1525. else if (armv4_5->core_state == ARMV4_5_STATE_THUMB)
  1526. {
  1527. arm7_9->branch_resume_thumb(target);
  1528. }
  1529. else
  1530. {
  1531. LOG_ERROR("unhandled core state");
  1532. return ERROR_FAIL;
  1533. }
  1534. /* deassert DBGACK and INTDIS */
  1535. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGACK, 1, 0);
  1536. /* INTDIS only when we really resume, not during debug execution */
  1537. if (!debug_execution)
  1538. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_INTDIS, 1, 0);
  1539. embeddedice_write_reg(dbg_ctrl, buf_get_u32(dbg_ctrl->value, 0, dbg_ctrl->size));
  1540. if ((retval = arm7_9_restart_core(target)) != ERROR_OK)
  1541. {
  1542. return retval;
  1543. }
  1544. target->debug_reason = DBG_REASON_NOTHALTED;
  1545. if (!debug_execution)
  1546. {
  1547. /* registers are now invalid */
  1548. armv4_5_invalidate_core_regs(target);
  1549. target->state = TARGET_RUNNING;
  1550. if ((retval = target_call_event_callbacks(target, TARGET_EVENT_RESUMED)) != ERROR_OK)
  1551. {
  1552. return retval;
  1553. }
  1554. }
  1555. else
  1556. {
  1557. target->state = TARGET_DEBUG_RUNNING;
  1558. if ((retval = target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED)) != ERROR_OK)
  1559. {
  1560. return retval;
  1561. }
  1562. }
  1563. LOG_DEBUG("target resumed");
  1564. return ERROR_OK;
  1565. }
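/**
 * Program the EmbeddedICE watchpoint units for a single step. If the next PC
 * differs from the current PC an inverse breakpoint is set up (halt on any
 * instruction fetch that is not from the current address); otherwise
 * comparator 1 is set to halt on a fetch from next_pc.
 *
 * @param target Pointer to the ARM7/9 target
 * @param next_pc Predicted address of the next instruction
 */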
  1566. void arm7_9_enable_eice_step(target_t *target, u32 next_pc)
  1567. {
  1568. armv4_5_common_t *armv4_5 = target->arch_info;
  1569. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1570. u32 current_pc;
  1571. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1572. if (next_pc != current_pc)

  1573. {
  1574. /* setup an inverse breakpoint on the current PC
  1575. * - comparator 1 matches the current address
  1576. * - rangeout from comparator 1 is connected to comparator 0 rangein
  1577. * - comparator 0 matches any address, as long as rangein is low */
  1578. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK], 0xffffffff);
  1579. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK], 0xffffffff);
  1580. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], EICE_W_CTRL_ENABLE);
  1581. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK], ~(EICE_W_CTRL_RANGE|EICE_W_CTRL_nOPC) & 0xff);
  1582. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_VALUE], current_pc);
  1583. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_MASK], 0);
  1584. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_DATA_MASK], 0xffffffff);
  1585. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE], 0x0);
  1586. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_MASK], ~EICE_W_CTRL_nOPC & 0xff);
  1587. }
  1588. else
  1589. {
  1590. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK], 0xffffffff);
  1591. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK], 0xffffffff);
  1592. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], 0x0);
  1593. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK], 0xff);
  1594. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_VALUE], next_pc);
  1595. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_MASK], 0);
  1596. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_DATA_MASK], 0xffffffff);
  1597. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE], EICE_W_CTRL_ENABLE);
  1598. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_MASK], ~EICE_W_CTRL_nOPC & 0xff);
  1599. }
  1600. }
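/**
 * Restore the EmbeddedICE watchpoint registers that were overwritten by
 * arm7_9_enable_eice_step() from their cached values.
 *
 * @param target Pointer to the ARM7/9 target
 */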
  1601. void arm7_9_disable_eice_step(target_t *target)
  1602. {
  1603. armv4_5_common_t *armv4_5 = target->arch_info;
  1604. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1605. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK]);
  1606. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK]);
  1607. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE]);
  1608. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK]);
  1609. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_VALUE]);
  1610. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_MASK]);
  1611. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W1_DATA_MASK]);
  1612. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_MASK]);
  1613. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE]);
  1614. }
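/**
 * Single step an ARM7/9 target. A breakpoint at the step PC is removed for
 * the duration of the step, the address of the next instruction is computed
 * by the instruction simulator, and the core is stepped using the target
 * specific single step mechanism.
 *
 * @param target Pointer to the ARM7/9 target to be stepped
 * @param current 1 to step from the current PC, 0 to step from address
 * @param address Address to step from when current is 0
 * @param handle_breakpoints Non-zero to remove a breakpoint at the step PC
 * @return ERROR_OK or the error reported by an intermediate step
 */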
  1615. int arm7_9_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
  1616. {
  1617. armv4_5_common_t *armv4_5 = target->arch_info;
  1618. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1619. breakpoint_t *breakpoint = NULL;
  1620. int err, retval;
  1621. if (target->state != TARGET_HALTED)
  1622. {
  1623. LOG_WARNING("target not halted");
  1624. return ERROR_TARGET_NOT_HALTED;
  1625. }
  1626. /* current = 1: continue on current pc, otherwise continue at <address> */
  1627. if (!current)
  1628. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
  1629. u32 current_pc;
  1630. current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
  1631. /* the front-end may request us not to handle breakpoints */
  1632. if (handle_breakpoints)
  1633. if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
  1634. if ((retval = arm7_9_unset_breakpoint(target, breakpoint)) != ERROR_OK)
  1635. {
  1636. return retval;
  1637. }
  1638. target->debug_reason = DBG_REASON_SINGLESTEP;
  1639. /* calculate PC of next instruction */
  1640. u32 next_pc;
  1641. if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
  1642. {
  1643. u32 current_opcode;
  1644. target_read_u32(target, current_pc, &current_opcode);
  1645. LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
  1646. return retval;
  1647. }
  1648. if ((retval = arm7_9_restore_context(target)) != ERROR_OK)
  1649. {
  1650. return retval;
  1651. }
  1652. arm7_9->enable_single_step(target, next_pc);
  1653. if (armv4_5->core_state == ARMV4_5_STATE_ARM)
  1654. {
  1655. arm7_9->branch_resume(target);
  1656. }
  1657. else if (armv4_5->core_state == ARMV4_5_STATE_THUMB)
  1658. {
  1659. arm7_9->branch_resume_thumb(target);
  1660. }
  1661. else
  1662. {
  1663. LOG_ERROR("unhandled core state");
  1664. return ERROR_FAIL;
  1665. }
  1666. if ((retval = target_call_event_callbacks(target, TARGET_EVENT_RESUMED)) != ERROR_OK)
  1667. {
  1668. return retval;
  1669. }
  1670. err = arm7_9_execute_sys_speed(target);
  1671. arm7_9->disable_single_step(target);
  1672. /* registers are now invalid */
  1673. armv4_5_invalidate_core_regs(target);
  1674. if (err != ERROR_OK)
  1675. {
  1676. target->state = TARGET_UNKNOWN;
  1677. } else {
  1678. arm7_9_debug_entry(target);
  1679. if ((retval = target_call_event_callbacks(target, TARGET_EVENT_HALTED)) != ERROR_OK)
  1680. {
  1681. return retval;
  1682. }
  1683. LOG_DEBUG("target stepped");
  1684. }
  1685. if (breakpoint)
  1686. if ((retval = arm7_9_set_breakpoint(target, breakpoint)) != ERROR_OK)
  1687. {
  1688. return retval;
  1689. }
  1690. return err;
  1691. }
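/**
 * Read a core register (r0..r15, CPSR or an SPSR) of an ARM7/9 target into
 * the register cache, switching processor mode first when the requested
 * register is banked in a mode other than the current one.
 *
 * @param target Pointer to the ARM7/9 target
 * @param num Register number (0..15 for core registers, 16 for xPSR)
 * @param mode Processor mode the register belongs to
 * @return ERROR_OK, or a JTAG or argument error
 */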
  1692. int arm7_9_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
  1693. {
  1694. u32* reg_p[16];
  1695. u32 value;
  1696. int retval;
  1697. armv4_5_common_t *armv4_5 = target->arch_info;
  1698. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1699. if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
  1700. return ERROR_FAIL;
  1701. if ((num < 0) || (num > 16))
  1702. return ERROR_INVALID_ARGUMENTS;
  1703. enum armv4_5_mode reg_mode = ((armv4_5_core_reg_t*)ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).arch_info)->mode;
  1704. if ((mode != ARMV4_5_MODE_ANY)
  1705. && (mode != armv4_5->core_mode)
  1706. && (reg_mode != ARMV4_5_MODE_ANY))
  1707. {
  1708. u32 tmp_cpsr;
  1709. /* change processor mode (mask T bit) */
  1710. tmp_cpsr = buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & 0xE0;
  1711. tmp_cpsr |= mode;
  1712. tmp_cpsr &= ~0x20;
  1713. arm7_9->write_xpsr_im8(target, tmp_cpsr & 0xff, 0, 0);
  1714. }
  1715. if ((num >= 0) && (num <= 15))
  1716. {
  1717. /* read a normal core register */
  1718. reg_p[num] = &value;
  1719. arm7_9->read_core_regs(target, 1 << num, reg_p);
  1720. }
  1721. else
  1722. {
  1723. /* read a program status register
  1724. * if the register mode is MODE_ANY, we read the cpsr, otherwise a spsr
  1725. */
  1726. armv4_5_core_reg_t *arch_info = ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).arch_info;
  1727. int spsr = (arch_info->mode == ARMV4_5_MODE_ANY) ? 0 : 1;
  1728. arm7_9->read_xpsr(target, &value, spsr);
  1729. }
  1730. if ((retval = jtag_execute_queue()) != ERROR_OK)
  1731. {
  1732. return retval;
  1733. }
  1734. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).valid = 1;
  1735. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).dirty = 0;
  1736. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).value, 0, 32, value);
  1737. if ((mode != ARMV4_5_MODE_ANY)
  1738. && (mode != armv4_5->core_mode)
  1739. && (reg_mode != ARMV4_5_MODE_ANY)) {
  1740. /* restore processor mode (mask T bit) */
  1741. arm7_9->write_xpsr_im8(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & ~0x20, 0, 0);
  1742. }
  1743. return ERROR_OK;
  1744. }
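/**
 * Write a core register (r0..r15, CPSR or an SPSR) of an ARM7/9 target,
 * switching processor mode first when the register is banked in a mode other
 * than the current one.
 *
 * @param target Pointer to the ARM7/9 target
 * @param num Register number (0..15 for core registers, 16 for xPSR)
 * @param mode Processor mode the register belongs to
 * @param value Value to be written
 * @return Result of executing the JTAG queue
 */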
  1745. int arm7_9_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
  1746. {
  1747. u32 reg[16];
  1748. armv4_5_common_t *armv4_5 = target->arch_info;
  1749. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1750. if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
  1751. return ERROR_FAIL;
  1752. if ((num < 0) || (num > 16))
  1753. return ERROR_INVALID_ARGUMENTS;
  1754. enum armv4_5_mode reg_mode = ((armv4_5_core_reg_t*)ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).arch_info)->mode;
  1755. if ((mode != ARMV4_5_MODE_ANY)
  1756. && (mode != armv4_5->core_mode)
  1757. && (reg_mode != ARMV4_5_MODE_ANY)) {
  1758. u32 tmp_cpsr;
  1759. /* change processor mode (mask T bit) */
  1760. tmp_cpsr = buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & 0xE0;
  1761. tmp_cpsr |= mode;
  1762. tmp_cpsr &= ~0x20;
  1763. arm7_9->write_xpsr_im8(target, tmp_cpsr & 0xff, 0, 0);
  1764. }
  1765. if ((num >= 0) && (num <= 15))
  1766. {
  1767. /* write a normal core register */
  1768. reg[num] = value;
  1769. arm7_9->write_core_regs(target, 1 << num, reg);
  1770. }
  1771. else
  1772. {
  1773. /* write a program status register
  1774. * if the register mode is MODE_ANY, we write the cpsr, otherwise a spsr
  1775. */
  1776. armv4_5_core_reg_t *arch_info = ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).arch_info;
  1777. int spsr = (arch_info->mode == ARMV4_5_MODE_ANY) ? 0 : 1;
  1778. /* if we're writing the CPSR, mask the T bit */
  1779. if (!spsr)
  1780. value &= ~0x20;
  1781. arm7_9->write_xpsr(target, value, spsr);
  1782. }
  1783. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).valid = 1;
  1784. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).dirty = 0;
  1785. if ((mode != ARMV4_5_MODE_ANY)
  1786. && (mode != armv4_5->core_mode)
  1787. && (reg_mode != ARMV4_5_MODE_ANY)) {
  1788. /* restore processor mode (mask T bit) */
  1789. arm7_9->write_xpsr_im8(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & ~0x20, 0, 0);
  1790. }
  1791. return jtag_execute_queue();
  1792. }
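/**
 * Read memory from an ARM7/9 target. The address is loaded into r0 and the
 * data is transferred via load multiple/halfword/byte instructions executed
 * at system speed, up to 14 registers per run.
 *
 * @param target Pointer to the ARM7/9 target
 * @param address Start address, aligned to the access size
 * @param size Access size in bytes (1, 2 or 4)
 * @param count Number of accesses of the given size
 * @param buffer Buffer receiving the data, in target byte order
 * @return ERROR_OK, or an error such as ERROR_TARGET_DATA_ABORT
 */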
  1793. int arm7_9_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
  1794. {
  1795. armv4_5_common_t *armv4_5 = target->arch_info;
  1796. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1797. u32 reg[16];
  1798. u32 num_accesses = 0;
  1799. int thisrun_accesses;
  1800. int i;
  1801. u32 cpsr;
  1802. int retval;
  1803. int last_reg = 0;
  1804. LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
  1805. if (target->state != TARGET_HALTED)
  1806. {
  1807. LOG_WARNING("target not halted");
  1808. return ERROR_TARGET_NOT_HALTED;
  1809. }
  1810. /* sanitize arguments */
  1811. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1812. return ERROR_INVALID_ARGUMENTS;
  1813. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1814. return ERROR_TARGET_UNALIGNED_ACCESS;
  1815. /* load the base register with the address of the first word */
  1816. reg[0] = address;
  1817. arm7_9->write_core_regs(target, 0x1, reg);
  1818. int j=0;
  1819. switch (size)
  1820. {
  1821. case 4:
  1822. while (num_accesses < count)
  1823. {
  1824. u32 reg_list;
  1825. thisrun_accesses = ((count - num_accesses) >= 14) ? 14 : (count - num_accesses);
  1826. reg_list = (0xffff >> (15 - thisrun_accesses)) & 0xfffe;
  1827. if (last_reg <= thisrun_accesses)
  1828. last_reg = thisrun_accesses;
  1829. arm7_9->load_word_regs(target, reg_list);
  1830. /* fast memory reads are only safe when the target is running
  1831. * from a sufficiently high clock (32 kHz is usually too slow)
  1832. */
  1833. if (arm7_9->fast_memory_access)
  1834. retval = arm7_9_execute_fast_sys_speed(target);
  1835. else
  1836. retval = arm7_9_execute_sys_speed(target);
  1837. if (retval != ERROR_OK)
  1838. return retval;
  1839. arm7_9->read_core_regs_target_buffer(target, reg_list, buffer, 4);
  1840. /* advance buffer, count number of accesses */
  1841. buffer += thisrun_accesses * 4;
  1842. num_accesses += thisrun_accesses;
  1843. if ((j++%1024)==0)
  1844. {
  1845. keep_alive();
  1846. }
  1847. }
  1848. break;
  1849. case 2:
  1850. while (num_accesses < count)
  1851. {
  1852. u32 reg_list;
  1853. thisrun_accesses = ((count - num_accesses) >= 14) ? 14 : (count - num_accesses);
  1854. reg_list = (0xffff >> (15 - thisrun_accesses)) & 0xfffe;
  1855. for (i = 1; i <= thisrun_accesses; i++)
  1856. {
  1857. if (i > last_reg)
  1858. last_reg = i;
  1859. arm7_9->load_hword_reg(target, i);
  1860. /* fast memory reads are only safe when the target is running
  1861. * from a sufficiently high clock (32 kHz is usually too slow)
  1862. */
  1863. if (arm7_9->fast_memory_access)
  1864. retval = arm7_9_execute_fast_sys_speed(target);
  1865. else
  1866. retval = arm7_9_execute_sys_speed(target);
  1867. if (retval != ERROR_OK)
  1868. {
  1869. return retval;
  1870. }
  1871. }
  1872. arm7_9->read_core_regs_target_buffer(target, reg_list, buffer, 2);
  1873. /* advance buffer, count number of accesses */
  1874. buffer += thisrun_accesses * 2;
  1875. num_accesses += thisrun_accesses;
  1876. if ((j++%1024)==0)
  1877. {
  1878. keep_alive();
  1879. }
  1880. }
  1881. break;
  1882. case 1:
  1883. while (num_accesses < count)
  1884. {
  1885. u32 reg_list;
  1886. thisrun_accesses = ((count - num_accesses) >= 14) ? 14 : (count - num_accesses);
  1887. reg_list = (0xffff >> (15 - thisrun_accesses)) & 0xfffe;
  1888. for (i = 1; i <= thisrun_accesses; i++)
  1889. {
  1890. if (i > last_reg)
  1891. last_reg = i;
  1892. arm7_9->load_byte_reg(target, i);
  1893. /* fast memory reads are only safe when the target is running
  1894. * from a sufficiently high clock (32 kHz is usually too slow)
  1895. */
  1896. if (arm7_9->fast_memory_access)
  1897. retval = arm7_9_execute_fast_sys_speed(target);
  1898. else
  1899. retval = arm7_9_execute_sys_speed(target);
  1900. if (retval != ERROR_OK)
  1901. {
  1902. return retval;
  1903. }
  1904. }
  1905. arm7_9->read_core_regs_target_buffer(target, reg_list, buffer, 1);
  1906. /* advance buffer, count number of accesses */
  1907. buffer += thisrun_accesses * 1;
  1908. num_accesses += thisrun_accesses;
  1909. if ((j++%1024)==0)
  1910. {
  1911. keep_alive();
  1912. }
  1913. }
  1914. break;
  1915. default:
  1916. LOG_ERROR("BUG: we shouldn't get here");
  1917. exit(-1);
  1918. break;
  1919. }
  1920. if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
  1921. return ERROR_FAIL;
  1922. for (i=0; i<=last_reg; i++)
  1923. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid;
  1924. arm7_9->read_xpsr(target, &cpsr, 0);
  1925. if ((retval = jtag_execute_queue()) != ERROR_OK)
  1926. {
  1927. LOG_ERROR("JTAG error while reading cpsr");
  1928. return ERROR_TARGET_DATA_ABORT;
  1929. }
  1930. if (((cpsr & 0x1f) == ARMV4_5_MODE_ABT) && (armv4_5->core_mode != ARMV4_5_MODE_ABT))
  1931. {
  1932. LOG_WARNING("memory read caused data abort (address: 0x%8.8x, size: 0x%x, count: 0x%x)", address, size, count);
  1933. arm7_9->write_xpsr_im8(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & ~0x20, 0, 0);
  1934. return ERROR_TARGET_DATA_ABORT;
  1935. }
  1936. return ERROR_OK;
  1937. }
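/**
 * Write memory of an ARM7/9 target. The address is loaded into r0, DBGACK is
 * cleared so memory accesses behave as in normal operation, and the data is
 * transferred via store multiple/halfword/byte instructions executed at
 * system speed, up to 14 registers per run.
 *
 * @param target Pointer to the ARM7/9 target
 * @param address Start address, aligned to the access size
 * @param size Access size in bytes (1, 2 or 4)
 * @param count Number of accesses of the given size
 * @param buffer Buffer holding the data, in target byte order
 * @return ERROR_OK, or an error such as ERROR_TARGET_DATA_ABORT
 */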
  1938. int arm7_9_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
  1939. {
  1940. armv4_5_common_t *armv4_5 = target->arch_info;
  1941. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1942. reg_t *dbg_ctrl = &arm7_9->eice_cache->reg_list[EICE_DBG_CTRL];
  1943. u32 reg[16];
  1944. u32 num_accesses = 0;
  1945. int thisrun_accesses;
  1946. int i;
  1947. u32 cpsr;
  1948. int retval;
  1949. int last_reg = 0;
  1950. #ifdef _DEBUG_ARM7_9_
  1951. LOG_DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
  1952. #endif
  1953. if (target->state != TARGET_HALTED)
  1954. {
  1955. LOG_WARNING("target not halted");
  1956. return ERROR_TARGET_NOT_HALTED;
  1957. }
  1958. /* sanitize arguments */
  1959. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1960. return ERROR_INVALID_ARGUMENTS;
  1961. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1962. return ERROR_TARGET_UNALIGNED_ACCESS;
  1963. /* load the base register with the address of the first word */
  1964. reg[0] = address;
  1965. arm7_9->write_core_regs(target, 0x1, reg);
  1966. /* Clear DBGACK, to make sure memory fetches work as expected */
  1967. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGACK, 1, 0);
  1968. embeddedice_store_reg(dbg_ctrl);
  1969. switch (size)
  1970. {
  1971. case 4:
  1972. while (num_accesses < count)
  1973. {
  1974. u32 reg_list;
  1975. thisrun_accesses = ((count - num_accesses) >= 14) ? 14 : (count - num_accesses);
  1976. reg_list = (0xffff >> (15 - thisrun_accesses)) & 0xfffe;
  1977. for (i = 1; i <= thisrun_accesses; i++)
  1978. {
  1979. if (i > last_reg)
  1980. last_reg = i;
  1981. reg[i] = target_buffer_get_u32(target, buffer);
  1982. buffer += 4;
  1983. }
  1984. arm7_9->write_core_regs(target, reg_list, reg);
  1985. arm7_9->store_word_regs(target, reg_list);
  1986. /* fast memory writes are only safe when the target is running
  1987. * from a sufficiently high clock (32 kHz is usually too slow)
  1988. */
  1989. if (arm7_9->fast_memory_access)
  1990. retval = arm7_9_execute_fast_sys_speed(target);
  1991. else
  1992. retval = arm7_9_execute_sys_speed(target);
  1993. if (retval != ERROR_OK)
  1994. {
  1995. return retval;
  1996. }
  1997. num_accesses += thisrun_accesses;
  1998. }
  1999. break;
  2000. case 2:
  2001. while (num_accesses < count)
  2002. {
  2003. u32 reg_list;
  2004. thisrun_accesses = ((count - num_accesses) >= 14) ? 14 : (count - num_accesses);
  2005. reg_list = (0xffff >> (15 - thisrun_accesses)) & 0xfffe;
  2006. for (i = 1; i <= thisrun_accesses; i++)
  2007. {
  2008. if (i > last_reg)
  2009. last_reg = i;
  2010. reg[i] = target_buffer_get_u16(target, buffer) & 0xffff;
  2011. buffer += 2;
  2012. }
  2013. arm7_9->write_core_regs(target, reg_list, reg);
  2014. for (i = 1; i <= thisrun_accesses; i++)
  2015. {
  2016. arm7_9->store_hword_reg(target, i);
  2017. /* fast memory writes are only safe when the target is running
  2018. * from a sufficiently high clock (32 kHz is usually too slow)
  2019. */
  2020. if (arm7_9->fast_memory_access)
  2021. retval = arm7_9_execute_fast_sys_speed(target);
  2022. else
  2023. retval = arm7_9_execute_sys_speed(target);
  2024. if (retval != ERROR_OK)
  2025. {
  2026. return retval;
  2027. }
  2028. }
  2029. num_accesses += thisrun_accesses;
  2030. }
  2031. break;
  2032. case 1:
  2033. while (num_accesses < count)
  2034. {
  2035. u32 reg_list;
  2036. thisrun_accesses = ((count - num_accesses) >= 14) ? 14 : (count - num_accesses);
  2037. reg_list = (0xffff >> (15 - thisrun_accesses)) & 0xfffe;
  2038. for (i = 1; i <= thisrun_accesses; i++)
  2039. {
  2040. if (i > last_reg)
  2041. last_reg = i;
  2042. reg[i] = *buffer++ & 0xff;
  2043. }
  2044. arm7_9->write_core_regs(target, reg_list, reg);
  2045. for (i = 1; i <= thisrun_accesses; i++)
  2046. {
  2047. arm7_9->store_byte_reg(target, i);
  2048. /* fast memory writes are only safe when the target is running
  2049. * from a sufficiently high clock (32 kHz is usually too slow)
  2050. */
  2051. if (arm7_9->fast_memory_access)
  2052. retval = arm7_9_execute_fast_sys_speed(target);
  2053. else
  2054. retval = arm7_9_execute_sys_speed(target);
  2055. if (retval != ERROR_OK)
  2056. {
  2057. return retval;
  2058. }
  2059. }
  2060. num_accesses += thisrun_accesses;
  2061. }
  2062. break;
  2063. default:
  2064. LOG_ERROR("BUG: we shouldn't get here");
  2065. exit(-1);
  2066. break;
  2067. }
  2068. /* Re-Set DBGACK */
  2069. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGACK, 1, 1);
  2070. embeddedice_store_reg(dbg_ctrl);
  2071. if (armv4_5_mode_to_number(armv4_5->core_mode)==-1)
  2072. return ERROR_FAIL;
  2073. for (i=0; i<=last_reg; i++)
  2074. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid;
  2075. arm7_9->read_xpsr(target, &cpsr, 0);
  2076. if ((retval = jtag_execute_queue()) != ERROR_OK)
  2077. {
  2078. LOG_ERROR("JTAG error while reading cpsr");
  2079. return ERROR_TARGET_DATA_ABORT;
  2080. }
  2081. if (((cpsr & 0x1f) == ARMV4_5_MODE_ABT) && (armv4_5->core_mode != ARMV4_5_MODE_ABT))
  2082. {
  2083. LOG_WARNING("memory write caused data abort (address: 0x%8.8x, size: 0x%x, count: 0x%x)", address, size, count);
  2084. arm7_9->write_xpsr_im8(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & ~0x20, 0, 0);
  2085. return ERROR_TARGET_DATA_ABORT;
  2086. }
  2087. return ERROR_OK;
  2088. }
  2089. static int dcc_count;
  2090. static u8 *dcc_buffer;
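/**
 * Completion handler for DCC downloads: while the target executes the DCC
 * loader from the working area, the host feeds the buffered words through the
 * EmbeddedICE comms data register, then halts the target again.
 *
 * @param target Pointer to the ARM7/9 target
 * @param exit_point Expected exit address of the algorithm (unused here)
 * @param timeout_ms Timeout for the algorithm (unused here)
 * @param arch_info Architecture specific information (unused here)
 * @return ERROR_OK once the target is halted again, or an intermediate error
 */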
  2091. static int arm7_9_dcc_completion(struct target_s *target, u32 exit_point, int timeout_ms, void *arch_info)
  2092. {
  2093. int retval = ERROR_OK;
  2094. armv4_5_common_t *armv4_5 = target->arch_info;
  2095. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  2096. if ((retval=target_wait_state(target, TARGET_DEBUG_RUNNING, 500))!=ERROR_OK)
  2097. return retval;
  2098. int little=target->endianness==TARGET_LITTLE_ENDIAN;
  2099. int count=dcc_count;
  2100. u8 *buffer=dcc_buffer;
  2101. if (count>2)
  2102. {
  2103. /* Handle the first and last word with standard embeddedice_write_reg() calls
  2104. * and stream the words in between with the optimized embeddedice_write_dcc() path. */
  2105. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_COMMS_DATA], fast_target_buffer_get_u32(buffer, little));
  2106. buffer+=4;
  2107. embeddedice_reg_t *ice_reg = arm7_9->eice_cache->reg_list[EICE_COMMS_DATA].arch_info;
  2108. u8 reg_addr = ice_reg->addr & 0x1f;
  2109. jtag_tap_t *tap;
  2110. tap = ice_reg->jtag_info->tap;
  2111. embeddedice_write_dcc(tap, reg_addr, buffer, little, count-2);
  2112. buffer += (count-2)*4;
  2113. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_COMMS_DATA], fast_target_buffer_get_u32(buffer, little));
  2114. } else
  2115. {
  2116. int i;
  2117. for (i = 0; i < count; i++)
  2118. {
  2119. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_COMMS_DATA], fast_target_buffer_get_u32(buffer, little));
  2120. buffer += 4;
  2121. }
  2122. }
  2123. if ((retval = target_halt(target)) != ERROR_OK)
  2124. {
  2125. return retval;
  2126. }
  2127. return target_wait_state(target, TARGET_HALTED, 500);
  2128. }
  2129. static const u32 dcc_code[] =
  2130. {
  2131. /* MRC TST BNE MRC STR B */
  2132. 0xee101e10, 0xe3110001, 0x0afffffc, 0xee111e10, 0xe4801004, 0xeafffff9
  2133. };
  2134. int armv4_5_run_algorithm_inner(struct target_s *target, int num_mem_params, mem_param_t *mem_params, int num_reg_params, reg_param_t *reg_params, u32 entry_point, u32 exit_point, int timeout_ms, void *arch_info, int (*run_it)(struct target_s *target, u32 exit_point, int timeout_ms, void *arch_info));
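/**
 * Write a block of 32-bit words using DCC downloads when enabled: a small
 * loader (dcc_code) is placed in the working area and run as an algorithm
 * while arm7_9_dcc_completion() streams the data. Falls back to regular
 * memory writes when DCC downloads are disabled or no working area is
 * available.
 *
 * @param target Pointer to the ARM7/9 target
 * @param address Word aligned start address
 * @param count Number of 32-bit words to write
 * @param buffer Buffer holding the data
 * @return ERROR_OK, or an error from the fallback path or the algorithm run
 */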
  2135. int arm7_9_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
  2136. {
  2137. int retval;
  2138. armv4_5_common_t *armv4_5 = target->arch_info;
  2139. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  2140. int i;
  2141. if (!arm7_9->dcc_downloads)
  2142. return target->type->write_memory(target, address, 4, count, buffer);
  2143. /* regrab previously allocated working_area, or allocate a new one */
  2144. if (!arm7_9->dcc_working_area)
  2145. {
  2146. u8 dcc_code_buf[6 * 4];
  2147. /* make sure we have a working area */
  2148. if (target_alloc_working_area(target, 24, &arm7_9->dcc_working_area) != ERROR_OK)
  2149. {
  2150. LOG_INFO("no working area available, falling back to memory writes");
  2151. return target->type->write_memory(target, address, 4, count, buffer);
  2152. }
  2153. /* copy target instructions to target endianness */
  2154. for (i = 0; i < 6; i++)
  2155. {
  2156. target_buffer_set_u32(target, dcc_code_buf + i*4, dcc_code[i]);
  2157. }
  2158. /* write DCC code to working area */
  2159. if ((retval = target->type->write_memory(target, arm7_9->dcc_working_area->address, 4, 6, dcc_code_buf)) != ERROR_OK)
  2160. {
  2161. return retval;
  2162. }
  2163. }
  2164. armv4_5_algorithm_t armv4_5_info;
  2165. reg_param_t reg_params[1];
  2166. armv4_5_info.common_magic = ARMV4_5_COMMON_MAGIC;
  2167. armv4_5_info.core_mode = ARMV4_5_MODE_SVC;
  2168. armv4_5_info.core_state = ARMV4_5_STATE_ARM;
  2169. init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
  2170. buf_set_u32(reg_params[0].value, 0, 32, address);
  2171. dcc_count=count;
  2172. dcc_buffer=buffer;
  2173. retval = armv4_5_run_algorithm_inner(target, 0, NULL, 1, reg_params,
  2174. arm7_9->dcc_working_area->address, arm7_9->dcc_working_area->address+6*4, 20*1000, &armv4_5_info, arm7_9_dcc_completion);
  2175. if (retval==ERROR_OK)
  2176. {
  2177. u32 endaddress=buf_get_u32(reg_params[0].value, 0, 32);
  2178. if (endaddress!=(address+count*4))
  2179. {
  2180. LOG_ERROR("DCC write failed, expected end address 0x%08x got 0x%0x", (address+count*4), endaddress);
  2181. retval=ERROR_FAIL;
  2182. }
  2183. }
  2184. destroy_reg_param(&reg_params[0]);
  2185. return retval;
  2186. }
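/**
 * Compute a CRC checksum over target memory by downloading a small CRC
 * routine (polynomial 0x04C11DB7) into the working area and running it on
 * the target.
 *
 * @param target Pointer to the ARM7/9 target
 * @param address Start address of the region to checksum
 * @param count Number of bytes to checksum
 * @param checksum Receives the resulting CRC value
 * @return ERROR_OK, or an error from allocating or running the algorithm
 */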
  2187. int arm7_9_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum)
  2188. {
  2189. working_area_t *crc_algorithm;
  2190. armv4_5_algorithm_t armv4_5_info;
  2191. reg_param_t reg_params[2];
  2192. int retval;
  2193. u32 arm7_9_crc_code[] = {
  2194. 0xE1A02000, /* mov r2, r0 */
  2195. 0xE3E00000, /* mov r0, #0xffffffff */
  2196. 0xE1A03001, /* mov r3, r1 */
  2197. 0xE3A04000, /* mov r4, #0 */
  2198. 0xEA00000B, /* b ncomp */
  2199. /* nbyte: */
  2200. 0xE7D21004, /* ldrb r1, [r2, r4] */
  2201. 0xE59F7030, /* ldr r7, CRC32XOR */
  2202. 0xE0200C01, /* eor r0, r0, r1, asl 24 */
  2203. 0xE3A05000, /* mov r5, #0 */
  2204. /* loop: */
  2205. 0xE3500000, /* cmp r0, #0 */
  2206. 0xE1A06080, /* mov r6, r0, asl #1 */
  2207. 0xE2855001, /* add r5, r5, #1 */
  2208. 0xE1A00006, /* mov r0, r6 */
  2209. 0xB0260007, /* eorlt r0, r6, r7 */
  2210. 0xE3550008, /* cmp r5, #8 */
  2211. 0x1AFFFFF8, /* bne loop */
  2212. 0xE2844001, /* add r4, r4, #1 */
  2213. /* ncomp: */
  2214. 0xE1540003, /* cmp r4, r3 */
  2215. 0x1AFFFFF1, /* bne nbyte */
  2216. /* end: */
  2217. 0xEAFFFFFE, /* b end */
  2218. 0x04C11DB7 /* CRC32XOR: .word 0x04C11DB7 */
  2219. };
  2220. u32 i;
  2221. if (target_alloc_working_area(target, sizeof(arm7_9_crc_code), &crc_algorithm) != ERROR_OK)
  2222. {
  2223. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  2224. }
  2225. /* convert the CRC computation code into a buffer in target endianness */
  2226. for (i = 0; i < (sizeof(arm7_9_crc_code)/sizeof(u32)); i++)
  2227. {
  2228. if ((retval=target_write_u32(target, crc_algorithm->address + i*sizeof(u32), arm7_9_crc_code[i]))!=ERROR_OK)
  2229. {
  2230. return retval;
  2231. }
  2232. }
  2233. armv4_5_info.common_magic = ARMV4_5_COMMON_MAGIC;
  2234. armv4_5_info.core_mode = ARMV4_5_MODE_SVC;
  2235. armv4_5_info.core_state = ARMV4_5_STATE_ARM;
  2236. init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
  2237. init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
  2238. buf_set_u32(reg_params[0].value, 0, 32, address);
  2239. buf_set_u32(reg_params[1].value, 0, 32, count);
  2240. if ((retval = target->type->run_algorithm(target, 0, NULL, 2, reg_params,
  2241. crc_algorithm->address, crc_algorithm->address + (sizeof(arm7_9_crc_code) - 8), 20000, &armv4_5_info)) != ERROR_OK)
  2242. {
  2243. LOG_ERROR("error executing arm7_9 crc algorithm");
  2244. destroy_reg_param(&reg_params[0]);
  2245. destroy_reg_param(&reg_params[1]);
  2246. target_free_working_area(target, crc_algorithm);
  2247. return retval;
  2248. }
  2249. *checksum = buf_get_u32(reg_params[0].value, 0, 32);
  2250. destroy_reg_param(&reg_params[0]);
  2251. destroy_reg_param(&reg_params[1]);
  2252. target_free_working_area(target, crc_algorithm);
  2253. return ERROR_OK;
  2254. }
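/**
 * Check whether a memory region is erased by running a small routine on the
 * target that ANDs all bytes of the region together.
 *
 * @param target Pointer to the ARM7/9 target
 * @param address Start address of the region to check
 * @param count Number of bytes to check
 * @param blank Receives 0xff if every byte in the region reads back as 0xff
 * @return ERROR_OK, or an error from allocating or running the algorithm
 */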
  2255. int arm7_9_blank_check_memory(struct target_s *target, u32 address, u32 count, u32* blank)
  2256. {
  2257. working_area_t *erase_check_algorithm;
  2258. reg_param_t reg_params[3];
  2259. armv4_5_algorithm_t armv4_5_info;
  2260. int retval;
  2261. u32 i;
  2262. u32 erase_check_code[] =
  2263. {
  2264. /* loop: */
  2265. 0xe4d03001, /* ldrb r3, [r0], #1 */
  2266. 0xe0022003, /* and r2, r2, r3 */
  2267. 0xe2511001, /* subs r1, r1, #1 */
  2268. 0x1afffffb, /* bne loop */
  2269. /* end: */
  2270. 0xeafffffe /* b end */
  2271. };
  2272. /* make sure we have a working area */
  2273. if (target_alloc_working_area(target, sizeof(erase_check_code), &erase_check_algorithm) != ERROR_OK)
  2274. {
  2275. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  2276. }
  2277. /* convert the erase check code into a buffer in target endianness */
  2278. for (i = 0; i < (sizeof(erase_check_code)/sizeof(u32)); i++)
  2279. if ((retval = target_write_u32(target, erase_check_algorithm->address + i*sizeof(u32), erase_check_code[i])) != ERROR_OK)
  2280. {
  2281. return retval;
  2282. }
  2283. armv4_5_info.common_magic = ARMV4_5_COMMON_MAGIC;
  2284. armv4_5_info.core_mode = ARMV4_5_MODE_SVC;
  2285. armv4_5_info.core_state = ARMV4_5_STATE_ARM;
  2286. init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
  2287. buf_set_u32(reg_params[0].value, 0, 32, address);
  2288. init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
  2289. buf_set_u32(reg_params[1].value, 0, 32, count);
  2290. init_reg_param(&reg_params[2], "r2", 32, PARAM_IN_OUT);
  2291. buf_set_u32(reg_params[2].value, 0, 32, 0xff);
  2292. if ((retval = target->type->run_algorithm(target, 0, NULL, 3, reg_params,
  2293. erase_check_algorithm->address, erase_check_algorithm->address + (sizeof(erase_check_code) - 4), 10000, &armv4_5_info)) != ERROR_OK)
  2294. {
  2295. destroy_reg_param(&reg_params[0]);
  2296. destroy_reg_param(&reg_params[1]);
  2297. destroy_reg_param(&reg_params[2]);
  2298. target_free_working_area(target, erase_check_algorithm);
  2299. return retval;
  2300. }
  2301. *blank = buf_get_u32(reg_params[2].value, 0, 32);
  2302. destroy_reg_param(&reg_params[0]);
  2303. destroy_reg_param(&reg_params[1]);
  2304. destroy_reg_param(&reg_params[2]);
  2305. target_free_working_area(target, erase_check_algorithm);
  2306. return ERROR_OK;
  2307. }
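/**
 * Register the "arm7_9" command group and its subcommands (write_xpsr,
 * write_xpsr_im8, write_core_reg, dbgrq, fast_memory_access, dcc_downloads),
 * then register the generic ARMv4/5 and ETM commands. For example, DCC
 * downloads are switched on with "arm7_9 dcc_downloads enable".
 *
 * @param cmd_ctx Command context the commands are registered in
 * @return ERROR_OK
 */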
  2308. int arm7_9_register_commands(struct command_context_s *cmd_ctx)
  2309. {
  2310. command_t *arm7_9_cmd;
  2311. arm7_9_cmd = register_command(cmd_ctx, NULL, "arm7_9", NULL, COMMAND_ANY, "arm7/9 specific commands");
  2312. register_command(cmd_ctx, arm7_9_cmd, "write_xpsr", handle_arm7_9_write_xpsr_command, COMMAND_EXEC, "write program status register <value> <not cpsr|spsr>");
  2313. register_command(cmd_ctx, arm7_9_cmd, "write_xpsr_im8", handle_arm7_9_write_xpsr_im8_command, COMMAND_EXEC, "write program status register <8bit immediate> <rotate> <not cpsr|spsr>");
  2314. register_command(cmd_ctx, arm7_9_cmd, "write_core_reg", handle_arm7_9_write_core_reg_command, COMMAND_EXEC, "write core register <num> <mode> <value>");
  2315. register_command(cmd_ctx, arm7_9_cmd, "dbgrq", handle_arm7_9_dbgrq_command,
  2316. COMMAND_ANY, "use EmbeddedICE dbgrq instead of breakpoint for target halt requests <enable|disable>");
  2317. register_command(cmd_ctx, arm7_9_cmd, "fast_memory_access", handle_arm7_9_fast_memory_access_command,
  2318. COMMAND_ANY, "use fast memory accesses instead of slower but potentially safer accesses <enable|disable>");
  2319. register_command(cmd_ctx, arm7_9_cmd, "dcc_downloads", handle_arm7_9_dcc_downloads_command,
  2320. COMMAND_ANY, "use DCC downloads for larger memory writes <enable|disable>");
  2321. armv4_5_register_commands(cmd_ctx);
  2322. etm_register_commands(cmd_ctx);
  2323. return ERROR_OK;
  2324. }
  2325. int handle_arm7_9_write_xpsr_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2326. {
  2327. u32 value;
  2328. int spsr;
  2329. int retval;
  2330. target_t *target = get_current_target(cmd_ctx);
  2331. armv4_5_common_t *armv4_5;
  2332. arm7_9_common_t *arm7_9;
  2333. if (arm7_9_get_arch_pointers(target, &armv4_5, &arm7_9) != ERROR_OK)
  2334. {
  2335. command_print(cmd_ctx, "current target isn't an ARM7/ARM9 target");
  2336. return ERROR_OK;
  2337. }
  2338. if (target->state != TARGET_HALTED)
  2339. {
  2340. command_print(cmd_ctx, "can't write registers while running");
  2341. return ERROR_OK;
  2342. }
  2343. if (argc < 2)
  2344. {
  2345. command_print(cmd_ctx, "usage: write_xpsr <value> <not cpsr|spsr>");
  2346. return ERROR_OK;
  2347. }
  2348. value = strtoul(args[0], NULL, 0);
  2349. spsr = strtol(args[1], NULL, 0);
  2350. /* if we're writing the CPSR, mask the T bit */
  2351. if (!spsr)
  2352. value &= ~0x20;
  2353. arm7_9->write_xpsr(target, value, spsr);
  2354. if ((retval = jtag_execute_queue()) != ERROR_OK)
  2355. {
  2356. LOG_ERROR("JTAG error while writing to xpsr");
  2357. return retval;
  2358. }
  2359. return ERROR_OK;
  2360. }
  2361. int handle_arm7_9_write_xpsr_im8_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2362. {
  2363. u32 value;
  2364. int rotate;
  2365. int spsr;
  2366. int retval;
  2367. target_t *target = get_current_target(cmd_ctx);
  2368. armv4_5_common_t *armv4_5;
  2369. arm7_9_common_t *arm7_9;
  2370. if (arm7_9_get_arch_pointers(target, &armv4_5, &arm7_9) != ERROR_OK)
  2371. {
  2372. command_print(cmd_ctx, "current target isn't an ARM7/ARM9 target");
  2373. return ERROR_OK;
  2374. }
  2375. if (target->state != TARGET_HALTED)
  2376. {
  2377. command_print(cmd_ctx, "can't write registers while running");
  2378. return ERROR_OK;
  2379. }
  2380. if (argc < 3)
  2381. {
  2382. command_print(cmd_ctx, "usage: write_xpsr_im8 <im8> <rotate> <not cpsr|spsr>");
  2383. return ERROR_OK;
  2384. }
  2385. value = strtoul(args[0], NULL, 0);
  2386. rotate = strtol(args[1], NULL, 0);
  2387. spsr = strtol(args[2], NULL, 0);
  2388. arm7_9->write_xpsr_im8(target, value, rotate, spsr);
  2389. if ((retval = jtag_execute_queue()) != ERROR_OK)
  2390. {
  2391. LOG_ERROR("JTAG error while writing 8-bit immediate to xpsr");
  2392. return retval;
  2393. }
  2394. return ERROR_OK;
  2395. }
  2396. int handle_arm7_9_write_core_reg_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2397. {
  2398. u32 value;
  2399. u32 mode;
  2400. int num;
  2401. target_t *target = get_current_target(cmd_ctx);
  2402. armv4_5_common_t *armv4_5;
  2403. arm7_9_common_t *arm7_9;
  2404. if (arm7_9_get_arch_pointers(target, &armv4_5, &arm7_9) != ERROR_OK)
  2405. {
  2406. command_print(cmd_ctx, "current target isn't an ARM7/ARM9 target");
  2407. return ERROR_OK;
  2408. }
  2409. if (target->state != TARGET_HALTED)
  2410. {
  2411. command_print(cmd_ctx, "can't write registers while running");
  2412. return ERROR_OK;
  2413. }
  2414. if (argc < 3)
  2415. {
  2416. command_print(cmd_ctx, "usage: write_core_reg <num> <mode> <value>");
  2417. return ERROR_OK;
  2418. }
  2419. num = strtol(args[0], NULL, 0);
  2420. mode = strtoul(args[1], NULL, 0);
  2421. value = strtoul(args[2], NULL, 0);
  2422. return arm7_9_write_core_reg(target, num, mode, value);
  2423. }
  2424. int handle_arm7_9_dbgrq_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2425. {
  2426. target_t *target = get_current_target(cmd_ctx);
  2427. armv4_5_common_t *armv4_5;
  2428. arm7_9_common_t *arm7_9;
  2429. if (arm7_9_get_arch_pointers(target, &armv4_5, &arm7_9) != ERROR_OK)
  2430. {
  2431. command_print(cmd_ctx, "current target isn't an ARM7/ARM9 target");
  2432. return ERROR_OK;
  2433. }
  2434. if (argc > 0)
  2435. {
  2436. if (strcmp("enable", args[0]) == 0)
  2437. {
  2438. arm7_9->use_dbgrq = 1;
  2439. }
  2440. else if (strcmp("disable", args[0]) == 0)
  2441. {
  2442. arm7_9->use_dbgrq = 0;
  2443. }
  2444. else
  2445. {
  2446. command_print(cmd_ctx, "usage: arm7_9 dbgrq <enable|disable>");
  2447. }
  2448. }
  2449. command_print(cmd_ctx, "use of EmbeddedICE dbgrq instead of breakpoint for target halt %s", (arm7_9->use_dbgrq) ? "enabled" : "disabled");
  2450. return ERROR_OK;
  2451. }
  2452. int handle_arm7_9_fast_memory_access_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2453. {
  2454. target_t *target = get_current_target(cmd_ctx);
  2455. armv4_5_common_t *armv4_5;
  2456. arm7_9_common_t *arm7_9;
  2457. if (arm7_9_get_arch_pointers(target, &armv4_5, &arm7_9) != ERROR_OK)
  2458. {
  2459. command_print(cmd_ctx, "current target isn't an ARM7/ARM9 target");
  2460. return ERROR_OK;
  2461. }
  2462. if (argc > 0)
  2463. {
  2464. if (strcmp("enable", args[0]) == 0)
  2465. {
  2466. arm7_9->fast_memory_access = 1;
  2467. }
  2468. else if (strcmp("disable", args[0]) == 0)
  2469. {
  2470. arm7_9->fast_memory_access = 0;
  2471. }
  2472. else
  2473. {
  2474. command_print(cmd_ctx, "usage: arm7_9 fast_memory_access <enable|disable>");
  2475. }
  2476. }
  2477. command_print(cmd_ctx, "fast memory access is %s", (arm7_9->fast_memory_access) ? "enabled" : "disabled");
  2478. return ERROR_OK;
  2479. }
  2480. int handle_arm7_9_dcc_downloads_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
  2481. {
  2482. target_t *target = get_current_target(cmd_ctx);
  2483. armv4_5_common_t *armv4_5;
  2484. arm7_9_common_t *arm7_9;
  2485. if (arm7_9_get_arch_pointers(target, &armv4_5, &arm7_9) != ERROR_OK)
  2486. {
  2487. command_print(cmd_ctx, "current target isn't an ARM7/ARM9 target");
  2488. return ERROR_OK;
  2489. }
  2490. if (argc > 0)
  2491. {
  2492. if (strcmp("enable", args[0]) == 0)
  2493. {
  2494. arm7_9->dcc_downloads = 1;
  2495. }
  2496. else if (strcmp("disable", args[0]) == 0)
  2497. {
  2498. arm7_9->dcc_downloads = 0;
  2499. }
  2500. else
  2501. {
  2502. command_print(cmd_ctx, "usage: arm7_9 dcc_downloads <enable|disable>");
  2503. }
  2504. }
  2505. command_print(cmd_ctx, "dcc downloads are %s", (arm7_9->dcc_downloads) ? "enabled" : "disabled");
  2506. return ERROR_OK;
  2507. }
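/**
 * Initialize the common fields of an ARM7/9 target: set up the JTAG
 * connection, reset the watchpoint/breakpoint bookkeeping, hook the register
 * access and full context functions into the ARMv4/5 layer, and register the
 * target request timer callback.
 *
 * @param target Pointer to the target being initialized
 * @param arm7_9 Pointer to the common struct of the ARM7/9 target
 * @return ERROR_OK, or an error from JTAG setup or ARMv4/5 initialization
 */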
  2508. int arm7_9_init_arch_info(target_t *target, arm7_9_common_t *arm7_9)
  2509. {
  2510. int retval = ERROR_OK;
  2511. armv4_5_common_t *armv4_5 = &arm7_9->armv4_5_common;
  2512. arm7_9->common_magic = ARM7_9_COMMON_MAGIC;
  2513. if ((retval = arm_jtag_setup_connection(&arm7_9->jtag_info)) != ERROR_OK)
  2514. {
  2515. return retval;
  2516. }
  2517. arm7_9->wp_available = 0; /* this is set up in arm7_9_clear_watchpoints() */
  2518. arm7_9->wp_available_max = 2;
  2519. arm7_9->sw_breakpoints_added = 0;
  2520. arm7_9->breakpoint_count = 0;
  2521. arm7_9->wp0_used = 0;
  2522. arm7_9->wp1_used = 0;
  2523. arm7_9->wp1_used_default = 0;
  2524. arm7_9->use_dbgrq = 0;
  2525. arm7_9->etm_ctx = NULL;
  2526. arm7_9->has_single_step = 0;
  2527. arm7_9->has_monitor_mode = 0;
  2528. arm7_9->has_vector_catch = 0;
  2529. arm7_9->debug_entry_from_reset = 0;
  2530. arm7_9->dcc_working_area = NULL;
  2531. arm7_9->fast_memory_access = fast_and_dangerous;
  2532. arm7_9->dcc_downloads = fast_and_dangerous;
  2533. arm7_9->need_bypass_before_restart = 0;
  2534. armv4_5->arch_info = arm7_9;
  2535. armv4_5->read_core_reg = arm7_9_read_core_reg;
  2536. armv4_5->write_core_reg = arm7_9_write_core_reg;
  2537. armv4_5->full_context = arm7_9_full_context;
  2538. if ((retval = armv4_5_init_arch_info(target, armv4_5)) != ERROR_OK)
  2539. {
  2540. return retval;
  2541. }
  2542. if ((retval = target_register_timer_callback(arm7_9_handle_target_request, 1, 1, target)) != ERROR_OK)
  2543. {
  2544. return retval;
  2545. }
  2546. return ERROR_OK;
  2547. }