  1. /***************************************************************************
  2. * Copyright (C) 2005 by Dominic Rath *
  3. * Dominic.Rath@gmx.de *
  4. * *
  5. * This program is free software; you can redistribute it and/or modify *
  6. * it under the terms of the GNU General Public License as published by *
  7. * the Free Software Foundation; either version 2 of the License, or *
  8. * (at your option) any later version. *
  9. * *
  10. * This program is distributed in the hope that it will be useful, *
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  13. * GNU General Public License for more details. *
  14. * *
  15. * You should have received a copy of the GNU General Public License *
  16. * along with this program; if not, write to the *
  17. * Free Software Foundation, Inc., *
  18. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
  19. ***************************************************************************/
  20. #ifdef HAVE_CONFIG_H
  21. #include "config.h"
  22. #endif
  23. #include "replacements.h"
  24. #include "embeddedice.h"
  25. #include "target.h"
  26. #include "armv4_5.h"
  27. #include "arm_jtag.h"
  28. #include "jtag.h"
  29. #include "log.h"
  30. #include "arm7_9_common.h"
  31. #include "breakpoints.h"
  32. #include <stdlib.h>
  33. #include <string.h>
  34. #include <unistd.h>
  35. #include <sys/types.h>
  36. #include <sys/stat.h>
  37. #include <sys/time.h>
  38. #include <errno.h>
  39. int arm7_9_debug_entry(target_t *target);
  40. int arm7_9_enable_sw_bkpts(struct target_s *target);
  41. /* command handler forward declarations */
  42. int handle_arm7_9_write_xpsr_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  43. int handle_arm7_9_write_xpsr_im8_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  44. int handle_arm7_9_read_core_reg_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  45. int handle_arm7_9_write_core_reg_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  46. int handle_arm7_9_sw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  47. int handle_arm7_9_force_hw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  48. int handle_arm7_9_dbgrq_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  49. int handle_arm7_9_fast_memory_access_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
  50. int handle_arm7_9_dcc_downloads_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc);
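/* Reinitialize the EmbeddedICE watchpoint units after a test-logic reset.
 * A TRST clears the EmbeddedICE registers, so both comparators are marked free,
 * hardware breakpoints are flagged as unset (they get reprogrammed the next time
 * breakpoints are enabled, see arm7_9_enable_breakpoints()), and watchpoint-based
 * software breakpoint support is re-enabled if it had been active. */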
  51. int arm7_9_reinit_embeddedice(target_t *target)
  52. {
  53. armv4_5_common_t *armv4_5 = target->arch_info;
  54. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  55. breakpoint_t *breakpoint = target->breakpoints;
  56. arm7_9->wp_available = 2;
  57. arm7_9->wp0_used = 0;
  58. arm7_9->wp1_used = 0;
  59. /* mark all hardware breakpoints as unset */
  60. while (breakpoint)
  61. {
  62. if (breakpoint->type == BKPT_HARD)
  63. {
  64. breakpoint->set = 0;
  65. }
  66. breakpoint = breakpoint->next;
  67. }
  68. if (arm7_9->sw_bkpts_enabled && arm7_9->sw_bkpts_use_wp)
  69. {
  70. arm7_9->sw_bkpts_enabled = 0;
  71. arm7_9_enable_sw_bkpts(target);
  72. }
  73. arm7_9->reinit_embeddedice = 0;
  74. return ERROR_OK;
  75. }
  76. int arm7_9_jtag_callback(enum jtag_event event, void *priv)
  77. {
  78. target_t *target = priv;
  79. armv4_5_common_t *armv4_5 = target->arch_info;
  80. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  81. /* a test-logic reset occurred
  82. * the EmbeddedICE registers have been reset
  83. * hardware breakpoints have been cleared
  84. */
  85. if (event == JTAG_TRST_ASSERTED)
  86. {
  87. arm7_9->reinit_embeddedice = 1;
  88. }
  89. return ERROR_OK;
  90. }
  91. int arm7_9_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, arm7_9_common_t **arm7_9_p)
  92. {
  93. armv4_5_common_t *armv4_5 = target->arch_info;
  94. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  95. if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
  96. {
  97. return -1;
  98. }
  99. if (arm7_9->common_magic != ARM7_9_COMMON_MAGIC)
  100. {
  101. return -1;
  102. }
  103. *armv4_5_p = armv4_5;
  104. *arm7_9_p = arm7_9;
  105. return ERROR_OK;
  106. }
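/* Program a breakpoint into the target.
 * A hardware breakpoint claims one of the two EmbeddedICE watchpoint comparators,
 * set to match fetches (nOPC compared against 0) at the breakpoint address;
 * breakpoint->set records which comparator (1 or 2) is in use. A software
 * breakpoint saves the original opcode in breakpoint->orig_instr and overwrites
 * it with the ARM or Thumb breakpoint instruction. */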
  107. int arm7_9_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  108. {
  109. armv4_5_common_t *armv4_5 = target->arch_info;
  110. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  111. if (target->state != TARGET_HALTED)
  112. {
  113. WARNING("target not halted");
  114. return ERROR_TARGET_NOT_HALTED;
  115. }
  116. if (arm7_9->force_hw_bkpts)
  117. breakpoint->type = BKPT_HARD;
  118. if (breakpoint->set)
  119. {
  120. WARNING("breakpoint already set");
  121. return ERROR_OK;
  122. }
  123. if (breakpoint->type == BKPT_HARD)
  124. {
  125. /* either an ARM (4 byte) or Thumb (2 byte) breakpoint */
  126. u32 mask = (breakpoint->length == 4) ? 0x3u : 0x1u;
  127. if (!arm7_9->wp0_used)
  128. {
  129. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_VALUE], breakpoint->address);
  130. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK], mask);
  131. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK], 0xffffffffu);
  132. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK], ~EICE_W_CTRL_nOPC & 0xff);
  133. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], EICE_W_CTRL_ENABLE);
  134. jtag_execute_queue();
  135. arm7_9->wp0_used = 1;
  136. breakpoint->set = 1;
  137. }
  138. else if (!arm7_9->wp1_used)
  139. {
  140. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_VALUE], breakpoint->address);
  141. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_MASK], mask);
  142. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_DATA_MASK], 0xffffffffu);
  143. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_MASK], ~EICE_W_CTRL_nOPC & 0xff);
  144. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE], EICE_W_CTRL_ENABLE);
  145. jtag_execute_queue();
  146. arm7_9->wp1_used = 1;
  147. breakpoint->set = 2;
  148. }
  149. else
  150. {
  151. ERROR("BUG: no hardware comparator available");
  152. return ERROR_OK;
  153. }
  154. }
  155. else if (breakpoint->type == BKPT_SOFT)
  156. {
  157. if (breakpoint->length == 4)
  158. {
  159. /* keep the original instruction in target endianness */
  160. target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
  161. /* write the breakpoint instruction in target endianness (arm7_9->arm_bkpt is host endian) */
  162. target_write_u32(target, breakpoint->address, arm7_9->arm_bkpt);
  163. }
  164. else
  165. {
  166. /* keep the original instruction in target endianness */
  167. target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
  168. /* write the breakpoint instruction in target endianness (arm7_9->thumb_bkpt is host endian) */
  169. target_write_u16(target, breakpoint->address, arm7_9->thumb_bkpt);
  170. }
  171. breakpoint->set = 1;
  172. }
  173. return ERROR_OK;
  174. }
  175. int arm7_9_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  176. {
  177. armv4_5_common_t *armv4_5 = target->arch_info;
  178. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  179. if (target->state != TARGET_HALTED)
  180. {
  181. WARNING("target not halted");
  182. return ERROR_TARGET_NOT_HALTED;
  183. }
  184. if (!breakpoint->set)
  185. {
  186. WARNING("breakpoint not set");
  187. return ERROR_OK;
  188. }
  189. if (breakpoint->type == BKPT_HARD)
  190. {
  191. if (breakpoint->set == 1)
  192. {
  193. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], 0x0);
  194. jtag_execute_queue();
  195. arm7_9->wp0_used = 0;
  196. }
  197. else if (breakpoint->set == 2)
  198. {
  199. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE], 0x0);
  200. jtag_execute_queue();
  201. arm7_9->wp1_used = 0;
  202. }
  203. breakpoint->set = 0;
  204. }
  205. else
  206. {
  207. /* restore original instruction (kept in target endianness) */
  208. if (breakpoint->length == 4)
  209. {
  210. target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
  211. }
  212. else
  213. {
  214. target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
  215. }
  216. breakpoint->set = 0;
  217. }
  218. return ERROR_OK;
  219. }
  220. int arm7_9_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  221. {
  222. armv4_5_common_t *armv4_5 = target->arch_info;
  223. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  224. if (target->state != TARGET_HALTED)
  225. {
  226. WARNING("target not halted");
  227. return ERROR_TARGET_NOT_HALTED;
  228. }
  229. if (arm7_9->force_hw_bkpts)
  230. {
  231. DEBUG("forcing use of hardware breakpoint at address 0x%8.8x", breakpoint->address);
  232. breakpoint->type = BKPT_HARD;
  233. }
  234. if ((breakpoint->type == BKPT_SOFT) && (arm7_9->sw_bkpts_enabled == 0))
  235. {
  236. INFO("sw breakpoint requested, but software breakpoints not enabled");
  237. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  238. }
  239. if ((breakpoint->type == BKPT_HARD) && (arm7_9->wp_available < 1))
  240. {
  241. INFO("no watchpoint unit available for hardware breakpoint");
  242. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  243. }
  244. if ((breakpoint->length != 2) && (breakpoint->length != 4))
  245. {
  246. INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
  247. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  248. }
  249. if (breakpoint->type == BKPT_HARD)
  250. arm7_9->wp_available--;
  251. return ERROR_OK;
  252. }
  253. int arm7_9_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
  254. {
  255. armv4_5_common_t *armv4_5 = target->arch_info;
  256. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  257. if (target->state != TARGET_HALTED)
  258. {
  259. WARNING("target not halted");
  260. return ERROR_TARGET_NOT_HALTED;
  261. }
  262. if (breakpoint->set)
  263. {
  264. arm7_9_unset_breakpoint(target, breakpoint);
  265. }
  266. if (breakpoint->type == BKPT_HARD)
  267. arm7_9->wp_available++;
  268. return ERROR_OK;
  269. }
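/* Program a data watchpoint into a free EmbeddedICE comparator.
 * The address mask is derived from the access length, data value/mask come from
 * the watchpoint itself, and the control value enables the unit with nOPC set
 * (data access) plus the read/write bit; for WPT_ACCESS the nRW bit is left as
 * a don't-care so both reads and writes match. */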
  270. int arm7_9_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  271. {
  272. armv4_5_common_t *armv4_5 = target->arch_info;
  273. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  274. int rw_mask = 1;
  275. u32 mask;
  276. mask = watchpoint->length - 1;
  277. if (target->state != TARGET_HALTED)
  278. {
  279. WARNING("target not halted");
  280. return ERROR_TARGET_NOT_HALTED;
  281. }
  282. if (watchpoint->rw == WPT_ACCESS)
  283. rw_mask = 0;
  284. else
  285. rw_mask = 1;
  286. if (!arm7_9->wp0_used)
  287. {
  288. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_VALUE], watchpoint->address);
  289. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK], mask);
  290. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK], watchpoint->mask);
  291. if( watchpoint->mask != 0xffffffffu )
  292. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_VALUE], watchpoint->value);
  293. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK], 0xff & ~EICE_W_CTRL_nOPC & ~rw_mask);
  294. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], EICE_W_CTRL_ENABLE | EICE_W_CTRL_nOPC | (watchpoint->rw & 1));
  295. jtag_execute_queue();
  296. watchpoint->set = 1;
  297. arm7_9->wp0_used = 2;
  298. }
  299. else if (!arm7_9->wp1_used)
  300. {
  301. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_VALUE], watchpoint->address);
  302. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_MASK], mask);
  303. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_DATA_MASK], watchpoint->mask);
  304. if( watchpoint->mask != 0xffffffffu )
  305. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_DATA_VALUE], watchpoint->value);
  306. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_MASK], 0xff & ~EICE_W_CTRL_nOPC & ~rw_mask);
  307. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE], EICE_W_CTRL_ENABLE | EICE_W_CTRL_nOPC | (watchpoint->rw & 1));
  308. jtag_execute_queue();
  309. watchpoint->set = 2;
  310. arm7_9->wp1_used = 2;
  311. }
  312. else
  313. {
  314. ERROR("BUG: no hardware comparator available");
  315. return ERROR_OK;
  316. }
  317. return ERROR_OK;
  318. }
  319. int arm7_9_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  320. {
  321. armv4_5_common_t *armv4_5 = target->arch_info;
  322. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  323. if (target->state != TARGET_HALTED)
  324. {
  325. WARNING("target not halted");
  326. return ERROR_TARGET_NOT_HALTED;
  327. }
  328. if (!watchpoint->set)
  329. {
  330. WARNING("breakpoint not set");
  331. return ERROR_OK;
  332. }
  333. if (watchpoint->set == 1)
  334. {
  335. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], 0x0);
  336. jtag_execute_queue();
  337. arm7_9->wp0_used = 0;
  338. }
  339. else if (watchpoint->set == 2)
  340. {
  341. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE], 0x0);
  342. jtag_execute_queue();
  343. arm7_9->wp1_used = 0;
  344. }
  345. watchpoint->set = 0;
  346. return ERROR_OK;
  347. }
  348. int arm7_9_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  349. {
  350. armv4_5_common_t *armv4_5 = target->arch_info;
  351. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  352. if (target->state != TARGET_HALTED)
  353. {
  354. WARNING("target not halted");
  355. return ERROR_TARGET_NOT_HALTED;
  356. }
  357. if (arm7_9->wp_available < 1)
  358. {
  359. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  360. }
  361. if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
  362. {
  363. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  364. }
  365. arm7_9->wp_available--;
  366. return ERROR_OK;
  367. }
  368. int arm7_9_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
  369. {
  370. armv4_5_common_t *armv4_5 = target->arch_info;
  371. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  372. if (target->state != TARGET_HALTED)
  373. {
  374. WARNING("target not halted");
  375. return ERROR_TARGET_NOT_HALTED;
  376. }
  377. if (watchpoint->set)
  378. {
  379. arm7_9_unset_watchpoint(target, watchpoint);
  380. }
  381. arm7_9->wp_available++;
  382. return ERROR_OK;
  383. }
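/* Enable watchpoint-based software breakpoints.
 * One EmbeddedICE comparator is dedicated to matching the breakpoint bit pattern
 * (arm7_9->arm_bkpt) on the data bus during opcode fetches, with the address
 * fully masked, so fetching that pattern from any address halts the core. This
 * permanently occupies one of the two watchpoint units, which is why
 * wp_available is decremented here. */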
  384. int arm7_9_enable_sw_bkpts(struct target_s *target)
  385. {
  386. armv4_5_common_t *armv4_5 = target->arch_info;
  387. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  388. int retval;
  389. if (arm7_9->sw_bkpts_enabled)
  390. return ERROR_OK;
  391. if (arm7_9->wp_available < 1)
  392. {
  393. WARNING("can't enable sw breakpoints with no watchpoint unit available");
  394. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  395. }
  396. arm7_9->wp_available--;
  397. if (!arm7_9->wp0_used)
  398. {
  399. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_VALUE], arm7_9->arm_bkpt);
  400. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK], 0x0);
  401. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK], 0xffffffffu);
  402. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK], ~EICE_W_CTRL_nOPC & 0xff);
  403. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], EICE_W_CTRL_ENABLE);
  404. arm7_9->sw_bkpts_enabled = 1;
  405. arm7_9->wp0_used = 3;
  406. }
  407. else if (!arm7_9->wp1_used)
  408. {
  409. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_DATA_VALUE], arm7_9->arm_bkpt);
  410. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_DATA_MASK], 0x0);
  411. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_MASK], 0xffffffffu);
  412. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_MASK], ~EICE_W_CTRL_nOPC & 0xff);
  413. embeddedice_set_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE], EICE_W_CTRL_ENABLE);
  414. arm7_9->sw_bkpts_enabled = 2;
  415. arm7_9->wp1_used = 3;
  416. }
  417. else
  418. {
  419. ERROR("BUG: both watchpoints used, but wp_available >= 1");
  420. exit(-1);
  421. }
  422. if ((retval = jtag_execute_queue()) != ERROR_OK)
  423. {
  424. ERROR("error writing EmbeddedICE registers to enable sw breakpoints");
  425. exit(-1);
  426. };
  427. return ERROR_OK;
  428. }
  429. int arm7_9_disable_sw_bkpts(struct target_s *target)
  430. {
  431. armv4_5_common_t *armv4_5 = target->arch_info;
  432. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  433. if (!arm7_9->sw_bkpts_enabled)
  434. return ERROR_OK;
  435. if (arm7_9->sw_bkpts_enabled == 1)
  436. {
  437. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], 0x0);
  438. arm7_9->sw_bkpts_enabled = 0;
  439. arm7_9->wp0_used = 0;
  440. arm7_9->wp_available++;
  441. }
  442. else if (arm7_9->sw_bkpts_enabled == 2)
  443. {
  444. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE], 0x0);
  445. arm7_9->sw_bkpts_enabled = 0;
  446. arm7_9->wp1_used = 0;
  447. arm7_9->wp_available++;
  448. }
  449. return ERROR_OK;
  450. }
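/* Execute the queued instructions at system speed.
 * The RESTART instruction (0x4) is scanned into the ARM TAP, then the
 * EmbeddedICE debug status register is polled until both DBGACK and SYSCOMP are
 * set, i.e. the core has completed the system-speed access and re-entered debug
 * state. Gives up after roughly 5 seconds (50 polls, 100 ms apart). */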
  451. int arm7_9_execute_sys_speed(struct target_s *target)
  452. {
  453. int timeout;
  454. int retval;
  455. armv4_5_common_t *armv4_5 = target->arch_info;
  456. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  457. arm_jtag_t *jtag_info = &arm7_9->jtag_info;
  458. reg_t *dbg_stat = &arm7_9->eice_cache->reg_list[EICE_DBG_STAT];
  459. /* set RESTART instruction */
  460. jtag_add_end_state(TAP_RTI);
  461. arm_jtag_set_instr(jtag_info, 0x4);
  462. for (timeout=0; timeout<50; timeout++)
  463. {
  464. /* read debug status register */
  465. embeddedice_read_reg(dbg_stat);
  466. if ((retval = jtag_execute_queue()) != ERROR_OK)
  467. return retval;
  468. if ((buf_get_u32(dbg_stat->value, EICE_DBG_STATUS_DBGACK, 1))
  469. && (buf_get_u32(dbg_stat->value, EICE_DBG_STATUS_SYSCOMP, 1)))
  470. break;
  471. usleep(100000);
  472. }
  473. if (timeout == 50)
  474. {
  475. ERROR("timeout waiting for SYSCOMP & DBGACK, last DBG_STATUS: %x", buf_get_u32(dbg_stat->value, 0, dbg_stat->size));
  476. return ERROR_TARGET_TIMEOUT;
  477. }
  478. return ERROR_OK;
  479. }
  480. int arm7_9_execute_fast_sys_speed(struct target_s *target)
  481. {
  482. u8 check_value[4], check_mask[4];
  483. armv4_5_common_t *armv4_5 = target->arch_info;
  484. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  485. arm_jtag_t *jtag_info = &arm7_9->jtag_info;
  486. reg_t *dbg_stat = &arm7_9->eice_cache->reg_list[EICE_DBG_STAT];
  487. /* set RESTART instruction */
  488. jtag_add_end_state(TAP_RTI);
  489. arm_jtag_set_instr(jtag_info, 0x4);
  490. /* check for DBGACK and SYSCOMP set (others don't care) */
  491. buf_set_u32(check_value, 0, 32, 0x9);
  492. buf_set_u32(check_mask, 0, 32, 0x9);
  493. /* read debug status register */
  494. embeddedice_read_reg_w_check(dbg_stat, check_value, check_mask);
  495. return ERROR_OK;
  496. }
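/* Poll the target state via the EmbeddedICE debug status register.
 * If DBGACK is set, the core is halted (or has just halted): debug entry is
 * performed and the halted or debug-halted event callbacks are invoked.
 * Otherwise the target is reported as running. A pending test-logic reset is
 * handled first by reinitializing the EmbeddedICE registers. */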
  497. enum target_state arm7_9_poll(target_t *target)
  498. {
  499. int retval;
  500. armv4_5_common_t *armv4_5 = target->arch_info;
  501. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  502. reg_t *dbg_stat = &arm7_9->eice_cache->reg_list[EICE_DBG_STAT];
  503. if (arm7_9->reinit_embeddedice)
  504. {
  505. arm7_9_reinit_embeddedice(target);
  506. }
  507. /* read debug status register */
  508. embeddedice_read_reg(dbg_stat);
  509. if ((retval = jtag_execute_queue()) != ERROR_OK)
  510. {
  511. switch (retval)
  512. {
  513. case ERROR_JTAG_QUEUE_FAILED:
  514. ERROR("JTAG queue failed while reading EmbeddedICE status register");
  515. exit(-1);
  516. break;
  517. default:
  518. break;
  519. }
  520. }
  521. if (buf_get_u32(dbg_stat->value, EICE_DBG_STATUS_DBGACK, 1))
  522. {
  523. DEBUG("DBGACK set, dbg_state->value: 0x%x", buf_get_u32(dbg_stat->value, 0, 32));
  524. if ((target->state == TARGET_UNKNOWN))
  525. {
  526. WARNING("DBGACK set while target was in unknown state. Reset or initialize target before resuming");
  527. target->state = TARGET_RUNNING;
  528. }
  529. if ((target->state == TARGET_RUNNING) || (target->state == TARGET_RESET))
  530. {
  531. target->state = TARGET_HALTED;
  532. if ((retval = arm7_9_debug_entry(target)) != ERROR_OK)
  533. return retval;
  534. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  535. }
  536. if (target->state == TARGET_DEBUG_RUNNING)
  537. {
  538. target->state = TARGET_HALTED;
  539. if ((retval = arm7_9_debug_entry(target)) != ERROR_OK)
  540. return retval;
  541. target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
  542. }
  543. }
  544. else
  545. {
  546. if (target->state != TARGET_DEBUG_RUNNING)
  547. target->state = TARGET_RUNNING;
  548. }
  549. return target->state;
  550. }
  551. int arm7_9_assert_reset(target_t *target)
  552. {
  553. int retval;
  554. DEBUG("target->state: %s", target_state_strings[target->state]);
  555. if (target->state == TARGET_HALTED || target->state == TARGET_UNKNOWN)
  556. {
  557. /* if the target wasn't running, there might be working areas allocated */
  558. target_free_all_working_areas(target);
  559. /* assert SRST and TRST */
  560. /* system would get out of sync if we didn't reset test-logic, too */
  561. if ((retval = jtag_add_reset(1, 1)) != ERROR_OK)
  562. {
  563. if (retval == ERROR_JTAG_RESET_CANT_SRST)
  564. {
  565. WARNING("can't assert srst");
  566. return retval;
  567. }
  568. else
  569. {
  570. ERROR("unknown error");
  571. exit(-1);
  572. }
  573. }
  574. jtag_add_sleep(5000);
  575. if ((retval = jtag_add_reset(0, 1)) != ERROR_OK)
  576. {
  577. if (retval == ERROR_JTAG_RESET_WOULD_ASSERT_TRST)
  578. {
  579. WARNING("srst resets test logic, too");
  580. retval = jtag_add_reset(1, 1);
  581. }
  582. }
  583. }
  584. else
  585. {
  586. if ((retval = jtag_add_reset(0, 1)) != ERROR_OK)
  587. {
  588. if (retval == ERROR_JTAG_RESET_WOULD_ASSERT_TRST)
  589. {
  590. WARNING("srst resets test logic, too");
  591. retval = jtag_add_reset(1, 1);
  592. }
  593. if (retval == ERROR_JTAG_RESET_CANT_SRST)
  594. {
  595. WARNING("can't assert srst");
  596. return retval;
  597. }
  598. else if (retval != ERROR_OK)
  599. {
  600. ERROR("unknown error");
  601. exit(-1);
  602. }
  603. }
  604. }
  605. target->state = TARGET_RESET;
  606. jtag_add_sleep(50000);
  607. armv4_5_invalidate_core_regs(target);
  608. return ERROR_OK;
  609. }
  610. int arm7_9_deassert_reset(target_t *target)
  611. {
  612. DEBUG("target->state: %s", target_state_strings[target->state]);
  613. /* deassert reset lines */
  614. jtag_add_reset(0, 0);
  615. return ERROR_OK;
  616. }
  617. int arm7_9_clear_halt(target_t *target)
  618. {
  619. armv4_5_common_t *armv4_5 = target->arch_info;
  620. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  621. reg_t *dbg_ctrl = &arm7_9->eice_cache->reg_list[EICE_DBG_CTRL];
  622. if (arm7_9->use_dbgrq)
  623. {
  624. /* program EmbeddedICE Debug Control Register to deassert DBGRQ
  625. */
  626. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGRQ, 1, 0);
  627. embeddedice_store_reg(dbg_ctrl);
  628. }
  629. else
  630. {
  631. /* restore registers if watchpoint unit 0 was in use
  632. */
  633. if (arm7_9->wp0_used)
  634. {
  635. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK]);
  636. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK]);
  637. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK]);
  638. }
  639. /* control value always has to be restored, as it was either disabled,
  640. * or enabled with possibly different bits
  641. */
  642. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE]);
  643. }
  644. return ERROR_OK;
  645. }
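/* Halt the core and put it into a state resembling a reset, without using SRST.
 * Waits until the core has actually entered debug state, asserts DBGACK and
 * INTDIS, forces ARM state and SVC mode with IRQ and FIQ disabled, points the
 * PC at 0x0 and marks r0-r14 dirty so this "reset" context is written back on
 * the next resume. */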
  646. int arm7_9_soft_reset_halt(struct target_s *target)
  647. {
  648. armv4_5_common_t *armv4_5 = target->arch_info;
  649. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  650. reg_t *dbg_stat = &arm7_9->eice_cache->reg_list[EICE_DBG_STAT];
  651. reg_t *dbg_ctrl = &arm7_9->eice_cache->reg_list[EICE_DBG_CTRL];
  652. int i;
  653. if (target->state == TARGET_RUNNING)
  654. {
  655. target->type->halt(target);
  656. }
  657. while (buf_get_u32(dbg_stat->value, EICE_DBG_STATUS_DBGACK, 1) == 0)
  658. {
  659. embeddedice_read_reg(dbg_stat);
  660. jtag_execute_queue();
  661. }
  662. target->state = TARGET_HALTED;
  663. /* program EmbeddedICE Debug Control Register to assert DBGACK and INTDIS
  664. * ensure that DBGRQ is cleared
  665. */
  666. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGACK, 1, 1);
  667. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGRQ, 1, 0);
  668. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_INTDIS, 1, 1);
  669. embeddedice_store_reg(dbg_ctrl);
  670. arm7_9_clear_halt(target);
  671. /* if the target is in Thumb state, change to ARM state */
  672. if (buf_get_u32(dbg_stat->value, EICE_DBG_STATUS_ITBIT, 1))
  673. {
  674. u32 r0_thumb, pc_thumb;
  675. DEBUG("target entered debug from Thumb state, changing to ARM");
  676. /* Entered debug from Thumb mode */
  677. armv4_5->core_state = ARMV4_5_STATE_THUMB;
  678. arm7_9->change_to_arm(target, &r0_thumb, &pc_thumb);
  679. }
  680. /* all register content is now invalid */
  681. armv4_5_invalidate_core_regs(target);
  682. /* SVC, ARM state, IRQ and FIQ disabled */
  683. buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8, 0xd3);
  684. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
  685. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
  686. /* start fetching from 0x0 */
  687. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, 0x0);
  688. armv4_5->core_cache->reg_list[15].dirty = 1;
  689. armv4_5->core_cache->reg_list[15].valid = 1;
  690. armv4_5->core_mode = ARMV4_5_MODE_SVC;
  691. armv4_5->core_state = ARMV4_5_STATE_ARM;
  692. /* reset registers */
  693. for (i = 0; i <= 14; i++)
  694. {
  695. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, 0xffffffff);
  696. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 1;
  697. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
  698. }
  699. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  700. return ERROR_OK;
  701. }
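/* Request a halt.
 * Either DBGRQ is asserted through the EmbeddedICE debug control register
 * (if arm7_9->use_dbgrq is set), or watchpoint unit 0 is programmed to match on
 * any address so the core breaks into debug state almost immediately. The state
 * change itself is picked up later by arm7_9_poll(). */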
  702. int arm7_9_halt(target_t *target)
  703. {
  704. armv4_5_common_t *armv4_5 = target->arch_info;
  705. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  706. reg_t *dbg_ctrl = &arm7_9->eice_cache->reg_list[EICE_DBG_CTRL];
  707. DEBUG("target->state: %s", target_state_strings[target->state]);
  708. if (target->state == TARGET_HALTED)
  709. {
  710. WARNING("target was already halted");
  711. return ERROR_TARGET_ALREADY_HALTED;
  712. }
  713. if (target->state == TARGET_UNKNOWN)
  714. {
  715. WARNING("target was in unknown state when halt was requested");
  716. }
  717. if ((target->state == TARGET_RESET) && (jtag_reset_config & RESET_SRST_PULLS_TRST) && (jtag_srst))
  718. {
  719. ERROR("can't request a halt while in reset if nSRST pulls nTRST");
  720. return ERROR_TARGET_FAILURE;
  721. }
  722. if (arm7_9->use_dbgrq)
  723. {
  724. /* program EmbeddedICE Debug Control Register to assert DBGRQ
  725. */
  726. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGRQ, 1, 1);
  727. embeddedice_store_reg(dbg_ctrl);
  728. }
  729. else
  730. {
  731. /* program watchpoint unit to match on any address
  732. */
  733. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK], 0xffffffff);
  734. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK], 0xffffffff);
  735. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], 0x100);
  736. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK], 0xf7);
  737. }
  738. target->debug_reason = DBG_REASON_DBGRQ;
  739. return ERROR_OK;
  740. }
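/* Enter debug state after the core has halted.
 * Asserts DBGACK and INTDIS (and clears DBGRQ), switches a core that halted in
 * Thumb state back to ARM state, reads r0-r15 and the CPSR through the debug
 * scan chain, and adjusts the captured PC for the pipeline offset, which
 * depends on whether debug state was entered via breakpoint/watchpoint or via
 * DBGRQ. */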
  741. int arm7_9_debug_entry(target_t *target)
  742. {
  743. int i;
  744. u32 context[16];
  745. u32* context_p[16];
  746. u32 r0_thumb, pc_thumb;
  747. u32 cpsr;
  748. int retval;
  749. /* get pointers to arch-specific information */
  750. armv4_5_common_t *armv4_5 = target->arch_info;
  751. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  752. reg_t *dbg_stat = &arm7_9->eice_cache->reg_list[EICE_DBG_STAT];
  753. reg_t *dbg_ctrl = &arm7_9->eice_cache->reg_list[EICE_DBG_CTRL];
  754. #ifdef _DEBUG_ARM7_9_
  755. DEBUG("");
  756. #endif
  757. if (arm7_9->pre_debug_entry)
  758. arm7_9->pre_debug_entry(target);
  759. /* program EmbeddedICE Debug Control Register to assert DBGACK and INTDIS
  760. * ensure that DBGRQ is cleared
  761. */
  762. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGACK, 1, 1);
  763. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGRQ, 1, 0);
  764. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_INTDIS, 1, 1);
  765. embeddedice_store_reg(dbg_ctrl);
  766. arm7_9_clear_halt(target);
  767. if ((retval = jtag_execute_queue()) != ERROR_OK)
  768. {
  769. switch (retval)
  770. {
  771. case ERROR_JTAG_QUEUE_FAILED:
  772. ERROR("JTAG queue failed while writing EmbeddedICE control register");
  773. exit(-1);
  774. break;
  775. default:
  776. break;
  777. }
  778. }
  779. if ((retval = arm7_9->examine_debug_reason(target)) != ERROR_OK)
  780. return retval;
  781. if (target->state != TARGET_HALTED)
  782. {
  783. WARNING("target not halted");
  784. return ERROR_TARGET_NOT_HALTED;
  785. }
  786. /* if the target is in Thumb state, change to ARM state */
  787. if (buf_get_u32(dbg_stat->value, EICE_DBG_STATUS_ITBIT, 1))
  788. {
  789. DEBUG("target entered debug from Thumb state");
  790. /* Entered debug from Thumb mode */
  791. armv4_5->core_state = ARMV4_5_STATE_THUMB;
  792. arm7_9->change_to_arm(target, &r0_thumb, &pc_thumb);
  793. DEBUG("r0_thumb: 0x%8.8x, pc_thumb: 0x%8.8x", r0_thumb, pc_thumb);
  794. }
  795. else
  796. {
  797. DEBUG("target entered debug from ARM state");
  798. /* Entered debug from ARM mode */
  799. armv4_5->core_state = ARMV4_5_STATE_ARM;
  800. }
  801. for (i = 0; i < 16; i++)
  802. context_p[i] = &context[i];
  803. /* save core registers (r0 - r15 of current core mode) */
  804. arm7_9->read_core_regs(target, 0xffff, context_p);
  805. arm7_9->read_xpsr(target, &cpsr, 0);
  806. if ((retval = jtag_execute_queue()) != ERROR_OK)
  807. return retval;
  808. /* if the core has been executing in Thumb state, set the T bit */
  809. if (armv4_5->core_state == ARMV4_5_STATE_THUMB)
  810. cpsr |= 0x20;
  811. buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, cpsr);
  812. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 0;
  813. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
  814. armv4_5->core_mode = cpsr & 0x1f;
  815. if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
  816. {
  817. target->state = TARGET_UNKNOWN;
  818. ERROR("cpsr contains invalid mode value - communication failure");
  819. return ERROR_TARGET_FAILURE;
  820. }
  821. DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
  822. if (armv4_5->core_state == ARMV4_5_STATE_THUMB)
  823. {
  824. DEBUG("thumb state, applying fixups");
  825. context[0] = r0_thumb;
  826. context[15] = pc_thumb;
  827. } else if (armv4_5->core_state == ARMV4_5_STATE_ARM)
  828. {
  829. /* adjust value stored by STM */
  830. context[15] -= 3 * 4;
  831. }
  832. if ((target->debug_reason == DBG_REASON_BREAKPOINT)
  833. || (target->debug_reason == DBG_REASON_SINGLESTEP)
  834. || (target->debug_reason == DBG_REASON_WATCHPOINT)
  835. || (target->debug_reason == DBG_REASON_WPTANDBKPT)
  836. || ((target->debug_reason == DBG_REASON_DBGRQ) && (arm7_9->use_dbgrq == 0)))
  837. context[15] -= 3 * ((armv4_5->core_state == ARMV4_5_STATE_ARM) ? 4 : 2);
  838. else if (target->debug_reason == DBG_REASON_DBGRQ)
  839. context[15] -= arm7_9->dbgreq_adjust_pc * ((armv4_5->core_state == ARMV4_5_STATE_ARM) ? 4 : 2);
  840. else
  841. {
  842. ERROR("unknown debug reason: %i", target->debug_reason);
  843. }
  844. for (i=0; i<=15; i++)
  845. {
  846. DEBUG("r%i: 0x%8.8x", i, context[i]);
  847. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, context[i]);
  848. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
  849. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
  850. }
  851. DEBUG("entered debug state at PC 0x%x", context[15]);
  852. /* exceptions other than USR & SYS have a saved program status register */
  853. if ((armv4_5_mode_to_number(armv4_5->core_mode) != ARMV4_5_MODE_USR) && (armv4_5_mode_to_number(armv4_5->core_mode) != ARMV4_5_MODE_SYS))
  854. {
  855. u32 spsr;
  856. arm7_9->read_xpsr(target, &spsr, 1);
  857. jtag_execute_queue();
  858. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, spsr);
  859. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
  860. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
  861. }
  862. /* r0 and r15 (pc) have to be restored later */
  863. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 0).dirty = 1;
  864. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 15).dirty = 1;
  865. if ((retval = jtag_execute_queue()) != ERROR_OK)
  866. return retval;
  867. if (arm7_9->post_debug_entry)
  868. arm7_9->post_debug_entry(target);
  869. return ERROR_OK;
  870. }
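/* Read the complete register context.
 * Iterates over the processor modes (USR, FIQ, IRQ, SVC, ABT, UND), temporarily
 * switches the core into any mode that still has invalid cached registers,
 * reads the missing registers and SPSR, and finally restores the original mode.
 * SYS is skipped because it shares its registers with USR. */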
  871. int arm7_9_full_context(target_t *target)
  872. {
  873. int i;
  874. int retval;
  875. armv4_5_common_t *armv4_5 = target->arch_info;
  876. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  877. DEBUG("");
  878. if (target->state != TARGET_HALTED)
  879. {
  880. WARNING("target not halted");
  881. return ERROR_TARGET_NOT_HALTED;
  882. }
  883. /* iterate through processor modes (User, FIQ, IRQ, SVC, ABT, UND)
  884. * SYS shares registers with User, so we don't touch SYS
  885. */
  886. for(i = 0; i < 6; i++)
  887. {
  888. u32 mask = 0;
  889. u32* reg_p[16];
  890. int j;
  891. int valid = 1;
  892. /* check if there are invalid registers in the current mode
  893. */
  894. for (j = 0; j <= 16; j++)
  895. {
  896. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
  897. valid = 0;
  898. }
  899. if (!valid)
  900. {
  901. u32 tmp_cpsr;
  902. /* change processor mode (and mask T bit) */
  903. tmp_cpsr = buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & 0xE0;
  904. tmp_cpsr |= armv4_5_number_to_mode(i);
  905. tmp_cpsr &= ~0x20;
  906. arm7_9->write_xpsr_im8(target, tmp_cpsr & 0xff, 0, 0);
  907. for (j = 0; j < 15; j++)
  908. {
  909. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
  910. {
  911. reg_p[j] = (u32*)ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value;
  912. mask |= 1 << j;
  913. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
  914. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
  915. }
  916. }
  917. /* if only the PSR is invalid, mask is all zeroes */
  918. if (mask)
  919. arm7_9->read_core_regs(target, mask, reg_p);
  920. /* check if the PSR has to be read */
  921. if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid == 0)
  922. {
  923. arm7_9->read_xpsr(target, (u32*)ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).value, 1);
  924. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
  925. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
  926. }
  927. }
  928. }
  929. /* restore processor mode (mask T bit) */
  930. arm7_9->write_xpsr_im8(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & ~0x20, 0, 0);
  931. if ((retval = jtag_execute_queue()) != ERROR_OK)
  932. {
  933. ERROR("JTAG failure");
  934. exit(-1);
  935. }
  936. return ERROR_OK;
  937. }
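/* Write back all dirty registers before the core leaves debug state.
 * Dirty registers are written per mode (switching the core mode only when a
 * banked register requires it), then the CPSR is restored (just the mode bits
 * if only a temporary mode switch happened, or the full value if the cached
 * CPSR itself is dirty), and finally the PC is written via write_pc(). */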
  938. int arm7_9_restore_context(target_t *target)
  939. {
  940. armv4_5_common_t *armv4_5 = target->arch_info;
  941. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  942. reg_t *reg;
  943. armv4_5_core_reg_t *reg_arch_info;
  944. enum armv4_5_mode current_mode = armv4_5->core_mode;
  945. int i, j;
  946. int dirty;
  947. int mode_change;
  948. DEBUG("");
  949. if (target->state != TARGET_HALTED)
  950. {
  951. WARNING("target not halted");
  952. return ERROR_TARGET_NOT_HALTED;
  953. }
  954. if (arm7_9->pre_restore_context)
  955. arm7_9->pre_restore_context(target);
  956. /* iterate through processor modes (User, FIQ, IRQ, SVC, ABT, UND)
  957. * SYS shares registers with User, so we don't touch SYS
  958. */
  959. for (i = 0; i < 6; i++)
  960. {
  961. DEBUG("examining %s mode", armv4_5_mode_strings[i]);
  962. dirty = 0;
  963. mode_change = 0;
  964. /* check if there are dirty registers in the current mode
  965. */
  966. for (j = 0; j <= 16; j++)
  967. {
  968. reg = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j);
  969. reg_arch_info = reg->arch_info;
  970. if (reg->dirty == 1)
  971. {
  972. if (reg->valid == 1)
  973. {
  974. dirty = 1;
  975. DEBUG("examining dirty reg: %s", reg->name);
  976. if ((reg_arch_info->mode != ARMV4_5_MODE_ANY)
  977. && (reg_arch_info->mode != current_mode)
  978. && !((reg_arch_info->mode == ARMV4_5_MODE_USR) && (armv4_5->core_mode == ARMV4_5_MODE_SYS))
  979. && !((reg_arch_info->mode == ARMV4_5_MODE_SYS) && (armv4_5->core_mode == ARMV4_5_MODE_USR)))
  980. {
  981. mode_change = 1;
  982. DEBUG("require mode change");
  983. }
  984. }
  985. else
  986. {
  987. ERROR("BUG: dirty register '%s', but no valid data", reg->name);
  988. exit(-1);
  989. }
  990. }
  991. }
  992. if (dirty)
  993. {
  994. u32 mask = 0x0;
  995. int num_regs = 0;
  996. u32 regs[16];
  997. if (mode_change)
  998. {
  999. u32 tmp_cpsr;
  1000. /* change processor mode (mask T bit) */
  1001. tmp_cpsr = buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & 0xE0;
  1002. tmp_cpsr |= armv4_5_number_to_mode(i);
  1003. tmp_cpsr &= ~0x20;
  1004. arm7_9->write_xpsr_im8(target, tmp_cpsr & 0xff, 0, 0);
  1005. current_mode = armv4_5_number_to_mode(i);
  1006. }
  1007. for (j = 0; j <= 14; j++)
  1008. {
  1009. reg = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j);
  1010. reg_arch_info = reg->arch_info;
  1011. if (reg->dirty == 1)
  1012. {
  1013. regs[j] = buf_get_u32(reg->value, 0, 32);
  1014. mask |= 1 << j;
  1015. num_regs++;
  1016. reg->dirty = 0;
  1017. reg->valid = 1;
  1018. DEBUG("writing register %i of mode %s with value 0x%8.8x", j, armv4_5_mode_strings[i], regs[j]);
  1019. }
  1020. }
  1021. if (mask)
  1022. {
  1023. arm7_9->write_core_regs(target, mask, regs);
  1024. }
  1025. reg = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16);
  1026. reg_arch_info = reg->arch_info;
  1027. if ((reg->dirty) && (reg_arch_info->mode != ARMV4_5_MODE_ANY))
  1028. {
  1029. DEBUG("writing SPSR of mode %i with value 0x%8.8x", i, buf_get_u32(reg->value, 0, 32));
  1030. arm7_9->write_xpsr(target, buf_get_u32(reg->value, 0, 32), 1);
  1031. }
  1032. }
  1033. }
  1034. if ((armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty == 0) && (armv4_5->core_mode != current_mode))
  1035. {
  1036. /* restore processor mode (mask T bit) */
  1037. u32 tmp_cpsr;
  1038. tmp_cpsr = buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & 0xE0;
  1039. tmp_cpsr |= armv4_5_number_to_mode(i);
  1040. tmp_cpsr &= ~0x20;
  1041. DEBUG("writing lower 8 bit of cpsr with value 0x%2.2x", tmp_cpsr);
  1042. arm7_9->write_xpsr_im8(target, tmp_cpsr & 0xff, 0, 0);
  1043. }
  1044. else if (armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty == 1)
  1045. {
  1046. /* CPSR has been changed, full restore necessary (mask T bit) */
  1047. DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
  1048. arm7_9->write_xpsr(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32) & ~0x20, 0);
  1049. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 0;
  1050. armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
  1051. }
  1052. /* restore PC */
  1053. DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1054. arm7_9->write_pc(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1055. armv4_5->core_cache->reg_list[15].dirty = 0;
  1056. if (arm7_9->post_restore_context)
  1057. arm7_9->post_restore_context(target);
  1058. return ERROR_OK;
  1059. }
  1060. int arm7_9_restart_core(struct target_s *target)
  1061. {
  1062. armv4_5_common_t *armv4_5 = target->arch_info;
  1063. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1064. arm_jtag_t *jtag_info = &arm7_9->jtag_info;
  1065. /* set RESTART instruction */
  1066. jtag_add_end_state(TAP_RTI);
  1067. arm_jtag_set_instr(jtag_info, 0x4);
  1068. jtag_add_runtest(1, TAP_RTI);
  1069. if ((jtag_execute_queue()) != ERROR_OK)
  1070. {
  1071. exit(-1);
  1072. }
  1073. return ERROR_OK;
  1074. }
  1075. void arm7_9_enable_watchpoints(struct target_s *target)
  1076. {
  1077. watchpoint_t *watchpoint = target->watchpoints;
  1078. while (watchpoint)
  1079. {
  1080. if (watchpoint->set == 0)
  1081. arm7_9_set_watchpoint(target, watchpoint);
  1082. watchpoint = watchpoint->next;
  1083. }
  1084. }
  1085. void arm7_9_enable_breakpoints(struct target_s *target)
  1086. {
  1087. breakpoint_t *breakpoint = target->breakpoints;
  1088. /* set any pending breakpoints */
  1089. while (breakpoint)
  1090. {
  1091. if (breakpoint->set == 0)
  1092. arm7_9_set_breakpoint(target, breakpoint);
  1093. breakpoint = breakpoint->next;
  1094. }
  1095. }
  1096. void arm7_9_disable_bkpts_and_wpts(struct target_s *target)
  1097. {
  1098. breakpoint_t *breakpoint = target->breakpoints;
  1099. watchpoint_t *watchpoint = target->watchpoints;
  1100. /* set any pending breakpoints */
  1101. while (breakpoint)
  1102. {
  1103. if (breakpoint->set != 0)
  1104. arm7_9_unset_breakpoint(target, breakpoint);
  1105. breakpoint = breakpoint->next;
  1106. }
  1107. while (watchpoint)
  1108. {
  1109. if (watchpoint->set != 0)
  1110. arm7_9_unset_watchpoint(target, watchpoint);
  1111. watchpoint = watchpoint->next;
  1112. }
  1113. }
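/* Resume execution, optionally at a new address.
 * If a breakpoint is set at the resume PC, it is temporarily removed, the core
 * is single-stepped over that instruction and the breakpoint is re-armed before
 * the real resume. DBGACK (and, for a real resume, INTDIS) are deasserted and
 * the RESTART instruction is issued.
 *
 * Illustrative call (hypothetical caller, not part of this file):
 *   arm7_9_resume(target, 1, 0, 1, 0);  // continue at current PC, handle
 *                                       // breakpoints, no debug execution
 */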
  1114. int arm7_9_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
  1115. {
  1116. armv4_5_common_t *armv4_5 = target->arch_info;
  1117. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1118. breakpoint_t *breakpoint = target->breakpoints;
  1119. reg_t *dbg_ctrl = &arm7_9->eice_cache->reg_list[EICE_DBG_CTRL];
  1120. DEBUG("");
  1121. if (target->state != TARGET_HALTED)
  1122. {
  1123. WARNING("target not halted");
  1124. return ERROR_TARGET_NOT_HALTED;
  1125. }
  1126. if (!debug_execution)
  1127. {
  1128. target_free_all_working_areas(target);
  1129. }
  1130. /* current = 1: continue on current pc, otherwise continue at <address> */
  1131. if (!current)
  1132. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
  1133. /* the front-end may request us not to handle breakpoints */
  1134. if (handle_breakpoints)
  1135. {
  1136. if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
  1137. {
  1138. DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
  1139. arm7_9_unset_breakpoint(target, breakpoint);
  1140. DEBUG("enable single-step");
  1141. arm7_9->enable_single_step(target);
  1142. target->debug_reason = DBG_REASON_SINGLESTEP;
  1143. arm7_9_restore_context(target);
  1144. if (armv4_5->core_state == ARMV4_5_STATE_ARM)
  1145. arm7_9->branch_resume(target);
  1146. else if (armv4_5->core_state == ARMV4_5_STATE_THUMB)
  1147. {
  1148. arm7_9->branch_resume_thumb(target);
  1149. }
  1150. else
  1151. {
  1152. ERROR("unhandled core state");
  1153. exit(-1);
  1154. }
  1155. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGACK, 1, 0);
  1156. embeddedice_write_reg(dbg_ctrl, buf_get_u32(dbg_ctrl->value, 0, dbg_ctrl->size));
  1157. arm7_9_execute_sys_speed(target);
  1158. DEBUG("disable single-step");
  1159. arm7_9->disable_single_step(target);
  1160. arm7_9_debug_entry(target);
  1161. DEBUG("new PC after step: 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1162. DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
  1163. arm7_9_set_breakpoint(target, breakpoint);
  1164. }
  1165. }
  1166. /* enable any pending breakpoints and watchpoints */
  1167. arm7_9_enable_breakpoints(target);
  1168. arm7_9_enable_watchpoints(target);
  1169. arm7_9_restore_context(target);
  1170. if (armv4_5->core_state == ARMV4_5_STATE_ARM)
  1171. {
  1172. arm7_9->branch_resume(target);
  1173. }
  1174. else if (armv4_5->core_state == ARMV4_5_STATE_THUMB)
  1175. {
  1176. arm7_9->branch_resume_thumb(target);
  1177. }
  1178. else
  1179. {
  1180. ERROR("unhandled core state");
  1181. exit(-1);
  1182. }
  1183. /* deassert DBGACK and INTDIS */
  1184. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGACK, 1, 0);
  1185. /* INTDIS only when we really resume, not during debug execution */
  1186. if (!debug_execution)
  1187. buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_INTDIS, 1, 0);
  1188. embeddedice_write_reg(dbg_ctrl, buf_get_u32(dbg_ctrl->value, 0, dbg_ctrl->size));
  1189. arm7_9_restart_core(target);
  1190. target->debug_reason = DBG_REASON_NOTHALTED;
  1191. if (!debug_execution)
  1192. {
  1193. /* registers are now invalid */
  1194. armv4_5_invalidate_core_regs(target);
  1195. target->state = TARGET_RUNNING;
  1196. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  1197. }
  1198. else
  1199. {
  1200. target->state = TARGET_DEBUG_RUNNING;
  1201. target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
  1202. }
  1203. DEBUG("target resumed");
  1204. return ERROR_OK;
  1205. }
  1206. void arm7_9_enable_eice_step(target_t *target)
  1207. {
  1208. armv4_5_common_t *armv4_5 = target->arch_info;
  1209. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1210. /* setup an inverse breakpoint on the current PC
  1211. * - comparator 1 matches the current address
  1212. * - rangeout from comparator 1 is connected to comparator 0 rangein
  1213. * - comparator 0 matches any address, as long as rangein is low */
  1214. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK], 0xffffffff);
  1215. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK], 0xffffffff);
  1216. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE], 0x100);
  1217. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK], 0x77);
  1218. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_VALUE], buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  1219. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_MASK], 0);
  1220. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_DATA_MASK], 0xffffffff);
  1221. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE], 0x0);
  1222. embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_MASK], 0xf7);
  1223. }
  1224. void arm7_9_disable_eice_step(target_t *target)
  1225. {
  1226. armv4_5_common_t *armv4_5 = target->arch_info;
  1227. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1228. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_ADDR_MASK]);
  1229. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_DATA_MASK]);
  1230. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_VALUE]);
  1231. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W0_CONTROL_MASK]);
  1232. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_VALUE]);
  1233. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W1_ADDR_MASK]);
  1234. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W1_DATA_MASK]);
  1235. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_MASK]);
  1236. embeddedice_store_reg(&arm7_9->eice_cache->reg_list[EICE_W1_CONTROL_VALUE]);
  1237. }
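/* Single-step one instruction.
 * Any breakpoint at the current PC is removed for the duration of the step, the
 * core-specific enable_single_step() hook (e.g. the EmbeddedICE-based
 * arm7_9_enable_eice_step() above) arranges for only the next instruction to
 * execute, the core is restarted at system speed, and debug entry re-reads the
 * register context. */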
  1238. int arm7_9_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
  1239. {
  1240. armv4_5_common_t *armv4_5 = target->arch_info;
  1241. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1242. breakpoint_t *breakpoint = NULL;
  1243. if (target->state != TARGET_HALTED)
  1244. {
  1245. WARNING("target not halted");
  1246. return ERROR_TARGET_NOT_HALTED;
  1247. }
  1248. /* current = 1: continue on current pc, otherwise continue at <address> */
  1249. if (!current)
  1250. buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
  1251. /* the front-end may request us not to handle breakpoints */
  1252. if (handle_breakpoints)
  1253. if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
  1254. arm7_9_unset_breakpoint(target, breakpoint);
  1255. target->debug_reason = DBG_REASON_SINGLESTEP;
  1256. arm7_9_restore_context(target);
  1257. arm7_9->enable_single_step(target);
  1258. if (armv4_5->core_state == ARMV4_5_STATE_ARM)
  1259. {
  1260. arm7_9->branch_resume(target);
  1261. }
  1262. else if (armv4_5->core_state == ARMV4_5_STATE_THUMB)
  1263. {
  1264. arm7_9->branch_resume_thumb(target);
  1265. }
  1266. else
  1267. {
  1268. ERROR("unhandled core state");
  1269. exit(-1);
  1270. }
  1271. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  1272. arm7_9_execute_sys_speed(target);
  1273. arm7_9->disable_single_step(target);
  1274. /* registers are now invalid */
  1275. armv4_5_invalidate_core_regs(target);
  1276. arm7_9_debug_entry(target);
  1277. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  1278. if (breakpoint)
  1279. arm7_9_set_breakpoint(target, breakpoint);
  1280. DEBUG("target stepped");
  1281. return ERROR_OK;
  1282. }
  1283. int arm7_9_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
  1284. {
  1285. u32* reg_p[16];
  1286. u32 value;
  1287. int retval;
  1288. armv4_5_common_t *armv4_5 = target->arch_info;
  1289. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1290. enum armv4_5_mode reg_mode = ((armv4_5_core_reg_t*)ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).arch_info)->mode;
  1291. if ((num < 0) || (num > 16))
  1292. return ERROR_INVALID_ARGUMENTS;
  1293. if ((mode != ARMV4_5_MODE_ANY)
  1294. && (mode != armv4_5->core_mode)
  1295. && (reg_mode != ARMV4_5_MODE_ANY))
  1296. {
  1297. u32 tmp_cpsr;
  1298. /* change processor mode (mask T bit) */
  1299. tmp_cpsr = buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & 0xE0;
  1300. tmp_cpsr |= mode;
  1301. tmp_cpsr &= ~0x20;
  1302. arm7_9->write_xpsr_im8(target, tmp_cpsr & 0xff, 0, 0);
  1303. }
  1304. if ((num >= 0) && (num <= 15))
  1305. {
  1306. /* read a normal core register */
  1307. reg_p[num] = &value;
  1308. arm7_9->read_core_regs(target, 1 << num, reg_p);
  1309. }
  1310. else
  1311. {
  1312. /* read a program status register
  1313. * if the register mode is MODE_ANY, we read the cpsr, otherwise a spsr
  1314. */
  1315. armv4_5_core_reg_t *arch_info = ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).arch_info;
  1316. int spsr = (arch_info->mode == ARMV4_5_MODE_ANY) ? 0 : 1;
  1317. arm7_9->read_xpsr(target, &value, spsr);
  1318. }
  1319. if ((retval = jtag_execute_queue()) != ERROR_OK)
  1320. {
  1321. ERROR("JTAG failure");
  1322. exit(-1);
  1323. }
  1324. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).valid = 1;
  1325. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).dirty = 0;
  1326. buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).value, 0, 32, value);
  1327. if ((mode != ARMV4_5_MODE_ANY)
  1328. && (mode != armv4_5->core_mode)
  1329. && (reg_mode != ARMV4_5_MODE_ANY)) {
  1330. /* restore processor mode (mask T bit) */
  1331. arm7_9->write_xpsr_im8(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & ~0x20, 0, 0);
  1332. }
  1333. return ERROR_OK;
  1334. }
  1335. int arm7_9_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
  1336. {
  1337. u32 reg[16];
  1338. int retval;
  1339. armv4_5_common_t *armv4_5 = target->arch_info;
  1340. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1341. enum armv4_5_mode reg_mode = ((armv4_5_core_reg_t*)ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).arch_info)->mode;
  1342. if ((num < 0) || (num > 16))
  1343. return ERROR_INVALID_ARGUMENTS;
  1344. if ((mode != ARMV4_5_MODE_ANY)
  1345. && (mode != armv4_5->core_mode)
  1346. && (reg_mode != ARMV4_5_MODE_ANY)) {
  1347. u32 tmp_cpsr;
  1348. /* change processor mode (mask T bit) */
  1349. tmp_cpsr = buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & 0xE0;
  1350. tmp_cpsr |= mode;
  1351. tmp_cpsr &= ~0x20;
  1352. arm7_9->write_xpsr_im8(target, tmp_cpsr & 0xff, 0, 0);
  1353. }
  1354. if ((num >= 0) && (num <= 15))
  1355. {
  1356. /* write a normal core register */
  1357. reg[num] = value;
  1358. arm7_9->write_core_regs(target, 1 << num, reg);
  1359. }
  1360. else
  1361. {
  1362. /* write a program status register
  1363. * if the register mode is MODE_ANY, we write the cpsr, otherwise a spsr
  1364. */
  1365. armv4_5_core_reg_t *arch_info = ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).arch_info;
  1366. int spsr = (arch_info->mode == ARMV4_5_MODE_ANY) ? 0 : 1;
  1367. /* if we're writing the CPSR, mask the T bit */
  1368. if (!spsr)
  1369. value &= ~0x20;
  1370. arm7_9->write_xpsr(target, value, spsr);
  1371. }
  1372. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).valid = 1;
  1373. ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, mode, num).dirty = 0;
  1374. if ((mode != ARMV4_5_MODE_ANY)
  1375. && (mode != armv4_5->core_mode)
  1376. && (reg_mode != ARMV4_5_MODE_ANY)) {
  1377. /* restore processor mode (mask T bit) */
  1378. arm7_9->write_xpsr_im8(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & ~0x20, 0, 0);
  1379. }
  1380. if ((retval = jtag_execute_queue()) != ERROR_OK)
  1381. {
  1382. ERROR("JTAG failure");
  1383. exit(-1);
  1384. }
  1385. return ERROR_OK;
  1386. }
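/* Read target memory through the core.
 * The start address is loaded into r0, then chunks of up to 14 words (or
 * individual half-words/bytes) are loaded into r1-r14 at system speed and read
 * back through the debug scan chain into the caller's buffer. Registers used
 * for the transfer are marked dirty so they get restored later, and a switch
 * into ABT mode is detected and reported as a data abort.
 *
 * Illustrative call (hypothetical caller, not part of this file):
 *   u8 buf[64];
 *   arm7_9_read_memory(target, 0x40000000, 4, 16, buf);  // 16 words of 4 bytes
 */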
  1387. int arm7_9_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
  1388. {
  1389. armv4_5_common_t *armv4_5 = target->arch_info;
  1390. arm7_9_common_t *arm7_9 = armv4_5->arch_info;
  1391. u32 reg[16];
  1392. int num_accesses = 0;
  1393. int thisrun_accesses;
  1394. int i;
  1395. u32 cpsr;
  1396. int retval;
  1397. int last_reg = 0;
  1398. DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
  1399. if (target->state != TARGET_HALTED)
  1400. {
  1401. WARNING("target not halted");
  1402. return ERROR_TARGET_NOT_HALTED;
  1403. }
  1404. /* sanitize arguments */
  1405. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1406. return ERROR_INVALID_ARGUMENTS;
  1407. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1408. return ERROR_TARGET_UNALIGNED_ACCESS;
  1409. /* load the base register with the address of the first word */
  1410. reg[0] = address;
  1411. arm7_9->write_core_regs(target, 0x1, reg);
	switch (size)
	{
		case 4:
			while (num_accesses < count)
			{
				u32 reg_list;
				thisrun_accesses = ((count - num_accesses) >= 14) ? 14 : (count - num_accesses);
				reg_list = (0xffff >> (15 - thisrun_accesses)) & 0xfffe;

				if (last_reg <= thisrun_accesses)
					last_reg = thisrun_accesses;

				arm7_9->load_word_regs(target, reg_list);

				/* fast memory reads are only safe when the target is running
				 * from a sufficiently high clock (32 kHz is usually too slow)
				 */
				if (arm7_9->fast_memory_access)
					arm7_9_execute_fast_sys_speed(target);
				else
					arm7_9_execute_sys_speed(target);

				arm7_9->read_core_regs_target_buffer(target, reg_list, buffer, 4);

				/* advance buffer, count number of accesses */
				buffer += thisrun_accesses * 4;
				num_accesses += thisrun_accesses;
			}
			break;
		case 2:
			while (num_accesses < count)
			{
				u32 reg_list;
				thisrun_accesses = ((count - num_accesses) >= 14) ? 14 : (count - num_accesses);
				reg_list = (0xffff >> (15 - thisrun_accesses)) & 0xfffe;

				for (i = 1; i <= thisrun_accesses; i++)
				{
					if (i > last_reg)
						last_reg = i;
					arm7_9->load_hword_reg(target, i);

					/* fast memory reads are only safe when the target is running
					 * from a sufficiently high clock (32 kHz is usually too slow)
					 */
					if (arm7_9->fast_memory_access)
						arm7_9_execute_fast_sys_speed(target);
					else
						arm7_9_execute_sys_speed(target);
				}

				arm7_9->read_core_regs_target_buffer(target, reg_list, buffer, 2);

				/* advance buffer, count number of accesses */
				buffer += thisrun_accesses * 2;
				num_accesses += thisrun_accesses;
			}
			break;
		case 1:
			while (num_accesses < count)
			{
				u32 reg_list;
				thisrun_accesses = ((count - num_accesses) >= 14) ? 14 : (count - num_accesses);
				reg_list = (0xffff >> (15 - thisrun_accesses)) & 0xfffe;

				for (i = 1; i <= thisrun_accesses; i++)
				{
					if (i > last_reg)
						last_reg = i;
					arm7_9->load_byte_reg(target, i);

					/* fast memory reads are only safe when the target is running
					 * from a sufficiently high clock (32 kHz is usually too slow)
					 */
					if (arm7_9->fast_memory_access)
						arm7_9_execute_fast_sys_speed(target);
					else
						arm7_9_execute_sys_speed(target);
				}

				arm7_9->read_core_regs_target_buffer(target, reg_list, buffer, 1);

				/* advance buffer, count number of accesses */
				buffer += thisrun_accesses * 1;
				num_accesses += thisrun_accesses;
			}
			break;
		default:
			ERROR("BUG: we shouldn't get here");
			exit(-1);
			break;
	}
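
	/* r0..last_reg were clobbered by the transfer above; mark them dirty so
	 * the cached values are written back to the core before it resumes.
	 */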
	for (i=0; i<=last_reg; i++)
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 1;

	arm7_9->read_xpsr(target, &cpsr, 0);
	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		ERROR("JTAG error while reading cpsr");
		exit(-1);
	}

	if (((cpsr & 0x1f) == ARMV4_5_MODE_ABT) && (armv4_5->core_mode != ARMV4_5_MODE_ABT))
	{
		WARNING("memory read caused data abort (address: 0x%8.8x, size: 0x%x, count: 0x%x)", address, size, count);

		arm7_9->write_xpsr_im8(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & ~0x20, 0, 0);

		return ERROR_TARGET_DATA_ABORT;
	}

	return ERROR_OK;
}
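
/* Write a block of target memory of the given access size.
 *
 * The mirror image of arm7_9_read_memory(): r0 is loaded with the start
 * address, the data registers r1..r14 are filled from the host buffer, and
 * the values are stored with a block store (word accesses) or one
 * halfword/byte store per register. DBGACK is cleared for the duration of
 * the transfer so the stores are not flagged to the memory system as debug
 * accesses, and the CPSR is checked afterwards to detect data aborts.
 */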
int arm7_9_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	arm7_9_common_t *arm7_9 = armv4_5->arch_info;
	reg_t *dbg_ctrl = &arm7_9->eice_cache->reg_list[EICE_DBG_CTRL];

	u32 reg[16];
	int num_accesses = 0;
	int thisrun_accesses;
	int i;
	u32 cpsr;
	int retval;
	int last_reg = 0;

	DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);

	if (target->state != TARGET_HALTED)
	{
		WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* sanitize arguments */
	if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
		return ERROR_INVALID_ARGUMENTS;

	if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
		return ERROR_TARGET_UNALIGNED_ACCESS;

	/* load the base register with the address of the first word */
	reg[0] = address;
	arm7_9->write_core_regs(target, 0x1, reg);

	/* Clear DBGACK, to make sure memory fetches work as expected */
	buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGACK, 1, 0);
	embeddedice_store_reg(dbg_ctrl);

	switch (size)
	{
		case 4:
			while (num_accesses < count)
			{
				u32 reg_list;
				thisrun_accesses = ((count - num_accesses) >= 14) ? 14 : (count - num_accesses);
				reg_list = (0xffff >> (15 - thisrun_accesses)) & 0xfffe;

				for (i = 1; i <= thisrun_accesses; i++)
				{
					if (i > last_reg)
						last_reg = i;
					reg[i] = target_buffer_get_u32(target, buffer);
					buffer += 4;
				}

				arm7_9->write_core_regs(target, reg_list, reg);

				arm7_9->store_word_regs(target, reg_list);

				/* fast memory writes are only safe when the target is running
				 * from a sufficiently high clock (32 kHz is usually too slow)
				 */
				if (arm7_9->fast_memory_access)
					arm7_9_execute_fast_sys_speed(target);
				else
					arm7_9_execute_sys_speed(target);

				num_accesses += thisrun_accesses;
			}
			break;
		case 2:
			while (num_accesses < count)
			{
				u32 reg_list;
				thisrun_accesses = ((count - num_accesses) >= 14) ? 14 : (count - num_accesses);
				reg_list = (0xffff >> (15 - thisrun_accesses)) & 0xfffe;

				for (i = 1; i <= thisrun_accesses; i++)
				{
					if (i > last_reg)
						last_reg = i;
					reg[i] = target_buffer_get_u16(target, buffer) & 0xffff;
					buffer += 2;
				}

				arm7_9->write_core_regs(target, reg_list, reg);

				for (i = 1; i <= thisrun_accesses; i++)
				{
					arm7_9->store_hword_reg(target, i);

					/* fast memory writes are only safe when the target is running
					 * from a sufficiently high clock (32 kHz is usually too slow)
					 */
					if (arm7_9->fast_memory_access)
						arm7_9_execute_fast_sys_speed(target);
					else
						arm7_9_execute_sys_speed(target);
				}

				num_accesses += thisrun_accesses;
			}
			break;
		case 1:
			while (num_accesses < count)
			{
				u32 reg_list;
				thisrun_accesses = ((count - num_accesses) >= 14) ? 14 : (count - num_accesses);
				reg_list = (0xffff >> (15 - thisrun_accesses)) & 0xfffe;

				for (i = 1; i <= thisrun_accesses; i++)
				{
					if (i > last_reg)
						last_reg = i;
					reg[i] = *buffer++ & 0xff;
				}

				arm7_9->write_core_regs(target, reg_list, reg);

				for (i = 1; i <= thisrun_accesses; i++)
				{
					arm7_9->store_byte_reg(target, i);

					/* fast memory writes are only safe when the target is running
					 * from a sufficiently high clock (32 kHz is usually too slow)
					 */
					if (arm7_9->fast_memory_access)
						arm7_9_execute_fast_sys_speed(target);
					else
						arm7_9_execute_sys_speed(target);
				}

				num_accesses += thisrun_accesses;
			}
			break;
		default:
			ERROR("BUG: we shouldn't get here");
			exit(-1);
			break;
	}

	/* Re-Set DBGACK */
	buf_set_u32(dbg_ctrl->value, EICE_DBG_CONTROL_DBGACK, 1, 1);
	embeddedice_store_reg(dbg_ctrl);

	for (i=0; i<=last_reg; i++)
		ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 1;

	arm7_9->read_xpsr(target, &cpsr, 0);
	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		ERROR("JTAG error while reading cpsr");
		exit(-1);
	}

	if (((cpsr & 0x1f) == ARMV4_5_MODE_ABT) && (armv4_5->core_mode != ARMV4_5_MODE_ABT))
	{
		WARNING("memory write caused data abort (address: 0x%8.8x, size: 0x%x, count: 0x%x)", address, size, count);

		arm7_9->write_xpsr_im8(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 8) & ~0x20, 0, 0);

		return ERROR_TARGET_DATA_ABORT;
	}

	return ERROR_OK;
}
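
/* Bulk (word) download via the Debug Communications Channel.
 *
 * A six-instruction receive loop is placed in the target's working area and
 * started with the destination address in r0. The host then pushes one word
 * at a time into the EmbeddedICE comms data register; the target-side loop
 * polls the comms control register, stores each received word and
 * post-increments r0. If DCC downloads are disabled or no working area is
 * available, this falls back to the regular write_memory path.
 */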
int arm7_9_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
{
	armv4_5_common_t *armv4_5 = target->arch_info;
	arm7_9_common_t *arm7_9 = armv4_5->arch_info;
	enum armv4_5_state core_state = armv4_5->core_state;
	u32 r0 = buf_get_u32(armv4_5->core_cache->reg_list[0].value, 0, 32);
	u32 r1 = buf_get_u32(armv4_5->core_cache->reg_list[1].value, 0, 32);
	u32 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
	int i;
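
	/* target-side receive loop: r0 is the destination pointer (set up below),
	 * r1 is scratch; the loop spins on the DCC control register until the
	 * host has written a word, then reads it from the DCC data register and
	 * stores it through r0
	 */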
	u32 dcc_code[] =
	{
		0xee101e10,	/* w: mrc p14, 0, r1, c0, c0	(read DCC control)	*/
		0xe3110001,	/*    tst r1, #1		(data available?)	*/
		0x0afffffc,	/*    beq w			(no - keep polling)	*/
		0xee111e10,	/*    mrc p14, 0, r1, c1, c0	(read DCC data)		*/
		0xe4801004,	/*    str r1, [r0], #4		(store and advance)	*/
		0xeafffff9	/*    b   w			(wait for next word)	*/
	};

	if (!arm7_9->dcc_downloads)
		return target->type->write_memory(target, address, 4, count, buffer);

	/* regrab previously allocated working_area, or allocate a new one */
	if (!arm7_9->dcc_working_area)
	{
		u8 dcc_code_buf[6 * 4];

		/* make sure we have a working area */
		if (target_alloc_working_area(target, 24, &arm7_9->dcc_working_area) != ERROR_OK)
		{
			INFO("no working area available, falling back to memory writes");
			return target->type->write_memory(target, address, 4, count, buffer);
		}

		/* copy target instructions to target endianness */
		for (i = 0; i < 6; i++)
		{
			target_buffer_set_u32(target, dcc_code_buf + i*4, dcc_code[i]);
		}

		/* write DCC code to working area */
		target->type->write_memory(target, arm7_9->dcc_working_area->address, 4, 6, dcc_code_buf);
	}

	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, address);
	armv4_5->core_cache->reg_list[0].valid = 1;
	armv4_5->core_cache->reg_list[0].dirty = 1;
	armv4_5->core_state = ARMV4_5_STATE_ARM;

	arm7_9_resume(target, 0, arm7_9->dcc_working_area->address, 1, 1);
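
	/* feed the data one word at a time through the EmbeddedICE comms data
	 * register; the receive loop running on the target stores each word and
	 * advances r0
	 */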
	for (i = 0; i < count; i++)
	{
		embeddedice_write_reg(&arm7_9->eice_cache->reg_list[EICE_COMMS_DATA], target_buffer_get_u32(target, buffer));
		buffer += 4;
	}

	target->type->halt(target);

	while (target->state != TARGET_HALTED)
		target->type->poll(target);

	/* restore target state */
	buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, r0);
	armv4_5->core_cache->reg_list[0].valid = 1;
	armv4_5->core_cache->reg_list[0].dirty = 1;
	buf_set_u32(armv4_5->core_cache->reg_list[1].value, 0, 32, r1);
	armv4_5->core_cache->reg_list[1].valid = 1;
	armv4_5->core_cache->reg_list[1].dirty = 1;
	buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
	armv4_5->core_cache->reg_list[15].valid = 1;
	armv4_5->core_cache->reg_list[15].dirty = 1;
	armv4_5->core_state = core_state;

	return ERROR_OK;
}
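
/* Register the "arm7_9" command group and its sub-commands, then pull in the
 * generic armv4_5 commands. The handlers below all follow the same pattern:
 * resolve the ARM7/9 arch pointers, validate arguments and target state, then
 * apply or report the requested setting.
 */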
int arm7_9_register_commands(struct command_context_s *cmd_ctx)
{
	command_t *arm7_9_cmd;

	arm7_9_cmd = register_command(cmd_ctx, NULL, "arm7_9", NULL, COMMAND_ANY, "arm7/9 specific commands");

	register_command(cmd_ctx, arm7_9_cmd, "write_xpsr", handle_arm7_9_write_xpsr_command, COMMAND_EXEC, "write program status register <value> <not cpsr|spsr>");
	register_command(cmd_ctx, arm7_9_cmd, "write_xpsr_im8", handle_arm7_9_write_xpsr_im8_command, COMMAND_EXEC, "write program status register <8bit immediate> <rotate> <not cpsr|spsr>");

	register_command(cmd_ctx, arm7_9_cmd, "write_core_reg", handle_arm7_9_write_core_reg_command, COMMAND_EXEC, "write core register <num> <mode> <value>");

	register_command(cmd_ctx, arm7_9_cmd, "sw_bkpts", handle_arm7_9_sw_bkpts_command, COMMAND_EXEC, "support for software breakpoints <enable|disable>");
	register_command(cmd_ctx, arm7_9_cmd, "force_hw_bkpts", handle_arm7_9_force_hw_bkpts_command, COMMAND_EXEC, "use hardware breakpoints for all breakpoints (disables sw breakpoint support) <enable|disable>");
	register_command(cmd_ctx, arm7_9_cmd, "dbgrq", handle_arm7_9_dbgrq_command,
		COMMAND_ANY, "use EmbeddedICE dbgrq instead of breakpoint for target halt requests <enable|disable>");
	register_command(cmd_ctx, arm7_9_cmd, "fast_writes", handle_arm7_9_fast_memory_access_command,
		COMMAND_ANY, "(deprecated, see: arm7_9 fast_memory_access)");
	register_command(cmd_ctx, arm7_9_cmd, "fast_memory_access", handle_arm7_9_fast_memory_access_command,
		COMMAND_ANY, "use fast but potentially unsafe memory accesses instead of slower but safer accesses <enable|disable>");
	register_command(cmd_ctx, arm7_9_cmd, "dcc_downloads", handle_arm7_9_dcc_downloads_command,
		COMMAND_ANY, "use DCC downloads for larger memory writes <enable|disable>");

	armv4_5_register_commands(cmd_ctx);

	return ERROR_OK;
}
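
/* "arm7_9 write_xpsr <value> <not cpsr|spsr>": write a raw 32-bit value to
 * the CPSR (second argument 0) or the current mode's SPSR (non-zero). The
 * T bit is masked when writing the CPSR so the core's ARM/Thumb state is not
 * changed unintentionally.
 */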
int handle_arm7_9_write_xpsr_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
{
	u32 value;
	int spsr;
	int retval;
	target_t *target = get_current_target(cmd_ctx);
	armv4_5_common_t *armv4_5;
	arm7_9_common_t *arm7_9;

	if (arm7_9_get_arch_pointers(target, &armv4_5, &arm7_9) != ERROR_OK)
	{
		command_print(cmd_ctx, "current target isn't an ARM7/ARM9 target");
		return ERROR_OK;
	}

	if (target->state != TARGET_HALTED)
	{
		command_print(cmd_ctx, "can't write registers while running");
		return ERROR_OK;
	}

	if (argc < 2)
	{
		command_print(cmd_ctx, "usage: write_xpsr <value> <not cpsr|spsr>");
		return ERROR_OK;
	}

	value = strtoul(args[0], NULL, 0);
	spsr = strtol(args[1], NULL, 0);

	/* if we're writing the CPSR, mask the T bit */
	if (!spsr)
		value &= ~0x20;

	arm7_9->write_xpsr(target, value, spsr);
	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		ERROR("JTAG error while writing to xpsr");
		exit(-1);
	}

	return ERROR_OK;
}

int handle_arm7_9_write_xpsr_im8_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
{
	u32 value;
	int rotate;
	int spsr;
	int retval;
	target_t *target = get_current_target(cmd_ctx);
	armv4_5_common_t *armv4_5;
	arm7_9_common_t *arm7_9;

	if (arm7_9_get_arch_pointers(target, &armv4_5, &arm7_9) != ERROR_OK)
	{
		command_print(cmd_ctx, "current target isn't an ARM7/ARM9 target");
		return ERROR_OK;
	}

	if (target->state != TARGET_HALTED)
	{
		command_print(cmd_ctx, "can't write registers while running");
		return ERROR_OK;
	}

	if (argc < 3)
	{
		command_print(cmd_ctx, "usage: write_xpsr_im8 <im8> <rotate> <not cpsr|spsr>");
		return ERROR_OK;
	}

	value = strtoul(args[0], NULL, 0);
	rotate = strtol(args[1], NULL, 0);
	spsr = strtol(args[2], NULL, 0);

	arm7_9->write_xpsr_im8(target, value, rotate, spsr);
	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		ERROR("JTAG error while writing 8-bit immediate to xpsr");
		exit(-1);
	}

	return ERROR_OK;
}

int handle_arm7_9_write_core_reg_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
{
	u32 value;
	u32 mode;
	int num;
	target_t *target = get_current_target(cmd_ctx);
	armv4_5_common_t *armv4_5;
	arm7_9_common_t *arm7_9;

	if (arm7_9_get_arch_pointers(target, &armv4_5, &arm7_9) != ERROR_OK)
	{
		command_print(cmd_ctx, "current target isn't an ARM7/ARM9 target");
		return ERROR_OK;
	}

	if (target->state != TARGET_HALTED)
	{
		command_print(cmd_ctx, "can't write registers while running");
		return ERROR_OK;
	}

	if (argc < 3)
	{
		command_print(cmd_ctx, "usage: write_core_reg <num> <mode> <value>");
		return ERROR_OK;
	}

	num = strtol(args[0], NULL, 0);
	mode = strtoul(args[1], NULL, 0);
	value = strtoul(args[2], NULL, 0);

	arm7_9_write_core_reg(target, num, mode, value);

	return ERROR_OK;
}

int handle_arm7_9_sw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
{
	target_t *target = get_current_target(cmd_ctx);
	armv4_5_common_t *armv4_5;
	arm7_9_common_t *arm7_9;

	if (arm7_9_get_arch_pointers(target, &armv4_5, &arm7_9) != ERROR_OK)
	{
		command_print(cmd_ctx, "current target isn't an ARM7/ARM9 target");
		return ERROR_OK;
	}

	if (argc == 0)
	{
		command_print(cmd_ctx, "software breakpoints %s", (arm7_9->sw_bkpts_enabled) ? "enabled" : "disabled");
		return ERROR_OK;
	}

	if (strcmp("enable", args[0]) == 0)
	{
		if (arm7_9->sw_bkpts_use_wp)
		{
			arm7_9_enable_sw_bkpts(target);
		}
		else
		{
			arm7_9->sw_bkpts_enabled = 1;
		}
	}
	else if (strcmp("disable", args[0]) == 0)
	{
		if (arm7_9->sw_bkpts_use_wp)
		{
			arm7_9_disable_sw_bkpts(target);
		}
		else
		{
			arm7_9->sw_bkpts_enabled = 0;
		}
	}
	else
	{
		command_print(cmd_ctx, "usage: arm7_9 sw_bkpts <enable|disable>");
	}

	command_print(cmd_ctx, "software breakpoints %s", (arm7_9->sw_bkpts_enabled) ? "enabled" : "disabled");

	return ERROR_OK;
}

int handle_arm7_9_force_hw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
{
	target_t *target = get_current_target(cmd_ctx);
	armv4_5_common_t *armv4_5;
	arm7_9_common_t *arm7_9;

	if (arm7_9_get_arch_pointers(target, &armv4_5, &arm7_9) != ERROR_OK)
	{
		command_print(cmd_ctx, "current target isn't an ARM7/ARM9 target");
		return ERROR_OK;
	}

	if ((argc >= 1) && (strcmp("enable", args[0]) == 0))
	{
		arm7_9->force_hw_bkpts = 1;
		if (arm7_9->sw_bkpts_use_wp)
		{
			arm7_9_disable_sw_bkpts(target);
		}
	}
	else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))
	{
		arm7_9->force_hw_bkpts = 0;
	}
	else
	{
		command_print(cmd_ctx, "usage: arm7_9 force_hw_bkpts <enable|disable>");
	}

	command_print(cmd_ctx, "force hardware breakpoints %s", (arm7_9->force_hw_bkpts) ? "enabled" : "disabled");

	return ERROR_OK;
}

int handle_arm7_9_dbgrq_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
{
	target_t *target = get_current_target(cmd_ctx);
	armv4_5_common_t *armv4_5;
	arm7_9_common_t *arm7_9;

	if (arm7_9_get_arch_pointers(target, &armv4_5, &arm7_9) != ERROR_OK)
	{
		command_print(cmd_ctx, "current target isn't an ARM7/ARM9 target");
		return ERROR_OK;
	}

	if (argc > 0)
	{
		if (strcmp("enable", args[0]) == 0)
		{
			arm7_9->use_dbgrq = 1;
		}
		else if (strcmp("disable", args[0]) == 0)
		{
			arm7_9->use_dbgrq = 0;
		}
		else
		{
			command_print(cmd_ctx, "usage: arm7_9 dbgrq <enable|disable>");
		}
	}

	command_print(cmd_ctx, "use of EmbeddedICE dbgrq instead of breakpoint for target halt %s", (arm7_9->use_dbgrq) ? "enabled" : "disabled");

	return ERROR_OK;
}

int handle_arm7_9_fast_memory_access_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
{
	target_t *target = get_current_target(cmd_ctx);
	armv4_5_common_t *armv4_5;
	arm7_9_common_t *arm7_9;

	if (arm7_9_get_arch_pointers(target, &armv4_5, &arm7_9) != ERROR_OK)
	{
		command_print(cmd_ctx, "current target isn't an ARM7/ARM9 target");
		return ERROR_OK;
	}

	if (argc > 0)
	{
		if (strcmp("enable", args[0]) == 0)
		{
			arm7_9->fast_memory_access = 1;
		}
		else if (strcmp("disable", args[0]) == 0)
		{
			arm7_9->fast_memory_access = 0;
		}
		else
		{
			command_print(cmd_ctx, "usage: arm7_9 fast_memory_access <enable|disable>");
		}
	}

	command_print(cmd_ctx, "fast memory access is %s", (arm7_9->fast_memory_access) ? "enabled" : "disabled");

	return ERROR_OK;
}

int handle_arm7_9_dcc_downloads_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
{
	target_t *target = get_current_target(cmd_ctx);
	armv4_5_common_t *armv4_5;
	arm7_9_common_t *arm7_9;

	if (arm7_9_get_arch_pointers(target, &armv4_5, &arm7_9) != ERROR_OK)
	{
		command_print(cmd_ctx, "current target isn't an ARM7/ARM9 target");
		return ERROR_OK;
	}

	if (argc > 0)
	{
		if (strcmp("enable", args[0]) == 0)
		{
			arm7_9->dcc_downloads = 1;
		}
		else if (strcmp("disable", args[0]) == 0)
		{
			arm7_9->dcc_downloads = 0;
		}
		else
		{
			command_print(cmd_ctx, "usage: arm7_9 dcc_downloads <enable|disable>");
		}
	}

	command_print(cmd_ctx, "dcc downloads are %s", (arm7_9->dcc_downloads) ? "enabled" : "disabled");

	return ERROR_OK;
}
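
/* Initialize the common ARM7/9 state: conservative defaults (two watchpoint
 * units free, DBGRQ, fast memory access and DCC downloads disabled), hook the
 * JTAG event callback, and wire the armv4_5 register access callbacks to the
 * ARM7/9 implementations before handing off to armv4_5_init_arch_info().
 */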
int arm7_9_init_arch_info(target_t *target, arm7_9_common_t *arm7_9)
{
	armv4_5_common_t *armv4_5 = &arm7_9->armv4_5_common;

	arm7_9->common_magic = ARM7_9_COMMON_MAGIC;

	arm_jtag_setup_connection(&arm7_9->jtag_info);
	arm7_9->wp_available = 2;
	arm7_9->wp0_used = 0;
	arm7_9->wp1_used = 0;
	arm7_9->force_hw_bkpts = 0;
	arm7_9->use_dbgrq = 0;
	arm7_9->has_etm = 0;

	arm7_9->reinit_embeddedice = 0;

	arm7_9->dcc_working_area = NULL;

	arm7_9->fast_memory_access = 0;
	arm7_9->dcc_downloads = 0;

	jtag_register_event_callback(arm7_9_jtag_callback, target);

	armv4_5->arch_info = arm7_9;
	armv4_5->read_core_reg = arm7_9_read_core_reg;
	armv4_5->write_core_reg = arm7_9_write_core_reg;
	armv4_5->full_context = arm7_9_full_context;

	armv4_5_init_arch_info(target, armv4_5);

	return ERROR_OK;
}