You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

1610 lines
45 KiB

  1. /***************************************************************************
  2. * Copyright (C) 2005 by Dominic Rath *
  3. * Dominic.Rath@gmx.de *
  4. * *
  5. * Copyright (C) 2006 by Magnus Lundin *
  6. * lundin@mlu.mine.nu *
  7. * *
  8. * Copyright (C) 2008 by Spencer Oliver *
  9. * spen@spen-soft.co.uk *
  10. * *
  11. * Copyright (C) 2009 by Dirk Behme *
  12. * dirk.behme@gmail.com - copy from cortex_m3 *
  13. * *
  14. * This program is free software; you can redistribute it and/or modify *
  15. * it under the terms of the GNU General Public License as published by *
  16. * the Free Software Foundation; either version 2 of the License, or *
  17. * (at your option) any later version. *
  18. * *
  19. * This program is distributed in the hope that it will be useful, *
  20. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  21. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  22. * GNU General Public License for more details. *
  23. * *
  24. * You should have received a copy of the GNU General Public License *
  25. * along with this program; if not, write to the *
  26. * Free Software Foundation, Inc., *
  27. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
  28. * *
  29. * Cortex-A8(tm) TRM, ARM DDI 0344H *
  30. * *
  31. ***************************************************************************/
  32. #ifdef HAVE_CONFIG_H
  33. #include "config.h"
  34. #endif
  35. #include "cortex_a8.h"
  36. #include "armv7a.h"
  37. #include "armv4_5.h"
  38. #include "target_request.h"
  39. #include "target_type.h"
  40. static int cortex_a8_poll(target_t *target);
  41. static int cortex_a8_debug_entry(target_t *target);
  42. static int cortex_a8_restore_context(target_t *target);
  43. static int cortex_a8_set_breakpoint(struct target_s *target,
  44. struct breakpoint *breakpoint, uint8_t matchmode);
  45. static int cortex_a8_unset_breakpoint(struct target_s *target,
  46. struct breakpoint *breakpoint);
  47. static int cortex_a8_dap_read_coreregister_u32(target_t *target,
  48. uint32_t *value, int regnum);
  49. static int cortex_a8_dap_write_coreregister_u32(target_t *target,
  50. uint32_t value, int regnum);
  51. /*
  52. * FIXME do topology discovery using the ROM; don't
  53. * assume this is an OMAP3.
  54. */
  55. #define swjdp_memoryap 0
  56. #define swjdp_debugap 1
  57. #define OMAP3530_DEBUG_BASE 0x54011000
  58. /*
  59. * Cortex-A8 Basic debug access, very low level assumes state is saved
  60. */
/*
 * Unlock the core's debug registers and clear the sticky power-down
 * status so the registers in the core power domain become accessible.
 * Very low level; assumes target state has already been saved.
 *
 * Returns the status of the PRSR read; the unlock write is best-effort
 * and simply retried once, since the debug port may be uninitialised.
 */
static int cortex_a8_init_debug_access(target_t *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct swjdp_common *swjdp = &armv7a->swjdp_info;
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Unlocking the debug registers for modification */
	/* The debugport might be uninitialised so try twice */
	retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK)
		mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information */
	cortex_a8_poll(target);

	return retval;
}
  82. int cortex_a8_exec_opcode(target_t *target, uint32_t opcode)
  83. {
  84. uint32_t dscr;
  85. int retval;
  86. struct armv7a_common *armv7a = target_to_armv7a(target);
  87. struct swjdp_common *swjdp = &armv7a->swjdp_info;
  88. LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
  89. do
  90. {
  91. retval = mem_ap_read_atomic_u32(swjdp,
  92. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  93. if (retval != ERROR_OK)
  94. {
  95. LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
  96. return retval;
  97. }
  98. }
  99. while ((dscr & (1 << DSCR_INSTR_COMP)) == 0); /* Wait for InstrCompl bit to be set */
  100. mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
  101. do
  102. {
  103. retval = mem_ap_read_atomic_u32(swjdp,
  104. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  105. if (retval != ERROR_OK)
  106. {
  107. LOG_ERROR("Could not read DSCR register");
  108. return retval;
  109. }
  110. }
  111. while ((dscr & (1 << DSCR_INSTR_COMP)) == 0); /* Wait for InstrCompl bit to be set */
  112. return retval;
  113. }
  114. /**************************************************************************
  115. Read core register with very few exec_opcode, fast but needs work_area.
  116. This can cause problems with MMU active.
  117. **************************************************************************/
  118. static int cortex_a8_read_regs_through_mem(target_t *target, uint32_t address,
  119. uint32_t * regfile)
  120. {
  121. int retval = ERROR_OK;
  122. struct armv7a_common *armv7a = target_to_armv7a(target);
  123. struct swjdp_common *swjdp = &armv7a->swjdp_info;
  124. cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
  125. cortex_a8_dap_write_coreregister_u32(target, address, 0);
  126. cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0));
  127. dap_ap_select(swjdp, swjdp_memoryap);
  128. mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
  129. dap_ap_select(swjdp, swjdp_debugap);
  130. return retval;
  131. }
  132. static int cortex_a8_read_cp(target_t *target, uint32_t *value, uint8_t CP,
  133. uint8_t op1, uint8_t CRn, uint8_t CRm, uint8_t op2)
  134. {
  135. int retval;
  136. struct armv7a_common *armv7a = target_to_armv7a(target);
  137. struct swjdp_common *swjdp = &armv7a->swjdp_info;
  138. cortex_a8_exec_opcode(target, ARMV4_5_MRC(CP, op1, 0, CRn, CRm, op2));
  139. /* Move R0 to DTRTX */
  140. cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
  141. /* Read DCCTX */
  142. retval = mem_ap_read_atomic_u32(swjdp,
  143. armv7a->debug_base + CPUDBG_DTRTX, value);
  144. return retval;
  145. }
/*
 * Write a coprocessor register: the value goes host -> DTRRX, an MRC
 * from p14 pulls it into r0, and an MCR moves r0 into the target CP
 * register.  Clobbers r0.  Returns the status of the DTRRX write
 * (opcode execution status is not checked — pre-existing behaviour).
 */
static int cortex_a8_write_cp(target_t *target, uint32_t value,
		uint8_t CP, uint8_t op1, uint8_t CRn, uint8_t CRm, uint8_t op2)
{
	int retval;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct swjdp_common *swjdp = &armv7a->swjdp_info;

	LOG_DEBUG("CP%i, CRn %i, value 0x%08" PRIx32, CP, CRn, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (dscr & (1 << DSCR_DTR_RX_FULL))
	{
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Drain stale DCCRX contents into r0 with
		 * MRC p14, 0, r0, c0, c5, 0 (the code executes an MRC,
		 * not the MCR the original comment claimed) */
		cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
	}

	/* Stage the new value in DTRRX */
	retval = mem_ap_write_u32(swjdp,
			armv7a->debug_base + CPUDBG_DTRRX, value);

	/* Move DTRRX to r0 */
	cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
	/* r0 -> destination coprocessor register */
	cortex_a8_exec_opcode(target, ARMV4_5_MCR(CP, op1, 0, CRn, CRm, op2));

	return retval;
}
  170. static int cortex_a8_read_cp15(target_t *target, uint32_t op1, uint32_t op2,
  171. uint32_t CRn, uint32_t CRm, uint32_t *value)
  172. {
  173. return cortex_a8_read_cp(target, value, 15, op1, CRn, CRm, op2);
  174. }
  175. static int cortex_a8_write_cp15(target_t *target, uint32_t op1, uint32_t op2,
  176. uint32_t CRn, uint32_t CRm, uint32_t value)
  177. {
  178. return cortex_a8_write_cp(target, value, 15, op1, CRn, CRm, op2);
  179. }
  180. static int cortex_a8_mrc(target_t *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm, uint32_t *value)
  181. {
  182. if (cpnum!=15)
  183. {
  184. LOG_ERROR("Only cp15 is supported");
  185. return ERROR_FAIL;
  186. }
  187. return cortex_a8_read_cp15(target, op1, op2, CRn, CRm, value);
  188. }
  189. static int cortex_a8_mcr(target_t *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm, uint32_t value)
  190. {
  191. if (cpnum!=15)
  192. {
  193. LOG_ERROR("Only cp15 is supported");
  194. return ERROR_FAIL;
  195. }
  196. return cortex_a8_write_cp15(target, op1, op2, CRn, CRm, value);
  197. }
  198. static int cortex_a8_dap_read_coreregister_u32(target_t *target,
  199. uint32_t *value, int regnum)
  200. {
  201. int retval = ERROR_OK;
  202. uint8_t reg = regnum&0xFF;
  203. uint32_t dscr;
  204. struct armv7a_common *armv7a = target_to_armv7a(target);
  205. struct swjdp_common *swjdp = &armv7a->swjdp_info;
  206. if (reg > 16)
  207. return retval;
  208. if (reg < 15)
  209. {
  210. /* Rn to DCCTX, MCR p14, 0, Rd, c0, c5, 0, 0xEE000E15 */
  211. cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, reg, 0, 5, 0));
  212. }
  213. else if (reg == 15)
  214. {
  215. cortex_a8_exec_opcode(target, 0xE1A0000F);
  216. cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
  217. }
  218. else if (reg == 16)
  219. {
  220. cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, 0));
  221. cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
  222. }
  223. /* Read DTRRTX */
  224. do
  225. {
  226. retval = mem_ap_read_atomic_u32(swjdp,
  227. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  228. }
  229. while ((dscr & (1 << DSCR_DTR_TX_FULL)) == 0); /* Wait for DTRRXfull */
  230. retval = mem_ap_read_atomic_u32(swjdp,
  231. armv7a->debug_base + CPUDBG_DTRTX, value);
  232. return retval;
  233. }
  234. static int cortex_a8_dap_write_coreregister_u32(target_t *target, uint32_t value, int regnum)
  235. {
  236. int retval = ERROR_OK;
  237. uint8_t Rd = regnum&0xFF;
  238. uint32_t dscr;
  239. struct armv7a_common *armv7a = target_to_armv7a(target);
  240. struct swjdp_common *swjdp = &armv7a->swjdp_info;
  241. LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
  242. /* Check that DCCRX is not full */
  243. retval = mem_ap_read_atomic_u32(swjdp,
  244. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  245. if (dscr & (1 << DSCR_DTR_RX_FULL))
  246. {
  247. LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
  248. /* Clear DCCRX with MCR(p14, 0, Rd, c0, c5, 0), opcode 0xEE000E15 */
  249. cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
  250. }
  251. if (Rd > 16)
  252. return retval;
  253. /* Write to DCCRX */
  254. retval = mem_ap_write_u32(swjdp,
  255. armv7a->debug_base + CPUDBG_DTRRX, value);
  256. if (Rd < 15)
  257. {
  258. /* DCCRX to Rd, MCR p14, 0, Rd, c0, c5, 0, 0xEE000E15 */
  259. cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0));
  260. }
  261. else if (Rd == 15)
  262. {
  263. cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
  264. cortex_a8_exec_opcode(target, 0xE1A0F000);
  265. }
  266. else if (Rd == 16)
  267. {
  268. cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
  269. cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, 0));
  270. /* Execute a PrefetchFlush instruction through the ITR. */
  271. cortex_a8_exec_opcode(target, ARMV4_5_MCR(15, 0, 0, 7, 5, 4));
  272. }
  273. return retval;
  274. }
  275. /* Write to memory mapped registers directly with no cache or mmu handling */
  276. static int cortex_a8_dap_write_memap_register_u32(target_t *target, uint32_t address, uint32_t value)
  277. {
  278. int retval;
  279. struct armv7a_common *armv7a = target_to_armv7a(target);
  280. struct swjdp_common *swjdp = &armv7a->swjdp_info;
  281. retval = mem_ap_write_atomic_u32(swjdp, address, value);
  282. return retval;
  283. }
  284. /*
  285. * Cortex-A8 Run control
  286. */
/*
 * Poll the target: read DSCR through the debug AP and translate the
 * core status bits into OpenOCD target state, running debug entry and
 * firing the halted callbacks on a running->halted transition.
 *
 * DSCR[1:0] == 0b11 is treated as halted, 0b10 as running; anything
 * else is reported as TARGET_UNKNOWN.  The caller's AP selection is
 * saved and restored around the poll.
 */
static int cortex_a8_poll(target_t *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct swjdp_common *swjdp = &armv7a->swjdp_info;
	enum target_state prev_target_state = target->state;
	/* preserve the caller's AP selection across the poll */
	uint8_t saved_apsel = dap_ap_get_select(swjdp);

	dap_ap_select(swjdp, swjdp_debugap);
	retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
	{
		dap_ap_select(swjdp, saved_apsel);
		return retval;
	}
	/* snapshot consumed later by cortex_a8_debug_entry() */
	cortex_a8->cpudbg_dscr = dscr;

	if ((dscr & 0x3) == 0x3)
	{
		if (prev_target_state != TARGET_HALTED)
		{
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
					|| (prev_target_state == TARGET_RESET))
			{
				retval = cortex_a8_debug_entry(target);
				/* NOTE(review): this early return skips the
				 * saved_apsel restore below — confirm intended */
				if (retval != ERROR_OK)
					return retval;
				target_call_event_callbacks(target,
						TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING)
			{
				LOG_DEBUG(" ");
				retval = cortex_a8_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				target_call_event_callbacks(target,
						TARGET_EVENT_DEBUG_HALTED);
			}
		}
	}
	else if ((dscr & 0x3) == 0x2)
	{
		target->state = TARGET_RUNNING;
	}
	else
	{
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	dap_ap_select(swjdp, saved_apsel);

	return retval;
}
  344. static int cortex_a8_halt(target_t *target)
  345. {
  346. int retval = ERROR_OK;
  347. uint32_t dscr;
  348. struct armv7a_common *armv7a = target_to_armv7a(target);
  349. struct swjdp_common *swjdp = &armv7a->swjdp_info;
  350. uint8_t saved_apsel = dap_ap_get_select(swjdp);
  351. dap_ap_select(swjdp, swjdp_debugap);
  352. /*
  353. * Tell the core to be halted by writing DRCR with 0x1
  354. * and then wait for the core to be halted.
  355. */
  356. retval = mem_ap_write_atomic_u32(swjdp,
  357. armv7a->debug_base + CPUDBG_DRCR, 0x1);
  358. /*
  359. * enter halting debug mode
  360. */
  361. mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
  362. retval = mem_ap_write_atomic_u32(swjdp,
  363. armv7a->debug_base + CPUDBG_DSCR, dscr | (1 << DSCR_HALT_DBG_MODE));
  364. if (retval != ERROR_OK)
  365. goto out;
  366. do {
  367. mem_ap_read_atomic_u32(swjdp,
  368. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  369. } while ((dscr & (1 << DSCR_CORE_HALTED)) == 0);
  370. target->debug_reason = DBG_REASON_DBGRQ;
  371. out:
  372. dap_ap_select(swjdp, saved_apsel);
  373. return retval;
  374. }
/*
 * Resume the core.
 *
 * current == 1: resume at the cached PC; otherwise resume at <address>.
 * debug_execution != 0 keeps working areas allocated and reports
 * TARGET_EVENT_DEBUG_RESUMED instead of TARGET_EVENT_RESUMED.
 * handle_breakpoints is currently unused (the step-past-breakpoint
 * logic below is compiled out).
 */
static int cortex_a8_resume(struct target_s *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct armv4_5_common_s *armv4_5 = &armv7a->armv4_5_common;
	struct swjdp_common *swjdp = &armv7a->swjdp_info;

//	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc, dscr;

	uint8_t saved_apsel = dap_ap_get_select(swjdp);
	dap_ap_select(swjdp, swjdp_debugap);

	if (!debug_execution)
	{
		target_free_all_working_areas(target);
//		cortex_m3_enable_breakpoints(target);
//		cortex_m3_enable_watchpoints(target);
	}

#if 0
	if (debug_execution)
	{
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(
			ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
				armv4_5->core_mode, 15).value,
			0, 32);
	if (!current)
		resume_pc = address;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	if (armv7a->core_state == ARMV7A_STATE_ARM)
	{
		/* ARM instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
	}
	/* When the return address is loaded into PC
	 * bit 0 must be 1 to stay in Thumb state
	 */
	if (armv7a->core_state == ARMV7A_STATE_THUMB)
	{
		resume_pc |= 0x1;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);

	/* push the fixed-up PC back into the cache and mark it dirty so
	 * restore_context writes it to the core */
	buf_set_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
			armv4_5->core_mode, 15).value,
			0, 32, resume_pc);
	ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
			armv4_5->core_mode, 15).dirty = 1;
	ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
			armv4_5->core_mode, 15).valid = 1;

	cortex_a8_restore_context(target);
//	arm7_9_restore_context(target); TODO Context is currently NOT Properly restored
#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		/* Single step past breakpoint at current address */
		if ((breakpoint = breakpoint_find(target, resume_pc)))
		{
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}
#endif

	/* Restart core (DRCR restart request) and wait for it to be started.
	 * NOTE(review): unbounded wait — a core that fails to restart hangs
	 * this loop */
	mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
	do {
		mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
	} while ((dscr & (1 << DSCR_CORE_RESTARTED)) == 0);

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	armv4_5_invalidate_core_regs(target);

	if (!debug_execution)
	{
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
	}

	dap_ap_select(swjdp, saved_apsel);

	return ERROR_OK;
}
/*
 * Called when the core has entered debug state: enable ITR execution,
 * decode the debug entry reason from the DSCR snapshot taken by
 * cortex_a8_poll(), read r0-r15 and CPSR (either one-by-one over the
 * DCC, or in bulk through a work area when fast_reg_read is set),
 * populate the register cache, fix up the PC for the pipeline offset,
 * and finally run the post_debug_entry hook.
 */
static int cortex_a8_debug_entry(target_t *target)
{
	int i;
	uint32_t regfile[16], pc, cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct armv4_5_common_s *armv4_5 = &armv7a->armv4_5_common;
	struct swjdp_common *swjdp = &armv7a->swjdp_info;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);

	/* Enable the ITR execution once we are in debug mode */
	mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
	dscr |= (1 << DSCR_EXT_INT_EN);
	retval = mem_ap_write_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, dscr);

	/* Examine debug reason: DSCR[5:2] method-of-entry field from the
	 * snapshot taken at poll time */
	switch ((cortex_a8->cpudbg_dscr >> 2)&0xF)
	{
		case 0:
		case 4:
			target->debug_reason = DBG_REASON_DBGRQ;
			break;
		case 1:
		case 3:
			target->debug_reason = DBG_REASON_BREAKPOINT;
			break;
		case 10:
			target->debug_reason = DBG_REASON_WATCHPOINT;
			break;
		default:
			target->debug_reason = DBG_REASON_UNDEFINED;
			break;
	}

	/* Examine target state and mode */
	if (cortex_a8->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register accessible through core debug port */
	if (!regfile_working_area)
	{
		/* slow path: one DCC transaction per register */
		for (i = 0; i <= 15; i++)
			cortex_a8_dap_read_coreregister_u32(target,
					&regfile[i], i);
	}
	else
	{
		/* fast path: dump registers to the work area, bulk-read back */
		dap_ap_select(swjdp, swjdp_memoryap);
		cortex_a8_read_regs_through_mem(target,
				regfile_working_area->address, regfile);
		/* NOTE(review): selects swjdp_memoryap a second time here even
		 * though the debug AP is re-selected just below — looks
		 * redundant or a typo for swjdp_debugap; confirm */
		dap_ap_select(swjdp, swjdp_memoryap);
		target_free_working_area(target, regfile_working_area);
	}

	cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
	/* NOTE(review): 'pc' is assigned here but never read afterwards */
	pc = regfile[15];
	dap_ap_select(swjdp, swjdp_debugap);
	LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

	/* CPSR.M (bits 4:0) selects the register bank; CPSR.T (bit 5)
	 * selects ARM vs Thumb state */
	armv4_5->core_mode = cpsr & 0x1F;
	armv7a->core_state = (cpsr & 0x20)?ARMV7A_STATE_THUMB:ARMV7A_STATE_ARM;

	/* copy r0..PC into the register cache, marking them valid/clean */
	for (i = 0; i <= ARM_PC; i++)
	{
		buf_set_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
					armv4_5->core_mode, i).value,
				0, 32, regfile[i]);
		ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
				armv4_5->core_mode, i).valid = 1;
		ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
				armv4_5->core_mode, i).dirty = 0;
	}
	buf_set_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
				armv4_5->core_mode, 16).value,
			0, 32, cpsr);
	ARMV7A_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
	ARMV7A_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;

	/* Fixup PC Resume Address: the PC sampled in debug state leads the
	 * faulting/halting instruction by the pipeline offset */
	if (armv7a->core_state == ARMV7A_STATE_THUMB)
	{
		// T bit set for Thumb or ThumbEE state
		regfile[ARM_PC] -= 4;
	}
	else
	{
		// ARM state
		regfile[ARM_PC] -= 8;
	}
	buf_set_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
				armv4_5->core_mode, ARM_PC).value,
			0, 32, regfile[ARM_PC]);

	/* r0 and the PC were used as DCC scratch during the reads above;
	 * mark them dirty (if valid) so they are written back on resume */
	ARMV7A_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 0)
		.dirty = ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
				armv4_5->core_mode, 0).valid;
	ARMV7A_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 15)
		.dirty = ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
				armv4_5->core_mode, 15).valid;

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
//	armv4_5->exception_number = 0;
	if (armv7a->post_debug_entry)
		armv7a->post_debug_entry(target);

	return retval;
}
/*
 * Post debug-entry hook: read and cache the CP15 control register,
 * identify the caches on first entry, and refresh the MMU / cache
 * enable flags consumed by the memory access layer.
 */
static void cortex_a8_post_debug_entry(target_t *target)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;

//	cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	/* examine cp15 control reg */
	armv7a->read_cp15(target, 0, 0, 1, 0, &cortex_a8->cp15_control_reg);
	jtag_execute_queue();
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);

	/* ctype == -1 marks the cache info as not yet identified */
	if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;
		/* identify caches */
		armv7a->read_cp15(target, 0, 1, 0, 0, &cache_type_reg);
		jtag_execute_queue();
		/* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A8 */
		armv4_5_identify_cache(cache_type_reg,
				&armv7a->armv4_5_mmu.armv4_5_cache);
	}

	/* control reg bit 0: MMU enable */
	armv7a->armv4_5_mmu.mmu_enabled =
			(cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
	/* control reg bit 2: data/unified cache enable */
	armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
			(cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
	/* control reg bit 12: instruction cache enable */
	armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
			(cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
}
  616. static int cortex_a8_step(struct target_s *target, int current, uint32_t address,
  617. int handle_breakpoints)
  618. {
  619. struct armv7a_common *armv7a = target_to_armv7a(target);
  620. struct armv4_5_common_s *armv4_5 = &armv7a->armv4_5_common;
  621. struct breakpoint *breakpoint = NULL;
  622. struct breakpoint stepbreakpoint;
  623. int timeout = 100;
  624. if (target->state != TARGET_HALTED)
  625. {
  626. LOG_WARNING("target not halted");
  627. return ERROR_TARGET_NOT_HALTED;
  628. }
  629. /* current = 1: continue on current pc, otherwise continue at <address> */
  630. if (!current)
  631. {
  632. buf_set_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
  633. armv4_5->core_mode, ARM_PC).value,
  634. 0, 32, address);
  635. }
  636. else
  637. {
  638. address = buf_get_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
  639. armv4_5->core_mode, ARM_PC).value,
  640. 0, 32);
  641. }
  642. /* The front-end may request us not to handle breakpoints.
  643. * But since Cortex-A8 uses breakpoint for single step,
  644. * we MUST handle breakpoints.
  645. */
  646. handle_breakpoints = 1;
  647. if (handle_breakpoints) {
  648. breakpoint = breakpoint_find(target,
  649. buf_get_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
  650. armv4_5->core_mode, 15).value,
  651. 0, 32));
  652. if (breakpoint)
  653. cortex_a8_unset_breakpoint(target, breakpoint);
  654. }
  655. /* Setup single step breakpoint */
  656. stepbreakpoint.address = address;
  657. stepbreakpoint.length = (armv7a->core_state == ARMV7A_STATE_THUMB) ? 2 : 4;
  658. stepbreakpoint.type = BKPT_HARD;
  659. stepbreakpoint.set = 0;
  660. /* Break on IVA mismatch */
  661. cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
  662. target->debug_reason = DBG_REASON_SINGLESTEP;
  663. cortex_a8_resume(target, 1, address, 0, 0);
  664. while (target->state != TARGET_HALTED)
  665. {
  666. cortex_a8_poll(target);
  667. if (--timeout == 0)
  668. {
  669. LOG_WARNING("timeout waiting for target halt");
  670. break;
  671. }
  672. }
  673. cortex_a8_unset_breakpoint(target, &stepbreakpoint);
  674. if (timeout > 0) target->debug_reason = DBG_REASON_BREAKPOINT;
  675. if (breakpoint)
  676. cortex_a8_set_breakpoint(target, breakpoint, 0);
  677. if (target->state != TARGET_HALTED)
  678. LOG_DEBUG("target stepped");
  679. return ERROR_OK;
  680. }
  681. static int cortex_a8_restore_context(target_t *target)
  682. {
  683. int i;
  684. uint32_t value;
  685. struct armv7a_common *armv7a = target_to_armv7a(target);
  686. struct armv4_5_common_s *armv4_5 = &armv7a->armv4_5_common;
  687. LOG_DEBUG(" ");
  688. if (armv7a->pre_restore_context)
  689. armv7a->pre_restore_context(target);
  690. for (i = 15; i >= 0; i--)
  691. {
  692. if (ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
  693. armv4_5->core_mode, i).dirty)
  694. {
  695. value = buf_get_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
  696. armv4_5->core_mode, i).value,
  697. 0, 32);
  698. /* TODO Check return values */
  699. cortex_a8_dap_write_coreregister_u32(target, value, i);
  700. }
  701. }
  702. if (armv7a->post_restore_context)
  703. armv7a->post_restore_context(target);
  704. return ERROR_OK;
  705. }
/* NOTE(review): the two helpers below are compiled out (#if 0) and not
 * referenced anywhere in the visible file; kept for reference only. */
#if 0
/*
 * Cortex-A8 Core register functions
 */
/* Read core register <num> into *value and update cache dirty flags
 * for r0/r15, which are used as DCC scratch for high register numbers. */
static int cortex_a8_load_core_reg_u32(struct target_s *target, int num,
		armv4_5_mode_t mode, uint32_t * value)
{
	int retval;
	struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);

	if ((num <= ARM_CPSR))
	{
		/* read a normal core register */
		retval = cortex_a8_dap_read_coreregister_u32(target, value, num);

		if (retval != ERROR_OK)
		{
			LOG_ERROR("JTAG failure %i", retval);
			return ERROR_JTAG_DEVICE_ERROR;
		}
		LOG_DEBUG("load from core reg %i value 0x%" PRIx32, num, *value);
	}
	else
	{
		return ERROR_INVALID_ARGUMENTS;
	}

	/* Register other than r0 - r14 uses r0 for access */
	if (num > 14)
		ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
				armv4_5->core_mode, 0).dirty =
			ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
				armv4_5->core_mode, 0).valid;
	ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
			armv4_5->core_mode, 15).dirty =
		ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
			armv4_5->core_mode, 15).valid;

	return ERROR_OK;
}

/* Write <value> to core register <num>; marks the cache entry dirty
 * again if the DAP write fails. */
static int cortex_a8_store_core_reg_u32(struct target_s *target, int num,
		armv4_5_mode_t mode, uint32_t value)
{
	int retval;
//	uint32_t reg;
	struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);

#ifdef ARMV7_GDB_HACKS
	/* If the LR register is being modified, make sure it will put us
	 * in "thumb" mode, or an INVSTATE exception will occur. This is a
	 * hack to deal with the fact that gdb will sometimes "forge"
	 * return addresses, and doesn't set the LSB correctly (i.e., when
	 * printing expressions containing function calls, it sets LR=0.) */
	if (num == 14)
		value |= 0x01;
#endif

	if ((num <= ARM_CPSR))
	{
		retval = cortex_a8_dap_write_coreregister_u32(target, value, num);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("JTAG failure %i", retval);
			ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
					armv4_5->core_mode, num).dirty =
				ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
					armv4_5->core_mode, num).valid;
			return ERROR_JTAG_DEVICE_ERROR;
		}
		LOG_DEBUG("write core reg %i value 0x%" PRIx32, num, value);
	}
	else
	{
		return ERROR_INVALID_ARGUMENTS;
	}

	return ERROR_OK;
}
#endif
  778. static int cortex_a8_read_core_reg(struct target_s *target, int num,
  779. enum armv4_5_mode mode)
  780. {
  781. uint32_t value;
  782. int retval;
  783. struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
  784. cortex_a8_dap_read_coreregister_u32(target, &value, num);
  785. if ((retval = jtag_execute_queue()) != ERROR_OK)
  786. {
  787. return retval;
  788. }
  789. ARMV7A_CORE_REG_MODE(armv4_5->core_cache, mode, num).valid = 1;
  790. ARMV7A_CORE_REG_MODE(armv4_5->core_cache, mode, num).dirty = 0;
  791. buf_set_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
  792. mode, num).value, 0, 32, value);
  793. return ERROR_OK;
  794. }
  795. int cortex_a8_write_core_reg(struct target_s *target, int num,
  796. enum armv4_5_mode mode, uint32_t value)
  797. {
  798. int retval;
  799. struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
  800. cortex_a8_dap_write_coreregister_u32(target, value, num);
  801. if ((retval = jtag_execute_queue()) != ERROR_OK)
  802. {
  803. return retval;
  804. }
  805. ARMV7A_CORE_REG_MODE(armv4_5->core_cache, mode, num).valid = 1;
  806. ARMV7A_CORE_REG_MODE(armv4_5->core_cache, mode, num).dirty = 0;
  807. return ERROR_OK;
  808. }
  809. /*
  810. * Cortex-A8 Breakpoint and watchpoint fuctions
  811. */
  812. /* Setup hardware Breakpoint Register Pair */
  813. static int cortex_a8_set_breakpoint(struct target_s *target,
  814. struct breakpoint *breakpoint, uint8_t matchmode)
  815. {
  816. int retval;
  817. int brp_i=0;
  818. uint32_t control;
  819. uint8_t byte_addr_select = 0x0F;
  820. struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
  821. struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
  822. cortex_a8_brp_t * brp_list = cortex_a8->brp_list;
  823. if (breakpoint->set)
  824. {
  825. LOG_WARNING("breakpoint already set");
  826. return ERROR_OK;
  827. }
  828. if (breakpoint->type == BKPT_HARD)
  829. {
  830. while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
  831. brp_i++ ;
  832. if (brp_i >= cortex_a8->brp_num)
  833. {
  834. LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
  835. exit(-1);
  836. }
  837. breakpoint->set = brp_i + 1;
  838. if (breakpoint->length == 2)
  839. {
  840. byte_addr_select = (3 << (breakpoint->address & 0x02));
  841. }
  842. control = ((matchmode & 0x7) << 20)
  843. | (byte_addr_select << 5)
  844. | (3 << 1) | 1;
  845. brp_list[brp_i].used = 1;
  846. brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
  847. brp_list[brp_i].control = control;
  848. cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
  849. + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
  850. brp_list[brp_i].value);
  851. cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
  852. + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
  853. brp_list[brp_i].control);
  854. LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
  855. brp_list[brp_i].control,
  856. brp_list[brp_i].value);
  857. }
  858. else if (breakpoint->type == BKPT_SOFT)
  859. {
  860. uint8_t code[4];
  861. if (breakpoint->length == 2)
  862. {
  863. buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
  864. }
  865. else
  866. {
  867. buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
  868. }
  869. retval = target->type->read_memory(target,
  870. breakpoint->address & 0xFFFFFFFE,
  871. breakpoint->length, 1,
  872. breakpoint->orig_instr);
  873. if (retval != ERROR_OK)
  874. return retval;
  875. retval = target->type->write_memory(target,
  876. breakpoint->address & 0xFFFFFFFE,
  877. breakpoint->length, 1, code);
  878. if (retval != ERROR_OK)
  879. return retval;
  880. breakpoint->set = 0x11; /* Any nice value but 0 */
  881. }
  882. return ERROR_OK;
  883. }
  884. static int cortex_a8_unset_breakpoint(struct target_s *target, struct breakpoint *breakpoint)
  885. {
  886. int retval;
  887. struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
  888. struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
  889. cortex_a8_brp_t * brp_list = cortex_a8->brp_list;
  890. if (!breakpoint->set)
  891. {
  892. LOG_WARNING("breakpoint not set");
  893. return ERROR_OK;
  894. }
  895. if (breakpoint->type == BKPT_HARD)
  896. {
  897. int brp_i = breakpoint->set - 1;
  898. if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
  899. {
  900. LOG_DEBUG("Invalid BRP number in breakpoint");
  901. return ERROR_OK;
  902. }
  903. LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
  904. brp_list[brp_i].control, brp_list[brp_i].value);
  905. brp_list[brp_i].used = 0;
  906. brp_list[brp_i].value = 0;
  907. brp_list[brp_i].control = 0;
  908. cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
  909. + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
  910. brp_list[brp_i].control);
  911. cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
  912. + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
  913. brp_list[brp_i].value);
  914. }
  915. else
  916. {
  917. /* restore original instruction (kept in target endianness) */
  918. if (breakpoint->length == 4)
  919. {
  920. retval = target->type->write_memory(target,
  921. breakpoint->address & 0xFFFFFFFE,
  922. 4, 1, breakpoint->orig_instr);
  923. if (retval != ERROR_OK)
  924. return retval;
  925. }
  926. else
  927. {
  928. retval = target->type->write_memory(target,
  929. breakpoint->address & 0xFFFFFFFE,
  930. 2, 1, breakpoint->orig_instr);
  931. if (retval != ERROR_OK)
  932. return retval;
  933. }
  934. }
  935. breakpoint->set = 0;
  936. return ERROR_OK;
  937. }
  938. int cortex_a8_add_breakpoint(struct target_s *target, struct breakpoint *breakpoint)
  939. {
  940. struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
  941. if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
  942. {
  943. LOG_INFO("no hardware breakpoint available");
  944. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  945. }
  946. if (breakpoint->type == BKPT_HARD)
  947. cortex_a8->brp_num_available--;
  948. cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
  949. return ERROR_OK;
  950. }
  951. static int cortex_a8_remove_breakpoint(struct target_s *target, struct breakpoint *breakpoint)
  952. {
  953. struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
  954. #if 0
  955. /* It is perfectly possible to remove brakpoints while the taget is running */
  956. if (target->state != TARGET_HALTED)
  957. {
  958. LOG_WARNING("target not halted");
  959. return ERROR_TARGET_NOT_HALTED;
  960. }
  961. #endif
  962. if (breakpoint->set)
  963. {
  964. cortex_a8_unset_breakpoint(target, breakpoint);
  965. if (breakpoint->type == BKPT_HARD)
  966. cortex_a8->brp_num_available++ ;
  967. }
  968. return ERROR_OK;
  969. }
  970. /*
  971. * Cortex-A8 Reset fuctions
  972. */
  973. static int cortex_a8_assert_reset(target_t *target)
  974. {
  975. LOG_DEBUG(" ");
  976. /* registers are now invalid */
  977. armv4_5_invalidate_core_regs(target);
  978. target->state = TARGET_RESET;
  979. return ERROR_OK;
  980. }
  981. static int cortex_a8_deassert_reset(target_t *target)
  982. {
  983. LOG_DEBUG(" ");
  984. if (target->reset_halt)
  985. {
  986. int retval;
  987. if ((retval = target_halt(target)) != ERROR_OK)
  988. return retval;
  989. }
  990. return ERROR_OK;
  991. }
  992. /*
  993. * Cortex-A8 Memory access
  994. *
  995. * This is same Cortex M3 but we must also use the correct
  996. * ap number for every access.
  997. */
  998. static int cortex_a8_read_memory(struct target_s *target, uint32_t address,
  999. uint32_t size, uint32_t count, uint8_t *buffer)
  1000. {
  1001. struct armv7a_common *armv7a = target_to_armv7a(target);
  1002. struct swjdp_common *swjdp = &armv7a->swjdp_info;
  1003. int retval = ERROR_OK;
  1004. /* sanitize arguments */
  1005. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1006. return ERROR_INVALID_ARGUMENTS;
  1007. /* cortex_a8 handles unaligned memory access */
  1008. // ??? dap_ap_select(swjdp, swjdp_memoryap);
  1009. switch (size)
  1010. {
  1011. case 4:
  1012. retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
  1013. break;
  1014. case 2:
  1015. retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
  1016. break;
  1017. case 1:
  1018. retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
  1019. break;
  1020. default:
  1021. LOG_ERROR("BUG: we shouldn't get here");
  1022. exit(-1);
  1023. }
  1024. return retval;
  1025. }
  1026. int cortex_a8_write_memory(struct target_s *target, uint32_t address,
  1027. uint32_t size, uint32_t count, uint8_t *buffer)
  1028. {
  1029. struct armv7a_common *armv7a = target_to_armv7a(target);
  1030. struct swjdp_common *swjdp = &armv7a->swjdp_info;
  1031. int retval;
  1032. /* sanitize arguments */
  1033. if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
  1034. return ERROR_INVALID_ARGUMENTS;
  1035. // ??? dap_ap_select(swjdp, swjdp_memoryap);
  1036. switch (size)
  1037. {
  1038. case 4:
  1039. retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
  1040. break;
  1041. case 2:
  1042. retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
  1043. break;
  1044. case 1:
  1045. retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
  1046. break;
  1047. default:
  1048. LOG_ERROR("BUG: we shouldn't get here");
  1049. exit(-1);
  1050. }
  1051. if (target->state == TARGET_HALTED)
  1052. {
  1053. /* The Cache handling will NOT work with MMU active, the wrong addresses will be invalidated */
  1054. /* invalidate I-Cache */
  1055. if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
  1056. {
  1057. /* Invalidate ICache single entry with MVA, repeat this for all cache
  1058. lines in the address range, Cortex-A8 has fixed 64 byte line length */
  1059. /* Invalidate Cache single entry with MVA to PoU */
  1060. for (uint32_t cacheline=address; cacheline<address+size*count; cacheline+=64)
  1061. armv7a->write_cp15(target, 0, 1, 7, 5, cacheline); /* I-Cache to PoU */
  1062. }
  1063. /* invalidate D-Cache */
  1064. if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
  1065. {
  1066. /* Invalidate Cache single entry with MVA to PoC */
  1067. for (uint32_t cacheline=address; cacheline<address+size*count; cacheline+=64)
  1068. armv7a->write_cp15(target, 0, 1, 7, 6, cacheline); /* U/D cache to PoC */
  1069. }
  1070. }
  1071. return retval;
  1072. }
  1073. static int cortex_a8_bulk_write_memory(target_t *target, uint32_t address,
  1074. uint32_t count, uint8_t *buffer)
  1075. {
  1076. return cortex_a8_write_memory(target, address, 4, count, buffer);
  1077. }
/* Read one data byte and the control byte from the target's Debug
 * Communications Channel.
 *
 * NOTE: currently a stub — the whole body is compiled out (#if 0), so
 * this always returns ERROR_OK and leaves *value and *ctrl untouched.
 * The disabled code is a Cortex-M3 style DCRDR access kept for
 * reference until a proper Cortex-A8 DCC implementation exists.
 */
static int cortex_a8_dcc_read(struct swjdp_common *swjdp, uint8_t *value, uint8_t *ctrl)
{
#if 0
	u16 dcrdr;
	mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	*ctrl = (uint8_t)dcrdr;
	*value = (uint8_t)(dcrdr >> 8);
	LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
	/* write ack back to software dcc register
	 * signify we have read data */
	if (dcrdr & (1 << 0))
	{
		dcrdr = 0;
		mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	}
#endif
	return ERROR_OK;
}
  1096. static int cortex_a8_handle_target_request(void *priv)
  1097. {
  1098. target_t *target = priv;
  1099. if (!target->type->examined)
  1100. return ERROR_OK;
  1101. struct armv7a_common *armv7a = target_to_armv7a(target);
  1102. struct swjdp_common *swjdp = &armv7a->swjdp_info;
  1103. if (!target->dbg_msg_enabled)
  1104. return ERROR_OK;
  1105. if (target->state == TARGET_RUNNING)
  1106. {
  1107. uint8_t data = 0;
  1108. uint8_t ctrl = 0;
  1109. cortex_a8_dcc_read(swjdp, &data, &ctrl);
  1110. /* check if we have data */
  1111. if (ctrl & (1 << 0))
  1112. {
  1113. uint32_t request;
  1114. /* we assume target is quick enough */
  1115. request = data;
  1116. cortex_a8_dcc_read(swjdp, &data, &ctrl);
  1117. request |= (data << 8);
  1118. cortex_a8_dcc_read(swjdp, &data, &ctrl);
  1119. request |= (data << 16);
  1120. cortex_a8_dcc_read(swjdp, &data, &ctrl);
  1121. request |= (data << 24);
  1122. target_request(target, request);
  1123. }
  1124. }
  1125. return ERROR_OK;
  1126. }
  1127. /*
  1128. * Cortex-A8 target information and configuration
  1129. */
  1130. static int cortex_a8_examine(struct target_s *target)
  1131. {
  1132. struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
  1133. struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
  1134. struct swjdp_common *swjdp = &armv7a->swjdp_info;
  1135. int i;
  1136. int retval = ERROR_OK;
  1137. uint32_t didr, ctypr, ttypr, cpuid;
  1138. LOG_DEBUG("TODO");
  1139. /* Here we shall insert a proper ROM Table scan */
  1140. armv7a->debug_base = OMAP3530_DEBUG_BASE;
  1141. /* We do one extra read to ensure DAP is configured,
  1142. * we call ahbap_debugport_init(swjdp) instead
  1143. */
  1144. ahbap_debugport_init(swjdp);
  1145. mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
  1146. if ((retval = mem_ap_read_atomic_u32(swjdp,
  1147. armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
  1148. {
  1149. LOG_DEBUG("Examine failed");
  1150. return retval;
  1151. }
  1152. if ((retval = mem_ap_read_atomic_u32(swjdp,
  1153. armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
  1154. {
  1155. LOG_DEBUG("Examine failed");
  1156. return retval;
  1157. }
  1158. if ((retval = mem_ap_read_atomic_u32(swjdp,
  1159. armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
  1160. {
  1161. LOG_DEBUG("Examine failed");
  1162. return retval;
  1163. }
  1164. if ((retval = mem_ap_read_atomic_u32(swjdp,
  1165. armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
  1166. {
  1167. LOG_DEBUG("Examine failed");
  1168. return retval;
  1169. }
  1170. LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
  1171. LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
  1172. LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
  1173. LOG_DEBUG("didr = 0x%08" PRIx32, didr);
  1174. /* Setup Breakpoint Register Pairs */
  1175. cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
  1176. cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
  1177. cortex_a8->brp_num_available = cortex_a8->brp_num;
  1178. cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(cortex_a8_brp_t));
  1179. // cortex_a8->brb_enabled = ????;
  1180. for (i = 0; i < cortex_a8->brp_num; i++)
  1181. {
  1182. cortex_a8->brp_list[i].used = 0;
  1183. if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
  1184. cortex_a8->brp_list[i].type = BRP_NORMAL;
  1185. else
  1186. cortex_a8->brp_list[i].type = BRP_CONTEXT;
  1187. cortex_a8->brp_list[i].value = 0;
  1188. cortex_a8->brp_list[i].control = 0;
  1189. cortex_a8->brp_list[i].BRPn = i;
  1190. }
  1191. /* Setup Watchpoint Register Pairs */
  1192. cortex_a8->wrp_num = ((didr >> 28) & 0x0F) + 1;
  1193. cortex_a8->wrp_num_available = cortex_a8->wrp_num;
  1194. cortex_a8->wrp_list = calloc(cortex_a8->wrp_num, sizeof(cortex_a8_wrp_t));
  1195. for (i = 0; i < cortex_a8->wrp_num; i++)
  1196. {
  1197. cortex_a8->wrp_list[i].used = 0;
  1198. cortex_a8->wrp_list[i].type = 0;
  1199. cortex_a8->wrp_list[i].value = 0;
  1200. cortex_a8->wrp_list[i].control = 0;
  1201. cortex_a8->wrp_list[i].WRPn = i;
  1202. }
  1203. LOG_DEBUG("Configured %i hw breakpoint pairs and %i hw watchpoint pairs",
  1204. cortex_a8->brp_num , cortex_a8->wrp_num);
  1205. /* Configure core debug access */
  1206. cortex_a8_init_debug_access(target);
  1207. target->type->examined = 1;
  1208. return retval;
  1209. }
  1210. /*
  1211. * Cortex-A8 target creation and initialization
  1212. */
  1213. static void cortex_a8_build_reg_cache(target_t *target)
  1214. {
  1215. struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
  1216. struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
  1217. (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
  1218. armv4_5->core_cache = (*cache_p);
  1219. }
  1220. static int cortex_a8_init_target(struct command_context_s *cmd_ctx,
  1221. struct target_s *target)
  1222. {
  1223. cortex_a8_build_reg_cache(target);
  1224. return ERROR_OK;
  1225. }
/* Initialize the Cortex-A8 / ARMv7-A / ARM base state for a freshly
 * allocated target: magic numbers, JTAG info, SWJ-DP defaults, and all
 * architecture callback pointers.  Called from target_create with the
 * target's TAP.  Always returns ERROR_OK. */
int cortex_a8_init_arch_info(target_t *target,
		struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
{
	armv4_5_common_t *armv4_5;
	struct armv7a_common *armv7a;
	armv7a = &cortex_a8->armv7a_common;
	armv4_5 = &armv7a->armv4_5_common;
	struct swjdp_common *swjdp = &armv7a->swjdp_info;
	/* Setup struct cortex_a8_common */
	cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
	armv4_5->arch_info = armv7a;
	armv4_5_init_arch_info(target, armv4_5);
	/* prepare JTAG information for the new target */
	cortex_a8->jtag_info.tap = tap;
	cortex_a8->jtag_info.scann_size = 4;
	LOG_DEBUG(" ");
	/* SWJ-DP cached select/CSW/TAR values start out unknown (-1 forces
	 * the first access to program them). */
	swjdp->dp_select_value = -1;
	swjdp->ap_csw_value = -1;
	swjdp->ap_tar_value = -1;
	swjdp->jtag_info = &cortex_a8->jtag_info;
	swjdp->memaccess_tck = 80;
	/* Number of bits for tar autoincrement, impl. dep. at least 10 */
	swjdp->tar_autoincr_block = (1 << 10);
	cortex_a8->fast_reg_read = 0;
	/* register arch-specific functions */
	armv7a->examine_debug_reason = NULL;
	armv7a->post_debug_entry = cortex_a8_post_debug_entry;
	armv7a->pre_restore_context = NULL;
	armv7a->post_restore_context = NULL;
	/* ctype -1: cache type not yet probed */
	armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
	// armv7a->armv4_5_mmu.get_ttb = armv7a_get_ttb;
	armv7a->armv4_5_mmu.read_memory = cortex_a8_read_memory;
	armv7a->armv4_5_mmu.write_memory = cortex_a8_write_memory;
	// armv7a->armv4_5_mmu.disable_mmu_caches = armv7a_disable_mmu_caches;
	// armv7a->armv4_5_mmu.enable_mmu_caches = armv7a_enable_mmu_caches;
	armv7a->armv4_5_mmu.has_tiny_pages = 1;
	armv7a->armv4_5_mmu.mmu_enabled = 0;
	armv7a->read_cp15 = cortex_a8_read_cp15;
	armv7a->write_cp15 = cortex_a8_write_cp15;
	// arm7_9->handle_target_request = cortex_a8_handle_target_request;
	armv4_5->read_core_reg = cortex_a8_read_core_reg;
	armv4_5->write_core_reg = cortex_a8_write_core_reg;
	// armv4_5->full_context = arm7_9_full_context;
	// armv4_5->load_core_reg_u32 = cortex_a8_load_core_reg_u32;
	// armv4_5->store_core_reg_u32 = cortex_a8_store_core_reg_u32;
	// armv4_5->read_core_reg = armv4_5_read_core_reg; /* this is default */
	// armv4_5->write_core_reg = armv4_5_write_core_reg;
	/* Poll the DCC for target requests once per millisecond. */
	target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
	return ERROR_OK;
}
  1276. static int cortex_a8_target_create(struct target_s *target, Jim_Interp *interp)
  1277. {
  1278. struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
  1279. cortex_a8_init_arch_info(target, cortex_a8, target->tap);
  1280. return ERROR_OK;
  1281. }
  1282. COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
  1283. {
  1284. target_t *target = get_current_target(cmd_ctx);
  1285. struct armv7a_common *armv7a = target_to_armv7a(target);
  1286. return armv4_5_handle_cache_info_command(cmd_ctx,
  1287. &armv7a->armv4_5_mmu.armv4_5_cache);
  1288. }
  1289. COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
  1290. {
  1291. target_t *target = get_current_target(cmd_ctx);
  1292. cortex_a8_init_debug_access(target);
  1293. return ERROR_OK;
  1294. }
  1295. static int cortex_a8_register_commands(struct command_context_s *cmd_ctx)
  1296. {
  1297. command_t *cortex_a8_cmd;
  1298. int retval = ERROR_OK;
  1299. armv4_5_register_commands(cmd_ctx);
  1300. armv7a_register_commands(cmd_ctx);
  1301. cortex_a8_cmd = register_command(cmd_ctx, NULL, "cortex_a8",
  1302. NULL, COMMAND_ANY,
  1303. "cortex_a8 specific commands");
  1304. register_command(cmd_ctx, cortex_a8_cmd, "cache_info",
  1305. cortex_a8_handle_cache_info_command, COMMAND_EXEC,
  1306. "display information about target caches");
  1307. register_command(cmd_ctx, cortex_a8_cmd, "dbginit",
  1308. cortex_a8_handle_dbginit_command, COMMAND_EXEC,
  1309. "Initialize core debug");
  1310. return retval;
  1311. }
/* target_type dispatch table for the Cortex-A8, wiring the generic
 * target layer to the implementations in this file (plus shared
 * ARMv4/5 and ARM7/9 helpers where they apply directly). */
target_type_t cortexa8_target = {
	.name = "cortex_a8",
	.poll = cortex_a8_poll,
	.arch_state = armv7a_arch_state,
	.target_request_data = NULL,
	/* run control */
	.halt = cortex_a8_halt,
	.resume = cortex_a8_resume,
	.step = cortex_a8_step,
	.assert_reset = cortex_a8_assert_reset,
	.deassert_reset = cortex_a8_deassert_reset,
	.soft_reset_halt = NULL,
	.get_gdb_reg_list = armv4_5_get_gdb_reg_list,
	/* memory access (AP-based) */
	.read_memory = cortex_a8_read_memory,
	.write_memory = cortex_a8_write_memory,
	.bulk_write_memory = cortex_a8_bulk_write_memory,
	.checksum_memory = arm7_9_checksum_memory,
	.blank_check_memory = arm7_9_blank_check_memory,
	.run_algorithm = armv4_5_run_algorithm,
	/* breakpoints; watchpoints not implemented yet */
	.add_breakpoint = cortex_a8_add_breakpoint,
	.remove_breakpoint = cortex_a8_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,
	/* setup and configuration */
	.register_commands = cortex_a8_register_commands,
	.target_create = cortex_a8_target_create,
	.init_target = cortex_a8_init_target,
	.examine = cortex_a8_examine,
	/* coprocessor access */
	.mrc = cortex_a8_mrc,
	.mcr = cortex_a8_mcr,
};