/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2006 by Magnus Lundin                                   *
 *   lundin@mlu.mine.nu                                                    *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   Copyright (C) 2009 by Dirk Behme                                      *
 *   dirk.behme@gmail.com - copy from cortex_m3                            *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,      *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of       *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        *
 *   GNU General Public License for more details.                         *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License    *
 *   along with this program; if not, write to the                        *
 *   Free Software Foundation, Inc.,                                      *
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.             *
 *                                                                         *
 *   Cortex-A8(tm) TRM, ARM DDI 0344H                                      *
 *                                                                         *
 ***************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "cortex_a8.h"
#include "register.h"
#include "target_request.h"
#include "target_type.h"
#include "arm_opcodes.h"

static int cortex_a8_poll(struct target *target);
static int cortex_a8_debug_entry(struct target *target);
static int cortex_a8_restore_context(struct target *target, bool bpwp);
static int cortex_a8_set_breakpoint(struct target *target,
		struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a8_unset_breakpoint(struct target *target,
		struct breakpoint *breakpoint);
static int cortex_a8_dap_read_coreregister_u32(struct target *target,
		uint32_t *value, int regnum);
static int cortex_a8_dap_write_coreregister_u32(struct target *target,
		uint32_t value, int regnum);
/*
 * FIXME do topology discovery using the ROM; don't
 * assume this is an OMAP3.
 */
#define swjdp_memoryap 0
#define swjdp_debugap 1
#define OMAP3530_DEBUG_BASE 0x54011000

/*
 * Cortex-A8 basic debug access; very low level, assumes state is saved
 */
static int cortex_a8_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct swjdp_common *swjdp = &armv7a->swjdp_info;
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Unlock the debug registers for modification.
	 * The debugport might be uninitialised, so try twice. */
	retval = mem_ap_write_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK)
		mem_ap_write_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);

	/* Clear the Sticky Power Down status bit in PRSR to enable access to
	 * the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_PRSR, &dummy);

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information */
	cortex_a8_poll(target);

	return retval;
}
/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value.  Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 */
static int cortex_a8_exec_opcode(struct target *target,
		uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct swjdp_common *swjdp = &armv7a->swjdp_info;

	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	while ((dscr & DSCR_INSTR_COMP) == 0)
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
	}

	mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);

	do
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
	}
	while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
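
/* A usage sketch (not from the original source): callers that issue several
 * instructions back to back can keep one local DSCR word alive across calls,
 * so the initial InstrCompl poll above is skipped whenever the previous call
 * already observed it set:
 *
 *	uint32_t dscr = DSCR_INSTR_COMP;
 *	cortex_a8_exec_opcode(target, opcode_a, &dscr);
 *	cortex_a8_exec_opcode(target, opcode_b, &dscr);
 *
 * This is the pattern the DPM helpers below follow.
 */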
/**************************************************************************
Read the core registers through memory: fast (few exec_opcode calls), but it
needs a work_area.  This can cause problems with the MMU active.
**************************************************************************/
static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
		uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct swjdp_common *swjdp = &armv7a->swjdp_info;

	cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
	cortex_a8_dap_write_coreregister_u32(target, address, 0);
	cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	dap_ap_select(swjdp, swjdp_memoryap);
	mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
	dap_ap_select(swjdp, swjdp_debugap);

	return retval;
}
static int cortex_a8_dap_read_coreregister_u32(struct target *target,
		uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum & 0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct swjdp_common *swjdp = &armv7a->swjdp_info;

	if (reg > 17)
		return retval;

	if (reg < 15)
	{
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0"  0xEE00nE15 */
		cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
	}
	else if (reg == 15)
	{
		/* "MOV r0, r15"; then move r0 to DCCTX */
		cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
		cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
	}
	else
	{
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 */
		cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
	}

	/* Wait for DTRTXfull, then read DTRTX */
	while ((dscr & DSCR_DTR_TX_FULL) == 0)
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
	}

	retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
static int cortex_a8_dap_write_coreregister_u32(struct target *target,
		uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum & 0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct swjdp_common *swjdp = &armv7a->swjdp_info;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (dscr & DSCR_DTR_RX_FULL)
	{
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with "MRC p14, 0, R0, c0, c5, 0", opcode 0xEE100E15 */
		cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_write_u32(swjdp,
			armv7a->debug_base + CPUDBG_DTRRX, value);

	if (Rd < 15)
	{
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);
	}
	else if (Rd == 15)
	{
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
	}
	else
	{
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16)
			cortex_a8_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
	}

	return retval;
}
/* Write to memory mapped registers directly with no cache or mmu handling */
static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct swjdp_common *swjdp = &armv7a->swjdp_info;

	retval = mem_ap_write_atomic_u32(swjdp, address, value);

	return retval;
}
/*
 * Cortex-A8 implementation of Debug Programmer's Model
 *
 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
 * so there's no need to poll for it before executing an instruction.
 *
 * NOTE that in several of these cases the "stall" mode might be useful.
 * It'd let us queue a few operations together... prepare/finish might
 * be the places to enable/disable that mode.
 */
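
/* The helpers below implement the struct arm_dpm callbacks; they are hooked
 * up in cortex_a8_dpm_setup() further down, so the generic arm_dpm code drives
 * register and breakpoint/watchpoint access through them.
 */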
static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
}

static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(&a8->armv7a_common.swjdp_info,
			a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
}

static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
		uint32_t *dscr_p)
{
	struct swjdp_common *swjdp = &a8->armv7a_common.swjdp_info;
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRTXfull */
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(swjdp,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
	}

	retval = mem_ap_read_atomic_u32(swjdp,
			a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	//LOG_DEBUG("read DCC 0x%08" PRIx32, *data);

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	struct swjdp_common *swjdp = &a8->armv7a_common.swjdp_info;
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	do {
		retval = mem_ap_read_atomic_u32(swjdp,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
	} while ((dscr & DSCR_INSTR_COMP) == 0);

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX */
		retval = cortex_a8_exec_opcode(
				a8->armv7a_common.armv4_5_common.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
	}

	return retval;
}

static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t data)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	retval = cortex_a8_write_dcc(a8, data);

	return cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			opcode,
			&dscr);
}

static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t data)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a8_write_dcc(a8, data);

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);

	/* then the opcode, taking data from R0 */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			opcode,
			&dscr);

	return retval;
}

static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return cortex_a8_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}

static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t *data)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* the opcode, writing data to DCC */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			opcode,
			&dscr);

	return cortex_a8_read_dcc(a8, data, &dscr);
}

static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t *data)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			opcode,
			&dscr);

	/* write R0 to DCC */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);

	return cortex_a8_read_dcc(a8, data, &dscr);
}
static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index,
		uint32_t addr, uint32_t control)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv7a_common.debug_base;
	uint32_t cr = a8->armv7a_common.debug_base;
	int retval;

	switch (index) {
	case 0 ... 15:		/* breakpoints */
		vr += CPUDBG_BVR_BASE;
		cr += CPUDBG_BCR_BASE;
		break;
	case 16 ... 31:		/* watchpoints */
		vr += CPUDBG_WVR_BASE;
		cr += CPUDBG_WCR_BASE;
		index -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	vr += 4 * index;
	cr += 4 * index;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
			(unsigned) vr, (unsigned) cr);

	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}

static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t cr;

	switch (index) {
	case 0 ... 15:
		cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
		break;
	case 16 ... 31:
		cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
		index -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	cr += 4 * index;

	LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
}
static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
{
	struct arm_dpm *dpm = &a8->armv7a_common.dpm;
	int retval;

	dpm->arm = &a8->armv7a_common.armv4_5_common;
	dpm->didr = didr;

	dpm->prepare = cortex_a8_dpm_prepare;
	dpm->finish = cortex_a8_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a8_bpwp_enable;
	dpm->bpwp_disable = cortex_a8_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
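
/* Note: cortex_a8_dpm_setup() is called once from cortex_a8_examine_first(),
 * after DIDR has been read, so the generic arm_dpm layer is initialized with
 * this core's debug ID register.
 */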
/*
 * Cortex-A8 Run control
 */
static int cortex_a8_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct swjdp_common *swjdp = &armv7a->swjdp_info;
	enum target_state prev_target_state = target->state;
	uint8_t saved_apsel = dap_ap_get_select(swjdp);

	dap_ap_select(swjdp, swjdp_debugap);
	retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
	{
		dap_ap_select(swjdp, saved_apsel);
		return retval;
	}
	cortex_a8->cpudbg_dscr = dscr;

	if ((dscr & 0x3) == 0x3)
	{
		if (prev_target_state != TARGET_HALTED)
		{
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
					|| (prev_target_state == TARGET_RESET))
			{
				retval = cortex_a8_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;

				target_call_event_callbacks(target,
						TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING)
			{
				LOG_DEBUG(" ");

				retval = cortex_a8_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;

				target_call_event_callbacks(target,
						TARGET_EVENT_DEBUG_HALTED);
			}
		}
	}
	else if ((dscr & 0x3) == 0x2)
	{
		target->state = TARGET_RUNNING;
	}
	else
	{
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	dap_ap_select(swjdp, saved_apsel);

	return retval;
}
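
/* DSCR[1:0] are the "core halted" and "core restarted" status bits polled in
 * cortex_a8_poll() above; DRCR bit 0 requests a halt (used here) and bit 1
 * requests a restart (used by cortex_a8_resume()).  See the Cortex-A8 TRM
 * debug chapter for the full field layouts.
 */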
static int cortex_a8_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct swjdp_common *swjdp = &armv7a->swjdp_info;
	uint8_t saved_apsel = dap_ap_get_select(swjdp);

	dap_ap_select(swjdp, swjdp_debugap);

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_write_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DRCR, 0x1);

	/*
	 * enter halting debug mode
	 */
	mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
	retval = mem_ap_write_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);

	if (retval != ERROR_OK)
		goto out;

	do {
		mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
	} while ((dscr & DSCR_CORE_HALTED) == 0);

	target->debug_reason = DBG_REASON_DBGRQ;

out:
	dap_ap_select(swjdp, saved_apsel);
	return retval;
}
static int cortex_a8_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct swjdp_common *swjdp = &armv7a->swjdp_info;
//	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc, dscr;

	uint8_t saved_apsel = dap_ap_get_select(swjdp);
	dap_ap_select(swjdp, swjdp_debugap);

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution)
	{
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS.
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(
			armv4_5->core_cache->reg_list[15].value,
			0, 32);
	if (!current)
		resume_pc = address;

	/* Make sure that the ARMv7 gdb thumb fixups do not
	 * kill the return address
	 */
	switch (armv4_5->core_state)
	{
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(armv4_5->core_cache->reg_list[15].value,
			0, 32, resume_pc);
	armv4_5->core_cache->reg_list[15].dirty = 1;
	armv4_5->core_cache->reg_list[15].valid = 1;

	cortex_a8_restore_context(target, handle_breakpoints);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		/* Single step past breakpoint at current address */
		if ((breakpoint = breakpoint_find(target, resume_pc)))
		{
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}
#endif

	/* Restart core and wait for it to be started
	 * NOTE: this clears DSCR_ITR_EN and other bits.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */
	mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);

	do {
		mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
	} while ((dscr & DSCR_CORE_RESTARTED) == 0);

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	if (!debug_execution)
	{
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
	}

	dap_ap_select(swjdp, saved_apsel);

	return ERROR_OK;
}
static int cortex_a8_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct swjdp_common *swjdp = &armv7a->swjdp_info;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);

	/* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, dscr);

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a8->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load the registers accessible through the core debug port */
	if (!regfile_working_area)
	{
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	}
	else
	{
		dap_ap_select(swjdp, swjdp_memoryap);
		cortex_a8_read_regs_through_mem(target,
				regfile_working_area->address, regfile);
		dap_ap_select(swjdp, swjdp_memoryap);
		target_free_working_area(target, regfile_working_area);

		/* read Current PSR */
		cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
		dap_ap_select(swjdp, swjdp_debugap);
		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(armv4_5, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++)
		{
			reg = arm_reg_current(armv4_5, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}
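
		/* Note: the PC captured through the STMIA in
		 * cortex_a8_read_regs_through_mem() reads ahead of the halted
		 * instruction, hence the state-dependent adjustment below
		 * (-8 in ARM state, -4 with the T bit set) to recover the
		 * true resume address.
		 */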
		/* Fixup PC Resume Address */
		if (cpsr & (1 << 5))
		{
			// T bit set for Thumb or ThumbEE state
			regfile[ARM_PC] -= 4;
		}
		else
		{
			// ARM state
			regfile[ARM_PC] -= 8;
		}

		reg = armv4_5->core_cache->reg_list + 15;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
//	armv4_5->exception_number = 0;
	if (armv7a->post_debug_entry)
		armv7a->post_debug_entry(target);

	return retval;
}
static void cortex_a8_post_debug_entry(struct target *target)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	retval = armv7a->armv4_5_common.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cortex_a8->cp15_control_reg);
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);

	if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 1,	/* op1, op2 */
				0, 0,	/* CRn, CRm */
				&cache_type_reg);
		LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);

		/* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
		armv4_5_identify_cache(cache_type_reg,
				&armv7a->armv4_5_mmu.armv4_5_cache);
	}
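
	/* SCTLR bit 0 (M) enables the MMU, bit 2 (C) the data/unified caches,
	 * and bit 12 (I) the instruction cache; those are the masks tested below.
	 */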
	armv7a->armv4_5_mmu.mmu_enabled =
			(cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
			(cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
			(cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
}
static int cortex_a8_step(struct target *target, int current, uint32_t address,
		int handle_breakpoints)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct breakpoint *breakpoint = NULL;
	struct breakpoint stepbreakpoint;
	struct reg *r;
	int timeout = 100;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv4_5->core_cache->reg_list + 15;
	if (!current)
	{
		buf_set_u32(r->value, 0, 32, address);
	}
	else
	{
		address = buf_get_u32(r->value, 0, 32);
	}

	/* The front-end may request us not to handle breakpoints.
	 * But since Cortex-A8 uses breakpoints for single step,
	 * we MUST handle breakpoints.
	 */
	handle_breakpoints = 1;
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, address);
		if (breakpoint)
			cortex_a8_unset_breakpoint(target, breakpoint);
	}

	/* Setup single step breakpoint */
	stepbreakpoint.address = address;
	stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
			? 2 : 4;
	stepbreakpoint.type = BKPT_HARD;
	stepbreakpoint.set = 0;

	/* Break on IVA mismatch */
	cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	cortex_a8_resume(target, 1, address, 0, 0);

	while (target->state != TARGET_HALTED)
	{
		cortex_a8_poll(target);
		if (--timeout == 0)
		{
			LOG_WARNING("timeout waiting for target halt");
			break;
		}
	}

	cortex_a8_unset_breakpoint(target, &stepbreakpoint);
	if (timeout > 0)
		target->debug_reason = DBG_REASON_BREAKPOINT;

	if (breakpoint)
		cortex_a8_set_breakpoint(target, breakpoint, 0);

	if (target->state != TARGET_HALTED)
		LOG_DEBUG("target stepped");

	return ERROR_OK;
}
static int cortex_a8_restore_context(struct target *target, bool bpwp)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	if (armv7a->pre_restore_context)
		armv7a->pre_restore_context(target);

	arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);

	if (armv7a->post_restore_context)
		armv7a->post_restore_context(target);

	return ERROR_OK;
}

/*
 * Cortex-A8 Breakpoint and watchpoint functions
 */
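
/* matchmode selects the BCR address-comparison mode: 0x00 (used by
 * cortex_a8_add_breakpoint) is an exact IVA match, while 0x04 (used by
 * cortex_a8_step) is an IVA mismatch, so the core halts on the next
 * instruction fetched from any other address.
 */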
/* Setup hardware Breakpoint Register Pair */
static int cortex_a8_set_breakpoint(struct target *target,
		struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct cortex_a8_brp *brp_list = cortex_a8->brp_list;

	if (breakpoint->set)
	{
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
			brp_i++;
		if (brp_i >= cortex_a8->brp_num)
		{
			LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
			return ERROR_FAIL;
		}
		breakpoint->set = brp_i + 1;
		if (breakpoint->length == 2)
		{
			byte_addr_select = (3 << (breakpoint->address & 0x02));
		}
		control = ((matchmode & 0x7) << 20)
				| (byte_addr_select << 5)
				| (3 << 1) | 1;
		brp_list[brp_i].used = 1;
		brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
		brp_list[brp_i].control = control;
		cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
				brp_list[brp_i].value);
		cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
				brp_list[brp_i].control);
		LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control,
				brp_list[brp_i].value);
	}
	else if (breakpoint->type == BKPT_SOFT)
	{
		uint8_t code[4];
		if (breakpoint->length == 2)
		{
			buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
		}
		else
		{
			buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
		}
		retval = target->type->read_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;
		retval = target->type->write_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1, code);
		if (retval != ERROR_OK)
			return retval;
		breakpoint->set = 0x11; /* Any nice value but 0 */
	}

	return ERROR_OK;
}
static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct cortex_a8_brp *brp_list = cortex_a8->brp_list;

	if (!breakpoint->set)
	{
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		int brp_i = breakpoint->set - 1;
		if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
		{
			LOG_DEBUG("Invalid BRP number in breakpoint");
			return ERROR_OK;
		}
		LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
		brp_list[brp_i].used = 0;
		brp_list[brp_i].value = 0;
		brp_list[brp_i].control = 0;
		cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
				brp_list[brp_i].control);
		cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
				brp_list[brp_i].value);
	}
	else
	{
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4)
		{
			retval = target->type->write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
		else
		{
			retval = target->type->write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
static int cortex_a8_add_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);

	if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
	{
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		cortex_a8->brp_num_available--;
	cortex_a8_set_breakpoint(target, breakpoint, 0x00);	/* Exact match */

	return ERROR_OK;
}

static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);

#if 0
	/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
#endif

	if (breakpoint->set)
	{
		cortex_a8_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			cortex_a8->brp_num_available++;
	}

	return ERROR_OK;
}
/*
 * Cortex-A8 Reset functions
 */
static int cortex_a8_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	} else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	register_cache_invalidate(armv7a->armv4_5_common.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
static int cortex_a8_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	jtag_add_reset(0, 0);

	retval = cortex_a8_poll(target);

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
					target_name(target));
			if ((retval = target_halt(target)) != ERROR_OK)
				return retval;
		}
	}

	return ERROR_OK;
}
/*
 * Cortex-A8 Memory access
 *
 * This is the same as for the Cortex-M3, but we must also use the correct
 * AP number for every access.
 */
static int cortex_a8_read_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct swjdp_common *swjdp = &armv7a->swjdp_info;
	int retval = ERROR_INVALID_ARGUMENTS;

	/* cortex_a8 handles unaligned memory access */

//	???	dap_ap_select(swjdp, swjdp_memoryap);

	if (count && buffer) {
		switch (size) {
		case 4:
			retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
			break;
		case 2:
			retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
			break;
		case 1:
			retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
			break;
		}
	}

	return retval;
}
static int cortex_a8_write_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct swjdp_common *swjdp = &armv7a->swjdp_info;
	int retval = ERROR_INVALID_ARGUMENTS;

//	???	dap_ap_select(swjdp, swjdp_memoryap);

	if (count && buffer) {
		switch (size) {
		case 4:
			retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
			break;
		case 2:
			retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
			break;
		case 1:
			retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
			break;
		}
	}

	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED)
	{
		struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range.  Cortex-A8 has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
		{
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 *	MCR p15, 0, r0, c7, c5, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						cacheline);
			}
		}

		/* invalidate D-Cache */
		if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
		{
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 *	MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						cacheline);
			}
		}

		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}

static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return cortex_a8_write_memory(target, address, 4, count, buffer);
}
static int cortex_a8_dcc_read(struct swjdp_common *swjdp, uint8_t *value, uint8_t *ctrl)
{
#if 0
	u16 dcrdr;

	mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	*ctrl = (uint8_t)dcrdr;
	*value = (uint8_t)(dcrdr >> 8);

	LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);

	/* write ack back to software dcc register
	 * signify we have read data */
	if (dcrdr & (1 << 0))
	{
		dcrdr = 0;
		mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	}
#endif
	return ERROR_OK;
}

static int cortex_a8_handle_target_request(void *priv)
{
	struct target *target = priv;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct swjdp_common *swjdp = &armv7a->swjdp_info;

	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING)
	{
		uint8_t data = 0;
		uint8_t ctrl = 0;

		cortex_a8_dcc_read(swjdp, &data, &ctrl);

		/* check if we have data */
		if (ctrl & (1 << 0))
		{
			uint32_t request;

			/* we assume target is quick enough */
			request = data;
			cortex_a8_dcc_read(swjdp, &data, &ctrl);
			request |= (data << 8);
			cortex_a8_dcc_read(swjdp, &data, &ctrl);
			request |= (data << 16);
			cortex_a8_dcc_read(swjdp, &data, &ctrl);
			request |= (data << 24);
			target_request(target, request);
		}
	}

	return ERROR_OK;
}
/*
 * Cortex-A8 target information and configuration
 */
static int cortex_a8_examine_first(struct target *target)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct swjdp_common *swjdp = &armv7a->swjdp_info;
	int i;
	int retval = ERROR_OK;
	uint32_t didr, ctypr, ttypr, cpuid;

	LOG_DEBUG("TODO");

	/* Here we shall insert a proper ROM Table scan */
	armv7a->debug_base = OMAP3530_DEBUG_BASE;

	/* We do one extra read to ensure DAP is configured;
	 * we call ahbap_debugport_init(swjdp) instead
	 */
	ahbap_debugport_init(swjdp);
	mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);

	if ((retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
	{
		LOG_DEBUG("Examine failed");
		return retval;
	}

	if ((retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
	{
		LOG_DEBUG("Examine failed");
		return retval;
	}

	if ((retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
	{
		LOG_DEBUG("Examine failed");
		return retval;
	}

	if ((retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
	{
		LOG_DEBUG("Examine failed");
		return retval;
	}

	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
	LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
	LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
	LOG_DEBUG("didr = 0x%08" PRIx32, didr);

	armv7a->armv4_5_common.core_type = ARM_MODE_MON;
	cortex_a8_dpm_setup(cortex_a8, didr);
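
	/* DIDR[27:24] holds the number of breakpoint register pairs minus one,
	 * and DIDR[23:20] the number of context-matching comparators minus one;
	 * the +1 adjustments below account for that encoding.
	 */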
	/* Setup Breakpoint Register Pairs */
	cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
	cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
	cortex_a8->brp_num_available = cortex_a8->brp_num;
	cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
//	cortex_a8->brb_enabled = ????;
	for (i = 0; i < cortex_a8->brp_num; i++)
	{
		cortex_a8->brp_list[i].used = 0;
		if (i < (cortex_a8->brp_num - cortex_a8->brp_num_context))
			cortex_a8->brp_list[i].type = BRP_NORMAL;
		else
			cortex_a8->brp_list[i].type = BRP_CONTEXT;
		cortex_a8->brp_list[i].value = 0;
		cortex_a8->brp_list[i].control = 0;
		cortex_a8->brp_list[i].BRPn = i;
	}

	LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);

	target_set_examined(target);
	return ERROR_OK;
}
static int cortex_a8_examine(struct target *target)
{
	int retval = ERROR_OK;

	/* don't re-probe hardware after each reset */
	if (!target_was_examined(target))
		retval = cortex_a8_examine_first(target);

	/* Configure core debug access */
	if (retval == ERROR_OK)
		retval = cortex_a8_init_debug_access(target);

	return retval;
}

/*
 * Cortex-A8 target creation and initialization
 */
static int cortex_a8_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
static int cortex_a8_init_arch_info(struct target *target,
		struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
{
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct swjdp_common *swjdp = &armv7a->swjdp_info;

	/* Setup struct cortex_a8_common */
	cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
	armv4_5->arch_info = armv7a;

	/* prepare JTAG information for the new target */
	cortex_a8->jtag_info.tap = tap;
	cortex_a8->jtag_info.scann_size = 4;

	swjdp->dp_select_value = -1;
	swjdp->ap_csw_value = -1;
	swjdp->ap_tar_value = -1;
	swjdp->jtag_info = &cortex_a8->jtag_info;
	swjdp->memaccess_tck = 80;

	/* Number of bits for tar autoincrement, impl. dep. at least 10 */
	swjdp->tar_autoincr_block = (1 << 10);

	cortex_a8->fast_reg_read = 0;

	/* register arch-specific functions */
	armv7a->examine_debug_reason = NULL;

	armv7a->post_debug_entry = cortex_a8_post_debug_entry;

	armv7a->pre_restore_context = NULL;
	armv7a->post_restore_context = NULL;
	armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
//	armv7a->armv4_5_mmu.get_ttb = armv7a_get_ttb;
	armv7a->armv4_5_mmu.read_memory = cortex_a8_read_memory;
	armv7a->armv4_5_mmu.write_memory = cortex_a8_write_memory;
//	armv7a->armv4_5_mmu.disable_mmu_caches = armv7a_disable_mmu_caches;
//	armv7a->armv4_5_mmu.enable_mmu_caches = armv7a_enable_mmu_caches;
	armv7a->armv4_5_mmu.has_tiny_pages = 1;
	armv7a->armv4_5_mmu.mmu_enabled = 0;

//	arm7_9->handle_target_request = cortex_a8_handle_target_request;

	/* REVISIT v7a setup should be in a v7a-specific routine */
	arm_init_arch_info(target, armv4_5);
	armv7a->common_magic = ARMV7_COMMON_MAGIC;

	target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);

	return ERROR_OK;
}

static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
{
	struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));

	cortex_a8_init_arch_info(target, cortex_a8, target->tap);

	return ERROR_OK;
}
COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	return armv4_5_handle_cache_info_command(CMD_CTX,
			&armv7a->armv4_5_mmu.armv4_5_cache);
}

COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
{
	struct target *target = get_current_target(CMD_CTX);

	cortex_a8_init_debug_access(target);

	return ERROR_OK;
}

static const struct command_registration cortex_a8_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a8_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
	},
	{
		.name = "dbginit",
		.handler = cortex_a8_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
	},
	COMMAND_REGISTRATION_DONE
};

static const struct command_registration cortex_a8_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a8",
		.mode = COMMAND_ANY,
		.help = "Cortex-A8 command group",
		.chain = cortex_a8_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
struct target_type cortexa8_target = {
	.name = "cortex_a8",

	.poll = cortex_a8_poll,
	.arch_state = armv7a_arch_state,

	.target_request_data = NULL,

	.halt = cortex_a8_halt,
	.resume = cortex_a8_resume,
	.step = cortex_a8_step,

	.assert_reset = cortex_a8_assert_reset,
	.deassert_reset = cortex_a8_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a8_read_memory,
	.write_memory = cortex_a8_write_memory,
	.bulk_write_memory = cortex_a8_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a8_add_breakpoint,
	.remove_breakpoint = cortex_a8_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a8_command_handlers,
	.target_create = cortex_a8_target_create,
	.init_target = cortex_a8_init_target,
	.examine = cortex_a8_examine,
};