You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

3595 lines
104 KiB

  1. /***************************************************************************
  2. * Copyright (C) 2005 by Dominic Rath *
  3. * Dominic.Rath@gmx.de *
  4. * *
  5. * Copyright (C) 2006 by Magnus Lundin *
  6. * lundin@mlu.mine.nu *
  7. * *
  8. * Copyright (C) 2008 by Spencer Oliver *
  9. * spen@spen-soft.co.uk *
  10. * *
  11. * Copyright (C) 2009 by Dirk Behme *
  12. * dirk.behme@gmail.com - copy from cortex_m3 *
  13. * *
  14. * Copyright (C) 2010 Øyvind Harboe *
  15. * oyvind.harboe@zylin.com *
  16. * *
  17. * Copyright (C) ST-Ericsson SA 2011 *
  18. * michel.jaouen@stericsson.com : smp minimum support *
  19. * *
  20. * Copyright (C) Broadcom 2012 *
  21. * ehunter@broadcom.com : Cortex R4 support *
  22. * *
  23. * Copyright (C) 2013 Kamal Dasu *
  24. * kdasu.kdev@gmail.com *
  25. * *
  26. * This program is free software; you can redistribute it and/or modify *
  27. * it under the terms of the GNU General Public License as published by *
  28. * the Free Software Foundation; either version 2 of the License, or *
  29. * (at your option) any later version. *
  30. * *
  31. * This program is distributed in the hope that it will be useful, *
  32. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  33. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  34. * GNU General Public License for more details. *
  35. * *
  36. * You should have received a copy of the GNU General Public License *
  37. * along with this program; if not, write to the *
  38. * Free Software Foundation, Inc., *
  39. * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
  40. * *
  41. * Cortex-A8(tm) TRM, ARM DDI 0344H *
  42. * Cortex-A9(tm) TRM, ARM DDI 0407F *
  43. * Cortex-A4(tm) TRM, ARM DDI 0363E *
  44. * Cortex-A15(tm)TRM, ARM DDI 0438C *
  45. * *
  46. ***************************************************************************/
  47. #ifdef HAVE_CONFIG_H
  48. #include "config.h"
  49. #endif
  50. #include "breakpoints.h"
  51. #include "cortex_a.h"
  52. #include "register.h"
  53. #include "target_request.h"
  54. #include "target_type.h"
  55. #include "arm_opcodes.h"
  56. #include <helper/time_support.h>
/* Forward declarations for routines defined later in this file. */
static int cortex_a_poll(struct target *target);
static int cortex_a_debug_entry(struct target *target);
static int cortex_a_restore_context(struct target *target, bool bpwp);
static int cortex_a_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum);
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum);
static int cortex_a_mmu(struct target *target, int *enabled);
static int cortex_a_mmu_modify(struct target *target, int enable);
static int cortex_a_virt2phys(struct target *target,
	uint32_t virt, uint32_t *phys);
static int cortex_a_read_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
  78. /* restore cp15_control_reg at resume */
  79. static int cortex_a_restore_cp15_control_reg(struct target *target)
  80. {
  81. int retval = ERROR_OK;
  82. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  83. struct armv7a_common *armv7a = target_to_armv7a(target);
  84. if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
  85. cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
  86. /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
  87. retval = armv7a->arm.mcr(target, 15,
  88. 0, 0, /* op1, op2 */
  89. 1, 0, /* CRn, CRm */
  90. cortex_a->cp15_control_reg);
  91. }
  92. return retval;
  93. }
  94. /*
  95. * Set up ARM core for memory access.
  96. * If !phys_access, switch to SVC mode and make sure MMU is on
  97. * If phys_access, switch off mmu
  98. */
  99. static int cortex_a_prep_memaccess(struct target *target, int phys_access)
  100. {
  101. struct armv7a_common *armv7a = target_to_armv7a(target);
  102. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  103. int mmu_enabled = 0;
  104. if (phys_access == 0) {
  105. dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
  106. cortex_a_mmu(target, &mmu_enabled);
  107. if (mmu_enabled)
  108. cortex_a_mmu_modify(target, 1);
  109. if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
  110. /* overwrite DACR to all-manager */
  111. armv7a->arm.mcr(target, 15,
  112. 0, 0, 3, 0,
  113. 0xFFFFFFFF);
  114. }
  115. } else {
  116. cortex_a_mmu(target, &mmu_enabled);
  117. if (mmu_enabled)
  118. cortex_a_mmu_modify(target, 0);
  119. }
  120. return ERROR_OK;
  121. }
  122. /*
  123. * Restore ARM core after memory access.
  124. * If !phys_access, switch to previous mode
  125. * If phys_access, restore MMU setting
  126. */
  127. static int cortex_a_post_memaccess(struct target *target, int phys_access)
  128. {
  129. struct armv7a_common *armv7a = target_to_armv7a(target);
  130. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  131. if (phys_access == 0) {
  132. if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
  133. /* restore */
  134. armv7a->arm.mcr(target, 15,
  135. 0, 0, 3, 0,
  136. cortex_a->cp15_dacr_reg);
  137. }
  138. dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
  139. } else {
  140. int mmu_enabled = 0;
  141. cortex_a_mmu(target, &mmu_enabled);
  142. if (mmu_enabled)
  143. cortex_a_mmu_modify(target, 1);
  144. }
  145. return ERROR_OK;
  146. }
  147. /* modify cp15_control_reg in order to enable or disable mmu for :
  148. * - virt2phys address conversion
  149. * - read or write memory in phys or virt address */
  150. static int cortex_a_mmu_modify(struct target *target, int enable)
  151. {
  152. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  153. struct armv7a_common *armv7a = target_to_armv7a(target);
  154. int retval = ERROR_OK;
  155. int need_write = 0;
  156. if (enable) {
  157. /* if mmu enabled at target stop and mmu not enable */
  158. if (!(cortex_a->cp15_control_reg & 0x1U)) {
  159. LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
  160. return ERROR_FAIL;
  161. }
  162. if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
  163. cortex_a->cp15_control_reg_curr |= 0x1U;
  164. need_write = 1;
  165. }
  166. } else {
  167. if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
  168. cortex_a->cp15_control_reg_curr &= ~0x1U;
  169. need_write = 1;
  170. }
  171. }
  172. if (need_write) {
  173. LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
  174. enable ? "enable mmu" : "disable mmu",
  175. cortex_a->cp15_control_reg_curr);
  176. retval = armv7a->arm.mcr(target, 15,
  177. 0, 0, /* op1, op2 */
  178. 1, 0, /* CRn, CRm */
  179. cortex_a->cp15_control_reg_curr);
  180. }
  181. return retval;
  182. }
  183. /*
  184. * Cortex-A Basic debug access, very low level assumes state is saved
  185. */
  186. static int cortex_a8_init_debug_access(struct target *target)
  187. {
  188. struct armv7a_common *armv7a = target_to_armv7a(target);
  189. int retval;
  190. LOG_DEBUG(" ");
  191. /* Unlocking the debug registers for modification
  192. * The debugport might be uninitialised so try twice */
  193. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  194. armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
  195. if (retval != ERROR_OK) {
  196. /* try again */
  197. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  198. armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
  199. if (retval == ERROR_OK)
  200. LOG_USER(
  201. "Locking debug access failed on first, but succeeded on second try.");
  202. }
  203. return retval;
  204. }
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;
	uint32_t dbg_osreg;
	uint32_t cortex_part_num;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	LOG_DEBUG(" ");
	/* Extract the part-number field from the cached MIDR value. */
	cortex_part_num = (cortex_a->cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >>
		CORTEX_A_MIDR_PARTNUM_SHIFT;

	switch (cortex_part_num) {
	case CORTEX_A7_PARTNUM:
	case CORTEX_A15_PARTNUM:
		/* A7/A15: read the OS Lock Status Register and, if the OS
		 * lock is set, clear it via DBGOSLAR so that the debug
		 * registers accept writes. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_OSLSR,
				&dbg_osreg);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("DBGOSLSR 0x%" PRIx32, dbg_osreg);
		if (dbg_osreg & CPUDBG_OSLAR_LK_MASK)
			/* Unlocking the DEBUG OS registers for modification */
			retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_OSLAR,
					0);
		break;

	case CORTEX_A5_PARTNUM:
	case CORTEX_A8_PARTNUM:
	case CORTEX_A9_PARTNUM:
	default:
		/* Other parts use the DBGLAR lock-access key sequence. */
		retval = cortex_a8_init_debug_access(target);
	}
	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
	LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */
	/* Resync breakpoint registers */
	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
  263. static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
  264. {
  265. /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
  266. * Writes final value of DSCR into *dscr. Pass force to force always
  267. * reading DSCR at least once. */
  268. struct armv7a_common *armv7a = target_to_armv7a(target);
  269. int64_t then = timeval_ms();
  270. while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
  271. force = false;
  272. int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  273. armv7a->debug_base + CPUDBG_DSCR, dscr);
  274. if (retval != ERROR_OK) {
  275. LOG_ERROR("Could not read DSCR register");
  276. return retval;
  277. }
  278. if (timeval_ms() > then + 1000) {
  279. LOG_ERROR("Timeout waiting for InstrCompl=1");
  280. return ERROR_FAIL;
  281. }
  282. }
  283. return ERROR_OK;
  284. }
  285. /* To reduce needless round-trips, pass in a pointer to the current
  286. * DSCR value. Initialize it to zero if you just need to know the
  287. * value on return from this function; or DSCR_INSTR_COMP if you
  288. * happen to know that no instruction is pending.
  289. */
  290. static int cortex_a_exec_opcode(struct target *target,
  291. uint32_t opcode, uint32_t *dscr_p)
  292. {
  293. uint32_t dscr;
  294. int retval;
  295. struct armv7a_common *armv7a = target_to_armv7a(target);
  296. dscr = dscr_p ? *dscr_p : 0;
  297. LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
  298. /* Wait for InstrCompl bit to be set */
  299. retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
  300. if (retval != ERROR_OK)
  301. return retval;
  302. retval = mem_ap_write_u32(armv7a->debug_ap,
  303. armv7a->debug_base + CPUDBG_ITR, opcode);
  304. if (retval != ERROR_OK)
  305. return retval;
  306. int64_t then = timeval_ms();
  307. do {
  308. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  309. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  310. if (retval != ERROR_OK) {
  311. LOG_ERROR("Could not read DSCR register");
  312. return retval;
  313. }
  314. if (timeval_ms() > then + 1000) {
  315. LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
  316. return ERROR_FAIL;
  317. }
  318. } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
  319. if (dscr_p)
  320. *dscr_p = dscr;
  321. return retval;
  322. }
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.
**************************************************************************/
static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Save R0 into regfile[0] first -- R0 is clobbered below as the
	 * base-address register for the store. */
	retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	/* Point R0 at the work area... */
	retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* ...and dump R1-R15 there with a single STMIA (register mask
	 * 0xFFFE = r1..r15, i.e. 15 words). */
	retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;
	/* Read the 15 stored words back through the memory AP into
	 * regfile[1..15]. */
	retval = mem_ap_read_buf(armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
/* Read one core register through the DCC.
 * regnum: 0-14 = Rn, 15 = PC, 16 = CPSR, 17 = SPSR; anything above 17
 * is silently ignored (returns ERROR_OK without touching *value). */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* Value is now ready in DTRTX; pull it out. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
/* Write one core register through the DCC.
 * regnum: 0-14 = Rn, 15 = PC, 16 = CPSR, 17 = SPSR; anything above 17
 * is silently ignored. */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
  470. /* Write to memory mapped registers directly with no cache or mmu handling */
  471. static int cortex_a_dap_write_memap_register_u32(struct target *target,
  472. uint32_t address,
  473. uint32_t value)
  474. {
  475. int retval;
  476. struct armv7a_common *armv7a = target_to_armv7a(target);
  477. retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
  478. return retval;
  479. }
  480. /*
  481. * Cortex-A implementation of Debug Programmer's Model
  482. *
  483. * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
  484. * so there's no need to poll for it before executing an instruction.
  485. *
  486. * NOTE that in several of these cases the "stall" mode might be useful.
  487. * It'd let us queue a few operations together... prepare/finish might
  488. * be the places to enable/disable that mode.
  489. */
  490. static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
  491. {
  492. return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
  493. }
  494. static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
  495. {
  496. LOG_DEBUG("write DCC 0x%08" PRIx32, data);
  497. return mem_ap_write_u32(a->armv7a_common.debug_ap,
  498. a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
  499. }
  500. static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
  501. uint32_t *dscr_p)
  502. {
  503. uint32_t dscr = DSCR_INSTR_COMP;
  504. int retval;
  505. if (dscr_p)
  506. dscr = *dscr_p;
  507. /* Wait for DTRRXfull */
  508. int64_t then = timeval_ms();
  509. while ((dscr & DSCR_DTR_TX_FULL) == 0) {
  510. retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
  511. a->armv7a_common.debug_base + CPUDBG_DSCR,
  512. &dscr);
  513. if (retval != ERROR_OK)
  514. return retval;
  515. if (timeval_ms() > then + 1000) {
  516. LOG_ERROR("Timeout waiting for read dcc");
  517. return ERROR_FAIL;
  518. }
  519. }
  520. retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
  521. a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
  522. if (retval != ERROR_OK)
  523. return retval;
  524. /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
  525. if (dscr_p)
  526. *dscr_p = dscr;
  527. return retval;
  528. }
/* Establish the DPM invariant before a batch of operations:
 * DSCR.InstrCompl set and the DCC receive channel empty. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by draining it into a scratch register:
		 * MRC p14, 0, R0, c0, c5, 0 */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
  562. static int cortex_a_dpm_finish(struct arm_dpm *dpm)
  563. {
  564. /* REVISIT what could be done here? */
  565. return ERROR_OK;
  566. }
  567. static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
  568. uint32_t opcode, uint32_t data)
  569. {
  570. struct cortex_a_common *a = dpm_to_a(dpm);
  571. int retval;
  572. uint32_t dscr = DSCR_INSTR_COMP;
  573. retval = cortex_a_write_dcc(a, data);
  574. if (retval != ERROR_OK)
  575. return retval;
  576. return cortex_a_exec_opcode(
  577. a->armv7a_common.arm.target,
  578. opcode,
  579. &dscr);
  580. }
  581. static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
  582. uint32_t opcode, uint32_t data)
  583. {
  584. struct cortex_a_common *a = dpm_to_a(dpm);
  585. uint32_t dscr = DSCR_INSTR_COMP;
  586. int retval;
  587. retval = cortex_a_write_dcc(a, data);
  588. if (retval != ERROR_OK)
  589. return retval;
  590. /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
  591. retval = cortex_a_exec_opcode(
  592. a->armv7a_common.arm.target,
  593. ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
  594. &dscr);
  595. if (retval != ERROR_OK)
  596. return retval;
  597. /* then the opcode, taking data from R0 */
  598. retval = cortex_a_exec_opcode(
  599. a->armv7a_common.arm.target,
  600. opcode,
  601. &dscr);
  602. return retval;
  603. }
  604. static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
  605. {
  606. struct target *target = dpm->arm->target;
  607. uint32_t dscr = DSCR_INSTR_COMP;
  608. /* "Prefetch flush" after modifying execution status in CPSR */
  609. return cortex_a_exec_opcode(target,
  610. ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
  611. &dscr);
  612. }
  613. static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
  614. uint32_t opcode, uint32_t *data)
  615. {
  616. struct cortex_a_common *a = dpm_to_a(dpm);
  617. int retval;
  618. uint32_t dscr = DSCR_INSTR_COMP;
  619. /* the opcode, writing data to DCC */
  620. retval = cortex_a_exec_opcode(
  621. a->armv7a_common.arm.target,
  622. opcode,
  623. &dscr);
  624. if (retval != ERROR_OK)
  625. return retval;
  626. return cortex_a_read_dcc(a, data, &dscr);
  627. }
  628. static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
  629. uint32_t opcode, uint32_t *data)
  630. {
  631. struct cortex_a_common *a = dpm_to_a(dpm);
  632. uint32_t dscr = DSCR_INSTR_COMP;
  633. int retval;
  634. /* the opcode, writing data to R0 */
  635. retval = cortex_a_exec_opcode(
  636. a->armv7a_common.arm.target,
  637. opcode,
  638. &dscr);
  639. if (retval != ERROR_OK)
  640. return retval;
  641. /* write R0 to DCC */
  642. retval = cortex_a_exec_opcode(
  643. a->armv7a_common.arm.target,
  644. ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
  645. &dscr);
  646. if (retval != ERROR_OK)
  647. return retval;
  648. return cortex_a_read_dcc(a, data, &dscr);
  649. }
  650. static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
  651. uint32_t addr, uint32_t control)
  652. {
  653. struct cortex_a_common *a = dpm_to_a(dpm);
  654. uint32_t vr = a->armv7a_common.debug_base;
  655. uint32_t cr = a->armv7a_common.debug_base;
  656. int retval;
  657. switch (index_t) {
  658. case 0 ... 15: /* breakpoints */
  659. vr += CPUDBG_BVR_BASE;
  660. cr += CPUDBG_BCR_BASE;
  661. break;
  662. case 16 ... 31: /* watchpoints */
  663. vr += CPUDBG_WVR_BASE;
  664. cr += CPUDBG_WCR_BASE;
  665. index_t -= 16;
  666. break;
  667. default:
  668. return ERROR_FAIL;
  669. }
  670. vr += 4 * index_t;
  671. cr += 4 * index_t;
  672. LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
  673. (unsigned) vr, (unsigned) cr);
  674. retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
  675. vr, addr);
  676. if (retval != ERROR_OK)
  677. return retval;
  678. retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
  679. cr, control);
  680. return retval;
  681. }
  682. static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
  683. {
  684. struct cortex_a_common *a = dpm_to_a(dpm);
  685. uint32_t cr;
  686. switch (index_t) {
  687. case 0 ... 15:
  688. cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
  689. break;
  690. case 16 ... 31:
  691. cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
  692. index_t -= 16;
  693. break;
  694. default:
  695. return ERROR_FAIL;
  696. }
  697. cr += 4 * index_t;
  698. LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
  699. /* clear control register */
  700. return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
  701. }
  702. static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
  703. {
  704. struct arm_dpm *dpm = &a->armv7a_common.dpm;
  705. int retval;
  706. dpm->arm = &a->armv7a_common.arm;
  707. dpm->didr = didr;
  708. dpm->prepare = cortex_a_dpm_prepare;
  709. dpm->finish = cortex_a_dpm_finish;
  710. dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
  711. dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
  712. dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
  713. dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
  714. dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
  715. dpm->bpwp_enable = cortex_a_bpwp_enable;
  716. dpm->bpwp_disable = cortex_a_bpwp_disable;
  717. retval = arm_dpm_setup(dpm);
  718. if (retval == ERROR_OK)
  719. retval = arm_dpm_initialize(dpm);
  720. return retval;
  721. }
  722. static struct target *get_cortex_a(struct target *target, int32_t coreid)
  723. {
  724. struct target_list *head;
  725. struct target *curr;
  726. head = target->head;
  727. while (head != (struct target_list *)NULL) {
  728. curr = head->target;
  729. if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
  730. return curr;
  731. head = head->next;
  732. }
  733. return target;
  734. }
  735. static int cortex_a_halt(struct target *target);
  736. static int cortex_a_halt_smp(struct target *target)
  737. {
  738. int retval = 0;
  739. struct target_list *head;
  740. struct target *curr;
  741. head = target->head;
  742. while (head != (struct target_list *)NULL) {
  743. curr = head->target;
  744. if ((curr != target) && (curr->state != TARGET_HALTED))
  745. retval += cortex_a_halt(curr);
  746. head = head->next;
  747. }
  748. return retval;
  749. }
  750. static int update_halt_gdb(struct target *target)
  751. {
  752. int retval = 0;
  753. if (target->gdb_service && target->gdb_service->core[0] == -1) {
  754. target->gdb_service->target = target;
  755. target->gdb_service->core[0] = target->coreid;
  756. retval += cortex_a_halt_smp(target);
  757. }
  758. return retval;
  759. }
/*
 * Cortex-A Run control
 */

/* Periodic poll: sample DSCR, update target->state, and fire halt
 * events (running debug_entry and SMP halt propagation on a fresh
 * halt). */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;

	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}

	/* Sample DSCR to learn the core's current run state. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				/* Fresh halt from a (possibly unknown) run
				 * state: capture debug context, then notify. */
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				/* Halt out of debug-running gets the DEBUG
				 * flavour of the event. */
				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
  828. static int cortex_a_halt(struct target *target)
  829. {
  830. int retval = ERROR_OK;
  831. uint32_t dscr;
  832. struct armv7a_common *armv7a = target_to_armv7a(target);
  833. /*
  834. * Tell the core to be halted by writing DRCR with 0x1
  835. * and then wait for the core to be halted.
  836. */
  837. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  838. armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
  839. if (retval != ERROR_OK)
  840. return retval;
  841. /*
  842. * enter halting debug mode
  843. */
  844. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  845. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  846. if (retval != ERROR_OK)
  847. return retval;
  848. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  849. armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
  850. if (retval != ERROR_OK)
  851. return retval;
  852. int64_t then = timeval_ms();
  853. for (;; ) {
  854. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  855. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  856. if (retval != ERROR_OK)
  857. return retval;
  858. if ((dscr & DSCR_CORE_HALTED) != 0)
  859. break;
  860. if (timeval_ms() > then + 1000) {
  861. LOG_ERROR("Timeout waiting for halt");
  862. return ERROR_FAIL;
  863. }
  864. }
  865. target->debug_reason = DBG_REASON_DBGRQ;
  866. return ERROR_OK;
  867. }
/* Prepare the core for resuming execution: fix up and write back the PC,
 * restore CP15 control state and all dirty cached registers, and mark the
 * register cache invalid. Does NOT actually restart the core — that is done
 * separately by cortex_a_internal_restart().
 *
 * current = 1: resume at the current PC (written back through *address);
 * current = 0: resume at *address.
 * Returns ERROR_OK or a propagated error code.
 */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word-aligned; drop bits [1:0] */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	/* mark PC dirty so it is written back to the core below */
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}
#endif

	return retval;
}
  953. static int cortex_a_internal_restart(struct target *target)
  954. {
  955. struct armv7a_common *armv7a = target_to_armv7a(target);
  956. struct arm *arm = &armv7a->arm;
  957. int retval;
  958. uint32_t dscr;
  959. /*
  960. * * Restart core and wait for it to be started. Clear ITRen and sticky
  961. * * exception flags: see ARMv7 ARM, C5.9.
  962. *
  963. * REVISIT: for single stepping, we probably want to
  964. * disable IRQs by default, with optional override...
  965. */
  966. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  967. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  968. if (retval != ERROR_OK)
  969. return retval;
  970. if ((dscr & DSCR_INSTR_COMP) == 0)
  971. LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
  972. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  973. armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
  974. if (retval != ERROR_OK)
  975. return retval;
  976. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  977. armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
  978. DRCR_CLEAR_EXCEPTIONS);
  979. if (retval != ERROR_OK)
  980. return retval;
  981. int64_t then = timeval_ms();
  982. for (;; ) {
  983. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  984. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  985. if (retval != ERROR_OK)
  986. return retval;
  987. if ((dscr & DSCR_CORE_RESTARTED) != 0)
  988. break;
  989. if (timeval_ms() > then + 1000) {
  990. LOG_ERROR("Timeout waiting for resume");
  991. return ERROR_FAIL;
  992. }
  993. }
  994. target->debug_reason = DBG_REASON_NOTHALTED;
  995. target->state = TARGET_RUNNING;
  996. /* registers are now invalid */
  997. register_cache_invalidate(arm->core_cache);
  998. return ERROR_OK;
  999. }
  1000. static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
  1001. {
  1002. int retval = 0;
  1003. struct target_list *head;
  1004. struct target *curr;
  1005. uint32_t address;
  1006. head = target->head;
  1007. while (head != (struct target_list *)NULL) {
  1008. curr = head->target;
  1009. if ((curr != target) && (curr->state != TARGET_RUNNING)) {
  1010. /* resume current address , not in step mode */
  1011. retval += cortex_a_internal_restore(curr, 1, &address,
  1012. handle_breakpoints, 0);
  1013. retval += cortex_a_internal_restart(curr);
  1014. }
  1015. head = head->next;
  1016. }
  1017. return retval;
  1018. }
  1019. static int cortex_a_resume(struct target *target, int current,
  1020. uint32_t address, int handle_breakpoints, int debug_execution)
  1021. {
  1022. int retval = 0;
  1023. /* dummy resume for smp toggle in order to reduce gdb impact */
  1024. if ((target->smp) && (target->gdb_service->core[1] != -1)) {
  1025. /* simulate a start and halt of target */
  1026. target->gdb_service->target = NULL;
  1027. target->gdb_service->core[0] = target->gdb_service->core[1];
  1028. /* fake resume at next poll we play the target core[1], see poll*/
  1029. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  1030. return 0;
  1031. }
  1032. cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
  1033. if (target->smp) {
  1034. target->gdb_service->core[0] = -1;
  1035. retval = cortex_a_restore_smp(target, handle_breakpoints);
  1036. if (retval != ERROR_OK)
  1037. return retval;
  1038. }
  1039. cortex_a_internal_restart(target);
  1040. if (!debug_execution) {
  1041. target->state = TARGET_RUNNING;
  1042. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  1043. LOG_DEBUG("target resumed at 0x%" PRIx32, address);
  1044. } else {
  1045. target->state = TARGET_DEBUG_RUNNING;
  1046. target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
  1047. LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
  1048. }
  1049. return ERROR_OK;
  1050. }
/* Called when the core has entered debug state: enable the ITR path,
 * classify the debug reason from DSCR, read back the register file and
 * CPSR (either via the DPM or through a working-area fast path), fix up
 * the PC for the pipeline offset, and run the architecture-specific
 * post-debug-entry hook. Returns ERROR_OK or a propagated error code. */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
		/* normal path: read registers one by one through the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* fast path: dump r0..r15 to a working area, then read it */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: compensate the pipeline offset in
		 * the PC value read out of the core */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
  1147. static int cortex_a_post_debug_entry(struct target *target)
  1148. {
  1149. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  1150. struct armv7a_common *armv7a = &cortex_a->armv7a_common;
  1151. int retval;
  1152. /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
  1153. retval = armv7a->arm.mrc(target, 15,
  1154. 0, 0, /* op1, op2 */
  1155. 1, 0, /* CRn, CRm */
  1156. &cortex_a->cp15_control_reg);
  1157. if (retval != ERROR_OK)
  1158. return retval;
  1159. LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
  1160. cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
  1161. if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
  1162. armv7a_identify_cache(target);
  1163. if (armv7a->is_armv7r) {
  1164. armv7a->armv7a_mmu.mmu_enabled = 0;
  1165. } else {
  1166. armv7a->armv7a_mmu.mmu_enabled =
  1167. (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
  1168. }
  1169. armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
  1170. (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
  1171. armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
  1172. (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
  1173. cortex_a->curr_mode = armv7a->arm.core_mode;
  1174. /* switch to SVC mode to read DACR */
  1175. dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
  1176. armv7a->arm.mrc(target, 15,
  1177. 0, 0, 3, 0,
  1178. &cortex_a->cp15_dacr_reg);
  1179. LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
  1180. cortex_a->cp15_dacr_reg);
  1181. dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
  1182. return ERROR_OK;
  1183. }
  1184. int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
  1185. {
  1186. struct armv7a_common *armv7a = target_to_armv7a(target);
  1187. uint32_t dscr;
  1188. /* Read DSCR */
  1189. int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  1190. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  1191. if (ERROR_OK != retval)
  1192. return retval;
  1193. /* clear bitfield */
  1194. dscr &= ~bit_mask;
  1195. /* put new value */
  1196. dscr |= value & bit_mask;
  1197. /* write new DSCR */
  1198. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  1199. armv7a->debug_base + CPUDBG_DSCR, dscr);
  1200. return retval;
  1201. }
  1202. static int cortex_a_step(struct target *target, int current, uint32_t address,
  1203. int handle_breakpoints)
  1204. {
  1205. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  1206. struct armv7a_common *armv7a = target_to_armv7a(target);
  1207. struct arm *arm = &armv7a->arm;
  1208. struct breakpoint *breakpoint = NULL;
  1209. struct breakpoint stepbreakpoint;
  1210. struct reg *r;
  1211. int retval;
  1212. if (target->state != TARGET_HALTED) {
  1213. LOG_WARNING("target not halted");
  1214. return ERROR_TARGET_NOT_HALTED;
  1215. }
  1216. /* current = 1: continue on current pc, otherwise continue at <address> */
  1217. r = arm->pc;
  1218. if (!current)
  1219. buf_set_u32(r->value, 0, 32, address);
  1220. else
  1221. address = buf_get_u32(r->value, 0, 32);
  1222. /* The front-end may request us not to handle breakpoints.
  1223. * But since Cortex-A uses breakpoint for single step,
  1224. * we MUST handle breakpoints.
  1225. */
  1226. handle_breakpoints = 1;
  1227. if (handle_breakpoints) {
  1228. breakpoint = breakpoint_find(target, address);
  1229. if (breakpoint)
  1230. cortex_a_unset_breakpoint(target, breakpoint);
  1231. }
  1232. /* Setup single step breakpoint */
  1233. stepbreakpoint.address = address;
  1234. stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
  1235. ? 2 : 4;
  1236. stepbreakpoint.type = BKPT_HARD;
  1237. stepbreakpoint.set = 0;
  1238. /* Disable interrupts during single step if requested */
  1239. if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
  1240. retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
  1241. if (ERROR_OK != retval)
  1242. return retval;
  1243. }
  1244. /* Break on IVA mismatch */
  1245. cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
  1246. target->debug_reason = DBG_REASON_SINGLESTEP;
  1247. retval = cortex_a_resume(target, 1, address, 0, 0);
  1248. if (retval != ERROR_OK)
  1249. return retval;
  1250. int64_t then = timeval_ms();
  1251. while (target->state != TARGET_HALTED) {
  1252. retval = cortex_a_poll(target);
  1253. if (retval != ERROR_OK)
  1254. return retval;
  1255. if (timeval_ms() > then + 1000) {
  1256. LOG_ERROR("timeout waiting for target halt");
  1257. return ERROR_FAIL;
  1258. }
  1259. }
  1260. cortex_a_unset_breakpoint(target, &stepbreakpoint);
  1261. /* Re-enable interrupts if they were disabled */
  1262. if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
  1263. retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
  1264. if (ERROR_OK != retval)
  1265. return retval;
  1266. }
  1267. target->debug_reason = DBG_REASON_BREAKPOINT;
  1268. if (breakpoint)
  1269. cortex_a_set_breakpoint(target, breakpoint, 0);
  1270. if (target->state != TARGET_HALTED)
  1271. LOG_DEBUG("target stepped");
  1272. return ERROR_OK;
  1273. }
  1274. static int cortex_a_restore_context(struct target *target, bool bpwp)
  1275. {
  1276. struct armv7a_common *armv7a = target_to_armv7a(target);
  1277. LOG_DEBUG(" ");
  1278. if (armv7a->pre_restore_context)
  1279. armv7a->pre_restore_context(target);
  1280. return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
  1281. }
  1282. /*
  1283. * Cortex-A Breakpoint and watchpoint functions
  1284. */
  1285. /* Setup hardware Breakpoint Register Pair */
  1286. static int cortex_a_set_breakpoint(struct target *target,
  1287. struct breakpoint *breakpoint, uint8_t matchmode)
  1288. {
  1289. int retval;
  1290. int brp_i = 0;
  1291. uint32_t control;
  1292. uint8_t byte_addr_select = 0x0F;
  1293. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  1294. struct armv7a_common *armv7a = &cortex_a->armv7a_common;
  1295. struct cortex_a_brp *brp_list = cortex_a->brp_list;
  1296. if (breakpoint->set) {
  1297. LOG_WARNING("breakpoint already set");
  1298. return ERROR_OK;
  1299. }
  1300. if (breakpoint->type == BKPT_HARD) {
  1301. while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
  1302. brp_i++;
  1303. if (brp_i >= cortex_a->brp_num) {
  1304. LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
  1305. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1306. }
  1307. breakpoint->set = brp_i + 1;
  1308. if (breakpoint->length == 2)
  1309. byte_addr_select = (3 << (breakpoint->address & 0x02));
  1310. control = ((matchmode & 0x7) << 20)
  1311. | (byte_addr_select << 5)
  1312. | (3 << 1) | 1;
  1313. brp_list[brp_i].used = 1;
  1314. brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
  1315. brp_list[brp_i].control = control;
  1316. retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
  1317. + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
  1318. brp_list[brp_i].value);
  1319. if (retval != ERROR_OK)
  1320. return retval;
  1321. retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
  1322. + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
  1323. brp_list[brp_i].control);
  1324. if (retval != ERROR_OK)
  1325. return retval;
  1326. LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
  1327. brp_list[brp_i].control,
  1328. brp_list[brp_i].value);
  1329. } else if (breakpoint->type == BKPT_SOFT) {
  1330. uint8_t code[4];
  1331. if (breakpoint->length == 2)
  1332. buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
  1333. else
  1334. buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
  1335. retval = target_read_memory(target,
  1336. breakpoint->address & 0xFFFFFFFE,
  1337. breakpoint->length, 1,
  1338. breakpoint->orig_instr);
  1339. if (retval != ERROR_OK)
  1340. return retval;
  1341. /* make sure data cache is cleaned & invalidated down to PoC */
  1342. if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
  1343. armv7a_cache_flush_virt(target, breakpoint->address,
  1344. breakpoint->length);
  1345. }
  1346. retval = target_write_memory(target,
  1347. breakpoint->address & 0xFFFFFFFE,
  1348. breakpoint->length, 1, code);
  1349. if (retval != ERROR_OK)
  1350. return retval;
  1351. /* update i-cache at breakpoint location */
  1352. armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
  1353. breakpoint->length);
  1354. armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
  1355. breakpoint->length);
  1356. breakpoint->set = 0x11; /* Any nice value but 0 */
  1357. }
  1358. return ERROR_OK;
  1359. }
  1360. static int cortex_a_set_context_breakpoint(struct target *target,
  1361. struct breakpoint *breakpoint, uint8_t matchmode)
  1362. {
  1363. int retval = ERROR_FAIL;
  1364. int brp_i = 0;
  1365. uint32_t control;
  1366. uint8_t byte_addr_select = 0x0F;
  1367. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  1368. struct armv7a_common *armv7a = &cortex_a->armv7a_common;
  1369. struct cortex_a_brp *brp_list = cortex_a->brp_list;
  1370. if (breakpoint->set) {
  1371. LOG_WARNING("breakpoint already set");
  1372. return retval;
  1373. }
  1374. /*check available context BRPs*/
  1375. while ((brp_list[brp_i].used ||
  1376. (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
  1377. brp_i++;
  1378. if (brp_i >= cortex_a->brp_num) {
  1379. LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
  1380. return ERROR_FAIL;
  1381. }
  1382. breakpoint->set = brp_i + 1;
  1383. control = ((matchmode & 0x7) << 20)
  1384. | (byte_addr_select << 5)
  1385. | (3 << 1) | 1;
  1386. brp_list[brp_i].used = 1;
  1387. brp_list[brp_i].value = (breakpoint->asid);
  1388. brp_list[brp_i].control = control;
  1389. retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
  1390. + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
  1391. brp_list[brp_i].value);
  1392. if (retval != ERROR_OK)
  1393. return retval;
  1394. retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
  1395. + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
  1396. brp_list[brp_i].control);
  1397. if (retval != ERROR_OK)
  1398. return retval;
  1399. LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
  1400. brp_list[brp_i].control,
  1401. brp_list[brp_i].value);
  1402. return ERROR_OK;
  1403. }
  1404. static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
  1405. {
  1406. int retval = ERROR_FAIL;
  1407. int brp_1 = 0; /* holds the contextID pair */
  1408. int brp_2 = 0; /* holds the IVA pair */
  1409. uint32_t control_CTX, control_IVA;
  1410. uint8_t CTX_byte_addr_select = 0x0F;
  1411. uint8_t IVA_byte_addr_select = 0x0F;
  1412. uint8_t CTX_machmode = 0x03;
  1413. uint8_t IVA_machmode = 0x01;
  1414. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  1415. struct armv7a_common *armv7a = &cortex_a->armv7a_common;
  1416. struct cortex_a_brp *brp_list = cortex_a->brp_list;
  1417. if (breakpoint->set) {
  1418. LOG_WARNING("breakpoint already set");
  1419. return retval;
  1420. }
  1421. /*check available context BRPs*/
  1422. while ((brp_list[brp_1].used ||
  1423. (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
  1424. brp_1++;
  1425. printf("brp(CTX) found num: %d\n", brp_1);
  1426. if (brp_1 >= cortex_a->brp_num) {
  1427. LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
  1428. return ERROR_FAIL;
  1429. }
  1430. while ((brp_list[brp_2].used ||
  1431. (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
  1432. brp_2++;
  1433. printf("brp(IVA) found num: %d\n", brp_2);
  1434. if (brp_2 >= cortex_a->brp_num) {
  1435. LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
  1436. return ERROR_FAIL;
  1437. }
  1438. breakpoint->set = brp_1 + 1;
  1439. breakpoint->linked_BRP = brp_2;
  1440. control_CTX = ((CTX_machmode & 0x7) << 20)
  1441. | (brp_2 << 16)
  1442. | (0 << 14)
  1443. | (CTX_byte_addr_select << 5)
  1444. | (3 << 1) | 1;
  1445. brp_list[brp_1].used = 1;
  1446. brp_list[brp_1].value = (breakpoint->asid);
  1447. brp_list[brp_1].control = control_CTX;
  1448. retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
  1449. + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
  1450. brp_list[brp_1].value);
  1451. if (retval != ERROR_OK)
  1452. return retval;
  1453. retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
  1454. + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
  1455. brp_list[brp_1].control);
  1456. if (retval != ERROR_OK)
  1457. return retval;
  1458. control_IVA = ((IVA_machmode & 0x7) << 20)
  1459. | (brp_1 << 16)
  1460. | (IVA_byte_addr_select << 5)
  1461. | (3 << 1) | 1;
  1462. brp_list[brp_2].used = 1;
  1463. brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
  1464. brp_list[brp_2].control = control_IVA;
  1465. retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
  1466. + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
  1467. brp_list[brp_2].value);
  1468. if (retval != ERROR_OK)
  1469. return retval;
  1470. retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
  1471. + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
  1472. brp_list[brp_2].control);
  1473. if (retval != ERROR_OK)
  1474. return retval;
  1475. return ERROR_OK;
  1476. }
/* Remove a previously set breakpoint from the target. For BKPT_HARD this
 * clears the BVR/BCR pair(s) — both pairs for a hybrid (linked context+IVA)
 * breakpoint — and for BKPT_SOFT it restores the saved original instruction
 * and invalidates the caches at that address. Returns ERROR_OK or a
 * propagated error code. */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* address != 0 AND asid != 0 marks a hybrid breakpoint: it
		 * owns two linked BRPs, both of which must be torn down */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* clear the context-ID pair */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* clear the linked IVA pair */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: single BRP to clear */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* software breakpoint: put the saved instruction back */

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
					breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
  1585. static int cortex_a_add_breakpoint(struct target *target,
  1586. struct breakpoint *breakpoint)
  1587. {
  1588. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  1589. if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
  1590. LOG_INFO("no hardware breakpoint available");
  1591. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1592. }
  1593. if (breakpoint->type == BKPT_HARD)
  1594. cortex_a->brp_num_available--;
  1595. return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
  1596. }
  1597. static int cortex_a_add_context_breakpoint(struct target *target,
  1598. struct breakpoint *breakpoint)
  1599. {
  1600. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  1601. if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
  1602. LOG_INFO("no hardware breakpoint available");
  1603. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1604. }
  1605. if (breakpoint->type == BKPT_HARD)
  1606. cortex_a->brp_num_available--;
  1607. return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
  1608. }
  1609. static int cortex_a_add_hybrid_breakpoint(struct target *target,
  1610. struct breakpoint *breakpoint)
  1611. {
  1612. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  1613. if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
  1614. LOG_INFO("no hardware breakpoint available");
  1615. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1616. }
  1617. if (breakpoint->type == BKPT_HARD)
  1618. cortex_a->brp_num_available--;
  1619. return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
  1620. }
  1621. static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
  1622. {
  1623. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  1624. #if 0
  1625. /* It is perfectly possible to remove breakpoints while the target is running */
  1626. if (target->state != TARGET_HALTED) {
  1627. LOG_WARNING("target not halted");
  1628. return ERROR_TARGET_NOT_HALTED;
  1629. }
  1630. #endif
  1631. if (breakpoint->set) {
  1632. cortex_a_unset_breakpoint(target, breakpoint);
  1633. if (breakpoint->type == BKPT_HARD)
  1634. cortex_a->brp_num_available++;
  1635. }
  1636. return ERROR_OK;
  1637. }
  1638. /*
  1639. * Cortex-A Reset functions
  1640. */
/* Assert reset on the target: prefer a board-specific reset-assert event
 * handler; otherwise fall back to SRST when the adapter supports it.
 * Invalidates the register cache and moves the target to TARGET_RESET.
 * Returns ERROR_OK, or ERROR_FAIL when no reset method is available. */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		/* NOTE(review): SRST is only asserted here for reset_halt on
		 * non-gating adapters; on gating adapters SRST would disable
		 * the debug port and break the halt-on-reset sequence. In
		 * the plain run-after-reset case nothing is asserted at all
		 * on this path — confirm this matches the intended policy. */
		if (target->reset_halt)
			if (jtag_get_reset_config() & RESET_SRST_NO_GATING)
				jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
  1666. static int cortex_a_deassert_reset(struct target *target)
  1667. {
  1668. int retval;
  1669. LOG_DEBUG(" ");
  1670. /* be certain SRST is off */
  1671. jtag_add_reset(0, 0);
  1672. retval = cortex_a_poll(target);
  1673. if (retval != ERROR_OK)
  1674. return retval;
  1675. if (target->reset_halt) {
  1676. if (target->state != TARGET_HALTED) {
  1677. LOG_WARNING("%s: ran after reset and before halt ...",
  1678. target_name(target));
  1679. retval = target_halt(target);
  1680. if (retval != ERROR_OK)
  1681. return retval;
  1682. }
  1683. }
  1684. return ERROR_OK;
  1685. }
  1686. static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
  1687. {
  1688. /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
  1689. * New desired mode must be in mode. Current value of DSCR must be in
  1690. * *dscr, which is updated with new value.
  1691. *
  1692. * This function elides actually sending the mode-change over the debug
  1693. * interface if the mode is already set as desired.
  1694. */
  1695. uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
  1696. if (new_dscr != *dscr) {
  1697. struct armv7a_common *armv7a = target_to_armv7a(target);
  1698. int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  1699. armv7a->debug_base + CPUDBG_DSCR, new_dscr);
  1700. if (retval == ERROR_OK)
  1701. *dscr = new_dscr;
  1702. return retval;
  1703. } else {
  1704. return ERROR_OK;
  1705. }
  1706. }
  1707. static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
  1708. uint32_t value, uint32_t *dscr)
  1709. {
  1710. /* Waits until the specified bit(s) of DSCR take on a specified value. */
  1711. struct armv7a_common *armv7a = target_to_armv7a(target);
  1712. int64_t then = timeval_ms();
  1713. int retval;
  1714. while ((*dscr & mask) != value) {
  1715. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  1716. armv7a->debug_base + CPUDBG_DSCR, dscr);
  1717. if (retval != ERROR_OK)
  1718. return retval;
  1719. if (timeval_ms() > then + 1000) {
  1720. LOG_ERROR("timeout waiting for DSCR bit change");
  1721. return ERROR_FAIL;
  1722. }
  1723. }
  1724. return ERROR_OK;
  1725. }
/*
 * Read a coprocessor register into *data via the DCC.
 *
 * 'opcode' must be an instruction that leaves the desired value in R0
 * (R0 is clobbered; the caller must have marked it dirty).  The current
 * DSCR value must be in *dscr and is updated as a side effect.
 */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
  1754. static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
  1755. uint32_t *dfsr, uint32_t *dscr)
  1756. {
  1757. int retval;
  1758. if (dfar) {
  1759. retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
  1760. if (retval != ERROR_OK)
  1761. return retval;
  1762. }
  1763. if (dfsr) {
  1764. retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
  1765. if (retval != ERROR_OK)
  1766. return retval;
  1767. }
  1768. return ERROR_OK;
  1769. }
/*
 * Write 'data' to a coprocessor register via the DCC.
 *
 * 'opcode' must be an instruction that moves R0 into the desired
 * coprocessor register (R0 is clobbered; the caller must have marked it
 * dirty).  The current DSCR value must be in *dscr and is updated as a
 * side effect.
 */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
	uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Write the value into DTRRX. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
  1797. static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
  1798. uint32_t dfsr, uint32_t *dscr)
  1799. {
  1800. int retval;
  1801. retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
  1802. if (retval != ERROR_OK)
  1803. return retval;
  1804. retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
  1805. if (retval != ERROR_OK)
  1806. return retval;
  1807. return ERROR_OK;
  1808. }
  1809. static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
  1810. {
  1811. uint32_t status, upper4;
  1812. if (dfsr & (1 << 9)) {
  1813. /* LPAE format. */
  1814. status = dfsr & 0x3f;
  1815. upper4 = status >> 2;
  1816. if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
  1817. return ERROR_TARGET_TRANSLATION_FAULT;
  1818. else if (status == 33)
  1819. return ERROR_TARGET_UNALIGNED_ACCESS;
  1820. else
  1821. return ERROR_TARGET_DATA_ABORT;
  1822. } else {
  1823. /* Normal format. */
  1824. status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
  1825. if (status == 1)
  1826. return ERROR_TARGET_UNALIGNED_ACCESS;
  1827. else if (status == 5 || status == 7 || status == 3 || status == 6 ||
  1828. status == 9 || status == 11 || status == 13 || status == 15)
  1829. return ERROR_TARGET_TRANSLATION_FAULT;
  1830. else
  1831. return ERROR_TARGET_DATA_ABORT;
  1832. }
  1833. }
/*
 * Slow per-object APB-AP memory write path: one DCC round-trip and one
 * store instruction per object.  Used for byte/halfword/unaligned writes.
 */
static int cortex_a_write_apb_ab_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_apb_ab_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory.  The store
		 * opcodes post-increment R0, advancing the target address. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early.  The caller inspects the
		 * sticky abort bits in *dscr to diagnose the fault. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
/*
 * Fast word-aligned APB-AP memory write path: latches a single STC
 * instruction in the ITR and streams words through DTRRX in fast mode,
 * so each word costs only one DAP write.
 */
static int cortex_a_write_apb_ab_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is fast but only works for
	 * word-sized objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* Switch to fast mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Latch STC instruction.  In fast mode it is re-issued automatically
	 * on every DTRRX write below. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
	if (retval != ERROR_OK)
		return retval;

	/* Transfer all the data and issue all the instructions. */
	return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
  1924. static int cortex_a_write_apb_ab_memory(struct target *target,
  1925. uint32_t address, uint32_t size,
  1926. uint32_t count, const uint8_t *buffer)
  1927. {
  1928. /* Write memory through APB-AP. */
  1929. int retval, final_retval;
  1930. struct armv7a_common *armv7a = target_to_armv7a(target);
  1931. struct arm *arm = &armv7a->arm;
  1932. uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
  1933. LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
  1934. address, size, count);
  1935. if (target->state != TARGET_HALTED) {
  1936. LOG_WARNING("target not halted");
  1937. return ERROR_TARGET_NOT_HALTED;
  1938. }
  1939. if (!count)
  1940. return ERROR_OK;
  1941. /* Clear any abort. */
  1942. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  1943. armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
  1944. if (retval != ERROR_OK)
  1945. return retval;
  1946. /* Read DSCR. */
  1947. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  1948. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  1949. if (retval != ERROR_OK)
  1950. return retval;
  1951. /* Switch to non-blocking mode if not already in that mode. */
  1952. retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
  1953. if (retval != ERROR_OK)
  1954. goto out;
  1955. /* Mark R0 as dirty. */
  1956. arm_reg_current(arm, 0)->dirty = true;
  1957. /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
  1958. retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
  1959. if (retval != ERROR_OK)
  1960. goto out;
  1961. /* Get the memory address into R0. */
  1962. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  1963. armv7a->debug_base + CPUDBG_DTRRX, address);
  1964. if (retval != ERROR_OK)
  1965. goto out;
  1966. retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
  1967. if (retval != ERROR_OK)
  1968. goto out;
  1969. if (size == 4 && (address % 4) == 0) {
  1970. /* We are doing a word-aligned transfer, so use fast mode. */
  1971. retval = cortex_a_write_apb_ab_memory_fast(target, count, buffer, &dscr);
  1972. } else {
  1973. /* Use slow path. */
  1974. retval = cortex_a_write_apb_ab_memory_slow(target, size, count, buffer, &dscr);
  1975. }
  1976. out:
  1977. final_retval = retval;
  1978. /* Switch to non-blocking mode if not already in that mode. */
  1979. retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
  1980. if (final_retval == ERROR_OK)
  1981. final_retval = retval;
  1982. /* Wait for last issued instruction to complete. */
  1983. retval = cortex_a_wait_instrcmpl(target, &dscr, true);
  1984. if (final_retval == ERROR_OK)
  1985. final_retval = retval;
  1986. /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
  1987. * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
  1988. * check RXfull_l). Most of the time this will be free because RXfull_l
  1989. * will be cleared immediately and cached in dscr. However, don't do this
  1990. * if there is fault, because then the instruction might not have completed
  1991. * successfully. */
  1992. if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
  1993. retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
  1994. if (retval != ERROR_OK)
  1995. return retval;
  1996. }
  1997. /* If there were any sticky abort flags, clear them. */
  1998. if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
  1999. fault_dscr = dscr;
  2000. mem_ap_write_atomic_u32(armv7a->debug_ap,
  2001. armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
  2002. dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
  2003. } else {
  2004. fault_dscr = 0;
  2005. }
  2006. /* Handle synchronous data faults. */
  2007. if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
  2008. if (final_retval == ERROR_OK) {
  2009. /* Final return value will reflect cause of fault. */
  2010. retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
  2011. if (retval == ERROR_OK) {
  2012. LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
  2013. final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
  2014. } else
  2015. final_retval = retval;
  2016. }
  2017. /* Fault destroyed DFAR/DFSR; restore them. */
  2018. retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
  2019. if (retval != ERROR_OK)
  2020. LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
  2021. }
  2022. /* Handle asynchronous data faults. */
  2023. if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
  2024. if (final_retval == ERROR_OK)
  2025. /* No other error has been recorded so far, so keep this one. */
  2026. final_retval = ERROR_TARGET_DATA_ABORT;
  2027. }
  2028. /* If the DCC is nonempty, clear it. */
  2029. if (dscr & DSCR_DTRTX_FULL_LATCHED) {
  2030. uint32_t dummy;
  2031. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2032. armv7a->debug_base + CPUDBG_DTRTX, &dummy);
  2033. if (final_retval == ERROR_OK)
  2034. final_retval = retval;
  2035. }
  2036. if (dscr & DSCR_DTRRX_FULL_LATCHED) {
  2037. retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
  2038. if (final_retval == ERROR_OK)
  2039. final_retval = retval;
  2040. }
  2041. /* Done. */
  2042. return final_retval;
  2043. }
/*
 * Slow per-object APB-AP memory read path: one load instruction and one
 * DCC round-trip per object.  Used for byte/halfword/unaligned reads.
 */
static int cortex_a_read_apb_ab_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_read_apb_ab_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Issue a load of the appropriate size to R1.  The load opcodes
		 * post-increment R0, advancing the target address. */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early.  The caller inspects the
		 * sticky abort bits in *dscr to diagnose the fault. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
/*
 * Fast word-aligned APB-AP memory read path: issues one LDC, then
 * latches the LDC in the ITR and streams words out of DTRTX in fast
 * mode, so each word costs only one DAP read.  The final word needs
 * special handling because reading DTRTX in fast mode re-issues the
 * latched instruction.
 */
static int cortex_a_read_apb_ab_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early.  The caller inspects the sticky
	 * abort bits in *dscr to diagnose the fault. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
/*
 * Read target memory through the APB-AP, using the core's DCC as the
 * data channel.  Word-sized, word-aligned transfers use the fast LDC
 * streaming path; everything else uses the slow per-object path.  On a
 * data abort the sticky abort flags are cleared, DFAR/DFSR are restored,
 * the DCC is drained, and the abort is converted into an OpenOCD error
 * code.  The target must be halted.
 */
static int cortex_a_read_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through APB-AP. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty; it holds the transfer address below. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_apb_ab_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_apb_ab_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	/* Cleanup runs even on error; the first failure wins. */
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them.  The DRCR write's
	 * result is ignored here: clearing is best-effort cleanup. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it: drain DTRTX by reading it, and
	 * drain DTRRX by consuming it into (dirty) R1. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
  2292. /*
  2293. * Cortex-A Memory access
  2294. *
 * This is the same as for the Cortex-M3, but we must also use the
 * correct AP number for every access.
  2297. */
  2298. static int cortex_a_read_phys_memory(struct target *target,
  2299. uint32_t address, uint32_t size,
  2300. uint32_t count, uint8_t *buffer)
  2301. {
  2302. struct armv7a_common *armv7a = target_to_armv7a(target);
  2303. struct adiv5_dap *swjdp = armv7a->arm.dap;
  2304. uint8_t apsel = swjdp->apsel;
  2305. int retval;
  2306. if (!count || !buffer)
  2307. return ERROR_COMMAND_SYNTAX_ERROR;
  2308. LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
  2309. address, size, count);
  2310. if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
  2311. return mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
  2312. /* read memory through APB-AP */
  2313. cortex_a_prep_memaccess(target, 1);
  2314. retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
  2315. cortex_a_post_memaccess(target, 1);
  2316. return retval;
  2317. }
  2318. static int cortex_a_read_memory(struct target *target, uint32_t address,
  2319. uint32_t size, uint32_t count, uint8_t *buffer)
  2320. {
  2321. int retval;
  2322. /* cortex_a handles unaligned memory access */
  2323. LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
  2324. size, count);
  2325. cortex_a_prep_memaccess(target, 0);
  2326. retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
  2327. cortex_a_post_memaccess(target, 0);
  2328. return retval;
  2329. }
  2330. static int cortex_a_read_memory_ahb(struct target *target, uint32_t address,
  2331. uint32_t size, uint32_t count, uint8_t *buffer)
  2332. {
  2333. int mmu_enabled = 0;
  2334. uint32_t virt, phys;
  2335. int retval;
  2336. struct armv7a_common *armv7a = target_to_armv7a(target);
  2337. struct adiv5_dap *swjdp = armv7a->arm.dap;
  2338. uint8_t apsel = swjdp->apsel;
  2339. if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
  2340. return target_read_memory(target, address, size, count, buffer);
  2341. /* cortex_a handles unaligned memory access */
  2342. LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
  2343. size, count);
  2344. /* determine if MMU was enabled on target stop */
  2345. if (!armv7a->is_armv7r) {
  2346. retval = cortex_a_mmu(target, &mmu_enabled);
  2347. if (retval != ERROR_OK)
  2348. return retval;
  2349. }
  2350. if (mmu_enabled) {
  2351. virt = address;
  2352. retval = cortex_a_virt2phys(target, virt, &phys);
  2353. if (retval != ERROR_OK)
  2354. return retval;
  2355. LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
  2356. virt, phys);
  2357. address = phys;
  2358. }
  2359. if (!count || !buffer)
  2360. return ERROR_COMMAND_SYNTAX_ERROR;
  2361. retval = mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
  2362. return retval;
  2363. }
  2364. static int cortex_a_write_phys_memory(struct target *target,
  2365. uint32_t address, uint32_t size,
  2366. uint32_t count, const uint8_t *buffer)
  2367. {
  2368. struct armv7a_common *armv7a = target_to_armv7a(target);
  2369. struct adiv5_dap *swjdp = armv7a->arm.dap;
  2370. uint8_t apsel = swjdp->apsel;
  2371. int retval;
  2372. if (!count || !buffer)
  2373. return ERROR_COMMAND_SYNTAX_ERROR;
  2374. LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
  2375. size, count);
  2376. if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
  2377. return mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
  2378. /* write memory through APB-AP */
  2379. cortex_a_prep_memaccess(target, 1);
  2380. retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
  2381. cortex_a_post_memaccess(target, 1);
  2382. return retval;
  2383. }
  2384. static int cortex_a_write_memory(struct target *target, uint32_t address,
  2385. uint32_t size, uint32_t count, const uint8_t *buffer)
  2386. {
  2387. int retval;
  2388. /* cortex_a handles unaligned memory access */
  2389. LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
  2390. size, count);
  2391. /* memory writes bypass the caches, must flush before writing */
  2392. armv7a_cache_auto_flush_on_write(target, address, size * count);
  2393. cortex_a_prep_memaccess(target, 0);
  2394. retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
  2395. cortex_a_post_memaccess(target, 0);
  2396. return retval;
  2397. }
  2398. static int cortex_a_write_memory_ahb(struct target *target, uint32_t address,
  2399. uint32_t size, uint32_t count, const uint8_t *buffer)
  2400. {
  2401. int mmu_enabled = 0;
  2402. uint32_t virt, phys;
  2403. int retval;
  2404. struct armv7a_common *armv7a = target_to_armv7a(target);
  2405. struct adiv5_dap *swjdp = armv7a->arm.dap;
  2406. uint8_t apsel = swjdp->apsel;
  2407. if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
  2408. return target_write_memory(target, address, size, count, buffer);
  2409. /* cortex_a handles unaligned memory access */
  2410. LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
  2411. size, count);
  2412. /* determine if MMU was enabled on target stop */
  2413. if (!armv7a->is_armv7r) {
  2414. retval = cortex_a_mmu(target, &mmu_enabled);
  2415. if (retval != ERROR_OK)
  2416. return retval;
  2417. }
  2418. if (mmu_enabled) {
  2419. virt = address;
  2420. retval = cortex_a_virt2phys(target, virt, &phys);
  2421. if (retval != ERROR_OK)
  2422. return retval;
  2423. LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
  2424. virt,
  2425. phys);
  2426. address = phys;
  2427. }
  2428. if (!count || !buffer)
  2429. return ERROR_COMMAND_SYNTAX_ERROR;
  2430. retval = mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
  2431. return retval;
  2432. }
  2433. static int cortex_a_read_buffer(struct target *target, uint32_t address,
  2434. uint32_t count, uint8_t *buffer)
  2435. {
  2436. uint32_t size;
  2437. /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
  2438. * will have something to do with the size we leave to it. */
  2439. for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
  2440. if (address & size) {
  2441. int retval = cortex_a_read_memory_ahb(target, address, size, 1, buffer);
  2442. if (retval != ERROR_OK)
  2443. return retval;
  2444. address += size;
  2445. count -= size;
  2446. buffer += size;
  2447. }
  2448. }
  2449. /* Read the data with as large access size as possible. */
  2450. for (; size > 0; size /= 2) {
  2451. uint32_t aligned = count - count % size;
  2452. if (aligned > 0) {
  2453. int retval = cortex_a_read_memory_ahb(target, address, size, aligned / size, buffer);
  2454. if (retval != ERROR_OK)
  2455. return retval;
  2456. address += aligned;
  2457. count -= aligned;
  2458. buffer += aligned;
  2459. }
  2460. }
  2461. return ERROR_OK;
  2462. }
  2463. static int cortex_a_write_buffer(struct target *target, uint32_t address,
  2464. uint32_t count, const uint8_t *buffer)
  2465. {
  2466. uint32_t size;
  2467. /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
  2468. * will have something to do with the size we leave to it. */
  2469. for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
  2470. if (address & size) {
  2471. int retval = cortex_a_write_memory_ahb(target, address, size, 1, buffer);
  2472. if (retval != ERROR_OK)
  2473. return retval;
  2474. address += size;
  2475. count -= size;
  2476. buffer += size;
  2477. }
  2478. }
  2479. /* Write the data with as large access size as possible. */
  2480. for (; size > 0; size /= 2) {
  2481. uint32_t aligned = count - count % size;
  2482. if (aligned > 0) {
  2483. int retval = cortex_a_write_memory_ahb(target, address, size, aligned / size, buffer);
  2484. if (retval != ERROR_OK)
  2485. return retval;
  2486. address += aligned;
  2487. count -= aligned;
  2488. buffer += aligned;
  2489. }
  2490. }
  2491. return ERROR_OK;
  2492. }
  2493. static int cortex_a_handle_target_request(void *priv)
  2494. {
  2495. struct target *target = priv;
  2496. struct armv7a_common *armv7a = target_to_armv7a(target);
  2497. int retval;
  2498. if (!target_was_examined(target))
  2499. return ERROR_OK;
  2500. if (!target->dbg_msg_enabled)
  2501. return ERROR_OK;
  2502. if (target->state == TARGET_RUNNING) {
  2503. uint32_t request;
  2504. uint32_t dscr;
  2505. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2506. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  2507. /* check if we have data */
  2508. int64_t then = timeval_ms();
  2509. while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
  2510. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2511. armv7a->debug_base + CPUDBG_DTRTX, &request);
  2512. if (retval == ERROR_OK) {
  2513. target_request(target, request);
  2514. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2515. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  2516. }
  2517. if (timeval_ms() > then + 1000) {
  2518. LOG_ERROR("Timeout waiting for dtr tx full");
  2519. return ERROR_FAIL;
  2520. }
  2521. }
  2522. }
  2523. return ERROR_OK;
  2524. }
  2525. /*
  2526. * Cortex-A target information and configuration
  2527. */
  2528. static int cortex_a_examine_first(struct target *target)
  2529. {
  2530. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  2531. struct armv7a_common *armv7a = &cortex_a->armv7a_common;
  2532. struct adiv5_dap *swjdp = armv7a->arm.dap;
  2533. int i;
  2534. int retval = ERROR_OK;
  2535. uint32_t didr, ctypr, ttypr, cpuid, dbg_osreg;
  2536. retval = dap_dp_init(swjdp);
  2537. if (retval != ERROR_OK) {
  2538. LOG_ERROR("Could not initialize the debug port");
  2539. return retval;
  2540. }
  2541. /* Search for the APB-AB - it is needed for access to debug registers */
  2542. retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
  2543. if (retval != ERROR_OK) {
  2544. LOG_ERROR("Could not find APB-AP for debug access");
  2545. return retval;
  2546. }
  2547. retval = mem_ap_init(armv7a->debug_ap);
  2548. if (retval != ERROR_OK) {
  2549. LOG_ERROR("Could not initialize the APB-AP");
  2550. return retval;
  2551. }
  2552. armv7a->debug_ap->memaccess_tck = 80;
  2553. /* Search for the AHB-AB.
  2554. * REVISIT: We should search for AXI-AP as well and make sure the AP's MEMTYPE says it
  2555. * can access system memory. */
  2556. armv7a->memory_ap_available = false;
  2557. retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
  2558. if (retval == ERROR_OK) {
  2559. retval = mem_ap_init(armv7a->memory_ap);
  2560. if (retval == ERROR_OK)
  2561. armv7a->memory_ap_available = true;
  2562. else
  2563. LOG_WARNING("Could not initialize AHB-AP for memory access - using APB-AP");
  2564. } else {
  2565. /* AHB-AP not found - use APB-AP */
  2566. LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
  2567. }
  2568. if (!target->dbgbase_set) {
  2569. uint32_t dbgbase;
  2570. /* Get ROM Table base */
  2571. uint32_t apid;
  2572. int32_t coreidx = target->coreid;
  2573. LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
  2574. target->cmd_name);
  2575. retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
  2576. if (retval != ERROR_OK)
  2577. return retval;
  2578. /* Lookup 0x15 -- Processor DAP */
  2579. retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
  2580. &armv7a->debug_base, &coreidx);
  2581. if (retval != ERROR_OK) {
  2582. LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
  2583. target->cmd_name);
  2584. return retval;
  2585. }
  2586. LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
  2587. target->coreid, armv7a->debug_base);
  2588. } else
  2589. armv7a->debug_base = target->dbgbase;
  2590. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2591. armv7a->debug_base + CPUDBG_CPUID, &cpuid);
  2592. if (retval != ERROR_OK)
  2593. return retval;
  2594. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2595. armv7a->debug_base + CPUDBG_CPUID, &cpuid);
  2596. if (retval != ERROR_OK) {
  2597. LOG_DEBUG("Examine %s failed", "CPUID");
  2598. return retval;
  2599. }
  2600. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2601. armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
  2602. if (retval != ERROR_OK) {
  2603. LOG_DEBUG("Examine %s failed", "CTYPR");
  2604. return retval;
  2605. }
  2606. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2607. armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
  2608. if (retval != ERROR_OK) {
  2609. LOG_DEBUG("Examine %s failed", "TTYPR");
  2610. return retval;
  2611. }
  2612. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2613. armv7a->debug_base + CPUDBG_DIDR, &didr);
  2614. if (retval != ERROR_OK) {
  2615. LOG_DEBUG("Examine %s failed", "DIDR");
  2616. return retval;
  2617. }
  2618. LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
  2619. LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
  2620. LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
  2621. LOG_DEBUG("didr = 0x%08" PRIx32, didr);
  2622. cortex_a->cpuid = cpuid;
  2623. cortex_a->ctypr = ctypr;
  2624. cortex_a->ttypr = ttypr;
  2625. cortex_a->didr = didr;
  2626. /* Unlocking the debug registers */
  2627. if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
  2628. CORTEX_A15_PARTNUM) {
  2629. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  2630. armv7a->debug_base + CPUDBG_OSLAR,
  2631. 0);
  2632. if (retval != ERROR_OK)
  2633. return retval;
  2634. }
  2635. /* Unlocking the debug registers */
  2636. if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
  2637. CORTEX_A7_PARTNUM) {
  2638. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  2639. armv7a->debug_base + CPUDBG_OSLAR,
  2640. 0);
  2641. if (retval != ERROR_OK)
  2642. return retval;
  2643. }
  2644. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2645. armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
  2646. if (retval != ERROR_OK)
  2647. return retval;
  2648. LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
  2649. armv7a->arm.core_type = ARM_MODE_MON;
  2650. /* Avoid recreating the registers cache */
  2651. if (!target_was_examined(target)) {
  2652. retval = cortex_a_dpm_setup(cortex_a, didr);
  2653. if (retval != ERROR_OK)
  2654. return retval;
  2655. }
  2656. /* Setup Breakpoint Register Pairs */
  2657. cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
  2658. cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
  2659. cortex_a->brp_num_available = cortex_a->brp_num;
  2660. free(cortex_a->brp_list);
  2661. cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
  2662. /* cortex_a->brb_enabled = ????; */
  2663. for (i = 0; i < cortex_a->brp_num; i++) {
  2664. cortex_a->brp_list[i].used = 0;
  2665. if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
  2666. cortex_a->brp_list[i].type = BRP_NORMAL;
  2667. else
  2668. cortex_a->brp_list[i].type = BRP_CONTEXT;
  2669. cortex_a->brp_list[i].value = 0;
  2670. cortex_a->brp_list[i].control = 0;
  2671. cortex_a->brp_list[i].BRPn = i;
  2672. }
  2673. LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
  2674. /* select debug_ap as default */
  2675. swjdp->apsel = armv7a->debug_ap->ap_num;
  2676. target_set_examined(target);
  2677. return ERROR_OK;
  2678. }
  2679. static int cortex_a_examine(struct target *target)
  2680. {
  2681. int retval = ERROR_OK;
  2682. /* Reestablish communication after target reset */
  2683. retval = cortex_a_examine_first(target);
  2684. /* Configure core debug access */
  2685. if (retval == ERROR_OK)
  2686. retval = cortex_a_init_debug_access(target);
  2687. return retval;
  2688. }
  2689. /*
  2690. * Cortex-A target creation and initialization
  2691. */
/* Target-framework init hook; intentionally a no-op for this driver. */
static int cortex_a_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
  2698. static int cortex_a_init_arch_info(struct target *target,
  2699. struct cortex_a_common *cortex_a, struct jtag_tap *tap)
  2700. {
  2701. struct armv7a_common *armv7a = &cortex_a->armv7a_common;
  2702. /* Setup struct cortex_a_common */
  2703. cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
  2704. /* tap has no dap initialized */
  2705. if (!tap->dap) {
  2706. tap->dap = dap_init();
  2707. /* Leave (only) generic DAP stuff for debugport_init() */
  2708. tap->dap->tap = tap;
  2709. }
  2710. armv7a->arm.dap = tap->dap;
  2711. cortex_a->fast_reg_read = 0;
  2712. /* register arch-specific functions */
  2713. armv7a->examine_debug_reason = NULL;
  2714. armv7a->post_debug_entry = cortex_a_post_debug_entry;
  2715. armv7a->pre_restore_context = NULL;
  2716. armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
  2717. /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
  2718. /* REVISIT v7a setup should be in a v7a-specific routine */
  2719. armv7a_init_arch_info(target, armv7a);
  2720. target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
  2721. return ERROR_OK;
  2722. }
  2723. static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
  2724. {
  2725. struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
  2726. cortex_a->armv7a_common.is_armv7r = false;
  2727. return cortex_a_init_arch_info(target, cortex_a, target->tap);
  2728. }
  2729. static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
  2730. {
  2731. struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
  2732. cortex_a->armv7a_common.is_armv7r = true;
  2733. return cortex_a_init_arch_info(target, cortex_a, target->tap);
  2734. }
  2735. static void cortex_a_deinit_target(struct target *target)
  2736. {
  2737. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  2738. struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
  2739. free(cortex_a->brp_list);
  2740. free(dpm->dbp);
  2741. free(dpm->dwp);
  2742. free(cortex_a);
  2743. }
  2744. static int cortex_a_mmu(struct target *target, int *enabled)
  2745. {
  2746. struct armv7a_common *armv7a = target_to_armv7a(target);
  2747. if (target->state != TARGET_HALTED) {
  2748. LOG_ERROR("%s: target not halted", __func__);
  2749. return ERROR_TARGET_INVALID;
  2750. }
  2751. if (armv7a->is_armv7r)
  2752. *enabled = 0;
  2753. else
  2754. *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
  2755. return ERROR_OK;
  2756. }
  2757. static int cortex_a_virt2phys(struct target *target,
  2758. uint32_t virt, uint32_t *phys)
  2759. {
  2760. int retval = ERROR_FAIL;
  2761. struct armv7a_common *armv7a = target_to_armv7a(target);
  2762. struct adiv5_dap *swjdp = armv7a->arm.dap;
  2763. uint8_t apsel = swjdp->apsel;
  2764. if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num)) {
  2765. uint32_t ret;
  2766. retval = armv7a_mmu_translate_va(target,
  2767. virt, &ret);
  2768. if (retval != ERROR_OK)
  2769. goto done;
  2770. *phys = ret;
  2771. } else {/* use this method if armv7a->memory_ap not selected
  2772. * mmu must be enable in order to get a correct translation */
  2773. retval = cortex_a_mmu_modify(target, 1);
  2774. if (retval != ERROR_OK)
  2775. goto done;
  2776. retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
  2777. }
  2778. done:
  2779. return retval;
  2780. }
  2781. COMMAND_HANDLER(cortex_a_handle_cache_info_command)
  2782. {
  2783. struct target *target = get_current_target(CMD_CTX);
  2784. struct armv7a_common *armv7a = target_to_armv7a(target);
  2785. return armv7a_handle_cache_info_command(CMD_CTX,
  2786. &armv7a->armv7a_mmu.armv7a_cache);
  2787. }
  2788. COMMAND_HANDLER(cortex_a_handle_dbginit_command)
  2789. {
  2790. struct target *target = get_current_target(CMD_CTX);
  2791. if (!target_was_examined(target)) {
  2792. LOG_ERROR("target not examined yet");
  2793. return ERROR_FAIL;
  2794. }
  2795. return cortex_a_init_debug_access(target);
  2796. }
  2797. COMMAND_HANDLER(cortex_a_handle_smp_off_command)
  2798. {
  2799. struct target *target = get_current_target(CMD_CTX);
  2800. /* check target is an smp target */
  2801. struct target_list *head;
  2802. struct target *curr;
  2803. head = target->head;
  2804. target->smp = 0;
  2805. if (head != (struct target_list *)NULL) {
  2806. while (head != (struct target_list *)NULL) {
  2807. curr = head->target;
  2808. curr->smp = 0;
  2809. head = head->next;
  2810. }
  2811. /* fixes the target display to the debugger */
  2812. target->gdb_service->target = target;
  2813. }
  2814. return ERROR_OK;
  2815. }
  2816. COMMAND_HANDLER(cortex_a_handle_smp_on_command)
  2817. {
  2818. struct target *target = get_current_target(CMD_CTX);
  2819. struct target_list *head;
  2820. struct target *curr;
  2821. head = target->head;
  2822. if (head != (struct target_list *)NULL) {
  2823. target->smp = 1;
  2824. while (head != (struct target_list *)NULL) {
  2825. curr = head->target;
  2826. curr->smp = 1;
  2827. head = head->next;
  2828. }
  2829. }
  2830. return ERROR_OK;
  2831. }
  2832. COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
  2833. {
  2834. struct target *target = get_current_target(CMD_CTX);
  2835. int retval = ERROR_OK;
  2836. struct target_list *head;
  2837. head = target->head;
  2838. if (head != (struct target_list *)NULL) {
  2839. if (CMD_ARGC == 1) {
  2840. int coreid = 0;
  2841. COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
  2842. if (ERROR_OK != retval)
  2843. return retval;
  2844. target->gdb_service->core[1] = coreid;
  2845. }
  2846. command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
  2847. , target->gdb_service->core[1]);
  2848. }
  2849. return ERROR_OK;
  2850. }
  2851. COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
  2852. {
  2853. struct target *target = get_current_target(CMD_CTX);
  2854. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  2855. static const Jim_Nvp nvp_maskisr_modes[] = {
  2856. { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
  2857. { .name = "on", .value = CORTEX_A_ISRMASK_ON },
  2858. { .name = NULL, .value = -1 },
  2859. };
  2860. const Jim_Nvp *n;
  2861. if (CMD_ARGC > 0) {
  2862. n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
  2863. if (n->name == NULL) {
  2864. LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
  2865. return ERROR_COMMAND_SYNTAX_ERROR;
  2866. }
  2867. cortex_a->isrmasking_mode = n->value;
  2868. }
  2869. n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
  2870. command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
  2871. return ERROR_OK;
  2872. }
  2873. COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
  2874. {
  2875. struct target *target = get_current_target(CMD_CTX);
  2876. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  2877. static const Jim_Nvp nvp_dacrfixup_modes[] = {
  2878. { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
  2879. { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
  2880. { .name = NULL, .value = -1 },
  2881. };
  2882. const Jim_Nvp *n;
  2883. if (CMD_ARGC > 0) {
  2884. n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
  2885. if (n->name == NULL)
  2886. return ERROR_COMMAND_SYNTAX_ERROR;
  2887. cortex_a->dacrfixup_mode = n->value;
  2888. }
  2889. n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
  2890. command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
  2891. return ERROR_OK;
  2892. }
/* Subcommands registered under the "cortex_a" command group. */
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{   .name = "smp_off",
		.handler = cortex_a_handle_smp_off_command,
		.mode = COMMAND_EXEC,
		.help = "Stop smp handling",
		.usage = "",},
	{
		.name = "smp_on",
		.handler = cortex_a_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "smp_gdb",
		.handler = cortex_a_handle_smp_gdb_command,
		.mode = COMMAND_EXEC,
		.help = "display/fix current core played to gdb",
		.usage = "",
	},
	{
		/* COMMAND_ANY: usable before the target is examined */
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask cortex_a interrupts",
		.usage = "['on'|'off']",
	},
	{
		.name = "dacrfixup",
		.handler = handle_cortex_a_dacrfixup_command,
		.mode = COMMAND_EXEC,
		.help = "set domain access control (DACR) to all-manager "
			"on memory access",
		.usage = "['on'|'off']",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command set for Cortex-A targets: inherits the generic ARM and
 * ARMv7-A command chains, plus the "cortex_a" group defined above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Target operations table for Cortex-A (ARMv7-A) cores. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	/* buffered variants pick the widest aligned access size */
	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints not implemented */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
/* Subcommands registered under the "cortex_r4" command group.
 * NOTE(review): unlike the cortex_a table, "maskisr" here is COMMAND_EXEC
 * rather than COMMAND_ANY — confirm whether that difference is intended. */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command set for Cortex-R4 targets: inherits the generic ARM and
 * ARMv7-A command chains, plus the "cortex_r4" group defined above. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Target operations table for Cortex-R4 (ARMv7-R) cores. Compared with
 * cortexa_target there are no read_buffer/write_buffer, phys-memory, mmu
 * or virt2phys hooks (ARMv7-R has no MMU). */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints not implemented */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};