/***************************************************************************
 * Copyright (C) 2005 by Dominic Rath
 * Dominic.Rath@gmx.de
 *
 * Copyright (C) 2006 by Magnus Lundin
 * lundin@mlu.mine.nu
 *
 * Copyright (C) 2008 by Spencer Oliver
 * spen@spen-soft.co.uk
 *
 * Copyright (C) 2009 by Dirk Behme
 * dirk.behme@gmail.com - copy from cortex_m3
 *
 * Copyright (C) 2010 Øyvind Harboe
 * oyvind.harboe@zylin.com
 *
 * Copyright (C) ST-Ericsson SA 2011
 * michel.jaouen@stericsson.com : smp minimum support
 *
 * Copyright (C) Broadcom 2012
 * ehunter@broadcom.com : Cortex-R4 support
 *
 * Copyright (C) 2013 Kamal Dasu
 * kdasu.kdev@gmail.com
 *
 * Copyright (C) 2016 Chengyu Zheng
 * chengyu.zheng@polimi.it : watchpoint support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Cortex-A8(tm) TRM, ARM DDI 0344H
 * Cortex-A9(tm) TRM, ARM DDI 0407F
 * Cortex-R4(tm) TRM, ARM DDI 0363E
 * Cortex-A15(tm) TRM, ARM DDI 0438C
 ***************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "cortex_a.h"
#include "register.h"
#include "armv7a_mmu.h"
#include "target_request.h"
#include "target_type.h"
#include "arm_opcodes.h"
#include "arm_semihosting.h"
#include "jtag/interface.h"
#include "transport/transport.h"
#include "smp.h"
#include <helper/bits.h>
#include <helper/time_support.h>

static int cortex_a_poll(struct target *target);
static int cortex_a_debug_entry(struct target *target);
static int cortex_a_restore_context(struct target *target, bool bpwp);
static int cortex_a_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
	uint32_t value, uint32_t *dscr);
static int cortex_a_mmu(struct target *target, int *enabled);
static int cortex_a_mmu_modify(struct target *target, int enable);
static int cortex_a_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);

static unsigned int ilog2(unsigned int x)
{
	unsigned int y = 0;
	x /= 2;
	while (x) {
		++y;
		x /= 2;
	}
	return y;
}
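/* Note: ilog2() returns floor(log2(x)), e.g. ilog2(8) == 3. It is used by
 * cortex_a_set_watchpoint() below to turn a power-of-2 watchpoint length
 * into the WCR address-mask field. */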
/* restore cp15_control_reg at resume */
static int cortex_a_restore_cp15_control_reg(struct target *target)
{
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
		cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg);
	}
	return retval;
}

/*
 * Set up ARM core for memory access.
 * If !phys_access, switch to SVC mode and make sure MMU is on
 * If phys_access, switch off mmu
 */
static int cortex_a_prep_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	int mmu_enabled = 0;

	if (phys_access == 0) {
		arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* overwrite DACR to all-manager */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					0xFFFFFFFF);
		}
	} else {
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 0);
	}
	return ERROR_OK;
}

/*
 * Restore ARM core after memory access.
 * If !phys_access, switch to previous mode
 * If phys_access, restore MMU setting
 */
static int cortex_a_post_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if (phys_access == 0) {
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* restore */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					cortex_a->cp15_dacr_reg);
		}
		arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	} else {
		int mmu_enabled = 0;
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
	}
	return ERROR_OK;
}
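/* The two helpers above are meant to bracket a memory access. A sketch of
 * the calling pattern used by the memory read/write handlers later in this
 * file:
 *
 *	cortex_a_prep_memaccess(target, phys_access);
 *	retval = ... perform the actual read or write ...;
 *	cortex_a_post_memaccess(target, phys_access);
 */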
/* modify cp15_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address */
static int cortex_a_mmu_modify(struct target *target, int enable)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;
	int need_write = 0;

	if (enable) {
		/* if the MMU was disabled when the target stopped, it cannot
		 * be re-enabled here */
		if (!(cortex_a->cp15_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
			return ERROR_FAIL;
		}
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
			cortex_a->cp15_control_reg_curr |= 0x1U;
			need_write = 1;
		}
	} else {
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
			cortex_a->cp15_control_reg_curr &= ~0x1U;
			need_write = 1;
		}
	}

	if (need_write) {
		LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
			enable ? "enable mmu" : "disable mmu",
			cortex_a->cp15_control_reg_curr);
		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg_curr);
	}
	return retval;
}
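/* Bit 0 of the CP15 c1 System Control Register (SCTLR) is the M bit, the
 * MMU enable; the 0x1U masks above test and toggle it. */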
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t dscr;
	int retval;

	/* lock memory-mapped access to debug registers to prevent
	 * software interference */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = dap_run(armv7a->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Enable halt for breakpoint, watchpoint and vector catch */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* Since this is likely called from init or reset, update target state information */
	return cortex_a_poll(target);
}
static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
{
	/* Waits until InstrCmpl_l becomes 1, indicating the instruction is done.
	 * Writes the final value of DSCR into *dscr. Pass force=true to force a
	 * read of DSCR at least once. */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	if (force) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
	}

	retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
	if (retval != ERROR_OK)
		LOG_ERROR("Error waiting for InstrCompl=1");
	return retval;
}

/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value. Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 */
static int cortex_a_exec_opcode(struct target *target,
		uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	retval = cortex_a_wait_instrcmpl(target, &dscr, false);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	/* Wait for InstrCompl bit to be set */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for cortex_a_exec_opcode");
		return retval;
	}

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
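/* A sketch of typical usage: execute an ISB through the ITR while the core
 * is halted, reusing one DSCR read across calls:
 *
 *	uint32_t dscr = DSCR_INSTR_COMP;	// no instruction pending
 *	retval = cortex_a_exec_opcode(target,
 *			ARMV4_5_MCR(15, 0, 0, 7, 5, 4), &dscr);
 */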
/* Write to memory mapped registers directly with no cache or mmu handling */
static int cortex_a_dap_write_memap_register_u32(struct target *target,
	uint32_t address,
	uint32_t value)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);

	return retval;
}

/*
 * Cortex-A implementation of Debug Programmer's Model
 *
 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
 * so there's no need to poll for it before executing an instruction.
 *
 * NOTE that in several of these cases the "stall" mode might be useful.
 * It'd let us queue a few operations together... prepare/finish might
 * be the places to enable/disable that mode.
 */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}

static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
}

static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRTXfull */
	retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
			DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for read dcc");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
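/* DCC direction summary: from the debugger's side, writes go through DTRRX
 * (cortex_a_write_dcc() above), which the core then reads, while reads poll
 * DSCR for the DTRTX-full flag and then fetch DTRTX, which the core wrote. */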
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for dpm prepare");
		return retval;
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}

static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}

static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
}

static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm,
		uint8_t rt, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (rt > 15)
		return ERROR_TARGET_INVALID;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to Rt, "MRC p14, 0, Rt, c0, c5, 0", 0xEE100E15 for Rt == r0 */
	return cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, rt, 0, 5, 0),
			&dscr);
}

static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}

static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return cortex_a_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}
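/* CP15 c7, c5, 4 is the ARMv7 Instruction Synchronization Barrier (the old
 * "prefetch flush"); issuing it here makes the CPSR change above visible to
 * subsequently issued instructions. */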
static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* the opcode, writing data to DCC */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}

static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm,
		uint8_t rt, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (rt > 15)
		return ERROR_TARGET_INVALID;

	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, rt, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}

static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	return cortex_a_instr_read_data_rt_dcc(dpm, 0, data);
}

static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}

static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t cr;

	switch (index_t) {
		case 0 ... 15:
			cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:
			cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
}
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}

static struct target *get_cortex_a(struct target *target, int32_t coreid)
{
	struct target_list *head;
	struct target *curr;

	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
			return curr;
		head = head->next;
	}
	return target;
}

static int cortex_a_halt(struct target *target);

static int cortex_a_halt_smp(struct target *target)
{
	int retval = 0;
	struct target_list *head;
	struct target *curr;

	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr != target) && (curr->state != TARGET_HALTED)
			&& target_was_examined(curr))
			retval += cortex_a_halt(curr);
		head = head->next;
	}
	return retval;
}

static int update_halt_gdb(struct target *target)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;
	int retval = 0;

	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += cortex_a_halt_smp(target);
	}

	if (target->gdb_service)
		gdb_target = target->gdb_service->target;

	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* Skip gdb_target; it alerts GDB so has to be polled as last one */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in cortex_a_poll() */
		curr->smp = 0;
		cortex_a_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target && gdb_target != target)
		cortex_a_poll(gdb_target);

	return retval;
}
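/* Ordering matters here: every non-GDB core is polled first (with smp
 * temporarily cleared so cortex_a_poll() does not recurse back into SMP
 * handling), and the core serving GDB is polled last, so the halt event GDB
 * sees reflects a fully updated SMP group. */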
/*
 * Cortex-A Run control
 */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;

	/* toggling to another core is done by gdb as follows:
	 *   maint packet J core_id
	 *   continue
	 * the next poll then triggers a halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(!target->gdb_service->target)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;

			retval = cortex_a_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp) {
				retval = update_halt_gdb(target);
				if (retval != ERROR_OK)
					return retval;
			}

			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
			} else {	/* prev_target_state is RUNNING, UNKNOWN or RESET */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}

static int cortex_a_halt(struct target *target)
{
	int retval;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/*
	 * Tell the core to halt by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	dscr = 0;	/* force read of dscr */
	retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
			DSCR_CORE_HALTED, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for halt");
		return retval;
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
static int cortex_a_internal_restore(struct target *target, int current,
	target_addr_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = true;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = true;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups do not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
		case ARM_STATE_AARCH64:
			LOG_ERROR("Shouldn't be in AARCH64 state");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* restore dpm_mode at system halt */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* call it now, before restoring context, because it uses cpu
	 * register r0 to restore the cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}
#endif

	return retval;
}
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	dscr = 0;	/* force read of dscr */
	retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
			DSCR_CORE_RESTARTED, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for resume");
		return retval;
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
{
	int retval = 0;
	struct target_list *head;
	struct target *curr;
	target_addr_t address;

	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr != target) && (curr->state != TARGET_RUNNING)
			&& target_was_examined(curr)) {
			/* resume at current address, not in step mode */
			retval += cortex_a_internal_restore(curr, 1, &address,
					handle_breakpoints, 0);
			retval += cortex_a_internal_restart(curr);
		}
		head = head->next;
	}
	return retval;
}

static int cortex_a_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	/* dummy resume for smp toggle in order to reduce gdb impact */
	if ((target->smp) && (target->gdb_service->core[1] != -1)) {
		/* simulate a start and halt of target */
		target->gdb_service->target = NULL;
		target->gdb_service->core[0] = target->gdb_service->core[1];
		/* fake resume; at the next poll we act as core[1], see poll */
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		return 0;
	}
	cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
	if (target->smp) {
		target->gdb_service->core[0] = -1;
		retval = cortex_a_restore_smp(target, handle_breakpoints);
		if (retval != ERROR_OK)
			return retval;
	}
	cortex_a_internal_restart(target);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
	}

	return ERROR_OK;
}
static int cortex_a_debug_entry(struct target *target)
{
	uint32_t dscr;
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* First load registers accessible through the core debug port */
	retval = arm_dpm_read_current_registers(&armv7a->dpm);
	if (retval != ERROR_OK)
		return retval;

	if (arm->spsr) {
		/* read SPSR */
		retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
		if (retval != ERROR_OK)
			return retval;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
static int cortex_a_post_debug_entry(struct target *target)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	retval = armv7a->arm.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cortex_a->cp15_control_reg);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
	cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;

	if (!armv7a->is_armv7r)
		armv7a_read_ttbcr(target);

	if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
		armv7a_identify_cache(target);

	if (armv7a->is_armv7r) {
		armv7a->armv7a_mmu.mmu_enabled = 0;
	} else {
		armv7a->armv7a_mmu.mmu_enabled =
			(cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
	}
	armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
		(cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
	armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
		(cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
	cortex_a->curr_mode = armv7a->arm.core_mode;

	/* switch to SVC mode to read DACR */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
	armv7a->arm.mrc(target, 15,
			0, 0, 3, 0,
			&cortex_a->cp15_dacr_reg);
	LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
		cortex_a->cp15_dacr_reg);
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	return ERROR_OK;
}
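/* SCTLR bit decoding used above: bit 0 (0x1) is M (MMU enable), bit 2 (0x4)
 * is C (data/unified cache enable) and bit 12 (0x1000) is I (instruction
 * cache enable). */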
static int cortex_a_set_dscr_bits(struct target *target,
	unsigned long bit_mask, unsigned long value)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t dscr;

	/* Read DSCR */
	int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* clear bitfield */
	dscr &= ~bit_mask;
	/* put new value */
	dscr |= value & bit_mask;

	/* write new DSCR */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	return retval;
}
static int cortex_a_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct breakpoint *breakpoint = NULL;
	struct breakpoint stepbreakpoint;
	struct reg *r;
	int retval;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = arm->pc;
	if (!current)
		buf_set_u32(r->value, 0, 32, address);
	else
		address = buf_get_u32(r->value, 0, 32);

	/* The front-end may request us not to handle breakpoints.
	 * But since Cortex-A uses breakpoint for single step,
	 * we MUST handle breakpoints.
	 */
	handle_breakpoints = 1;
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, address);
		if (breakpoint)
			cortex_a_unset_breakpoint(target, breakpoint);
	}

	/* Setup single step breakpoint */
	stepbreakpoint.address = address;
	stepbreakpoint.asid = 0;
	stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
		? 2 : 4;
	stepbreakpoint.type = BKPT_HARD;
	stepbreakpoint.set = 0;

	/* Disable interrupts during single step if requested */
	if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
		retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Break on IVA mismatch */
	cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	retval = cortex_a_resume(target, 1, address, 0, 0);
	if (retval != ERROR_OK)
		return retval;

	int64_t then = timeval_ms();
	while (target->state != TARGET_HALTED) {
		retval = cortex_a_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (target->state == TARGET_HALTED)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("timeout waiting for target halt");
			return ERROR_FAIL;
		}
	}

	cortex_a_unset_breakpoint(target, &stepbreakpoint);

	/* Re-enable interrupts if they were disabled */
	if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
		retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
		if (retval != ERROR_OK)
			return retval;
	}

	target->debug_reason = DBG_REASON_BREAKPOINT;

	if (breakpoint)
		cortex_a_set_breakpoint(target, breakpoint, 0);

	if (target->state != TARGET_HALTED)
		LOG_DEBUG("target stepped");

	return ERROR_OK;
}
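/* Cortex-A has no dedicated hardware single-step: the code above programs a
 * hardware breakpoint in address ("IVA") mismatch mode, matchmode 0x04, at
 * the current PC, so the core runs until the first instruction whose address
 * differs, i.e. exactly one instruction. */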
static int cortex_a_restore_context(struct target *target, bool bpwp)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	if (armv7a->pre_restore_context)
		armv7a->pre_restore_context(target);

	return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
}
/*
 * Cortex-A Breakpoint and watchpoint functions
 */

/* Setup hardware Breakpoint Register Pair */
static int cortex_a_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
			brp_i++;
		if (brp_i >= cortex_a->brp_num) {
			LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		breakpoint->set = brp_i + 1;
		if (breakpoint->length == 2)
			byte_addr_select = (3 << (breakpoint->address & 0x02));
		control = ((matchmode & 0x7) << 20)
			| (byte_addr_select << 5)
			| (3 << 1) | 1;
		brp_list[brp_i].used = true;
		brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
		brp_list[brp_i].control = control;
		retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
				brp_list[brp_i].value);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
				brp_list[brp_i].control);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
			brp_list[brp_i].control,
			brp_list[brp_i].value);
	} else if (breakpoint->type == BKPT_SOFT) {
		uint8_t code[4];

		/* length == 2: Thumb breakpoint */
		if (breakpoint->length == 2)
			buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
		else
		/* length == 3: Thumb-2 breakpoint, actual encoding is
		 * a regular Thumb BKPT instruction but we replace a
		 * 32bit Thumb-2 instruction, so fix-up the breakpoint
		 * length
		 */
		if (breakpoint->length == 3) {
			buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
			breakpoint->length = 4;
		} else
			/* length == 4, normal ARM breakpoint */
			buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));

		retval = target_read_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
					breakpoint->length);
		}

		retval = target_write_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1, code);
		if (retval != ERROR_OK)
			return retval;

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
				breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
				breakpoint->length);

		breakpoint->set = 0x11;	/* Any nice value but 0 */
	}

	return ERROR_OK;
}
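/* BCR layout as assembled above: bits[22:20] hold the match mode (0x0 exact
 * IVA match, 0x4 IVA mismatch for single-step), bits[8:5] the byte address
 * select, bits[2:1] = 0b11 allow matching at any privilege level, and bit 0
 * enables the comparator. */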
static int cortex_a_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval = ERROR_FAIL;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_list[brp_i].used ||
		(brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
		brp_i++;

	if (brp_i >= cortex_a->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_i + 1;
	control = ((matchmode & 0x7) << 20)
		| (byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_i].used = true;
	brp_list[brp_i].value = (breakpoint->asid);
	brp_list[brp_i].control = control;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
			brp_list[brp_i].value);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
			brp_list[brp_i].control);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
		brp_list[brp_i].control,
		brp_list[brp_i].value);
	return ERROR_OK;
}
static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval = ERROR_FAIL;
	int brp_1 = 0;	/* holds the contextID pair */
	int brp_2 = 0;	/* holds the IVA pair */
	uint32_t control_ctx, control_iva;
	uint8_t ctx_byte_addr_select = 0x0F;
	uint8_t iva_byte_addr_select = 0x0F;
	uint8_t ctx_machmode = 0x03;
	uint8_t iva_machmode = 0x01;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_list[brp_1].used ||
		(brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
		brp_1++;

	LOG_DEBUG("brp(CTX) found num: %d", brp_1);
	if (brp_1 >= cortex_a->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	while ((brp_list[brp_2].used ||
		(brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
		brp_2++;

	LOG_DEBUG("brp(IVA) found num: %d", brp_2);
	if (brp_2 >= cortex_a->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_1 + 1;
	breakpoint->linked_brp = brp_2;
	control_ctx = ((ctx_machmode & 0x7) << 20)
		| (brp_2 << 16)
		| (0 << 14)
		| (ctx_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_1].used = true;
	brp_list[brp_1].value = (breakpoint->asid);
	brp_list[brp_1].control = control_ctx;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BVR_BASE + 4 * brp_list[brp_1].brpn,
			brp_list[brp_1].value);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BCR_BASE + 4 * brp_list[brp_1].brpn,
			brp_list[brp_1].control);
	if (retval != ERROR_OK)
		return retval;

	control_iva = ((iva_machmode & 0x7) << 20)
		| (brp_1 << 16)
		| (iva_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_2].used = true;
	brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
	brp_list[brp_2].control = control_iva;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BVR_BASE + 4 * brp_list[brp_2].brpn,
			brp_list[brp_2].value);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BCR_BASE + 4 * brp_list[brp_2].brpn,
			brp_list[brp_2].control);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_brp;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = false;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = false;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].brpn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].brpn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_brp = 0;
			breakpoint->set = 0;
			return ERROR_OK;
		} else {
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = false;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
					breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
				breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
static int cortex_a_add_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		cortex_a->brp_num_available--;

	return cortex_a_set_breakpoint(target, breakpoint, 0x00);	/* Exact match */
}

static int cortex_a_add_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		cortex_a->brp_num_available--;

	return cortex_a_set_context_breakpoint(target, breakpoint, 0x02);	/* asid match */
}

static int cortex_a_add_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		cortex_a->brp_num_available--;

	return cortex_a_set_hybrid_breakpoint(target, breakpoint);	/* ??? */
}

static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

#if 0
	/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
#endif

	if (breakpoint->set) {
		cortex_a_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			cortex_a->brp_num_available++;
	}

	return ERROR_OK;
}
  1441. /**
  1442. * Sets a watchpoint for an Cortex-A target in one of the watchpoint units. It is
  1443. * considered a bug to call this function when there are no available watchpoint
  1444. * units.
  1445. *
  1446. * @param target Pointer to an Cortex-A target to set a watchpoint on
  1447. * @param watchpoint Pointer to the watchpoint to be set
  1448. * @return Error status if watchpoint set fails or the result of executing the
  1449. * JTAG queue
  1450. */
  1451. static int cortex_a_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
  1452. {
  1453. int retval = ERROR_OK;
  1454. int wrp_i = 0;
  1455. uint32_t control;
  1456. uint32_t address;
  1457. uint8_t address_mask;
  1458. uint8_t byte_address_select;
  1459. uint8_t load_store_access_control = 0x3;
  1460. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  1461. struct armv7a_common *armv7a = &cortex_a->armv7a_common;
  1462. struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
  1463. if (watchpoint->set) {
  1464. LOG_WARNING("watchpoint already set");
  1465. return retval;
  1466. }
1467. /* find a free watchpoint register pair */
1468. while ((wrp_i < cortex_a->wrp_num) && wrp_list[wrp_i].used)
1469. wrp_i++;
1470. if (wrp_i >= cortex_a->wrp_num) {
1471. LOG_ERROR("Cannot find a free Watchpoint Register Pair");
  1472. return ERROR_FAIL;
  1473. }
  1474. if (watchpoint->length == 0 || watchpoint->length > 0x80000000U ||
  1475. (watchpoint->length & (watchpoint->length - 1))) {
  1476. LOG_WARNING("watchpoint length must be a power of 2");
  1477. return ERROR_FAIL;
  1478. }
  1479. if (watchpoint->address & (watchpoint->length - 1)) {
  1480. LOG_WARNING("watchpoint address must be aligned at length");
  1481. return ERROR_FAIL;
  1482. }
  1483. /* FIXME: ARM DDI 0406C: address_mask is optional. What to do if it's missing? */
  1484. /* handle wp length 1 and 2 through byte select */
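/* Example: a 2-byte watchpoint at address 0x1002 yields
 * byte_address_select = 0x3 << 2 = 0xC (bytes 2..3 of the aligned word)
 * with the comparator address rounded down to 0x1000. */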
  1485. switch (watchpoint->length) {
  1486. case 1:
  1487. byte_address_select = BIT(watchpoint->address & 0x3);
  1488. address = watchpoint->address & ~0x3;
  1489. address_mask = 0;
  1490. break;
  1491. case 2:
  1492. byte_address_select = 0x03 << (watchpoint->address & 0x2);
  1493. address = watchpoint->address & ~0x3;
  1494. address_mask = 0;
  1495. break;
  1496. case 4:
  1497. byte_address_select = 0x0f;
  1498. address = watchpoint->address;
  1499. address_mask = 0;
  1500. break;
  1501. default:
  1502. byte_address_select = 0xff;
  1503. address = watchpoint->address;
  1504. address_mask = ilog2(watchpoint->length);
  1505. break;
  1506. }
  1507. watchpoint->set = wrp_i + 1;
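/* Assemble the DBGWCR value: MASK in bits [28:24], BAS (byte address select)
 * in bits [12:5], LSC = 0x3 (match loads and stores) in bits [4:3],
 * PAC = 0x3 (match all privilege levels) in bits [2:1], enable in bit 0;
 * see ARM DDI 0406C for the register layout. */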
  1508. control = (address_mask << 24) |
  1509. (byte_address_select << 5) |
  1510. (load_store_access_control << 3) |
  1511. (0x3 << 1) | 1;
  1512. wrp_list[wrp_i].used = true;
  1513. wrp_list[wrp_i].value = address;
  1514. wrp_list[wrp_i].control = control;
  1515. retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
  1516. + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
  1517. wrp_list[wrp_i].value);
  1518. if (retval != ERROR_OK)
  1519. return retval;
  1520. retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
  1521. + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
  1522. wrp_list[wrp_i].control);
  1523. if (retval != ERROR_OK)
  1524. return retval;
  1525. LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
  1526. wrp_list[wrp_i].control,
  1527. wrp_list[wrp_i].value);
  1528. return ERROR_OK;
  1529. }
  1530. /**
  1531. * Unset an existing watchpoint and clear the used watchpoint unit.
  1532. *
  1533. * @param target Pointer to the target to have the watchpoint removed
  1534. * @param watchpoint Pointer to the watchpoint to be removed
  1535. * @return Error status while trying to unset the watchpoint or the result of
  1536. * executing the JTAG queue
  1537. */
  1538. static int cortex_a_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
  1539. {
  1540. int retval;
  1541. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  1542. struct armv7a_common *armv7a = &cortex_a->armv7a_common;
  1543. struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
  1544. if (!watchpoint->set) {
  1545. LOG_WARNING("watchpoint not set");
  1546. return ERROR_OK;
  1547. }
  1548. int wrp_i = watchpoint->set - 1;
  1549. if (wrp_i < 0 || wrp_i >= cortex_a->wrp_num) {
  1550. LOG_DEBUG("Invalid WRP number in watchpoint");
  1551. return ERROR_OK;
  1552. }
  1553. LOG_DEBUG("wrp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
  1554. wrp_list[wrp_i].control, wrp_list[wrp_i].value);
  1555. wrp_list[wrp_i].used = false;
  1556. wrp_list[wrp_i].value = 0;
  1557. wrp_list[wrp_i].control = 0;
  1558. retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
  1559. + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
  1560. wrp_list[wrp_i].control);
  1561. if (retval != ERROR_OK)
  1562. return retval;
  1563. retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
  1564. + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
  1565. wrp_list[wrp_i].value);
  1566. if (retval != ERROR_OK)
  1567. return retval;
  1568. watchpoint->set = 0;
  1569. return ERROR_OK;
  1570. }
  1571. /**
1572. * Add a watchpoint to a Cortex-A target. If there are no watchpoint units
  1573. * available, an error response is returned.
  1574. *
  1575. * @param target Pointer to the Cortex-A target to add a watchpoint to
  1576. * @param watchpoint Pointer to the watchpoint to be added
  1577. * @return Error status while trying to add the watchpoint
  1578. */
  1579. static int cortex_a_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
  1580. {
  1581. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  1582. if (cortex_a->wrp_num_available < 1) {
  1583. LOG_INFO("no hardware watchpoint available");
  1584. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1585. }
  1586. int retval = cortex_a_set_watchpoint(target, watchpoint);
  1587. if (retval != ERROR_OK)
  1588. return retval;
  1589. cortex_a->wrp_num_available--;
  1590. return ERROR_OK;
  1591. }
  1592. /**
1593. * Remove a watchpoint from a Cortex-A target. The watchpoint will be unset and
1594. * the watchpoint unit it occupied will be made available again.
  1595. *
  1596. * @param target Pointer to the target to remove a watchpoint from
  1597. * @param watchpoint Pointer to the watchpoint to be removed
  1598. * @return Result of trying to unset the watchpoint
  1599. */
  1600. static int cortex_a_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
  1601. {
  1602. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  1603. if (watchpoint->set) {
  1604. cortex_a->wrp_num_available++;
  1605. cortex_a_unset_watchpoint(target, watchpoint);
  1606. }
  1607. return ERROR_OK;
  1608. }
  1609. /*
  1610. * Cortex-A Reset functions
  1611. */
  1612. static int cortex_a_assert_reset(struct target *target)
  1613. {
  1614. struct armv7a_common *armv7a = target_to_armv7a(target);
  1615. LOG_DEBUG(" ");
  1616. /* FIXME when halt is requested, make it work somehow... */
  1617. /* This function can be called in "target not examined" state */
  1618. /* Issue some kind of warm reset. */
  1619. if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
  1620. target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
  1621. else if (jtag_get_reset_config() & RESET_HAS_SRST) {
  1622. /* REVISIT handle "pulls" cases, if there's
  1623. * hardware that needs them to work.
  1624. */
  1625. /*
  1626. * FIXME: fix reset when transport is not JTAG. This is a temporary
  1627. * work-around for release v0.10 that is not intended to stay!
  1628. */
  1629. if (!transport_is_jtag() ||
  1630. (target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
  1631. adapter_assert_reset();
  1632. } else {
  1633. LOG_ERROR("%s: how to reset?", target_name(target));
  1634. return ERROR_FAIL;
  1635. }
  1636. /* registers are now invalid */
  1637. if (target_was_examined(target))
  1638. register_cache_invalidate(armv7a->arm.core_cache);
  1639. target->state = TARGET_RESET;
  1640. return ERROR_OK;
  1641. }
  1642. static int cortex_a_deassert_reset(struct target *target)
  1643. {
  1644. struct armv7a_common *armv7a = target_to_armv7a(target);
  1645. int retval;
  1646. LOG_DEBUG(" ");
  1647. /* be certain SRST is off */
  1648. adapter_deassert_reset();
  1649. if (target_was_examined(target)) {
  1650. retval = cortex_a_poll(target);
  1651. if (retval != ERROR_OK)
  1652. return retval;
  1653. }
  1654. if (target->reset_halt) {
  1655. if (target->state != TARGET_HALTED) {
  1656. LOG_WARNING("%s: ran after reset and before halt ...",
  1657. target_name(target));
  1658. if (target_was_examined(target)) {
  1659. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  1660. armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
  1661. if (retval != ERROR_OK)
  1662. return retval;
  1663. } else
  1664. target->state = TARGET_UNKNOWN;
  1665. }
  1666. }
  1667. return ERROR_OK;
  1668. }
  1669. static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
  1670. {
1671. /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1672. * The desired new mode is passed in 'mode'. The current value of DSCR must be
1673. * in *dscr, which is updated with the new value.
  1674. *
  1675. * This function elides actually sending the mode-change over the debug
  1676. * interface if the mode is already set as desired.
  1677. */
  1678. uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
  1679. if (new_dscr != *dscr) {
  1680. struct armv7a_common *armv7a = target_to_armv7a(target);
  1681. int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  1682. armv7a->debug_base + CPUDBG_DSCR, new_dscr);
  1683. if (retval == ERROR_OK)
  1684. *dscr = new_dscr;
  1685. return retval;
  1686. } else {
  1687. return ERROR_OK;
  1688. }
  1689. }
  1690. static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
  1691. uint32_t value, uint32_t *dscr)
  1692. {
  1693. /* Waits until the specified bit(s) of DSCR take on a specified value. */
  1694. struct armv7a_common *armv7a = target_to_armv7a(target);
  1695. int64_t then;
  1696. int retval;
  1697. if ((*dscr & mask) == value)
  1698. return ERROR_OK;
  1699. then = timeval_ms();
  1700. while (1) {
  1701. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  1702. armv7a->debug_base + CPUDBG_DSCR, dscr);
  1703. if (retval != ERROR_OK) {
  1704. LOG_ERROR("Could not read DSCR register");
  1705. return retval;
  1706. }
  1707. if ((*dscr & mask) == value)
  1708. break;
  1709. if (timeval_ms() > then + 1000) {
  1710. LOG_ERROR("timeout waiting for DSCR bit change");
  1711. return ERROR_FAIL;
  1712. }
  1713. }
  1714. return ERROR_OK;
  1715. }
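/* Execute 'opcode' (expected to leave its result in R0) on the core, then
 * transfer R0 back to the host through DTRTX. */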
  1716. static int cortex_a_read_copro(struct target *target, uint32_t opcode,
  1717. uint32_t *data, uint32_t *dscr)
  1718. {
  1719. int retval;
  1720. struct armv7a_common *armv7a = target_to_armv7a(target);
  1721. /* Move from coprocessor to R0. */
  1722. retval = cortex_a_exec_opcode(target, opcode, dscr);
  1723. if (retval != ERROR_OK)
  1724. return retval;
  1725. /* Move from R0 to DTRTX. */
  1726. retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
  1727. if (retval != ERROR_OK)
  1728. return retval;
  1729. /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
  1730. * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
  1731. * must also check TXfull_l). Most of the time this will be free
  1732. * because TXfull_l will be set immediately and cached in dscr. */
  1733. retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
  1734. DSCR_DTRTX_FULL_LATCHED, dscr);
  1735. if (retval != ERROR_OK)
  1736. return retval;
  1737. /* Read the value transferred to DTRTX. */
  1738. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  1739. armv7a->debug_base + CPUDBG_DTRTX, data);
  1740. if (retval != ERROR_OK)
  1741. return retval;
  1742. return ERROR_OK;
  1743. }
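/* Read DFAR (CP15 c6, the fault address) and/or DFSR (CP15 c5, the fault
 * status) through the DCC; either pointer may be NULL to skip that register. */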
  1744. static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
  1745. uint32_t *dfsr, uint32_t *dscr)
  1746. {
  1747. int retval;
  1748. if (dfar) {
  1749. retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
  1750. if (retval != ERROR_OK)
  1751. return retval;
  1752. }
  1753. if (dfsr) {
  1754. retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
  1755. if (retval != ERROR_OK)
  1756. return retval;
  1757. }
  1758. return ERROR_OK;
  1759. }
  1760. static int cortex_a_write_copro(struct target *target, uint32_t opcode,
  1761. uint32_t data, uint32_t *dscr)
  1762. {
  1763. int retval;
  1764. struct armv7a_common *armv7a = target_to_armv7a(target);
  1765. /* Write the value into DTRRX. */
  1766. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  1767. armv7a->debug_base + CPUDBG_DTRRX, data);
  1768. if (retval != ERROR_OK)
  1769. return retval;
  1770. /* Move from DTRRX to R0. */
  1771. retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
  1772. if (retval != ERROR_OK)
  1773. return retval;
  1774. /* Move from R0 to coprocessor. */
  1775. retval = cortex_a_exec_opcode(target, opcode, dscr);
  1776. if (retval != ERROR_OK)
  1777. return retval;
  1778. /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
  1779. * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
  1780. * check RXfull_l). Most of the time this will be free because RXfull_l
  1781. * will be cleared immediately and cached in dscr. */
  1782. retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
  1783. if (retval != ERROR_OK)
  1784. return retval;
  1785. return ERROR_OK;
  1786. }
  1787. static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
  1788. uint32_t dfsr, uint32_t *dscr)
  1789. {
  1790. int retval;
  1791. retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
  1792. if (retval != ERROR_OK)
  1793. return retval;
  1794. retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
  1795. if (retval != ERROR_OK)
  1796. return retval;
  1797. return ERROR_OK;
  1798. }
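/* Map a DFSR fault status to an OpenOCD error code. DFSR bit 9 selects the
 * long-descriptor (LPAE) status format; the encodings below follow the fault
 * status tables in ARM DDI 0406C (e.g. LPAE status 0b100001 = 33 is an
 * alignment fault). */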
  1799. static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
  1800. {
  1801. uint32_t status, upper4;
  1802. if (dfsr & (1 << 9)) {
  1803. /* LPAE format. */
  1804. status = dfsr & 0x3f;
  1805. upper4 = status >> 2;
  1806. if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
  1807. return ERROR_TARGET_TRANSLATION_FAULT;
  1808. else if (status == 33)
  1809. return ERROR_TARGET_UNALIGNED_ACCESS;
  1810. else
  1811. return ERROR_TARGET_DATA_ABORT;
  1812. } else {
  1813. /* Normal format. */
  1814. status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
  1815. if (status == 1)
  1816. return ERROR_TARGET_UNALIGNED_ACCESS;
  1817. else if (status == 5 || status == 7 || status == 3 || status == 6 ||
  1818. status == 9 || status == 11 || status == 13 || status == 15)
  1819. return ERROR_TARGET_TRANSLATION_FAULT;
  1820. else
  1821. return ERROR_TARGET_DATA_ABORT;
  1822. }
  1823. }
  1824. static int cortex_a_write_cpu_memory_slow(struct target *target,
  1825. uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
  1826. {
1827. /* Writes 'count' objects of 'size' bytes each from *buffer. Old value of DSCR
1828. * must be in *dscr; updated to new value. This is slow because it works for
  1829. * non-word-sized objects. Avoid unaligned accesses as they do not work
  1830. * on memory address space without "Normal" attribute. If size == 4 and
  1831. * the address is aligned, cortex_a_write_cpu_memory_fast should be
  1832. * preferred.
  1833. * Preconditions:
  1834. * - Address is in R0.
  1835. * - R0 is marked dirty.
  1836. */
  1837. struct armv7a_common *armv7a = target_to_armv7a(target);
  1838. struct arm *arm = &armv7a->arm;
  1839. int retval;
  1840. /* Mark register R1 as dirty, to use for transferring data. */
  1841. arm_reg_current(arm, 1)->dirty = true;
  1842. /* Switch to non-blocking mode if not already in that mode. */
  1843. retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
  1844. if (retval != ERROR_OK)
  1845. return retval;
  1846. /* Go through the objects. */
  1847. while (count) {
  1848. /* Write the value to store into DTRRX. */
  1849. uint32_t data, opcode;
  1850. if (size == 1)
  1851. data = *buffer;
  1852. else if (size == 2)
  1853. data = target_buffer_get_u16(target, buffer);
  1854. else
  1855. data = target_buffer_get_u32(target, buffer);
  1856. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  1857. armv7a->debug_base + CPUDBG_DTRRX, data);
  1858. if (retval != ERROR_OK)
  1859. return retval;
  1860. /* Transfer the value from DTRRX to R1. */
  1861. retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
  1862. if (retval != ERROR_OK)
  1863. return retval;
  1864. /* Write the value transferred to R1 into memory. */
  1865. if (size == 1)
  1866. opcode = ARMV4_5_STRB_IP(1, 0);
  1867. else if (size == 2)
  1868. opcode = ARMV4_5_STRH_IP(1, 0);
  1869. else
  1870. opcode = ARMV4_5_STRW_IP(1, 0);
  1871. retval = cortex_a_exec_opcode(target, opcode, dscr);
  1872. if (retval != ERROR_OK)
  1873. return retval;
  1874. /* Check for faults and return early. */
  1875. if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
  1876. return ERROR_OK; /* A data fault is not considered a system failure. */
  1877. /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
  1878. * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
  1879. * must also check RXfull_l). Most of the time this will be free
  1880. * because RXfull_l will be cleared immediately and cached in dscr. */
  1881. retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
  1882. if (retval != ERROR_OK)
  1883. return retval;
  1884. /* Advance. */
  1885. buffer += size;
  1886. --count;
  1887. }
  1888. return ERROR_OK;
  1889. }
  1890. static int cortex_a_write_cpu_memory_fast(struct target *target,
  1891. uint32_t count, const uint8_t *buffer, uint32_t *dscr)
  1892. {
  1893. /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
  1894. * in *dscr; updated to new value. This is fast but only works for
  1895. * word-sized objects at aligned addresses.
  1896. * Preconditions:
  1897. * - Address is in R0 and must be a multiple of 4.
  1898. * - R0 is marked dirty.
  1899. */
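/* Mechanism: once the STC instruction (a store of the word received in DTRRX
 * to [R0], post-incrementing R0 by 4) is latched in ITR, DCC fast mode
 * re-issues it on every write to DTRRX, so the whole buffer is streamed with
 * one MEM-AP write per word. */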
  1900. struct armv7a_common *armv7a = target_to_armv7a(target);
  1901. int retval;
  1902. /* Switch to fast mode if not already in that mode. */
  1903. retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
  1904. if (retval != ERROR_OK)
  1905. return retval;
  1906. /* Latch STC instruction. */
  1907. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  1908. armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
  1909. if (retval != ERROR_OK)
  1910. return retval;
  1911. /* Transfer all the data and issue all the instructions. */
  1912. return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
  1913. 4, count, armv7a->debug_base + CPUDBG_DTRRX);
  1914. }
  1915. static int cortex_a_write_cpu_memory(struct target *target,
  1916. uint32_t address, uint32_t size,
  1917. uint32_t count, const uint8_t *buffer)
  1918. {
  1919. /* Write memory through the CPU. */
  1920. int retval, final_retval;
  1921. struct armv7a_common *armv7a = target_to_armv7a(target);
  1922. struct arm *arm = &armv7a->arm;
  1923. uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
  1924. LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
  1925. address, size, count);
  1926. if (target->state != TARGET_HALTED) {
  1927. LOG_WARNING("target not halted");
  1928. return ERROR_TARGET_NOT_HALTED;
  1929. }
  1930. if (!count)
  1931. return ERROR_OK;
  1932. /* Clear any abort. */
  1933. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  1934. armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
  1935. if (retval != ERROR_OK)
  1936. return retval;
  1937. /* Read DSCR. */
  1938. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  1939. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  1940. if (retval != ERROR_OK)
  1941. return retval;
  1942. /* Switch to non-blocking mode if not already in that mode. */
  1943. retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
  1944. if (retval != ERROR_OK)
  1945. goto out;
  1946. /* Mark R0 as dirty. */
  1947. arm_reg_current(arm, 0)->dirty = true;
  1948. /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
  1949. retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
  1950. if (retval != ERROR_OK)
  1951. goto out;
  1952. /* Get the memory address into R0. */
  1953. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  1954. armv7a->debug_base + CPUDBG_DTRRX, address);
  1955. if (retval != ERROR_OK)
  1956. goto out;
  1957. retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
  1958. if (retval != ERROR_OK)
  1959. goto out;
  1960. if (size == 4 && (address % 4) == 0) {
  1961. /* We are doing a word-aligned transfer, so use fast mode. */
  1962. retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
  1963. } else {
  1964. /* Use slow path. Adjust size for aligned accesses */
  1965. switch (address % 4) {
  1966. case 1:
  1967. case 3:
  1968. count *= size;
  1969. size = 1;
  1970. break;
  1971. case 2:
  1972. if (size == 4) {
  1973. count *= 2;
  1974. size = 2;
1975. }
break;
1976. case 0:
  1977. default:
  1978. break;
  1979. }
  1980. retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
  1981. }
  1982. out:
  1983. final_retval = retval;
  1984. /* Switch to non-blocking mode if not already in that mode. */
  1985. retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
  1986. if (final_retval == ERROR_OK)
  1987. final_retval = retval;
  1988. /* Wait for last issued instruction to complete. */
  1989. retval = cortex_a_wait_instrcmpl(target, &dscr, true);
  1990. if (final_retval == ERROR_OK)
  1991. final_retval = retval;
  1992. /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
  1993. * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
  1994. * check RXfull_l). Most of the time this will be free because RXfull_l
  1995. * will be cleared immediately and cached in dscr. However, don't do this
1996. * if there is a fault, because then the instruction might not have completed
  1997. * successfully. */
  1998. if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
  1999. retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
  2000. if (retval != ERROR_OK)
  2001. return retval;
  2002. }
  2003. /* If there were any sticky abort flags, clear them. */
  2004. if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
  2005. fault_dscr = dscr;
  2006. mem_ap_write_atomic_u32(armv7a->debug_ap,
  2007. armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
  2008. dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
  2009. } else {
  2010. fault_dscr = 0;
  2011. }
  2012. /* Handle synchronous data faults. */
  2013. if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
  2014. if (final_retval == ERROR_OK) {
  2015. /* Final return value will reflect cause of fault. */
  2016. retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
  2017. if (retval == ERROR_OK) {
  2018. LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
  2019. final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
  2020. } else
  2021. final_retval = retval;
  2022. }
  2023. /* Fault destroyed DFAR/DFSR; restore them. */
  2024. retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
  2025. if (retval != ERROR_OK)
  2026. LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
  2027. }
  2028. /* Handle asynchronous data faults. */
  2029. if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
  2030. if (final_retval == ERROR_OK)
  2031. /* No other error has been recorded so far, so keep this one. */
  2032. final_retval = ERROR_TARGET_DATA_ABORT;
  2033. }
  2034. /* If the DCC is nonempty, clear it. */
  2035. if (dscr & DSCR_DTRTX_FULL_LATCHED) {
  2036. uint32_t dummy;
  2037. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2038. armv7a->debug_base + CPUDBG_DTRTX, &dummy);
  2039. if (final_retval == ERROR_OK)
  2040. final_retval = retval;
  2041. }
  2042. if (dscr & DSCR_DTRRX_FULL_LATCHED) {
  2043. retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
  2044. if (final_retval == ERROR_OK)
  2045. final_retval = retval;
  2046. }
  2047. /* Done. */
  2048. return final_retval;
  2049. }
  2050. static int cortex_a_read_cpu_memory_slow(struct target *target,
  2051. uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
  2052. {
2053. /* Reads 'count' objects of 'size' bytes each into *buffer. Old value of DSCR must be
  2054. * in *dscr; updated to new value. This is slow because it works for
  2055. * non-word-sized objects. Avoid unaligned accesses as they do not work
  2056. * on memory address space without "Normal" attribute. If size == 4 and
  2057. * the address is aligned, cortex_a_read_cpu_memory_fast should be
  2058. * preferred.
  2059. * Preconditions:
  2060. * - Address is in R0.
  2061. * - R0 is marked dirty.
  2062. */
  2063. struct armv7a_common *armv7a = target_to_armv7a(target);
  2064. struct arm *arm = &armv7a->arm;
  2065. int retval;
  2066. /* Mark register R1 as dirty, to use for transferring data. */
  2067. arm_reg_current(arm, 1)->dirty = true;
  2068. /* Switch to non-blocking mode if not already in that mode. */
  2069. retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
  2070. if (retval != ERROR_OK)
  2071. return retval;
  2072. /* Go through the objects. */
  2073. while (count) {
  2074. /* Issue a load of the appropriate size to R1. */
  2075. uint32_t opcode, data;
  2076. if (size == 1)
  2077. opcode = ARMV4_5_LDRB_IP(1, 0);
  2078. else if (size == 2)
  2079. opcode = ARMV4_5_LDRH_IP(1, 0);
  2080. else
  2081. opcode = ARMV4_5_LDRW_IP(1, 0);
  2082. retval = cortex_a_exec_opcode(target, opcode, dscr);
  2083. if (retval != ERROR_OK)
  2084. return retval;
  2085. /* Issue a write of R1 to DTRTX. */
  2086. retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
  2087. if (retval != ERROR_OK)
  2088. return retval;
  2089. /* Check for faults and return early. */
  2090. if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
  2091. return ERROR_OK; /* A data fault is not considered a system failure. */
  2092. /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
  2093. * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
  2094. * must also check TXfull_l). Most of the time this will be free
  2095. * because TXfull_l will be set immediately and cached in dscr. */
  2096. retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
  2097. DSCR_DTRTX_FULL_LATCHED, dscr);
  2098. if (retval != ERROR_OK)
  2099. return retval;
  2100. /* Read the value transferred to DTRTX into the buffer. */
  2101. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2102. armv7a->debug_base + CPUDBG_DTRTX, &data);
  2103. if (retval != ERROR_OK)
  2104. return retval;
  2105. if (size == 1)
  2106. *buffer = (uint8_t) data;
  2107. else if (size == 2)
  2108. target_buffer_set_u16(target, buffer, (uint16_t) data);
  2109. else
  2110. target_buffer_set_u32(target, buffer, data);
  2111. /* Advance. */
  2112. buffer += size;
  2113. --count;
  2114. }
  2115. return ERROR_OK;
  2116. }
  2117. static int cortex_a_read_cpu_memory_fast(struct target *target,
  2118. uint32_t count, uint8_t *buffer, uint32_t *dscr)
  2119. {
  2120. /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
  2121. * *dscr; updated to new value. This is fast but only works for word-sized
  2122. * objects at aligned addresses.
  2123. * Preconditions:
  2124. * - Address is in R0 and must be a multiple of 4.
  2125. * - R0 is marked dirty.
  2126. */
  2127. struct armv7a_common *armv7a = target_to_armv7a(target);
  2128. uint32_t u32;
  2129. int retval;
  2130. /* Switch to non-blocking mode if not already in that mode. */
  2131. retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
  2132. if (retval != ERROR_OK)
  2133. return retval;
  2134. /* Issue the LDC instruction via a write to ITR. */
  2135. retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
  2136. if (retval != ERROR_OK)
  2137. return retval;
  2138. count--;
  2139. if (count > 0) {
  2140. /* Switch to fast mode if not already in that mode. */
  2141. retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
  2142. if (retval != ERROR_OK)
  2143. return retval;
  2144. /* Latch LDC instruction. */
  2145. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  2146. armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
  2147. if (retval != ERROR_OK)
  2148. return retval;
  2149. /* Read the value transferred to DTRTX into the buffer. Due to fast
  2150. * mode rules, this blocks until the instruction finishes executing and
  2151. * then reissues the read instruction to read the next word from
  2152. * memory. The last read of DTRTX in this call reads the second-to-last
  2153. * word from memory and issues the read instruction for the last word.
  2154. */
  2155. retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
  2156. 4, count, armv7a->debug_base + CPUDBG_DTRTX);
  2157. if (retval != ERROR_OK)
  2158. return retval;
  2159. /* Advance. */
  2160. buffer += count * 4;
  2161. }
  2162. /* Wait for last issued instruction to complete. */
  2163. retval = cortex_a_wait_instrcmpl(target, dscr, false);
  2164. if (retval != ERROR_OK)
  2165. return retval;
  2166. /* Switch to non-blocking mode if not already in that mode. */
  2167. retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
  2168. if (retval != ERROR_OK)
  2169. return retval;
  2170. /* Check for faults and return early. */
  2171. if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
  2172. return ERROR_OK; /* A data fault is not considered a system failure. */
  2173. /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
  2174. * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
  2175. * check TXfull_l). Most of the time this will be free because TXfull_l
  2176. * will be set immediately and cached in dscr. */
  2177. retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
  2178. DSCR_DTRTX_FULL_LATCHED, dscr);
  2179. if (retval != ERROR_OK)
  2180. return retval;
  2181. /* Read the value transferred to DTRTX into the buffer. This is the last
  2182. * word. */
  2183. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2184. armv7a->debug_base + CPUDBG_DTRTX, &u32);
  2185. if (retval != ERROR_OK)
  2186. return retval;
  2187. target_buffer_set_u32(target, buffer, u32);
  2188. return ERROR_OK;
  2189. }
  2190. static int cortex_a_read_cpu_memory(struct target *target,
  2191. uint32_t address, uint32_t size,
  2192. uint32_t count, uint8_t *buffer)
  2193. {
  2194. /* Read memory through the CPU. */
  2195. int retval, final_retval;
  2196. struct armv7a_common *armv7a = target_to_armv7a(target);
  2197. struct arm *arm = &armv7a->arm;
  2198. uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
  2199. LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
  2200. address, size, count);
  2201. if (target->state != TARGET_HALTED) {
  2202. LOG_WARNING("target not halted");
  2203. return ERROR_TARGET_NOT_HALTED;
  2204. }
  2205. if (!count)
  2206. return ERROR_OK;
  2207. /* Clear any abort. */
  2208. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  2209. armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
  2210. if (retval != ERROR_OK)
  2211. return retval;
  2212. /* Read DSCR */
  2213. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2214. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  2215. if (retval != ERROR_OK)
  2216. return retval;
  2217. /* Switch to non-blocking mode if not already in that mode. */
  2218. retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
  2219. if (retval != ERROR_OK)
  2220. goto out;
  2221. /* Mark R0 as dirty. */
  2222. arm_reg_current(arm, 0)->dirty = true;
  2223. /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
  2224. retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
  2225. if (retval != ERROR_OK)
  2226. goto out;
  2227. /* Get the memory address into R0. */
  2228. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  2229. armv7a->debug_base + CPUDBG_DTRRX, address);
  2230. if (retval != ERROR_OK)
  2231. goto out;
  2232. retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
  2233. if (retval != ERROR_OK)
  2234. goto out;
  2235. if (size == 4 && (address % 4) == 0) {
  2236. /* We are doing a word-aligned transfer, so use fast mode. */
  2237. retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
  2238. } else {
  2239. /* Use slow path. Adjust size for aligned accesses */
  2240. switch (address % 4) {
  2241. case 1:
  2242. case 3:
  2243. count *= size;
  2244. size = 1;
  2245. break;
  2246. case 2:
  2247. if (size == 4) {
  2248. count *= 2;
  2249. size = 2;
  2250. }
  2251. break;
  2252. case 0:
  2253. default:
  2254. break;
  2255. }
  2256. retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
  2257. }
  2258. out:
  2259. final_retval = retval;
  2260. /* Switch to non-blocking mode if not already in that mode. */
  2261. retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
  2262. if (final_retval == ERROR_OK)
  2263. final_retval = retval;
  2264. /* Wait for last issued instruction to complete. */
  2265. retval = cortex_a_wait_instrcmpl(target, &dscr, true);
  2266. if (final_retval == ERROR_OK)
  2267. final_retval = retval;
  2268. /* If there were any sticky abort flags, clear them. */
  2269. if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
  2270. fault_dscr = dscr;
  2271. mem_ap_write_atomic_u32(armv7a->debug_ap,
  2272. armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
  2273. dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
  2274. } else {
  2275. fault_dscr = 0;
  2276. }
  2277. /* Handle synchronous data faults. */
  2278. if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
  2279. if (final_retval == ERROR_OK) {
  2280. /* Final return value will reflect cause of fault. */
  2281. retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
  2282. if (retval == ERROR_OK) {
  2283. LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
  2284. final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
  2285. } else
  2286. final_retval = retval;
  2287. }
  2288. /* Fault destroyed DFAR/DFSR; restore them. */
  2289. retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
  2290. if (retval != ERROR_OK)
  2291. LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
  2292. }
  2293. /* Handle asynchronous data faults. */
  2294. if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
  2295. if (final_retval == ERROR_OK)
  2296. /* No other error has been recorded so far, so keep this one. */
  2297. final_retval = ERROR_TARGET_DATA_ABORT;
  2298. }
  2299. /* If the DCC is nonempty, clear it. */
  2300. if (dscr & DSCR_DTRTX_FULL_LATCHED) {
  2301. uint32_t dummy;
  2302. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2303. armv7a->debug_base + CPUDBG_DTRTX, &dummy);
  2304. if (final_retval == ERROR_OK)
  2305. final_retval = retval;
  2306. }
  2307. if (dscr & DSCR_DTRRX_FULL_LATCHED) {
  2308. retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
  2309. if (final_retval == ERROR_OK)
  2310. final_retval = retval;
  2311. }
  2312. /* Done. */
  2313. return final_retval;
  2314. }
  2315. /*
  2316. * Cortex-A Memory access
  2317. *
2318. * This is the same as for Cortex-M3, but we must also use the
2319. * correct AP number for every access.
  2320. */
  2321. static int cortex_a_read_phys_memory(struct target *target,
  2322. target_addr_t address, uint32_t size,
  2323. uint32_t count, uint8_t *buffer)
  2324. {
  2325. int retval;
  2326. if (!count || !buffer)
  2327. return ERROR_COMMAND_SYNTAX_ERROR;
  2328. LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
  2329. address, size, count);
  2330. /* read memory through the CPU */
  2331. cortex_a_prep_memaccess(target, 1);
  2332. retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
  2333. cortex_a_post_memaccess(target, 1);
  2334. return retval;
  2335. }
  2336. static int cortex_a_read_memory(struct target *target, target_addr_t address,
  2337. uint32_t size, uint32_t count, uint8_t *buffer)
  2338. {
  2339. int retval;
  2340. /* cortex_a handles unaligned memory access */
  2341. LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
  2342. address, size, count);
  2343. cortex_a_prep_memaccess(target, 0);
  2344. retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
  2345. cortex_a_post_memaccess(target, 0);
  2346. return retval;
  2347. }
  2348. static int cortex_a_write_phys_memory(struct target *target,
  2349. target_addr_t address, uint32_t size,
  2350. uint32_t count, const uint8_t *buffer)
  2351. {
  2352. int retval;
  2353. if (!count || !buffer)
  2354. return ERROR_COMMAND_SYNTAX_ERROR;
  2355. LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
  2356. address, size, count);
  2357. /* write memory through the CPU */
  2358. cortex_a_prep_memaccess(target, 1);
  2359. retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
  2360. cortex_a_post_memaccess(target, 1);
  2361. return retval;
  2362. }
  2363. static int cortex_a_write_memory(struct target *target, target_addr_t address,
  2364. uint32_t size, uint32_t count, const uint8_t *buffer)
  2365. {
  2366. int retval;
  2367. /* cortex_a handles unaligned memory access */
  2368. LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
  2369. address, size, count);
  2370. /* memory writes bypass the caches, must flush before writing */
  2371. armv7a_cache_auto_flush_on_write(target, address, size * count);
  2372. cortex_a_prep_memaccess(target, 0);
  2373. retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
  2374. cortex_a_post_memaccess(target, 0);
  2375. return retval;
  2376. }
  2377. static int cortex_a_read_buffer(struct target *target, target_addr_t address,
  2378. uint32_t count, uint8_t *buffer)
  2379. {
  2380. uint32_t size;
2381. /* Align the address up to 4 bytes, one power-of-two step at a time. The loop
2382. * condition ensures the next pass still has enough data for its larger size. */
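/* Example: a 7-byte read starting at 0x1001 is split into a 1-byte access at
 * 0x1001, a 2-byte access at 0x1002 and a 4-byte access at 0x1004. */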
  2383. for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
  2384. if (address & size) {
  2385. int retval = target_read_memory(target, address, size, 1, buffer);
  2386. if (retval != ERROR_OK)
  2387. return retval;
  2388. address += size;
  2389. count -= size;
  2390. buffer += size;
  2391. }
  2392. }
2393. /* Read the data with the largest access size possible. */
  2394. for (; size > 0; size /= 2) {
  2395. uint32_t aligned = count - count % size;
  2396. if (aligned > 0) {
  2397. int retval = target_read_memory(target, address, size, aligned / size, buffer);
  2398. if (retval != ERROR_OK)
  2399. return retval;
  2400. address += aligned;
  2401. count -= aligned;
  2402. buffer += aligned;
  2403. }
  2404. }
  2405. return ERROR_OK;
  2406. }
  2407. static int cortex_a_write_buffer(struct target *target, target_addr_t address,
  2408. uint32_t count, const uint8_t *buffer)
  2409. {
  2410. uint32_t size;
2411. /* Align the address up to 4 bytes, one power-of-two step at a time. The loop
2412. * condition ensures the next pass still has enough data for its larger size. */
  2413. for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
  2414. if (address & size) {
  2415. int retval = target_write_memory(target, address, size, 1, buffer);
  2416. if (retval != ERROR_OK)
  2417. return retval;
  2418. address += size;
  2419. count -= size;
  2420. buffer += size;
  2421. }
  2422. }
2423. /* Write the data with the largest access size possible. */
  2424. for (; size > 0; size /= 2) {
  2425. uint32_t aligned = count - count % size;
  2426. if (aligned > 0) {
  2427. int retval = target_write_memory(target, address, size, aligned / size, buffer);
  2428. if (retval != ERROR_OK)
  2429. return retval;
  2430. address += aligned;
  2431. count -= aligned;
  2432. buffer += aligned;
  2433. }
  2434. }
  2435. return ERROR_OK;
  2436. }
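/* Periodic timer callback: while the target is running, drain debug messages
 * that the target posts through the DCC TX register (DTRTX) and hand them to
 * target_request(). */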
  2437. static int cortex_a_handle_target_request(void *priv)
  2438. {
  2439. struct target *target = priv;
  2440. struct armv7a_common *armv7a = target_to_armv7a(target);
  2441. int retval;
  2442. if (!target_was_examined(target))
  2443. return ERROR_OK;
  2444. if (!target->dbg_msg_enabled)
  2445. return ERROR_OK;
  2446. if (target->state == TARGET_RUNNING) {
  2447. uint32_t request;
  2448. uint32_t dscr;
  2449. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2450. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  2451. /* check if we have data */
  2452. int64_t then = timeval_ms();
  2453. while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
  2454. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2455. armv7a->debug_base + CPUDBG_DTRTX, &request);
  2456. if (retval == ERROR_OK) {
  2457. target_request(target, request);
  2458. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2459. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  2460. }
  2461. if (timeval_ms() > then + 1000) {
  2462. LOG_ERROR("Timeout waiting for dtr tx full");
  2463. return ERROR_FAIL;
  2464. }
  2465. }
  2466. }
  2467. return ERROR_OK;
  2468. }
  2469. /*
  2470. * Cortex-A target information and configuration
  2471. */
  2472. static int cortex_a_examine_first(struct target *target)
  2473. {
  2474. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  2475. struct armv7a_common *armv7a = &cortex_a->armv7a_common;
  2476. struct adiv5_dap *swjdp = armv7a->arm.dap;
  2477. int i;
  2478. int retval = ERROR_OK;
  2479. uint32_t didr, cpuid, dbg_osreg, dbg_idpfr1;
  2480. /* Search for the APB-AP - it is needed for access to debug registers */
  2481. retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
  2482. if (retval != ERROR_OK) {
  2483. LOG_ERROR("Could not find APB-AP for debug access");
  2484. return retval;
  2485. }
  2486. retval = mem_ap_init(armv7a->debug_ap);
  2487. if (retval != ERROR_OK) {
  2488. LOG_ERROR("Could not initialize the APB-AP");
  2489. return retval;
  2490. }
  2491. armv7a->debug_ap->memaccess_tck = 80;
  2492. if (!target->dbgbase_set) {
  2493. target_addr_t dbgbase;
  2494. /* Get ROM Table base */
  2495. uint32_t apid;
  2496. int32_t coreidx = target->coreid;
  2497. LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
  2498. target->cmd_name);
  2499. retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
  2500. if (retval != ERROR_OK)
  2501. return retval;
2502. /* Lookup device type 0x15 -- Processor debug unit */
  2503. retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
  2504. &armv7a->debug_base, &coreidx);
  2505. if (retval != ERROR_OK) {
  2506. LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
  2507. target->cmd_name);
  2508. return retval;
  2509. }
  2510. LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
  2511. target->coreid, armv7a->debug_base);
  2512. } else
  2513. armv7a->debug_base = target->dbgbase;
  2514. if ((armv7a->debug_base & (1UL<<31)) == 0)
  2515. LOG_WARNING("Debug base address for target %s has bit 31 set to 0. Access to debug registers will likely fail!\n"
  2516. "Please fix the target configuration.", target_name(target));
  2517. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2518. armv7a->debug_base + CPUDBG_DIDR, &didr);
  2519. if (retval != ERROR_OK) {
  2520. LOG_DEBUG("Examine %s failed", "DIDR");
  2521. return retval;
  2522. }
  2523. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2524. armv7a->debug_base + CPUDBG_CPUID, &cpuid);
  2525. if (retval != ERROR_OK) {
  2526. LOG_DEBUG("Examine %s failed", "CPUID");
  2527. return retval;
  2528. }
  2529. LOG_DEBUG("didr = 0x%08" PRIx32, didr);
  2530. LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
  2531. cortex_a->didr = didr;
  2532. cortex_a->cpuid = cpuid;
  2533. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2534. armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
  2535. if (retval != ERROR_OK)
  2536. return retval;
  2537. LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
  2538. if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
  2539. LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
  2540. target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
  2541. return ERROR_TARGET_INIT_FAILED;
  2542. }
  2543. if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
  2544. LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
  2545. /* Read DBGOSLSR and check if OSLK is implemented */
  2546. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2547. armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
  2548. if (retval != ERROR_OK)
  2549. return retval;
  2550. LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
  2551. /* check if OS Lock is implemented */
  2552. if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
  2553. /* check if OS Lock is set */
  2554. if (dbg_osreg & OSLSR_OSLK) {
  2555. LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
  2556. retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
  2557. armv7a->debug_base + CPUDBG_OSLAR,
  2558. 0);
  2559. if (retval == ERROR_OK)
  2560. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2561. armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
  2562. /* if we fail to access the register or cannot reset the OSLK bit, bail out */
  2563. if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
  2564. LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
  2565. target->coreid);
  2566. target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
  2567. return ERROR_TARGET_INIT_FAILED;
  2568. }
  2569. }
  2570. }
  2571. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2572. armv7a->debug_base + CPUDBG_ID_PFR1, &dbg_idpfr1);
  2573. if (retval != ERROR_OK)
  2574. return retval;
  2575. if (dbg_idpfr1 & 0x000000f0) {
  2576. LOG_DEBUG("target->coreid %" PRId32 " has security extensions",
  2577. target->coreid);
  2578. armv7a->arm.core_type = ARM_CORE_TYPE_SEC_EXT;
  2579. }
  2580. if (dbg_idpfr1 & 0x0000f000) {
  2581. LOG_DEBUG("target->coreid %" PRId32 " has virtualization extensions",
  2582. target->coreid);
  2583. /*
2584. * Overwrite and simplify the checks:
2585. * the virtualization extensions require the security extensions to be implemented.
  2586. */
  2587. armv7a->arm.core_type = ARM_CORE_TYPE_VIRT_EXT;
  2588. }
2589. /* Avoid recreating the register cache */
  2590. if (!target_was_examined(target)) {
  2591. retval = cortex_a_dpm_setup(cortex_a, didr);
  2592. if (retval != ERROR_OK)
  2593. return retval;
  2594. }
  2595. /* Setup Breakpoint Register Pairs */
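/* DBGDIDR encodes each comparator count as (count - 1): BRPs in bits [27:24],
 * context-matching BRPs in bits [23:20], WRPs in bits [31:28]. */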
  2596. cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
  2597. cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
  2598. cortex_a->brp_num_available = cortex_a->brp_num;
  2599. free(cortex_a->brp_list);
  2600. cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
  2601. /* cortex_a->brb_enabled = ????; */
  2602. for (i = 0; i < cortex_a->brp_num; i++) {
  2603. cortex_a->brp_list[i].used = false;
  2604. if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
  2605. cortex_a->brp_list[i].type = BRP_NORMAL;
  2606. else
  2607. cortex_a->brp_list[i].type = BRP_CONTEXT;
  2608. cortex_a->brp_list[i].value = 0;
  2609. cortex_a->brp_list[i].control = 0;
  2610. cortex_a->brp_list[i].brpn = i;
  2611. }
  2612. LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
  2613. /* Setup Watchpoint Register Pairs */
  2614. cortex_a->wrp_num = ((didr >> 28) & 0x0F) + 1;
  2615. cortex_a->wrp_num_available = cortex_a->wrp_num;
  2616. free(cortex_a->wrp_list);
  2617. cortex_a->wrp_list = calloc(cortex_a->wrp_num, sizeof(struct cortex_a_wrp));
  2618. for (i = 0; i < cortex_a->wrp_num; i++) {
  2619. cortex_a->wrp_list[i].used = false;
  2620. cortex_a->wrp_list[i].value = 0;
  2621. cortex_a->wrp_list[i].control = 0;
  2622. cortex_a->wrp_list[i].wrpn = i;
  2623. }
  2624. LOG_DEBUG("Configured %i hw watchpoints", cortex_a->wrp_num);
  2625. /* select debug_ap as default */
  2626. swjdp->apsel = armv7a->debug_ap->ap_num;
  2627. target_set_examined(target);
  2628. return ERROR_OK;
  2629. }
  2630. static int cortex_a_examine(struct target *target)
  2631. {
  2632. int retval = ERROR_OK;
  2633. /* Reestablish communication after target reset */
  2634. retval = cortex_a_examine_first(target);
  2635. /* Configure core debug access */
  2636. if (retval == ERROR_OK)
  2637. retval = cortex_a_init_debug_access(target);
  2638. return retval;
  2639. }
  2640. /*
  2641. * Cortex-A target creation and initialization
  2642. */
  2643. static int cortex_a_init_target(struct command_context *cmd_ctx,
  2644. struct target *target)
  2645. {
  2646. /* examine_first() does a bunch of this */
  2647. arm_semihosting_init(target);
  2648. return ERROR_OK;
  2649. }
  2650. static int cortex_a_init_arch_info(struct target *target,
  2651. struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
  2652. {
  2653. struct armv7a_common *armv7a = &cortex_a->armv7a_common;
  2654. /* Setup struct cortex_a_common */
  2655. cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
  2656. armv7a->arm.dap = dap;
  2657. /* register arch-specific functions */
  2658. armv7a->examine_debug_reason = NULL;
  2659. armv7a->post_debug_entry = cortex_a_post_debug_entry;
  2660. armv7a->pre_restore_context = NULL;
  2661. armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
  2662. /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
  2663. /* REVISIT v7a setup should be in a v7a-specific routine */
  2664. armv7a_init_arch_info(target, armv7a);
  2665. target_register_timer_callback(cortex_a_handle_target_request, 1,
  2666. TARGET_TIMER_TYPE_PERIODIC, target);
  2667. return ERROR_OK;
  2668. }
  2669. static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
  2670. {
  2671. struct cortex_a_common *cortex_a;
  2672. struct adiv5_private_config *pc;
  2673. if (!target->private_config)
  2674. return ERROR_FAIL;
  2675. pc = (struct adiv5_private_config *)target->private_config;
  2676. cortex_a = calloc(1, sizeof(struct cortex_a_common));
  2677. if (!cortex_a) {
  2678. LOG_ERROR("Out of memory");
  2679. return ERROR_FAIL;
  2680. }
  2681. cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
  2682. cortex_a->armv7a_common.is_armv7r = false;
  2683. cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
  2684. return cortex_a_init_arch_info(target, cortex_a, pc->dap);
  2685. }
  2686. static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
  2687. {
  2688. struct cortex_a_common *cortex_a;
  2689. struct adiv5_private_config *pc;
  2690. pc = (struct adiv5_private_config *)target->private_config;
  2691. if (adiv5_verify_config(pc) != ERROR_OK)
  2692. return ERROR_FAIL;
  2693. cortex_a = calloc(1, sizeof(struct cortex_a_common));
  2694. if (!cortex_a) {
  2695. LOG_ERROR("Out of memory");
  2696. return ERROR_FAIL;
  2697. }
  2698. cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
  2699. cortex_a->armv7a_common.is_armv7r = true;
  2700. return cortex_a_init_arch_info(target, cortex_a, pc->dap);
  2701. }
  2702. static void cortex_a_deinit_target(struct target *target)
  2703. {
  2704. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  2705. struct armv7a_common *armv7a = &cortex_a->armv7a_common;
  2706. struct arm_dpm *dpm = &armv7a->dpm;
  2707. uint32_t dscr;
  2708. int retval;
  2709. if (target_was_examined(target)) {
  2710. /* Disable halt for breakpoint, watchpoint and vector catch */
  2711. retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
  2712. armv7a->debug_base + CPUDBG_DSCR, &dscr);
  2713. if (retval == ERROR_OK)
  2714. mem_ap_write_atomic_u32(armv7a->debug_ap,
  2715. armv7a->debug_base + CPUDBG_DSCR,
  2716. dscr & ~DSCR_HALT_DBG_MODE);
  2717. }
  2718. free(cortex_a->wrp_list);
  2719. free(cortex_a->brp_list);
  2720. arm_free_reg_cache(dpm->arm);
  2721. free(dpm->dbp);
  2722. free(dpm->dwp);
  2723. free(target->private_config);
  2724. free(cortex_a);
  2725. }
  2726. static int cortex_a_mmu(struct target *target, int *enabled)
  2727. {
  2728. struct armv7a_common *armv7a = target_to_armv7a(target);
  2729. if (target->state != TARGET_HALTED) {
  2730. LOG_ERROR("%s: target not halted", __func__);
  2731. return ERROR_TARGET_INVALID;
  2732. }
  2733. if (armv7a->is_armv7r)
  2734. *enabled = 0;
  2735. else
2736. *enabled = armv7a->armv7a_mmu.mmu_enabled;
  2737. return ERROR_OK;
  2738. }
  2739. static int cortex_a_virt2phys(struct target *target,
  2740. target_addr_t virt, target_addr_t *phys)
  2741. {
  2742. int retval;
  2743. int mmu_enabled = 0;
  2744. /*
  2745. * If the MMU was not enabled at debug entry, there is no
  2746. * way of knowing if there was ever a valid configuration
  2747. * for it and thus it's not safe to enable it. In this case,
  2748. * just return the virtual address as physical.
  2749. */
  2750. cortex_a_mmu(target, &mmu_enabled);
  2751. if (!mmu_enabled) {
  2752. *phys = virt;
  2753. return ERROR_OK;
  2754. }
2755. /* the MMU must be enabled in order to get a correct translation */
  2756. retval = cortex_a_mmu_modify(target, 1);
  2757. if (retval != ERROR_OK)
  2758. return retval;
  2759. return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
  2760. phys, 1);
  2761. }
  2762. COMMAND_HANDLER(cortex_a_handle_cache_info_command)
  2763. {
  2764. struct target *target = get_current_target(CMD_CTX);
  2765. struct armv7a_common *armv7a = target_to_armv7a(target);
  2766. return armv7a_handle_cache_info_command(CMD,
  2767. &armv7a->armv7a_mmu.armv7a_cache);
  2768. }
  2769. COMMAND_HANDLER(cortex_a_handle_dbginit_command)
  2770. {
  2771. struct target *target = get_current_target(CMD_CTX);
  2772. if (!target_was_examined(target)) {
  2773. LOG_ERROR("target not examined yet");
  2774. return ERROR_FAIL;
  2775. }
  2776. return cortex_a_init_debug_access(target);
  2777. }
  2778. COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
  2779. {
  2780. struct target *target = get_current_target(CMD_CTX);
  2781. struct cortex_a_common *cortex_a = target_to_cortex_a(target);
  2782. static const struct jim_nvp nvp_maskisr_modes[] = {
  2783. { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
  2784. { .name = "on", .value = CORTEX_A_ISRMASK_ON },
  2785. { .name = NULL, .value = -1 },
  2786. };
  2787. const struct jim_nvp *n;
  2788. if (CMD_ARGC > 0) {
  2789. n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
  2790. if (!n->name) {
  2791. LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
  2792. return ERROR_COMMAND_SYNTAX_ERROR;
  2793. }
  2794. cortex_a->isrmasking_mode = n->value;
  2795. }
  2796. n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
  2797. command_print(CMD, "cortex_a interrupt mask %s", n->name);
  2798. return ERROR_OK;
  2799. }
COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	static const struct jim_nvp nvp_dacrfixup_modes[] = {
		{ .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
		{ .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
		{ .name = NULL, .value = -1 },
	};
	const struct jim_nvp *n;

	if (CMD_ARGC > 0) {
		n = jim_nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
		if (!n->name)
			return ERROR_COMMAND_SYNTAX_ERROR;
		cortex_a->dacrfixup_mode = n->value;
	}

	n = jim_nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
	command_print(CMD, "cortex_a domain access control fixup %s", n->name);

	return ERROR_OK;
}
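
/* Commands grouped under 'cortex_a', plus the chained ARMv7-A MMU and
 * SMP command sets.
 */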
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask cortex_a interrupts",
		.usage = "['on'|'off']",
	},
	{
		.name = "dacrfixup",
		.handler = handle_cortex_a_dacrfixup_command,
		.mode = COMMAND_ANY,
		.help = "set domain access control (DACR) to all-manager "
			"on memory access",
		.usage = "['on'|'off']",
	},
	{
		.chain = armv7a_mmu_command_handlers,
	},
	{
		.chain = smp_command_handlers,
	},

	COMMAND_REGISTRATION_DONE
};
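
/* Top-level Cortex-A command set: the generic ARM and ARMv7-A commands
 * plus the 'cortex_a' group defined above.
 */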
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
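
/* Hook table binding the generic target framework to this driver.
 * Virtual accesses go through read/write_memory together with the
 * mmu/virt2phys callbacks above; physical accesses use the
 * *_phys_memory variants directly.
 */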
struct target_type cortexa_target = {
	.name = "cortex_a",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = cortex_a_add_watchpoint,
	.remove_watchpoint = cortex_a_remove_watchpoint,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
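
/* Cortex-R4 reuses the Cortex-A command handlers; only 'dbginit' and
 * 'maskisr' are exposed, since the MMU-related commands do not apply.
 */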
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
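
/* Top-level Cortex-R4 command set: generic ARM commands plus the
 * 'cortex_r4' group defined above.
 */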
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
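
/* Cortex-R4 variant of the hook table. ARMv7-R has no MMU, so plain
 * memory accesses map straight onto the physical-memory helpers and no
 * mmu/virt2phys hooks are provided.
 */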
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_phys_memory,
	.write_memory = cortex_a_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = cortex_a_add_watchpoint,
	.remove_watchpoint = cortex_a_remove_watchpoint,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};