/***************************************************************************
 * Copyright (C) 2005 by Dominic Rath
 * Dominic.Rath@gmx.de
 *
 * Copyright (C) 2006 by Magnus Lundin
 * lundin@mlu.mine.nu
 *
 * Copyright (C) 2008 by Spencer Oliver
 * spen@spen-soft.co.uk
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0)
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "jtag/interface.h"
#include "breakpoints.h"
#include "cortex_m.h"
#include "target_request.h"
#include "target_type.h"
#include "arm_disassembler.h"
#include "register.h"
#include "arm_opcodes.h"
#include "arm_semihosting.h"
#include <helper/time_support.h>

/* NOTE: most of this should work fine for the Cortex-M1 and
 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
 * Some differences: M0/M1 doesn't have FPB remapping or the
 * DWT tracing/profiling support. (So the cycle counter will
 * not be usable; the other stuff isn't currently used here.)
 *
 * Although there are some workarounds for errata seen only in r0p0
 * silicon, such old parts are hard to find and thus not much tested
 * any longer.
 */
  51. /* forward declarations */
  52. static int cortex_m_store_core_reg_u32(struct target *target,
  53. uint32_t num, uint32_t value);
  54. static void cortex_m_dwt_free(struct target *target);
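/* Core register access helpers.
 *
 * ARMv7-M exposes its core registers to the debugger through two words in
 * the Debug Control Block: the register selector is written to DCB_DCRSR
 * (with DCRSR_WnR set for a write) and the data is transferred through
 * DCB_DCRDR. Because DCB_DCRDR also carries the emulated DCC channel, the
 * two helpers below save and restore its contents whenever target debug
 * messaging is enabled.
 */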
  55. static int cortexm_dap_read_coreregister_u32(struct target *target,
  56. uint32_t *value, int regnum)
  57. {
  58. struct armv7m_common *armv7m = target_to_armv7m(target);
  59. int retval;
  60. uint32_t dcrdr;
  61. /* because the DCB_DCRDR is used for the emulated dcc channel
  62. * we have to save/restore the DCB_DCRDR when used */
  63. if (target->dbg_msg_enabled) {
  64. retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
  65. if (retval != ERROR_OK)
  66. return retval;
  67. }
  68. retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regnum);
  69. if (retval != ERROR_OK)
  70. return retval;
  71. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR, value);
  72. if (retval != ERROR_OK)
  73. return retval;
  74. if (target->dbg_msg_enabled) {
  75. /* restore DCB_DCRDR - this needs to be in a separate
  76. * transaction otherwise the emulated DCC channel breaks */
  77. if (retval == ERROR_OK)
  78. retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
  79. }
  80. return retval;
  81. }
  82. static int cortexm_dap_write_coreregister_u32(struct target *target,
  83. uint32_t value, int regnum)
  84. {
  85. struct armv7m_common *armv7m = target_to_armv7m(target);
  86. int retval;
  87. uint32_t dcrdr;
  88. /* because the DCB_DCRDR is used for the emulated dcc channel
  89. * we have to save/restore the DCB_DCRDR when used */
  90. if (target->dbg_msg_enabled) {
  91. retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
  92. if (retval != ERROR_OK)
  93. return retval;
  94. }
  95. retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
  96. if (retval != ERROR_OK)
  97. return retval;
  98. retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRSR, regnum | DCRSR_WnR);
  99. if (retval != ERROR_OK)
  100. return retval;
  101. if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction, otherwise the emulated DCC channel breaks */
  104. if (retval == ERROR_OK)
  105. retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
  106. }
  107. return retval;
  108. }
  109. static int cortex_m_write_debug_halt_mask(struct target *target,
  110. uint32_t mask_on, uint32_t mask_off)
  111. {
  112. struct cortex_m_common *cortex_m = target_to_cm(target);
  113. struct armv7m_common *armv7m = &cortex_m->armv7m;
  114. /* mask off status bits */
  115. cortex_m->dcb_dhcsr &= ~((0xFFFF << 16) | mask_off);
  116. /* create new register mask */
  117. cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
  118. return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
  119. }
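/* Every write to DCB_DHCSR must contain the DBGKEY value in its upper
 * halfword or the write is ignored, which is why the helper above always
 * ORs in DBGKEY together with C_DEBUGEN. C_MASKINTS masks PendSV, SysTick
 * and external interrupts; the helpers below apply the configured ISR
 * masking policy at halt, resume and single-step time. */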
  120. static int cortex_m_set_maskints(struct target *target, bool mask)
  121. {
  122. struct cortex_m_common *cortex_m = target_to_cm(target);
  123. if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
  124. return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
  125. else
  126. return ERROR_OK;
  127. }
  128. static int cortex_m_set_maskints_for_halt(struct target *target)
  129. {
  130. struct cortex_m_common *cortex_m = target_to_cm(target);
  131. switch (cortex_m->isrmasking_mode) {
  132. case CORTEX_M_ISRMASK_AUTO:
  133. /* interrupts taken at resume, whether for step or run -> no mask */
  134. return cortex_m_set_maskints(target, false);
  135. case CORTEX_M_ISRMASK_OFF:
  136. /* interrupts never masked */
  137. return cortex_m_set_maskints(target, false);
  138. case CORTEX_M_ISRMASK_ON:
  139. /* interrupts always masked */
  140. return cortex_m_set_maskints(target, true);
  141. case CORTEX_M_ISRMASK_STEPONLY:
  142. /* interrupts masked for single step only -> mask now if MASKINTS
  143. * erratum, otherwise only mask before stepping */
  144. return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
  145. }
  146. return ERROR_OK;
  147. }
  148. static int cortex_m_set_maskints_for_run(struct target *target)
  149. {
  150. switch (target_to_cm(target)->isrmasking_mode) {
  151. case CORTEX_M_ISRMASK_AUTO:
  152. /* interrupts taken at resume, whether for step or run -> no mask */
  153. return cortex_m_set_maskints(target, false);
  154. case CORTEX_M_ISRMASK_OFF:
  155. /* interrupts never masked */
  156. return cortex_m_set_maskints(target, false);
  157. case CORTEX_M_ISRMASK_ON:
  158. /* interrupts always masked */
  159. return cortex_m_set_maskints(target, true);
  160. case CORTEX_M_ISRMASK_STEPONLY:
  161. /* interrupts masked for single step only -> no mask */
  162. return cortex_m_set_maskints(target, false);
  163. }
  164. return ERROR_OK;
  165. }
  166. static int cortex_m_set_maskints_for_step(struct target *target)
  167. {
  168. switch (target_to_cm(target)->isrmasking_mode) {
  169. case CORTEX_M_ISRMASK_AUTO:
		/* auto mode: pending interrupts were already serviced before this step -> mask */
  171. return cortex_m_set_maskints(target, true);
  172. case CORTEX_M_ISRMASK_OFF:
  173. /* interrupts never masked */
  174. return cortex_m_set_maskints(target, false);
  175. case CORTEX_M_ISRMASK_ON:
  176. /* interrupts always masked */
  177. return cortex_m_set_maskints(target, true);
  178. case CORTEX_M_ISRMASK_STEPONLY:
  179. /* interrupts masked for single step only -> mask */
  180. return cortex_m_set_maskints(target, true);
  181. }
  182. return ERROR_OK;
  183. }
  184. static int cortex_m_clear_halt(struct target *target)
  185. {
  186. struct cortex_m_common *cortex_m = target_to_cm(target);
  187. struct armv7m_common *armv7m = &cortex_m->armv7m;
  188. int retval;
  189. /* clear step if any */
  190. cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
  191. /* Read Debug Fault Status Register */
  192. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
  193. if (retval != ERROR_OK)
  194. return retval;
  195. /* Clear Debug Fault Status */
  196. retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
  197. if (retval != ERROR_OK)
  198. return retval;
  199. LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
  200. return ERROR_OK;
  201. }
  202. static int cortex_m_single_step_core(struct target *target)
  203. {
  204. struct cortex_m_common *cortex_m = target_to_cm(target);
  205. struct armv7m_common *armv7m = &cortex_m->armv7m;
  206. int retval;
  207. /* Mask interrupts before clearing halt, if not done already. This avoids
  208. * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
  209. * HALT can put the core into an unknown state.
  210. */
  211. if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
  212. retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
  213. DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
  214. if (retval != ERROR_OK)
  215. return retval;
  216. }
  217. retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
  218. DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
  219. if (retval != ERROR_OK)
  220. return retval;
  221. LOG_DEBUG(" ");
  222. /* restore dhcsr reg */
  223. cortex_m_clear_halt(target);
  224. return ERROR_OK;
  225. }
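/* The Flash Patch and Breakpoint unit is enabled through FP_CTRL: bit 0 is
 * ENABLE and bit 1 is the KEY bit, which must be written as 1 for the write
 * to take effect, hence the value 3. The register is read back to verify
 * that the unit really enabled. */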
  226. static int cortex_m_enable_fpb(struct target *target)
  227. {
  228. int retval = target_write_u32(target, FP_CTRL, 3);
  229. if (retval != ERROR_OK)
  230. return retval;
  231. /* check the fpb is actually enabled */
  232. uint32_t fpctrl;
  233. retval = target_read_u32(target, FP_CTRL, &fpctrl);
  234. if (retval != ERROR_OK)
  235. return retval;
  236. if (fpctrl & 1)
  237. return ERROR_OK;
  238. return ERROR_FAIL;
  239. }
  240. static int cortex_m_endreset_event(struct target *target)
  241. {
  242. int i;
  243. int retval;
  244. uint32_t dcb_demcr;
  245. struct cortex_m_common *cortex_m = target_to_cm(target);
  246. struct armv7m_common *armv7m = &cortex_m->armv7m;
  247. struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
  248. struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
  249. struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;
  250. /* REVISIT The four debug monitor bits are currently ignored... */
  251. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
  252. if (retval != ERROR_OK)
  253. return retval;
  254. LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
  255. /* this register is used for emulated dcc channel */
  256. retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
  257. if (retval != ERROR_OK)
  258. return retval;
  259. /* Enable debug requests */
  260. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &cortex_m->dcb_dhcsr);
  261. if (retval != ERROR_OK)
  262. return retval;
  263. if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
  264. retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
  265. if (retval != ERROR_OK)
  266. return retval;
  267. }
  268. /* Restore proper interrupt masking setting for running CPU. */
  269. cortex_m_set_maskints_for_run(target);
  270. /* Enable features controlled by ITM and DWT blocks, and catch only
  271. * the vectors we were told to pay attention to.
  272. *
  273. * Target firmware is responsible for all fault handling policy
  274. * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
  275. * or manual updates to the NVIC SHCSR and CCR registers.
  276. */
  277. retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
  278. if (retval != ERROR_OK)
  279. return retval;
  280. /* Paranoia: evidently some (early?) chips don't preserve all the
  281. * debug state (including FPB, DWT, etc) across reset...
  282. */
  283. /* Enable FPB */
  284. retval = cortex_m_enable_fpb(target);
  285. if (retval != ERROR_OK) {
  286. LOG_ERROR("Failed to enable the FPB");
  287. return retval;
  288. }
  289. cortex_m->fpb_enabled = true;
  290. /* Restore FPB registers */
  291. for (i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
  292. retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
  293. if (retval != ERROR_OK)
  294. return retval;
  295. }
  296. /* Restore DWT registers */
  297. for (i = 0; i < cortex_m->dwt_num_comp; i++) {
  298. retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
  299. dwt_list[i].comp);
  300. if (retval != ERROR_OK)
  301. return retval;
  302. retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
  303. dwt_list[i].mask);
  304. if (retval != ERROR_OK)
  305. return retval;
  306. retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
  307. dwt_list[i].function);
  308. if (retval != ERROR_OK)
  309. return retval;
  310. }
  311. retval = dap_run(swjdp);
  312. if (retval != ERROR_OK)
  313. return retval;
  314. register_cache_invalidate(armv7m->arm.core_cache);
  315. /* make sure we have latest dhcsr flags */
  316. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &cortex_m->dcb_dhcsr);
  317. return retval;
  318. }
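/* Translate the sticky NVIC_DFSR bits captured at halt time into an OpenOCD
 * debug reason: BKPT -> breakpoint, DWTTRAP -> watchpoint (or both), VCATCH
 * -> vector catch (reported as a breakpoint), EXTERNAL -> debug request.
 * Reasons already known from the halt request (DBGRQ, single step) are left
 * untouched. */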
  319. static int cortex_m_examine_debug_reason(struct target *target)
  320. {
  321. struct cortex_m_common *cortex_m = target_to_cm(target);
  322. /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
  323. * only check the debug reason if we don't know it already */
  324. if ((target->debug_reason != DBG_REASON_DBGRQ)
  325. && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
  326. if (cortex_m->nvic_dfsr & DFSR_BKPT) {
  327. target->debug_reason = DBG_REASON_BREAKPOINT;
  328. if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
  329. target->debug_reason = DBG_REASON_WPTANDBKPT;
  330. } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
  331. target->debug_reason = DBG_REASON_WATCHPOINT;
  332. else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
  333. target->debug_reason = DBG_REASON_BREAKPOINT;
  334. else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
  335. target->debug_reason = DBG_REASON_DBGRQ;
  336. else /* HALTED */
  337. target->debug_reason = DBG_REASON_UNDEFINED;
  338. }
  339. return ERROR_OK;
  340. }
  341. static int cortex_m_examine_exception_reason(struct target *target)
  342. {
  343. uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
  344. struct armv7m_common *armv7m = target_to_armv7m(target);
  345. struct adiv5_dap *swjdp = armv7m->arm.dap;
  346. int retval;
  347. retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
  348. if (retval != ERROR_OK)
  349. return retval;
  350. switch (armv7m->exception_number) {
  351. case 2: /* NMI */
  352. break;
  353. case 3: /* Hard Fault */
  354. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
  355. if (retval != ERROR_OK)
  356. return retval;
  357. if (except_sr & 0x40000000) {
  358. retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
  359. if (retval != ERROR_OK)
  360. return retval;
  361. }
  362. break;
  363. case 4: /* Memory Management */
  364. retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
  365. if (retval != ERROR_OK)
  366. return retval;
  367. retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
  368. if (retval != ERROR_OK)
  369. return retval;
  370. break;
  371. case 5: /* Bus Fault */
  372. retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
  373. if (retval != ERROR_OK)
  374. return retval;
  375. retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
  376. if (retval != ERROR_OK)
  377. return retval;
  378. break;
  379. case 6: /* Usage Fault */
  380. retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
  381. if (retval != ERROR_OK)
  382. return retval;
  383. break;
  384. case 11: /* SVCall */
  385. break;
  386. case 12: /* Debug Monitor */
  387. retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
  388. if (retval != ERROR_OK)
  389. return retval;
  390. break;
  391. case 14: /* PendSV */
  392. break;
  393. case 15: /* SysTick */
  394. break;
  395. default:
  396. except_sr = 0;
  397. break;
  398. }
  399. retval = dap_run(swjdp);
  400. if (retval == ERROR_OK)
  401. LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
  402. ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
  403. armv7m_exception_string(armv7m->exception_number),
  404. shcsr, except_sr, cfsr, except_ar);
  405. return retval;
  406. }
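/* On debug entry the register cache is refreshed from the core, xPSR is
 * inspected to see whether the core halted inside an exception handler (and
 * which one), and for thread mode the CONTROL register selects whether the
 * MSP or PSP register map is active. */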
  407. static int cortex_m_debug_entry(struct target *target)
  408. {
  409. int i;
  410. uint32_t xPSR;
  411. int retval;
  412. struct cortex_m_common *cortex_m = target_to_cm(target);
  413. struct armv7m_common *armv7m = &cortex_m->armv7m;
  414. struct arm *arm = &armv7m->arm;
  415. struct reg *r;
  416. LOG_DEBUG(" ");
  417. /* Do this really early to minimize the window where the MASKINTS erratum
  418. * can pile up pending interrupts. */
  419. cortex_m_set_maskints_for_halt(target);
  420. cortex_m_clear_halt(target);
  421. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &cortex_m->dcb_dhcsr);
  422. if (retval != ERROR_OK)
  423. return retval;
  424. retval = armv7m->examine_debug_reason(target);
  425. if (retval != ERROR_OK)
  426. return retval;
	/* Examine target state and mode;
	 * first load the registers accessible through the core debug port */
  429. int num_regs = arm->core_cache->num_regs;
  430. for (i = 0; i < num_regs; i++) {
  431. r = &armv7m->arm.core_cache->reg_list[i];
  432. if (!r->valid)
  433. arm->read_core_reg(target, r, i, ARM_MODE_ANY);
  434. }
  435. r = arm->cpsr;
  436. xPSR = buf_get_u32(r->value, 0, 32);
	/* For IT instructions xPSR must be reloaded on resume and cleared during debug execution */
  438. if (xPSR & 0xf00) {
  439. r->dirty = r->valid;
  440. cortex_m_store_core_reg_u32(target, 16, xPSR & ~0xff);
  441. }
  442. /* Are we in an exception handler */
  443. if (xPSR & 0x1FF) {
  444. armv7m->exception_number = (xPSR & 0x1FF);
  445. arm->core_mode = ARM_MODE_HANDLER;
  446. arm->map = armv7m_msp_reg_map;
  447. } else {
  448. unsigned control = buf_get_u32(arm->core_cache
  449. ->reg_list[ARMV7M_CONTROL].value, 0, 2);
  450. /* is this thread privileged? */
  451. arm->core_mode = control & 1
  452. ? ARM_MODE_USER_THREAD
  453. : ARM_MODE_THREAD;
  454. /* which stack is it using? */
  455. if (control & 2)
  456. arm->map = armv7m_psp_reg_map;
  457. else
  458. arm->map = armv7m_msp_reg_map;
  459. armv7m->exception_number = 0;
  460. }
  461. if (armv7m->exception_number)
  462. cortex_m_examine_exception_reason(target);
  463. LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", target->state: %s",
  464. arm_mode_name(arm->core_mode),
  465. buf_get_u32(arm->pc->value, 0, 32),
  466. target_state_name(target));
  467. if (armv7m->post_debug_entry) {
  468. retval = armv7m->post_debug_entry(target);
  469. if (retval != ERROR_OK)
  470. return retval;
  471. }
  472. return ERROR_OK;
  473. }
  474. static int cortex_m_poll(struct target *target)
  475. {
  476. int detected_failure = ERROR_OK;
  477. int retval = ERROR_OK;
  478. enum target_state prev_target_state = target->state;
  479. struct cortex_m_common *cortex_m = target_to_cm(target);
  480. struct armv7m_common *armv7m = &cortex_m->armv7m;
  481. /* Read from Debug Halting Control and Status Register */
  482. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &cortex_m->dcb_dhcsr);
  483. if (retval != ERROR_OK) {
  484. target->state = TARGET_UNKNOWN;
  485. return retval;
  486. }
  487. /* Recover from lockup. See ARMv7-M architecture spec,
  488. * section B1.5.15 "Unrecoverable exception cases".
  489. */
  490. if (cortex_m->dcb_dhcsr & S_LOCKUP) {
  491. LOG_ERROR("%s -- clearing lockup after double fault",
  492. target_name(target));
  493. cortex_m_write_debug_halt_mask(target, C_HALT, 0);
  494. target->debug_reason = DBG_REASON_DBGRQ;
  495. /* We have to execute the rest (the "finally" equivalent, but
  496. * still throw this exception again).
  497. */
  498. detected_failure = ERROR_FAIL;
  499. /* refresh status bits */
  500. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &cortex_m->dcb_dhcsr);
  501. if (retval != ERROR_OK)
  502. return retval;
  503. }
  504. if (cortex_m->dcb_dhcsr & S_RESET_ST) {
  505. if (target->state != TARGET_RESET) {
  506. target->state = TARGET_RESET;
  507. LOG_INFO("%s: external reset detected", target_name(target));
  508. }
  509. return ERROR_OK;
  510. }
  511. if (target->state == TARGET_RESET) {
  512. /* Cannot switch context while running so endreset is
  513. * called with target->state == TARGET_RESET
  514. */
  515. LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
  516. cortex_m->dcb_dhcsr);
  517. retval = cortex_m_endreset_event(target);
  518. if (retval != ERROR_OK) {
  519. target->state = TARGET_UNKNOWN;
  520. return retval;
  521. }
  522. target->state = TARGET_RUNNING;
  523. prev_target_state = TARGET_RUNNING;
  524. }
  525. if (cortex_m->dcb_dhcsr & S_HALT) {
  526. target->state = TARGET_HALTED;
  527. if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
  528. retval = cortex_m_debug_entry(target);
  529. if (retval != ERROR_OK)
  530. return retval;
  531. if (arm_semihosting(target, &retval) != 0)
  532. return retval;
  533. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  534. }
  535. if (prev_target_state == TARGET_DEBUG_RUNNING) {
  536. LOG_DEBUG(" ");
  537. retval = cortex_m_debug_entry(target);
  538. if (retval != ERROR_OK)
  539. return retval;
  540. target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
  541. }
  542. }
  543. /* REVISIT when S_SLEEP is set, it's in a Sleep or DeepSleep state.
  544. * How best to model low power modes?
  545. */
  546. if (target->state == TARGET_UNKNOWN) {
  547. /* check if processor is retiring instructions */
  548. if (cortex_m->dcb_dhcsr & S_RETIRE_ST) {
  549. target->state = TARGET_RUNNING;
  550. retval = ERROR_OK;
  551. }
  552. }
  553. /* Check that target is truly halted, since the target could be resumed externally */
  554. if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
  555. /* registers are now invalid */
  556. register_cache_invalidate(armv7m->arm.core_cache);
  557. target->state = TARGET_RUNNING;
  558. LOG_WARNING("%s: external resume detected", target_name(target));
  559. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  560. retval = ERROR_OK;
  561. }
  562. /* Did we detect a failure condition that we cleared? */
  563. if (detected_failure != ERROR_OK)
  564. retval = detected_failure;
  565. return retval;
  566. }
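/* Halting only requests C_HALT in DHCSR; the transition to TARGET_HALTED is
 * observed later by cortex_m_poll() when S_HALT becomes set. */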
  567. static int cortex_m_halt(struct target *target)
  568. {
  569. LOG_DEBUG("target->state: %s",
  570. target_state_name(target));
  571. if (target->state == TARGET_HALTED) {
  572. LOG_DEBUG("target was already halted");
  573. return ERROR_OK;
  574. }
  575. if (target->state == TARGET_UNKNOWN)
  576. LOG_WARNING("target was in unknown state when halt was requested");
  577. if (target->state == TARGET_RESET) {
  578. if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
  579. LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
  580. return ERROR_TARGET_FAILURE;
  581. } else {
  582. /* we came here in a reset_halt or reset_init sequence
  583. * debug entry was already prepared in cortex_m3_assert_reset()
  584. */
  585. target->debug_reason = DBG_REASON_DBGRQ;
  586. return ERROR_OK;
  587. }
  588. }
  589. /* Write to Debug Halting Control and Status Register */
  590. cortex_m_write_debug_halt_mask(target, C_HALT, 0);
  591. /* Do this really early to minimize the window where the MASKINTS erratum
  592. * can pile up pending interrupts. */
  593. cortex_m_set_maskints_for_halt(target);
  594. target->debug_reason = DBG_REASON_DBGRQ;
  595. return ERROR_OK;
  596. }
  597. static int cortex_m_soft_reset_halt(struct target *target)
  598. {
  599. struct cortex_m_common *cortex_m = target_to_cm(target);
  600. struct armv7m_common *armv7m = &cortex_m->armv7m;
  601. uint32_t dcb_dhcsr = 0;
  602. int retval, timeout = 0;
	/* soft_reset_halt is deprecated on cortex_m as the same functionality
	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
	 * As this reset only uses VC_CORERESET, it only ever resets the Cortex-M
	 * core, not the peripherals */
  607. LOG_WARNING("soft_reset_halt is deprecated, please use 'reset halt' instead.");
  608. /* Set C_DEBUGEN */
  609. retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
  610. if (retval != ERROR_OK)
  611. return retval;
  612. /* Enter debug state on reset; restore DEMCR in endreset_event() */
  613. retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
  614. TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
  615. if (retval != ERROR_OK)
  616. return retval;
  617. /* Request a core-only reset */
  618. retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
  619. AIRCR_VECTKEY | AIRCR_VECTRESET);
  620. if (retval != ERROR_OK)
  621. return retval;
  622. target->state = TARGET_RESET;
  623. /* registers are now invalid */
  624. register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
  625. while (timeout < 100) {
  626. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &dcb_dhcsr);
  627. if (retval == ERROR_OK) {
  628. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
  629. &cortex_m->nvic_dfsr);
  630. if (retval != ERROR_OK)
  631. return retval;
  632. if ((dcb_dhcsr & S_HALT)
  633. && (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
  634. LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
  635. "DFSR 0x%08x",
  636. (unsigned) dcb_dhcsr,
  637. (unsigned) cortex_m->nvic_dfsr);
  638. cortex_m_poll(target);
  639. /* FIXME restore user's vector catch config */
  640. return ERROR_OK;
  641. } else
  642. LOG_DEBUG("waiting for system reset-halt, "
  643. "DHCSR 0x%08x, %d ms",
  644. (unsigned) dcb_dhcsr, timeout);
  645. }
  646. timeout++;
  647. alive_sleep(1);
  648. }
  649. return ERROR_OK;
  650. }
  651. void cortex_m_enable_breakpoints(struct target *target)
  652. {
  653. struct breakpoint *breakpoint = target->breakpoints;
  654. /* set any pending breakpoints */
  655. while (breakpoint) {
  656. if (!breakpoint->set)
  657. cortex_m_set_breakpoint(target, breakpoint);
  658. breakpoint = breakpoint->next;
  659. }
  660. }
  661. static int cortex_m_resume(struct target *target, int current,
  662. target_addr_t address, int handle_breakpoints, int debug_execution)
  663. {
  664. struct armv7m_common *armv7m = target_to_armv7m(target);
  665. struct breakpoint *breakpoint = NULL;
  666. uint32_t resume_pc;
  667. struct reg *r;
  668. if (target->state != TARGET_HALTED) {
  669. LOG_WARNING("target not halted");
  670. return ERROR_TARGET_NOT_HALTED;
  671. }
  672. if (!debug_execution) {
  673. target_free_all_working_areas(target);
  674. cortex_m_enable_breakpoints(target);
  675. cortex_m_enable_watchpoints(target);
  676. }
  677. if (debug_execution) {
  678. r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;
  679. /* Disable interrupts */
  680. /* We disable interrupts in the PRIMASK register instead of
  681. * masking with C_MASKINTS. This is probably the same issue
  682. * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
  683. * in parallel with disabled interrupts can cause local faults
  684. * to not be taken.
  685. *
  686. * REVISIT this clearly breaks non-debug execution, since the
  687. * PRIMASK register state isn't saved/restored... workaround
  688. * by never resuming app code after debug execution.
  689. */
  690. buf_set_u32(r->value, 0, 1, 1);
  691. r->dirty = true;
  692. r->valid = true;
  693. /* Make sure we are in Thumb mode */
  694. r = armv7m->arm.cpsr;
  695. buf_set_u32(r->value, 24, 1, 1);
  696. r->dirty = true;
  697. r->valid = true;
  698. }
  699. /* current = 1: continue on current pc, otherwise continue at <address> */
  700. r = armv7m->arm.pc;
  701. if (!current) {
  702. buf_set_u32(r->value, 0, 32, address);
  703. r->dirty = true;
  704. r->valid = true;
  705. }
  706. /* if we halted last time due to a bkpt instruction
  707. * then we have to manually step over it, otherwise
  708. * the core will break again */
  709. if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
  710. && !debug_execution)
  711. armv7m_maybe_skip_bkpt_inst(target, NULL);
  712. resume_pc = buf_get_u32(r->value, 0, 32);
  713. armv7m_restore_context(target);
  714. /* the front-end may request us not to handle breakpoints */
  715. if (handle_breakpoints) {
  716. /* Single step past breakpoint at current address */
  717. breakpoint = breakpoint_find(target, resume_pc);
  718. if (breakpoint) {
  719. LOG_DEBUG("unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
  720. breakpoint->address,
  721. breakpoint->unique_id);
  722. cortex_m_unset_breakpoint(target, breakpoint);
  723. cortex_m_single_step_core(target);
  724. cortex_m_set_breakpoint(target, breakpoint);
  725. }
  726. }
  727. /* Restart core */
  728. cortex_m_set_maskints_for_run(target);
  729. cortex_m_write_debug_halt_mask(target, 0, C_HALT);
  730. target->debug_reason = DBG_REASON_NOTHALTED;
  731. /* registers are now invalid */
  732. register_cache_invalidate(armv7m->arm.core_cache);
  733. if (!debug_execution) {
  734. target->state = TARGET_RUNNING;
  735. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  736. LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
  737. } else {
  738. target->state = TARGET_DEBUG_RUNNING;
  739. target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
  740. LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
  741. }
  742. return ERROR_OK;
  743. }
  744. /* int irqstepcount = 0; */
  745. static int cortex_m_step(struct target *target, int current,
  746. target_addr_t address, int handle_breakpoints)
  747. {
  748. struct cortex_m_common *cortex_m = target_to_cm(target);
  749. struct armv7m_common *armv7m = &cortex_m->armv7m;
  750. struct breakpoint *breakpoint = NULL;
  751. struct reg *pc = armv7m->arm.pc;
  752. bool bkpt_inst_found = false;
  753. int retval;
  754. bool isr_timed_out = false;
  755. if (target->state != TARGET_HALTED) {
  756. LOG_WARNING("target not halted");
  757. return ERROR_TARGET_NOT_HALTED;
  758. }
  759. /* current = 1: continue on current pc, otherwise continue at <address> */
  760. if (!current)
  761. buf_set_u32(pc->value, 0, 32, address);
  762. uint32_t pc_value = buf_get_u32(pc->value, 0, 32);
  763. /* the front-end may request us not to handle breakpoints */
  764. if (handle_breakpoints) {
  765. breakpoint = breakpoint_find(target, pc_value);
  766. if (breakpoint)
  767. cortex_m_unset_breakpoint(target, breakpoint);
  768. }
  769. armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);
  770. target->debug_reason = DBG_REASON_SINGLESTEP;
  771. armv7m_restore_context(target);
  772. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  773. /* if no bkpt instruction is found at pc then we can perform
  774. * a normal step, otherwise we have to manually step over the bkpt
  775. * instruction - as such simulate a step */
  776. if (bkpt_inst_found == false) {
  777. if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
  778. /* Automatic ISR masking mode off: Just step over the next
  779. * instruction, with interrupts on or off as appropriate. */
  780. cortex_m_set_maskints_for_step(target);
  781. cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
  782. } else {
			/* Process interrupts during stepping in a way that they don't
			 * interfere with debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary breakpoint at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available, we simply do a step with
			 * interrupts enabled.
			 */
			/* 2012-09-29 ph
			 *
			 * If a breakpoint is already set on the lower half word, then a breakpoint on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this; it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate Thumb mode
			 * to gdb.
			 */
  816. if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
  817. LOG_DEBUG("Stepping over next instruction with interrupts disabled");
  818. cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
  819. cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
  820. /* Re-enable interrupts if appropriate */
  821. cortex_m_write_debug_halt_mask(target, C_HALT, 0);
  822. cortex_m_set_maskints_for_halt(target);
  823. }
  824. else {
				/* Set a temporary breakpoint */
  826. if (breakpoint) {
  827. retval = cortex_m_set_breakpoint(target, breakpoint);
  828. } else {
  829. enum breakpoint_type type = BKPT_HARD;
  830. if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
  831. /* FPB rev.1 cannot handle such addr, try BKPT instr */
  832. type = BKPT_SOFT;
  833. }
  834. retval = breakpoint_add(target, pc_value, 2, type);
  835. }
  836. bool tmp_bp_set = (retval == ERROR_OK);
  837. /* No more breakpoints left, just do a step */
  838. if (!tmp_bp_set) {
  839. cortex_m_set_maskints_for_step(target);
  840. cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
  841. /* Re-enable interrupts if appropriate */
  842. cortex_m_write_debug_halt_mask(target, C_HALT, 0);
  843. cortex_m_set_maskints_for_halt(target);
  844. } else {
  845. /* Start the core */
  846. LOG_DEBUG("Starting core to serve pending interrupts");
  847. int64_t t_start = timeval_ms();
  848. cortex_m_set_maskints_for_run(target);
  849. cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);
  850. /* Wait for pending handlers to complete or timeout */
  851. do {
  852. retval = mem_ap_read_atomic_u32(armv7m->debug_ap,
  853. DCB_DHCSR,
  854. &cortex_m->dcb_dhcsr);
  855. if (retval != ERROR_OK) {
  856. target->state = TARGET_UNKNOWN;
  857. return retval;
  858. }
  859. isr_timed_out = ((timeval_ms() - t_start) > 500);
  860. } while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));
  861. /* only remove breakpoint if we created it */
  862. if (breakpoint)
  863. cortex_m_unset_breakpoint(target, breakpoint);
  864. else {
  865. /* Remove the temporary breakpoint */
  866. breakpoint_remove(target, pc_value);
  867. }
  868. if (isr_timed_out) {
  869. LOG_DEBUG("Interrupt handlers didn't complete within time, "
  870. "leaving target running");
  871. } else {
  872. /* Step over next instruction with interrupts disabled */
  873. cortex_m_set_maskints_for_step(target);
  874. cortex_m_write_debug_halt_mask(target,
  875. C_HALT | C_MASKINTS,
  876. 0);
  877. cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
  878. /* Re-enable interrupts if appropriate */
  879. cortex_m_write_debug_halt_mask(target, C_HALT, 0);
  880. cortex_m_set_maskints_for_halt(target);
  881. }
  882. }
  883. }
  884. }
  885. }
  886. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &cortex_m->dcb_dhcsr);
  887. if (retval != ERROR_OK)
  888. return retval;
  889. /* registers are now invalid */
  890. register_cache_invalidate(armv7m->arm.core_cache);
  891. if (breakpoint)
  892. cortex_m_set_breakpoint(target, breakpoint);
  893. if (isr_timed_out) {
  894. /* Leave the core running. The user has to stop execution manually. */
  895. target->debug_reason = DBG_REASON_NOTHALTED;
  896. target->state = TARGET_RUNNING;
  897. return ERROR_OK;
  898. }
  899. LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
  900. " nvic_icsr = 0x%" PRIx32,
  901. cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);
  902. retval = cortex_m_debug_entry(target);
  903. if (retval != ERROR_OK)
  904. return retval;
  905. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  906. LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
  907. " nvic_icsr = 0x%" PRIx32,
  908. cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);
  909. return ERROR_OK;
  910. }
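/* Reset strategy: if the adapter provides SRST it is asserted (possibly
 * early, when SRST does not gate debug access); otherwise the core is reset
 * through NVIC_AIRCR using SYSRESETREQ or VECTRESET as selected by
 * 'cortex_m reset_config'. For reset_halt, VC_CORERESET vector catch is
 * armed in DCB_DEMCR so the core halts before executing the first
 * instruction after reset. */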
  911. static int cortex_m_assert_reset(struct target *target)
  912. {
  913. struct cortex_m_common *cortex_m = target_to_cm(target);
  914. struct armv7m_common *armv7m = &cortex_m->armv7m;
  915. enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;
  916. LOG_DEBUG("target->state: %s",
  917. target_state_name(target));
  918. enum reset_types jtag_reset_config = jtag_get_reset_config();
  919. if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
  920. /* allow scripts to override the reset event */
  921. target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
  922. register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
  923. target->state = TARGET_RESET;
  924. return ERROR_OK;
  925. }
	/* Some cores support connecting while SRST is asserted;
	 * use that mode if it has been configured */
  928. bool srst_asserted = false;
  929. if (!target_was_examined(target)) {
  930. if (jtag_reset_config & RESET_HAS_SRST) {
  931. adapter_assert_reset();
  932. if (target->reset_halt)
  933. LOG_ERROR("Target not examined, will not halt after reset!");
  934. return ERROR_OK;
  935. } else {
  936. LOG_ERROR("Target not examined, reset NOT asserted!");
  937. return ERROR_FAIL;
  938. }
  939. }
  940. if ((jtag_reset_config & RESET_HAS_SRST) &&
  941. (jtag_reset_config & RESET_SRST_NO_GATING)) {
  942. adapter_assert_reset();
  943. srst_asserted = true;
  944. }
  945. /* Enable debug requests */
  946. int retval;
  947. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &cortex_m->dcb_dhcsr);
  948. /* Store important errors instead of failing and proceed to reset assert */
  949. if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
  950. retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
  951. /* If the processor is sleeping in a WFI or WFE instruction, the
  952. * C_HALT bit must be asserted to regain control */
  953. if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
  954. retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);
  955. mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
  956. /* Ignore less important errors */
  957. if (!target->reset_halt) {
  958. /* Set/Clear C_MASKINTS in a separate operation */
  959. cortex_m_set_maskints_for_run(target);
  960. /* clear any debug flags before resuming */
  961. cortex_m_clear_halt(target);
  962. /* clear C_HALT in dhcsr reg */
  963. cortex_m_write_debug_halt_mask(target, 0, C_HALT);
  964. } else {
  965. /* Halt in debug on reset; endreset_event() restores DEMCR.
  966. *
  967. * REVISIT catching BUSERR presumably helps to defend against
  968. * bad vector table entries. Should this include MMERR or
  969. * other flags too?
  970. */
  971. int retval2;
  972. retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
  973. TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
  974. if (retval != ERROR_OK || retval2 != ERROR_OK)
  975. LOG_INFO("AP write error, reset will not halt");
  976. }
  977. if (jtag_reset_config & RESET_HAS_SRST) {
  978. /* default to asserting srst */
  979. if (!srst_asserted)
  980. adapter_assert_reset();
  981. /* srst is asserted, ignore AP access errors */
  982. retval = ERROR_OK;
  983. } else {
		/* Use a standard Cortex-M software reset mechanism.
		 * We default to using VECTRESET as it is supported on all current cores
		 * (except Cortex-M0, M0+ and M1, which support SYSRESETREQ only!)
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
  990. if (!cortex_m->vectreset_supported
  991. && reset_config == CORTEX_M_RESET_VECTRESET) {
  992. reset_config = CORTEX_M_RESET_SYSRESETREQ;
  993. LOG_WARNING("VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
  994. LOG_WARNING("Set 'cortex_m reset_config sysresetreq'.");
  995. }
  996. LOG_DEBUG("Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
  997. ? "SYSRESETREQ" : "VECTRESET");
  998. if (reset_config == CORTEX_M_RESET_VECTRESET) {
  999. LOG_WARNING("Only resetting the Cortex-M core, use a reset-init event "
  1000. "handler to reset any peripherals or configure hardware srst support.");
  1001. }
  1002. int retval3;
  1003. retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
  1004. AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
  1005. ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
  1006. if (retval3 != ERROR_OK)
  1007. LOG_DEBUG("Ignoring AP write error right after reset");
  1008. retval3 = dap_dp_init(armv7m->debug_ap->dap);
  1009. if (retval3 != ERROR_OK)
  1010. LOG_ERROR("DP initialisation failed");
  1011. else {
  1012. /* I do not know why this is necessary, but it
  1013. * fixes strange effects (step/resume cause NMI
  1014. * after reset) on LM3S6918 -- Michael Schwingen
  1015. */
  1016. uint32_t tmp;
  1017. mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
  1018. }
  1019. }
  1020. target->state = TARGET_RESET;
  1021. jtag_sleep(50000);
  1022. register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
  1023. /* now return stored error code if any */
  1024. if (retval != ERROR_OK)
  1025. return retval;
  1026. if (target->reset_halt) {
  1027. retval = target_halt(target);
  1028. if (retval != ERROR_OK)
  1029. return retval;
  1030. }
  1031. return ERROR_OK;
  1032. }
  1033. static int cortex_m_deassert_reset(struct target *target)
  1034. {
  1035. struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
  1036. LOG_DEBUG("target->state: %s",
  1037. target_state_name(target));
  1038. /* deassert reset lines */
  1039. adapter_deassert_reset();
  1040. enum reset_types jtag_reset_config = jtag_get_reset_config();
  1041. if ((jtag_reset_config & RESET_HAS_SRST) &&
  1042. !(jtag_reset_config & RESET_SRST_NO_GATING) &&
  1043. target_was_examined(target)) {
  1044. int retval = dap_dp_init(armv7m->debug_ap->dap);
  1045. if (retval != ERROR_OK) {
  1046. LOG_ERROR("DP initialisation failed");
  1047. return retval;
  1048. }
  1049. }
  1050. return ERROR_OK;
  1051. }
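/* Hardware breakpoints use FPB comparators. A rev.1 FPB can only match in
 * the code region below 0x20000000 and its REPLACE field selects whether
 * the low or high halfword of the matched word triggers; a rev.2 FPB
 * comparator simply holds the breakpoint address with its enable bit.
 * Software breakpoints overwrite the original opcode with a BKPT
 * instruction and restore it when removed. */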
  1052. int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
  1053. {
  1054. int retval;
  1055. int fp_num = 0;
  1056. struct cortex_m_common *cortex_m = target_to_cm(target);
  1057. struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
  1058. if (breakpoint->set) {
  1059. LOG_WARNING("breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
  1060. return ERROR_OK;
  1061. }
  1062. if (breakpoint->type == BKPT_HARD) {
  1063. uint32_t fpcr_value;
		while ((fp_num < cortex_m->fp_num_code) && comparator_list[fp_num].used)
			fp_num++;
  1066. if (fp_num >= cortex_m->fp_num_code) {
  1067. LOG_ERROR("Can not find free FPB Comparator!");
  1068. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1069. }
  1070. breakpoint->set = fp_num + 1;
  1071. fpcr_value = breakpoint->address | 1;
  1072. if (cortex_m->fp_rev == 0) {
  1073. if (breakpoint->address > 0x1FFFFFFF) {
  1074. LOG_ERROR("Cortex-M Flash Patch Breakpoint rev.1 cannot handle HW breakpoint above address 0x1FFFFFFE");
  1075. return ERROR_FAIL;
  1076. }
  1077. uint32_t hilo;
  1078. hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
  1079. fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
  1080. } else if (cortex_m->fp_rev > 1) {
  1081. LOG_ERROR("Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
  1082. return ERROR_FAIL;
  1083. }
  1084. comparator_list[fp_num].used = true;
  1085. comparator_list[fp_num].fpcr_value = fpcr_value;
  1086. target_write_u32(target, comparator_list[fp_num].fpcr_address,
  1087. comparator_list[fp_num].fpcr_value);
  1088. LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
  1089. fp_num,
  1090. comparator_list[fp_num].fpcr_value);
  1091. if (!cortex_m->fpb_enabled) {
  1092. LOG_DEBUG("FPB wasn't enabled, do it now");
  1093. retval = cortex_m_enable_fpb(target);
  1094. if (retval != ERROR_OK) {
  1095. LOG_ERROR("Failed to enable the FPB");
  1096. return retval;
  1097. }
  1098. cortex_m->fpb_enabled = true;
  1099. }
  1100. } else if (breakpoint->type == BKPT_SOFT) {
  1101. uint8_t code[4];
  1102. /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
  1103. * semihosting; don't use that. Otherwise the BKPT
  1104. * parameter is arbitrary.
  1105. */
  1106. buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
  1107. retval = target_read_memory(target,
  1108. breakpoint->address & 0xFFFFFFFE,
  1109. breakpoint->length, 1,
  1110. breakpoint->orig_instr);
  1111. if (retval != ERROR_OK)
  1112. return retval;
  1113. retval = target_write_memory(target,
  1114. breakpoint->address & 0xFFFFFFFE,
  1115. breakpoint->length, 1,
  1116. code);
  1117. if (retval != ERROR_OK)
  1118. return retval;
  1119. breakpoint->set = true;
  1120. }
  1121. LOG_DEBUG("BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (set=%d)",
  1122. breakpoint->unique_id,
  1123. (int)(breakpoint->type),
  1124. breakpoint->address,
  1125. breakpoint->length,
  1126. breakpoint->set);
  1127. return ERROR_OK;
  1128. }
  1129. int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
  1130. {
  1131. int retval;
  1132. struct cortex_m_common *cortex_m = target_to_cm(target);
  1133. struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
  1134. if (!breakpoint->set) {
  1135. LOG_WARNING("breakpoint not set");
  1136. return ERROR_OK;
  1137. }
  1138. LOG_DEBUG("BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (set=%d)",
  1139. breakpoint->unique_id,
  1140. (int)(breakpoint->type),
  1141. breakpoint->address,
  1142. breakpoint->length,
  1143. breakpoint->set);
  1144. if (breakpoint->type == BKPT_HARD) {
  1145. int fp_num = breakpoint->set - 1;
  1146. if ((fp_num < 0) || (fp_num >= cortex_m->fp_num_code)) {
  1147. LOG_DEBUG("Invalid FP Comparator number in breakpoint");
  1148. return ERROR_OK;
  1149. }
  1150. comparator_list[fp_num].used = false;
  1151. comparator_list[fp_num].fpcr_value = 0;
  1152. target_write_u32(target, comparator_list[fp_num].fpcr_address,
  1153. comparator_list[fp_num].fpcr_value);
  1154. } else {
  1155. /* restore original instruction (kept in target endianness) */
  1156. retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
  1157. breakpoint->length, 1,
  1158. breakpoint->orig_instr);
  1159. if (retval != ERROR_OK)
  1160. return retval;
  1161. }
  1162. breakpoint->set = false;
  1163. return ERROR_OK;
  1164. }
  1165. int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
  1166. {
  1167. if (breakpoint->length == 3) {
  1168. LOG_DEBUG("Using a two byte breakpoint for 32bit Thumb-2 request");
  1169. breakpoint->length = 2;
  1170. }
  1171. if ((breakpoint->length != 2)) {
  1172. LOG_INFO("only breakpoints of two bytes length supported");
  1173. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1174. }
  1175. return cortex_m_set_breakpoint(target, breakpoint);
  1176. }
  1177. int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
  1178. {
  1179. if (!breakpoint->set)
  1180. return ERROR_OK;
  1181. return cortex_m_unset_breakpoint(target, breakpoint);
  1182. }
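/* Watchpoints use DWT comparators. With the ARMv7-M encoding, FUNCTION
 * values 5/6/7 select a read/write/read-write address match and MASK gives
 * the number of low address bits to ignore (so lengths must be powers of
 * two). The ARMv8-M encoding in the 'else' branch below uses a match type
 * plus an explicit data-size field instead of an address mask. */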
  1183. int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
  1184. {
  1185. int dwt_num = 0;
  1186. struct cortex_m_common *cortex_m = target_to_cm(target);
  1187. /* REVISIT Don't fully trust these "not used" records ... users
  1188. * may set up breakpoints by hand, e.g. dual-address data value
  1189. * watchpoint using comparator #1; comparator #0 matching cycle
  1190. * count; send data trace info through ITM and TPIU; etc
  1191. */
  1192. struct cortex_m_dwt_comparator *comparator;
	for (comparator = cortex_m->dwt_comparator_list;
			dwt_num < cortex_m->dwt_num_comp && comparator->used;
			comparator++, dwt_num++)
		continue;
  1197. if (dwt_num >= cortex_m->dwt_num_comp) {
  1198. LOG_ERROR("Can not find free DWT Comparator");
  1199. return ERROR_FAIL;
  1200. }
  1201. comparator->used = true;
  1202. watchpoint->set = dwt_num + 1;
  1203. comparator->comp = watchpoint->address;
  1204. target_write_u32(target, comparator->dwt_comparator_address + 0,
  1205. comparator->comp);
  1206. if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
  1207. uint32_t mask = 0, temp;
  1208. /* watchpoint params were validated earlier */
  1209. temp = watchpoint->length;
  1210. while (temp) {
  1211. temp >>= 1;
  1212. mask++;
  1213. }
  1214. mask--;
  1215. comparator->mask = mask;
  1216. target_write_u32(target, comparator->dwt_comparator_address + 4,
  1217. comparator->mask);
  1218. switch (watchpoint->rw) {
  1219. case WPT_READ:
  1220. comparator->function = 5;
  1221. break;
  1222. case WPT_WRITE:
  1223. comparator->function = 6;
  1224. break;
  1225. case WPT_ACCESS:
  1226. comparator->function = 7;
  1227. break;
  1228. }
  1229. } else {
  1230. uint32_t data_size = watchpoint->length >> 1;
  1231. comparator->mask = (watchpoint->length >> 1) | 1;
  1232. switch (watchpoint->rw) {
  1233. case WPT_ACCESS:
  1234. comparator->function = 4;
  1235. break;
  1236. case WPT_WRITE:
  1237. comparator->function = 5;
  1238. break;
  1239. case WPT_READ:
  1240. comparator->function = 6;
  1241. break;
  1242. }
  1243. comparator->function = comparator->function | (1 << 4) |
  1244. (data_size << 10);
  1245. }
  1246. target_write_u32(target, comparator->dwt_comparator_address + 8,
  1247. comparator->function);
  1248. LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
  1249. watchpoint->unique_id, dwt_num,
  1250. (unsigned) comparator->comp,
  1251. (unsigned) comparator->mask,
  1252. (unsigned) comparator->function);
  1253. return ERROR_OK;
  1254. }
  1255. int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
  1256. {
  1257. struct cortex_m_common *cortex_m = target_to_cm(target);
  1258. struct cortex_m_dwt_comparator *comparator;
  1259. int dwt_num;
  1260. if (!watchpoint->set) {
  1261. LOG_WARNING("watchpoint (wpid: %d) not set",
  1262. watchpoint->unique_id);
  1263. return ERROR_OK;
  1264. }
  1265. dwt_num = watchpoint->set - 1;
  1266. LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
  1267. watchpoint->unique_id, dwt_num,
  1268. (unsigned) watchpoint->address);
  1269. if ((dwt_num < 0) || (dwt_num >= cortex_m->dwt_num_comp)) {
  1270. LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
  1271. return ERROR_OK;
  1272. }
  1273. comparator = cortex_m->dwt_comparator_list + dwt_num;
  1274. comparator->used = false;
  1275. comparator->function = 0;
  1276. target_write_u32(target, comparator->dwt_comparator_address + 8,
  1277. comparator->function);
  1278. watchpoint->set = false;
  1279. return ERROR_OK;
  1280. }
  1281. int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
  1282. {
  1283. struct cortex_m_common *cortex_m = target_to_cm(target);
  1284. if (cortex_m->dwt_comp_available < 1) {
  1285. LOG_DEBUG("no comparators?");
  1286. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1287. }
  1288. /* hardware doesn't support data value masking */
  1289. if (watchpoint->mask != ~(uint32_t)0) {
  1290. LOG_DEBUG("watchpoint value masks not supported");
  1291. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1292. }
  1293. /* hardware allows address masks of up to 32K */
  1294. unsigned mask;
  1295. for (mask = 0; mask < 16; mask++) {
  1296. if ((1u << mask) == watchpoint->length)
  1297. break;
  1298. }
  1299. if (mask == 16) {
  1300. LOG_DEBUG("unsupported watchpoint length");
  1301. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1302. }
  1303. if (watchpoint->address & ((1 << mask) - 1)) {
  1304. LOG_DEBUG("watchpoint address is unaligned");
  1305. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1306. }
  1307. /* Caller doesn't seem to be able to describe watching for data
  1308. * values of zero; that flags "no value".
  1309. *
  1310. * REVISIT This DWT may well be able to watch for specific data
  1311. * values. Requires comparator #1 to set DATAVMATCH and match
  1312. * the data, and another comparator (DATAVADDR0) matching addr.
  1313. */
  1314. if (watchpoint->value) {
  1315. LOG_DEBUG("data value watchpoint not YET supported");
  1316. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1317. }
  1318. cortex_m->dwt_comp_available--;
  1319. LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);
  1320. return ERROR_OK;
  1321. }
  1322. int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
  1323. {
  1324. struct cortex_m_common *cortex_m = target_to_cm(target);
  1325. /* REVISIT why check? DWT can be updated with core running ... */
  1326. if (target->state != TARGET_HALTED) {
  1327. LOG_WARNING("target not halted");
  1328. return ERROR_TARGET_NOT_HALTED;
  1329. }
  1330. if (watchpoint->set)
  1331. cortex_m_unset_watchpoint(target, watchpoint);
  1332. cortex_m->dwt_comp_available++;
  1333. LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);
  1334. return ERROR_OK;
  1335. }
  1336. void cortex_m_enable_watchpoints(struct target *target)
  1337. {
  1338. struct watchpoint *watchpoint = target->watchpoints;
  1339. /* set any pending watchpoints */
  1340. while (watchpoint) {
  1341. if (!watchpoint->set)
  1342. cortex_m_set_watchpoint(target, watchpoint);
  1343. watchpoint = watchpoint->next;
  1344. }
  1345. }
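/* Register numbers 0..18 map directly onto the DCRSR selectors for R0-R12,
 * SP, LR, PC (DebugReturnAddress), xPSR, MSP and PSP. FPSCR uses selector
 * 0x21 and S0-S31 use selectors 0x40-0x5F. PRIMASK, BASEPRI, FAULTMASK and
 * CONTROL are byte-wide fields of the special-purpose register read and
 * written through selector 20. */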
  1346. static int cortex_m_load_core_reg_u32(struct target *target,
  1347. uint32_t num, uint32_t *value)
  1348. {
  1349. int retval;
  1350. /* NOTE: we "know" here that the register identifiers used
  1351. * in the v7m header match the Cortex-M3 Debug Core Register
  1352. * Selector values for R0..R15, xPSR, MSP, and PSP.
  1353. */
  1354. switch (num) {
  1355. case 0 ... 18:
  1356. /* read a normal core register */
  1357. retval = cortexm_dap_read_coreregister_u32(target, value, num);
  1358. if (retval != ERROR_OK) {
  1359. LOG_ERROR("JTAG failure %i", retval);
  1360. return ERROR_JTAG_DEVICE_ERROR;
  1361. }
  1362. LOG_DEBUG("load from core reg %i value 0x%" PRIx32 "", (int)num, *value);
  1363. break;
  1364. case ARMV7M_FPSCR:
  1365. /* Floating-point Status and Registers */
  1366. retval = target_write_u32(target, DCB_DCRSR, 0x21);
  1367. if (retval != ERROR_OK)
  1368. return retval;
  1369. retval = target_read_u32(target, DCB_DCRDR, value);
  1370. if (retval != ERROR_OK)
  1371. return retval;
  1372. LOG_DEBUG("load from FPSCR value 0x%" PRIx32, *value);
  1373. break;
  1374. case ARMV7M_S0 ... ARMV7M_S31:
  1375. /* Floating-point Status and Registers */
  1376. retval = target_write_u32(target, DCB_DCRSR, num - ARMV7M_S0 + 0x40);
  1377. if (retval != ERROR_OK)
  1378. return retval;
  1379. retval = target_read_u32(target, DCB_DCRDR, value);
  1380. if (retval != ERROR_OK)
  1381. return retval;
  1382. LOG_DEBUG("load from FPU reg S%d value 0x%" PRIx32,
  1383. (int)(num - ARMV7M_S0), *value);
  1384. break;
  1385. case ARMV7M_PRIMASK:
  1386. case ARMV7M_BASEPRI:
  1387. case ARMV7M_FAULTMASK:
  1388. case ARMV7M_CONTROL:
  1389. /* Cortex-M3 packages these four registers as bitfields
  1390. * in one Debug Core register. So say r0 and r2 docs;
  1391. * it was removed from r1 docs, but still works.
  1392. */
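/* Packed layout in that register, per the field extraction below:
 * PRIMASK in bit 0, BASEPRI in bits [15:8], FAULTMASK in bit 16,
 * CONTROL in bits [25:24]. */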
  1393. cortexm_dap_read_coreregister_u32(target, value, 20);
  1394. switch (num) {
  1395. case ARMV7M_PRIMASK:
  1396. *value = buf_get_u32((uint8_t *)value, 0, 1);
  1397. break;
  1398. case ARMV7M_BASEPRI:
  1399. *value = buf_get_u32((uint8_t *)value, 8, 8);
  1400. break;
  1401. case ARMV7M_FAULTMASK:
  1402. *value = buf_get_u32((uint8_t *)value, 16, 1);
  1403. break;
  1404. case ARMV7M_CONTROL:
  1405. *value = buf_get_u32((uint8_t *)value, 24, 2);
  1406. break;
  1407. }
  1408. LOG_DEBUG("load from special reg %i value 0x%" PRIx32 "", (int)num, *value);
  1409. break;
  1410. default:
  1411. return ERROR_COMMAND_SYNTAX_ERROR;
  1412. }
  1413. return ERROR_OK;
  1414. }
  1415. static int cortex_m_store_core_reg_u32(struct target *target,
  1416. uint32_t num, uint32_t value)
  1417. {
  1418. int retval;
  1419. uint32_t reg;
  1420. struct armv7m_common *armv7m = target_to_armv7m(target);
  1421. /* NOTE: we "know" here that the register identifiers used
  1422. * in the v7m header match the Cortex-M3 Debug Core Register
  1423. * Selector values for R0..R15, xPSR, MSP, and PSP.
  1424. */
  1425. switch (num) {
  1426. case 0 ... 18:
  1427. retval = cortexm_dap_write_coreregister_u32(target, value, num);
  1428. if (retval != ERROR_OK) {
  1429. struct reg *r;
  1430. LOG_ERROR("JTAG failure");
  1431. r = armv7m->arm.core_cache->reg_list + num;
  1432. r->dirty = r->valid;
  1433. return ERROR_JTAG_DEVICE_ERROR;
  1434. }
  1435. LOG_DEBUG("write core reg %i value 0x%" PRIx32 "", (int)num, value);
  1436. break;
  1437. case ARMV7M_FPSCR:
1438. /* Floating-point Status and Control Register (FPSCR) */
  1439. retval = target_write_u32(target, DCB_DCRDR, value);
  1440. if (retval != ERROR_OK)
  1441. return retval;
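/* 0x21 is the DCRSR selector used for FPSCR (matching the read path
 * above); bit 16 is the DCRSR write-enable (REGWnR), requesting a
 * register write instead of a read. */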
  1442. retval = target_write_u32(target, DCB_DCRSR, 0x21 | (1<<16));
  1443. if (retval != ERROR_OK)
  1444. return retval;
  1445. LOG_DEBUG("write FPSCR value 0x%" PRIx32, value);
  1446. break;
  1447. case ARMV7M_S0 ... ARMV7M_S31:
1448. /* Floating-point registers S0..S31 */
  1449. retval = target_write_u32(target, DCB_DCRDR, value);
  1450. if (retval != ERROR_OK)
  1451. return retval;
  1452. retval = target_write_u32(target, DCB_DCRSR, (num - ARMV7M_S0 + 0x40) | (1<<16));
  1453. if (retval != ERROR_OK)
  1454. return retval;
  1455. LOG_DEBUG("write FPU reg S%d value 0x%" PRIx32,
  1456. (int)(num - ARMV7M_S0), value);
  1457. break;
  1458. case ARMV7M_PRIMASK:
  1459. case ARMV7M_BASEPRI:
  1460. case ARMV7M_FAULTMASK:
  1461. case ARMV7M_CONTROL:
  1462. /* Cortex-M3 packages these four registers as bitfields
  1463. * in one Debug Core register. So say r0 and r2 docs;
  1464. * it was removed from r1 docs, but still works.
  1465. */
  1466. cortexm_dap_read_coreregister_u32(target, &reg, 20);
  1467. switch (num) {
  1468. case ARMV7M_PRIMASK:
  1469. buf_set_u32((uint8_t *)&reg, 0, 1, value);
  1470. break;
  1471. case ARMV7M_BASEPRI:
  1472. buf_set_u32((uint8_t *)&reg, 8, 8, value);
  1473. break;
  1474. case ARMV7M_FAULTMASK:
  1475. buf_set_u32((uint8_t *)&reg, 16, 1, value);
  1476. break;
  1477. case ARMV7M_CONTROL:
  1478. buf_set_u32((uint8_t *)&reg, 24, 2, value);
  1479. break;
  1480. }
  1481. cortexm_dap_write_coreregister_u32(target, reg, 20);
  1482. LOG_DEBUG("write special reg %i value 0x%" PRIx32 " ", (int)num, value);
  1483. break;
  1484. default:
  1485. return ERROR_COMMAND_SYNTAX_ERROR;
  1486. }
  1487. return ERROR_OK;
  1488. }
  1489. static int cortex_m_read_memory(struct target *target, target_addr_t address,
  1490. uint32_t size, uint32_t count, uint8_t *buffer)
  1491. {
  1492. struct armv7m_common *armv7m = target_to_armv7m(target);
  1493. if (armv7m->arm.is_armv6m) {
  1494. /* armv6m does not handle unaligned memory access */
  1495. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1496. return ERROR_TARGET_UNALIGNED_ACCESS;
  1497. }
  1498. return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
  1499. }
  1500. static int cortex_m_write_memory(struct target *target, target_addr_t address,
  1501. uint32_t size, uint32_t count, const uint8_t *buffer)
  1502. {
  1503. struct armv7m_common *armv7m = target_to_armv7m(target);
  1504. if (armv7m->arm.is_armv6m) {
  1505. /* armv6m does not handle unaligned memory access */
  1506. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1507. return ERROR_TARGET_UNALIGNED_ACCESS;
  1508. }
  1509. return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
  1510. }
  1511. static int cortex_m_init_target(struct command_context *cmd_ctx,
  1512. struct target *target)
  1513. {
  1514. armv7m_build_reg_cache(target);
  1515. arm_semihosting_init(target);
  1516. return ERROR_OK;
  1517. }
  1518. void cortex_m_deinit_target(struct target *target)
  1519. {
  1520. struct cortex_m_common *cortex_m = target_to_cm(target);
  1521. if (cortex_m->armv7m.debug_ap)
  1522. dap_dp_uninit(cortex_m->armv7m.debug_ap->dap);
  1523. free(cortex_m->fp_comparator_list);
  1524. cortex_m_dwt_free(target);
  1525. armv7m_free_reg_cache(target);
  1526. free(target->private_config);
  1527. free(cortex_m);
  1528. }
  1529. int cortex_m_profiling(struct target *target, uint32_t *samples,
  1530. uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
  1531. {
  1532. struct timeval timeout, now;
  1533. struct armv7m_common *armv7m = target_to_armv7m(target);
  1534. uint32_t reg_value;
  1535. bool use_pcsr = false;
  1536. int retval = ERROR_OK;
  1537. struct reg *reg;
  1538. gettimeofday(&timeout, NULL);
  1539. timeval_add_time(&timeout, seconds, 0);
  1540. retval = target_read_u32(target, DWT_PCSR, &reg_value);
  1541. if (retval != ERROR_OK) {
  1542. LOG_ERROR("Error while reading PCSR");
  1543. return retval;
  1544. }
  1545. if (reg_value != 0) {
  1546. use_pcsr = true;
  1547. LOG_INFO("Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
  1548. } else {
  1549. LOG_INFO("Starting profiling. Halting and resuming the"
  1550. " target as often as we can...");
  1551. reg = register_get_by_name(target->reg_cache, "pc", 1);
  1552. }
  1553. /* Make sure the target is running */
  1554. target_poll(target);
  1555. if (target->state == TARGET_HALTED)
  1556. retval = target_resume(target, 1, 0, 0, 0);
  1557. if (retval != ERROR_OK) {
  1558. LOG_ERROR("Error while resuming target");
  1559. return retval;
  1560. }
  1561. uint32_t sample_count = 0;
  1562. for (;;) {
  1563. if (use_pcsr) {
  1564. if (armv7m && armv7m->debug_ap) {
  1565. uint32_t read_count = max_num_samples - sample_count;
  1566. if (read_count > 1024)
  1567. read_count = 1024;
  1568. retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
  1569. (void *)&samples[sample_count],
  1570. 4, read_count, DWT_PCSR);
  1571. sample_count += read_count;
  1572. } else {
  1573. target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
  1574. }
  1575. } else {
  1576. target_poll(target);
  1577. if (target->state == TARGET_HALTED) {
  1578. reg_value = buf_get_u32(reg->value, 0, 32);
  1579. /* current pc, addr = 0, do not handle breakpoints, not debugging */
  1580. retval = target_resume(target, 1, 0, 0, 0);
  1581. samples[sample_count++] = reg_value;
  1582. target_poll(target);
  1583. alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
  1584. } else if (target->state == TARGET_RUNNING) {
  1585. /* We want to quickly sample the PC. */
  1586. retval = target_halt(target);
  1587. } else {
  1588. LOG_INFO("Target not halted or running");
  1589. retval = ERROR_OK;
  1590. break;
  1591. }
  1592. }
  1593. if (retval != ERROR_OK) {
  1594. LOG_ERROR("Error while reading %s", use_pcsr ? "PCSR" : "target pc");
  1595. return retval;
  1596. }
  1597. gettimeofday(&now, NULL);
  1598. if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
  1599. LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
  1600. break;
  1601. }
  1602. }
  1603. *num_samples = sample_count;
  1604. return retval;
  1605. }
  1606. /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
  1607. * on r/w if the core is not running, and clear on resume or reset ... or
  1608. * at least, in a post_restore_context() method.
  1609. */
  1610. struct dwt_reg_state {
  1611. struct target *target;
  1612. uint32_t addr;
  1613. uint8_t value[4]; /* scratch/cache */
  1614. };
  1615. static int cortex_m_dwt_get_reg(struct reg *reg)
  1616. {
  1617. struct dwt_reg_state *state = reg->arch_info;
  1618. uint32_t tmp;
  1619. int retval = target_read_u32(state->target, state->addr, &tmp);
  1620. if (retval != ERROR_OK)
  1621. return retval;
  1622. buf_set_u32(state->value, 0, 32, tmp);
  1623. return ERROR_OK;
  1624. }
  1625. static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
  1626. {
  1627. struct dwt_reg_state *state = reg->arch_info;
  1628. return target_write_u32(state->target, state->addr,
  1629. buf_get_u32(buf, 0, reg->size));
  1630. }
  1631. struct dwt_reg {
  1632. uint32_t addr;
  1633. const char *name;
  1634. unsigned size;
  1635. };
  1636. static const struct dwt_reg dwt_base_regs[] = {
  1637. { DWT_CTRL, "dwt_ctrl", 32, },
  1638. /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
  1639. * increments while the core is asleep.
  1640. */
  1641. { DWT_CYCCNT, "dwt_cyccnt", 32, },
  1642. /* plus some 8 bit counters, useful for profiling with TPIU */
  1643. };
  1644. static const struct dwt_reg dwt_comp[] = {
  1645. #define DWT_COMPARATOR(i) \
  1646. { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
  1647. { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
  1648. { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
  1649. DWT_COMPARATOR(0),
  1650. DWT_COMPARATOR(1),
  1651. DWT_COMPARATOR(2),
  1652. DWT_COMPARATOR(3),
  1653. DWT_COMPARATOR(4),
  1654. DWT_COMPARATOR(5),
  1655. DWT_COMPARATOR(6),
  1656. DWT_COMPARATOR(7),
  1657. DWT_COMPARATOR(8),
  1658. DWT_COMPARATOR(9),
  1659. DWT_COMPARATOR(10),
  1660. DWT_COMPARATOR(11),
  1661. DWT_COMPARATOR(12),
  1662. DWT_COMPARATOR(13),
  1663. DWT_COMPARATOR(14),
  1664. DWT_COMPARATOR(15),
  1665. #undef DWT_COMPARATOR
  1666. };
  1667. static const struct reg_arch_type dwt_reg_type = {
  1668. .get = cortex_m_dwt_get_reg,
  1669. .set = cortex_m_dwt_set_reg,
  1670. };
  1671. static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
  1672. {
  1673. struct dwt_reg_state *state;
  1674. state = calloc(1, sizeof *state);
  1675. if (!state)
  1676. return;
  1677. state->addr = d->addr;
  1678. state->target = t;
  1679. r->name = d->name;
  1680. r->size = d->size;
  1681. r->value = state->value;
  1682. r->arch_info = state;
  1683. r->type = &dwt_reg_type;
  1684. }
  1685. void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
  1686. {
  1687. uint32_t dwtcr;
  1688. struct reg_cache *cache;
  1689. struct cortex_m_dwt_comparator *comparator;
  1690. int reg, i;
  1691. target_read_u32(target, DWT_CTRL, &dwtcr);
  1692. LOG_DEBUG("DWT_CTRL: 0x%" PRIx32, dwtcr);
  1693. if (!dwtcr) {
  1694. LOG_DEBUG("no DWT");
  1695. return;
  1696. }
  1697. target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
  1698. LOG_DEBUG("DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);
  1699. cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
  1700. cm->dwt_comp_available = cm->dwt_num_comp;
  1701. cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
  1702. sizeof(struct cortex_m_dwt_comparator));
  1703. if (!cm->dwt_comparator_list) {
  1704. fail0:
  1705. cm->dwt_num_comp = 0;
  1706. LOG_ERROR("out of mem");
  1707. return;
  1708. }
  1709. cache = calloc(1, sizeof *cache);
  1710. if (!cache) {
  1711. fail1:
  1712. free(cm->dwt_comparator_list);
  1713. goto fail0;
  1714. }
  1715. cache->name = "Cortex-M DWT registers";
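/* The cache holds the two base registers (dwt_ctrl, dwt_cyccnt) plus
 * comp/mask/function for each comparator, hence 2 + 3 * num_comp entries. */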
  1716. cache->num_regs = 2 + cm->dwt_num_comp * 3;
  1717. cache->reg_list = calloc(cache->num_regs, sizeof *cache->reg_list);
  1718. if (!cache->reg_list) {
  1719. free(cache);
  1720. goto fail1;
  1721. }
  1722. for (reg = 0; reg < 2; reg++)
  1723. cortex_m_dwt_addreg(target, cache->reg_list + reg,
  1724. dwt_base_regs + reg);
  1725. comparator = cm->dwt_comparator_list;
  1726. for (i = 0; i < cm->dwt_num_comp; i++, comparator++) {
  1727. int j;
  1728. comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
  1729. for (j = 0; j < 3; j++, reg++)
  1730. cortex_m_dwt_addreg(target, cache->reg_list + reg,
  1731. dwt_comp + 3 * i + j);
  1732. /* make sure we clear any watchpoints enabled on the target */
  1733. target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
  1734. }
  1735. *register_get_last_cache_p(&target->reg_cache) = cache;
  1736. cm->dwt_cache = cache;
  1737. LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
  1738. dwtcr, cm->dwt_num_comp,
  1739. (dwtcr & (0xf << 24)) ? " only" : "/trigger");
  1740. /* REVISIT: if num_comp > 1, check whether comparator #1 can
  1741. * implement single-address data value watchpoints ... so we
  1742. * won't need to check it later, when asked to set one up.
  1743. */
  1744. }
  1745. static void cortex_m_dwt_free(struct target *target)
  1746. {
  1747. struct cortex_m_common *cm = target_to_cm(target);
  1748. struct reg_cache *cache = cm->dwt_cache;
  1749. free(cm->dwt_comparator_list);
  1750. cm->dwt_comparator_list = NULL;
  1751. cm->dwt_num_comp = 0;
  1752. if (cache) {
  1753. register_unlink_cache(&target->reg_cache, cache);
  1754. if (cache->reg_list) {
  1755. for (size_t i = 0; i < cache->num_regs; i++)
  1756. free(cache->reg_list[i].arch_info);
  1757. free(cache->reg_list);
  1758. }
  1759. free(cache);
  1760. }
  1761. cm->dwt_cache = NULL;
  1762. }
  1763. #define MVFR0 0xe000ef40
  1764. #define MVFR1 0xe000ef44
  1765. #define MVFR0_DEFAULT_M4 0x10110021
  1766. #define MVFR1_DEFAULT_M4 0x11000011
  1767. #define MVFR0_DEFAULT_M7_SP 0x10110021
  1768. #define MVFR0_DEFAULT_M7_DP 0x10110221
  1769. #define MVFR1_DEFAULT_M7_SP 0x11000011
  1770. #define MVFR1_DEFAULT_M7_DP 0x12000011
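/* The MVFR0/MVFR1 values above are the expected reset contents of the
 * Media and VFP Feature Registers, compared in cortex_m_examine() below to
 * identify the FPU variant.
 *
 * cortex_m_find_mem_ap() prefers an AHB3 MEM-AP and falls back to an AHB5
 * MEM-AP (as found on newer, e.g. ARMv8-M based, parts). */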
  1771. static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
  1772. struct adiv5_ap **debug_ap)
  1773. {
  1774. if (dap_find_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
  1775. return ERROR_OK;
  1776. return dap_find_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
  1777. }
  1778. int cortex_m_examine(struct target *target)
  1779. {
  1780. int retval;
  1781. uint32_t cpuid, fpcr, mvfr0, mvfr1;
  1782. int i;
  1783. struct cortex_m_common *cortex_m = target_to_cm(target);
  1784. struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
  1785. struct armv7m_common *armv7m = target_to_armv7m(target);
  1786. /* stlink shares the examine handler but does not support
  1787. * all its calls */
  1788. if (!armv7m->stlink) {
  1789. if (cortex_m->apsel == DP_APSEL_INVALID) {
  1790. /* Search for the MEM-AP */
  1791. retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
  1792. if (retval != ERROR_OK) {
  1793. LOG_ERROR("Could not find MEM-AP to control the core");
  1794. return retval;
  1795. }
  1796. } else {
  1797. armv7m->debug_ap = dap_ap(swjdp, cortex_m->apsel);
  1798. }
  1799. /* Leave (only) generic DAP stuff for debugport_init(); */
  1800. armv7m->debug_ap->memaccess_tck = 8;
  1801. retval = mem_ap_init(armv7m->debug_ap);
  1802. if (retval != ERROR_OK)
  1803. return retval;
  1804. }
  1805. if (!target_was_examined(target)) {
  1806. target_set_examined(target);
  1807. /* Read from Device Identification Registers */
  1808. retval = target_read_u32(target, CPUID, &cpuid);
  1809. if (retval != ERROR_OK)
  1810. return retval;
  1811. /* Get CPU Type */
  1812. i = (cpuid >> 4) & 0xf;
  1813. switch (cpuid & ARM_CPUID_PARTNO_MASK) {
  1814. case CORTEX_M23_PARTNO:
  1815. i = 23;
  1816. break;
  1817. case CORTEX_M33_PARTNO:
  1818. i = 33;
  1819. break;
  1820. default:
  1821. break;
  1822. }
  1823. LOG_DEBUG("Cortex-M%d r%" PRId8 "p%" PRId8 " processor detected",
  1824. i, (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
  1825. cortex_m->maskints_erratum = false;
  1826. if (i == 7) {
  1827. uint8_t rev, patch;
  1828. rev = (cpuid >> 20) & 0xf;
  1829. patch = (cpuid >> 0) & 0xf;
  1830. if ((rev == 0) && (patch < 2)) {
  1831. LOG_WARNING("Silicon bug: single stepping may enter pending exception handler!");
  1832. cortex_m->maskints_erratum = true;
  1833. }
  1834. }
  1835. LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);
  1836. /* VECTRESET is not supported on Cortex-M0, M0+ and M1 */
  1837. cortex_m->vectreset_supported = i > 1;
  1838. if (i == 4) {
  1839. target_read_u32(target, MVFR0, &mvfr0);
  1840. target_read_u32(target, MVFR1, &mvfr1);
  1841. /* test for floating point feature on Cortex-M4 */
  1842. if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
  1843. LOG_DEBUG("Cortex-M%d floating point feature FPv4_SP found", i);
  1844. armv7m->fp_feature = FPv4_SP;
  1845. }
  1846. } else if (i == 7 || i == 33) {
  1847. target_read_u32(target, MVFR0, &mvfr0);
  1848. target_read_u32(target, MVFR1, &mvfr1);
  1849. /* test for floating point features on Cortex-M7 */
  1850. if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
  1851. LOG_DEBUG("Cortex-M%d floating point feature FPv5_SP found", i);
  1852. armv7m->fp_feature = FPv5_SP;
  1853. } else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
  1854. LOG_DEBUG("Cortex-M%d floating point feature FPv5_DP found", i);
  1855. armv7m->fp_feature = FPv5_DP;
  1856. }
  1857. } else if (i == 0) {
  1858. /* Cortex-M0 does not support unaligned memory access */
  1859. armv7m->arm.is_armv6m = true;
  1860. }
  1861. if (armv7m->fp_feature == FP_NONE &&
  1862. armv7m->arm.core_cache->num_regs > ARMV7M_NUM_CORE_REGS_NOFP) {
  1863. /* free unavailable FPU registers */
  1864. size_t idx;
  1865. for (idx = ARMV7M_NUM_CORE_REGS_NOFP;
  1866. idx < armv7m->arm.core_cache->num_regs;
  1867. idx++) {
  1868. free(armv7m->arm.core_cache->reg_list[idx].value);
  1869. free(armv7m->arm.core_cache->reg_list[idx].feature);
  1870. free(armv7m->arm.core_cache->reg_list[idx].reg_data_type);
  1871. }
  1872. armv7m->arm.core_cache->num_regs = ARMV7M_NUM_CORE_REGS_NOFP;
  1873. }
  1874. if (!armv7m->stlink) {
  1875. if (i == 3 || i == 4)
1876. /* Cortex-M3/M4 have a 4096 byte autoincrement range,
1877. * see ARM IHI 0031C: MEM-AP 7.2.2 */
  1878. armv7m->debug_ap->tar_autoincr_block = (1 << 12);
  1879. else if (i == 7)
  1880. /* Cortex-M7 has only 1024 bytes autoincrement range */
  1881. armv7m->debug_ap->tar_autoincr_block = (1 << 10);
  1882. }
  1883. /* Configure trace modules */
  1884. retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
  1885. if (retval != ERROR_OK)
  1886. return retval;
  1887. if (armv7m->trace_config.config_type != TRACE_CONFIG_TYPE_DISABLED) {
  1888. armv7m_trace_tpiu_config(target);
  1889. armv7m_trace_itm_config(target);
  1890. }
  1891. /* NOTE: FPB and DWT are both optional. */
  1892. /* Setup FPB */
  1893. target_read_u32(target, FP_CTRL, &fpcr);
1894. /* NUM_CODE is split across FP_CTRL bits [14:12] and [7:4] */
  1895. cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
  1896. cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
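/* Illustrative decode (value assumed, not read from a real part):
 * fpcr = 0x260 gives fp_num_code = ((0x260 >> 8) & 0x70) | ((0x260 >> 4) & 0xF)
 * = 0 | 6 = 6 and fp_num_lit = (0x260 >> 8) & 0xF = 2, i.e. the 6 + 2
 * comparator layout typical of Cortex-M3/M4. */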
  1897. /* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
1898. The revision field is zero based, so fp_rev == 1 means Rev.2! */
  1899. cortex_m->fp_rev = (fpcr >> 28) & 0xf;
  1900. free(cortex_m->fp_comparator_list);
  1901. cortex_m->fp_comparator_list = calloc(
  1902. cortex_m->fp_num_code + cortex_m->fp_num_lit,
  1903. sizeof(struct cortex_m_fp_comparator));
  1904. cortex_m->fpb_enabled = fpcr & 1;
  1905. for (i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
  1906. cortex_m->fp_comparator_list[i].type =
  1907. (i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
  1908. cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
  1909. /* make sure we clear any breakpoints enabled on the target */
  1910. target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
  1911. }
  1912. LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
  1913. fpcr,
  1914. cortex_m->fp_num_code,
  1915. cortex_m->fp_num_lit);
  1916. /* Setup DWT */
  1917. cortex_m_dwt_free(target);
  1918. cortex_m_dwt_setup(cortex_m, target);
  1919. /* These hardware breakpoints only work for code in flash! */
  1920. LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
  1921. target_name(target),
  1922. cortex_m->fp_num_code,
  1923. cortex_m->dwt_num_comp);
  1924. }
  1925. return ERROR_OK;
  1926. }
  1927. static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
  1928. {
  1929. struct armv7m_common *armv7m = target_to_armv7m(target);
  1930. uint16_t dcrdr;
  1931. uint8_t buf[2];
  1932. int retval;
  1933. retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
  1934. if (retval != ERROR_OK)
  1935. return retval;
  1936. dcrdr = target_buffer_get_u16(target, buf);
  1937. *ctrl = (uint8_t)dcrdr;
  1938. *value = (uint8_t)(dcrdr >> 8);
  1939. LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1940. /* write the ack back to the software dcc register
1941. * to signify we have read the data */
  1942. if (dcrdr & (1 << 0)) {
  1943. target_buffer_set_u16(target, buf, 0);
  1944. retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
  1945. if (retval != ERROR_OK)
  1946. return retval;
  1947. }
  1948. return ERROR_OK;
  1949. }
  1950. static int cortex_m_target_request_data(struct target *target,
  1951. uint32_t size, uint8_t *buffer)
  1952. {
  1953. uint8_t data;
  1954. uint8_t ctrl;
  1955. uint32_t i;
  1956. for (i = 0; i < (size * 4); i++) {
  1957. int retval = cortex_m_dcc_read(target, &data, &ctrl);
  1958. if (retval != ERROR_OK)
  1959. return retval;
  1960. buffer[i] = data;
  1961. }
  1962. return ERROR_OK;
  1963. }
  1964. static int cortex_m_handle_target_request(void *priv)
  1965. {
  1966. struct target *target = priv;
  1967. if (!target_was_examined(target))
  1968. return ERROR_OK;
  1969. if (!target->dbg_msg_enabled)
  1970. return ERROR_OK;
  1971. if (target->state == TARGET_RUNNING) {
  1972. uint8_t data;
  1973. uint8_t ctrl;
  1974. int retval;
  1975. retval = cortex_m_dcc_read(target, &data, &ctrl);
  1976. if (retval != ERROR_OK)
  1977. return retval;
  1978. /* check if we have data */
  1979. if (ctrl & (1 << 0)) {
  1980. uint32_t request;
  1981. /* we assume target is quick enough */
  1982. request = data;
  1983. for (int i = 1; i <= 3; i++) {
  1984. retval = cortex_m_dcc_read(target, &data, &ctrl);
  1985. if (retval != ERROR_OK)
  1986. return retval;
  1987. request |= ((uint32_t)data << (i * 8));
  1988. }
  1989. target_request(target, request);
  1990. }
  1991. }
  1992. return ERROR_OK;
  1993. }
  1994. static int cortex_m_init_arch_info(struct target *target,
  1995. struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
  1996. {
  1997. struct armv7m_common *armv7m = &cortex_m->armv7m;
  1998. armv7m_init_arch_info(target, armv7m);
1999. /* default reset mode is to use srst if fitted,
2000. * otherwise fall back to CORTEX_M_RESET_VECTRESET */
  2001. cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
  2002. armv7m->arm.dap = dap;
  2003. /* register arch-specific functions */
  2004. armv7m->examine_debug_reason = cortex_m_examine_debug_reason;
  2005. armv7m->post_debug_entry = NULL;
  2006. armv7m->pre_restore_context = NULL;
  2007. armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
  2008. armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;
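/* Poll the emulated DCC channel for target requests from a periodic timer
 * callback (the period argument below is in milliseconds). */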
  2009. target_register_timer_callback(cortex_m_handle_target_request, 1,
  2010. TARGET_TIMER_TYPE_PERIODIC, target);
  2011. return ERROR_OK;
  2012. }
  2013. static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
  2014. {
  2015. struct adiv5_private_config *pc;
  2016. pc = (struct adiv5_private_config *)target->private_config;
  2017. if (adiv5_verify_config(pc) != ERROR_OK)
  2018. return ERROR_FAIL;
  2019. struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
  2020. if (cortex_m == NULL) {
  2021. LOG_ERROR("No memory creating target");
  2022. return ERROR_FAIL;
  2023. }
  2024. cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
  2025. cortex_m->apsel = pc->ap_num;
  2026. cortex_m_init_arch_info(target, cortex_m, pc->dap);
  2027. return ERROR_OK;
  2028. }
  2029. /*--------------------------------------------------------------------------*/
  2030. static int cortex_m_verify_pointer(struct command_invocation *cmd,
  2031. struct cortex_m_common *cm)
  2032. {
  2033. if (cm->common_magic != CORTEX_M_COMMON_MAGIC) {
  2034. command_print(cmd, "target is not a Cortex-M");
  2035. return ERROR_TARGET_INVALID;
  2036. }
  2037. return ERROR_OK;
  2038. }
2039. /*
2040. * Only stuff below this line should need to verify that its target
2041. * is a Cortex-M. Everything else should have indirected through the
2042. * cortexm_target structure, which is only used with Cortex-M targets.
2043. */
  2044. COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
  2045. {
  2046. struct target *target = get_current_target(CMD_CTX);
  2047. struct cortex_m_common *cortex_m = target_to_cm(target);
  2048. struct armv7m_common *armv7m = &cortex_m->armv7m;
  2049. uint32_t demcr = 0;
  2050. int retval;
  2051. static const struct {
  2052. char name[10];
  2053. unsigned mask;
  2054. } vec_ids[] = {
  2055. { "hard_err", VC_HARDERR, },
  2056. { "int_err", VC_INTERR, },
  2057. { "bus_err", VC_BUSERR, },
  2058. { "state_err", VC_STATERR, },
  2059. { "chk_err", VC_CHKERR, },
  2060. { "nocp_err", VC_NOCPERR, },
  2061. { "mm_err", VC_MMERR, },
  2062. { "reset", VC_CORERESET, },
  2063. };
  2064. retval = cortex_m_verify_pointer(CMD, cortex_m);
  2065. if (retval != ERROR_OK)
  2066. return retval;
  2067. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
  2068. if (retval != ERROR_OK)
  2069. return retval;
  2070. if (CMD_ARGC > 0) {
  2071. unsigned catch = 0;
  2072. if (CMD_ARGC == 1) {
  2073. if (strcmp(CMD_ARGV[0], "all") == 0) {
  2074. catch = VC_HARDERR | VC_INTERR | VC_BUSERR
  2075. | VC_STATERR | VC_CHKERR | VC_NOCPERR
  2076. | VC_MMERR | VC_CORERESET;
  2077. goto write;
  2078. } else if (strcmp(CMD_ARGV[0], "none") == 0)
  2079. goto write;
  2080. }
  2081. while (CMD_ARGC-- > 0) {
  2082. unsigned i;
  2083. for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
  2084. if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
  2085. continue;
  2086. catch |= vec_ids[i].mask;
  2087. break;
  2088. }
  2089. if (i == ARRAY_SIZE(vec_ids)) {
  2090. LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
  2091. return ERROR_COMMAND_SYNTAX_ERROR;
  2092. }
  2093. }
  2094. write:
  2095. /* For now, armv7m->demcr only stores vector catch flags. */
  2096. armv7m->demcr = catch;
  2097. demcr &= ~0xffff;
  2098. demcr |= catch;
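/* The VC_* vector catch enables all live in DEMCR[15:0], so only that half
 * is rewritten here; TRCENA and the monitor-mode bits in the upper half are
 * preserved. */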
  2099. /* write, but don't assume it stuck (why not??) */
  2100. retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
  2101. if (retval != ERROR_OK)
  2102. return retval;
  2103. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
  2104. if (retval != ERROR_OK)
  2105. return retval;
  2106. /* FIXME be sure to clear DEMCR on clean server shutdown.
  2107. * Otherwise the vector catch hardware could fire when there's
  2108. * no debugger hooked up, causing much confusion...
  2109. */
  2110. }
  2111. for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
  2112. command_print(CMD, "%9s: %s", vec_ids[i].name,
  2113. (demcr & vec_ids[i].mask) ? "catch" : "ignore");
  2114. }
  2115. return ERROR_OK;
  2116. }
  2117. COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
  2118. {
  2119. struct target *target = get_current_target(CMD_CTX);
  2120. struct cortex_m_common *cortex_m = target_to_cm(target);
  2121. int retval;
  2122. static const Jim_Nvp nvp_maskisr_modes[] = {
  2123. { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
  2124. { .name = "off", .value = CORTEX_M_ISRMASK_OFF },
  2125. { .name = "on", .value = CORTEX_M_ISRMASK_ON },
  2126. { .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
  2127. { .name = NULL, .value = -1 },
  2128. };
  2129. const Jim_Nvp *n;
  2130. retval = cortex_m_verify_pointer(CMD, cortex_m);
  2131. if (retval != ERROR_OK)
  2132. return retval;
  2133. if (target->state != TARGET_HALTED) {
  2134. command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
  2135. return ERROR_OK;
  2136. }
  2137. if (CMD_ARGC > 0) {
  2138. n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
  2139. if (n->name == NULL)
  2140. return ERROR_COMMAND_SYNTAX_ERROR;
  2141. cortex_m->isrmasking_mode = n->value;
  2142. cortex_m_set_maskints_for_halt(target);
  2143. }
  2144. n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_m->isrmasking_mode);
  2145. command_print(CMD, "cortex_m interrupt mask %s", n->name);
  2146. return ERROR_OK;
  2147. }
  2148. COMMAND_HANDLER(handle_cortex_m_reset_config_command)
  2149. {
  2150. struct target *target = get_current_target(CMD_CTX);
  2151. struct cortex_m_common *cortex_m = target_to_cm(target);
  2152. int retval;
  2153. char *reset_config;
  2154. retval = cortex_m_verify_pointer(CMD, cortex_m);
  2155. if (retval != ERROR_OK)
  2156. return retval;
  2157. if (CMD_ARGC > 0) {
  2158. if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
  2159. cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
  2160. else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
  2161. if (target_was_examined(target)
  2162. && !cortex_m->vectreset_supported)
  2163. LOG_WARNING("VECTRESET is not supported on your Cortex-M core!");
  2164. else
  2165. cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
  2166. } else
  2167. return ERROR_COMMAND_SYNTAX_ERROR;
  2168. }
  2169. switch (cortex_m->soft_reset_config) {
  2170. case CORTEX_M_RESET_SYSRESETREQ:
  2171. reset_config = "sysresetreq";
  2172. break;
  2173. case CORTEX_M_RESET_VECTRESET:
  2174. reset_config = "vectreset";
  2175. break;
  2176. default:
  2177. reset_config = "unknown";
  2178. break;
  2179. }
  2180. command_print(CMD, "cortex_m reset_config %s", reset_config);
  2181. return ERROR_OK;
  2182. }
  2183. static const struct command_registration cortex_m_exec_command_handlers[] = {
  2184. {
  2185. .name = "maskisr",
  2186. .handler = handle_cortex_m_mask_interrupts_command,
  2187. .mode = COMMAND_EXEC,
  2188. .help = "mask cortex_m interrupts",
  2189. .usage = "['auto'|'on'|'off'|'steponly']",
  2190. },
  2191. {
  2192. .name = "vector_catch",
  2193. .handler = handle_cortex_m_vector_catch_command,
  2194. .mode = COMMAND_EXEC,
  2195. .help = "configure hardware vectors to trigger debug entry",
  2196. .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
  2197. },
  2198. {
  2199. .name = "reset_config",
  2200. .handler = handle_cortex_m_reset_config_command,
  2201. .mode = COMMAND_ANY,
  2202. .help = "configure software reset handling",
  2203. .usage = "['sysresetreq'|'vectreset']",
  2204. },
  2205. COMMAND_REGISTRATION_DONE
  2206. };
  2207. static const struct command_registration cortex_m_command_handlers[] = {
  2208. {
  2209. .chain = armv7m_command_handlers,
  2210. },
  2211. {
  2212. .chain = armv7m_trace_command_handlers,
  2213. },
  2214. {
  2215. .name = "cortex_m",
  2216. .mode = COMMAND_EXEC,
  2217. .help = "Cortex-M command group",
  2218. .usage = "",
  2219. .chain = cortex_m_exec_command_handlers,
  2220. },
  2221. COMMAND_REGISTRATION_DONE
  2222. };
  2223. struct target_type cortexm_target = {
  2224. .name = "cortex_m",
  2225. .deprecated_name = "cortex_m3",
  2226. .poll = cortex_m_poll,
  2227. .arch_state = armv7m_arch_state,
  2228. .target_request_data = cortex_m_target_request_data,
  2229. .halt = cortex_m_halt,
  2230. .resume = cortex_m_resume,
  2231. .step = cortex_m_step,
  2232. .assert_reset = cortex_m_assert_reset,
  2233. .deassert_reset = cortex_m_deassert_reset,
  2234. .soft_reset_halt = cortex_m_soft_reset_halt,
  2235. .get_gdb_arch = arm_get_gdb_arch,
  2236. .get_gdb_reg_list = armv7m_get_gdb_reg_list,
  2237. .read_memory = cortex_m_read_memory,
  2238. .write_memory = cortex_m_write_memory,
  2239. .checksum_memory = armv7m_checksum_memory,
  2240. .blank_check_memory = armv7m_blank_check_memory,
  2241. .run_algorithm = armv7m_run_algorithm,
  2242. .start_algorithm = armv7m_start_algorithm,
  2243. .wait_algorithm = armv7m_wait_algorithm,
  2244. .add_breakpoint = cortex_m_add_breakpoint,
  2245. .remove_breakpoint = cortex_m_remove_breakpoint,
  2246. .add_watchpoint = cortex_m_add_watchpoint,
  2247. .remove_watchpoint = cortex_m_remove_watchpoint,
  2248. .commands = cortex_m_command_handlers,
  2249. .target_create = cortex_m_target_create,
  2250. .target_jim_configure = adiv5_jim_configure,
  2251. .init_target = cortex_m_init_target,
  2252. .examine = cortex_m_examine,
  2253. .deinit_target = cortex_m_deinit_target,
  2254. .profiling = cortex_m_profiling,
  2255. };