/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2006 by Magnus Lundin                                   *
 *   lundin@mlu.mine.nu                                                    *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,      *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of       *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the         *
 *   GNU General Public License for more details.                         *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License    *
 *   along with this program. If not, see <http://www.gnu.org/licenses/>. *
 *                                                                         *
 *                                                                         *
 *   Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0)             *
 *                                                                         *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "jtag/interface.h"
#include "breakpoints.h"
#include "cortex_m.h"
#include "target_request.h"
#include "target_type.h"
#include "arm_disassembler.h"
#include "register.h"
#include "arm_opcodes.h"
#include "arm_semihosting.h"
#include <helper/time_support.h>
#include <rtt/rtt.h>

/* NOTE: most of this should work fine for the Cortex-M1 and
 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
 * Some differences: M0/M1 doesn't have FPB remapping or the
 * DWT tracing/profiling support. (So the cycle counter will
 * not be usable; the other stuff isn't currently used here.)
 *
 * Although there are some workarounds for errata seen only in r0p0
 * silicon, such old parts are hard to find and thus not much tested
 * any longer.
 */
/* Supported Cortex-M Cores */
static const struct cortex_m_part_info cortex_m_parts[] = {
	{
		.partno = CORTEX_M0_PARTNO,
		.name = "Cortex-M0",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M0P_PARTNO,
		.name = "Cortex-M0+",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M1_PARTNO,
		.name = "Cortex-M1",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M3_PARTNO,
		.name = "Cortex-M3",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M4_PARTNO,
		.name = "Cortex-M4",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M7_PARTNO,
		.name = "Cortex-M7",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M23_PARTNO,
		.name = "Cortex-M23",
		.arch = ARM_ARCH_V8M,
	},
	{
		.partno = CORTEX_M33_PARTNO,
		.name = "Cortex-M33",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M35P_PARTNO,
		.name = "Cortex-M35P",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M55_PARTNO,
		.name = "Cortex-M55",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
};
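
/* The PARTNO values above correspond to the PARTNO field of the core's CPUID
 * register; when the target is examined (later in this file) the detected part
 * number is looked up in this table to pick the architecture and feature flags
 * for the connected device. */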

/* forward declarations */
static int cortex_m_store_core_reg_u32(struct target *target,
		uint32_t num, uint32_t value);
static void cortex_m_dwt_free(struct target *target);
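
/* Core register access goes through the Debug Core Register Selector (DCRSR)
 * and Debug Core Register Data (DCRDR) registers in the System Control Space:
 * writing a register selector to DCRSR (with DCRSR_WNR set for a write)
 * transfers the value between the selected core register and DCRDR. The two
 * helpers below implement that sequence over the debug access port. */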

static int cortex_m_load_core_reg_u32(struct target *target,
		uint32_t regsel, uint32_t *value)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR, value);
	if (retval != ERROR_OK)
		return retval;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}

static int cortex_m_store_core_reg_u32(struct target *target,
		uint32_t regsel, uint32_t value)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
	if (retval != ERROR_OK)
		return retval;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}

static int cortex_m_write_debug_halt_mask(struct target *target,
	uint32_t mask_on, uint32_t mask_off)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* mask off status bits */
	cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
	/* create new register mask */
	cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;

	return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
}

static int cortex_m_set_maskints(struct target *target, bool mask)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
		return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
	else
		return ERROR_OK;
}
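
/* The three helpers below choose the interrupt-masking behaviour for halt,
 * run and single-step according to cortex_m->isrmasking_mode. That mode is
 * normally selected from the configuration script or the Tcl/telnet console
 * with the 'cortex_m maskisr' command (see the OpenOCD user guide), e.g.:
 *
 *   cortex_m maskisr auto      ;# service pending IRQs, then step masked
 *   cortex_m maskisr steponly  ;# mask during step, unmask while running
 *
 * The command handler itself lives elsewhere in this file. */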

static int cortex_m_set_maskints_for_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	switch (cortex_m->isrmasking_mode) {
		case CORTEX_M_ISRMASK_AUTO:
			/* interrupts taken at resume, whether for step or run -> no mask */
			return cortex_m_set_maskints(target, false);

		case CORTEX_M_ISRMASK_OFF:
			/* interrupts never masked */
			return cortex_m_set_maskints(target, false);

		case CORTEX_M_ISRMASK_ON:
			/* interrupts always masked */
			return cortex_m_set_maskints(target, true);

		case CORTEX_M_ISRMASK_STEPONLY:
			/* interrupts masked for single step only -> mask now if MASKINTS
			 * erratum, otherwise only mask before stepping */
			return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
	}
	return ERROR_OK;
}

static int cortex_m_set_maskints_for_run(struct target *target)
{
	switch (target_to_cm(target)->isrmasking_mode) {
		case CORTEX_M_ISRMASK_AUTO:
			/* interrupts taken at resume, whether for step or run -> no mask */
			return cortex_m_set_maskints(target, false);

		case CORTEX_M_ISRMASK_OFF:
			/* interrupts never masked */
			return cortex_m_set_maskints(target, false);

		case CORTEX_M_ISRMASK_ON:
			/* interrupts always masked */
			return cortex_m_set_maskints(target, true);

		case CORTEX_M_ISRMASK_STEPONLY:
			/* interrupts masked for single step only -> no mask */
			return cortex_m_set_maskints(target, false);
	}
	return ERROR_OK;
}

static int cortex_m_set_maskints_for_step(struct target *target)
{
	switch (target_to_cm(target)->isrmasking_mode) {
		case CORTEX_M_ISRMASK_AUTO:
			/* the auto-interrupt should already be done -> mask */
			return cortex_m_set_maskints(target, true);

		case CORTEX_M_ISRMASK_OFF:
			/* interrupts never masked */
			return cortex_m_set_maskints(target, false);

		case CORTEX_M_ISRMASK_ON:
			/* interrupts always masked */
			return cortex_m_set_maskints(target, true);

		case CORTEX_M_ISRMASK_STEPONLY:
			/* interrupts masked for single step only -> mask */
			return cortex_m_set_maskints(target, true);
	}
	return ERROR_OK;
}

static int cortex_m_clear_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval;

	/* clear step if any */
	cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);

	/* Read Debug Fault Status Register */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
	if (retval != ERROR_OK)
		return retval;

	/* Clear Debug Fault Status */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);

	return ERROR_OK;
}

static int cortex_m_single_step_core(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval;

	/* Mask interrupts before clearing halt, if not done already. This avoids
	 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
	 * HALT can put the core into an unknown state.
	 */
	if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
		retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
				DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
			DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG(" ");

	/* restore dhcsr reg */
	cortex_m_clear_halt(target);

	return ERROR_OK;
}
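
/* In FP_CTRL, bit 1 is the write-enable KEY bit and bit 0 is ENABLE, so the
 * value 3 written below both unlocks the register and switches the Flash
 * Patch and Breakpoint unit on; the read-back confirms ENABLE actually took
 * effect. */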

static int cortex_m_enable_fpb(struct target *target)
{
	int retval = target_write_u32(target, FP_CTRL, 3);
	if (retval != ERROR_OK)
		return retval;

	/* check the fpb is actually enabled */
	uint32_t fpctrl;
	retval = target_read_u32(target, FP_CTRL, &fpctrl);
	if (retval != ERROR_OK)
		return retval;

	if (fpctrl & 1)
		return ERROR_OK;

	return ERROR_FAIL;
}

static int cortex_m_endreset_event(struct target *target)
{
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
	struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Enable debug requests */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &cortex_m->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;
	if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore proper interrupt masking setting for running CPU. */
	cortex_m_set_maskints_for_run(target);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FPB, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = cortex_m_enable_fpb(target);
	if (retval != ERROR_OK) {
		LOG_ERROR("Failed to enable the FPB");
		return retval;
	}

	cortex_m->fpb_enabled = true;

	/* Restore FPB registers */
	for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers */
	for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	register_cache_invalidate(armv7m->arm.core_cache);

	/* make sure we have latest dhcsr flags */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &cortex_m->dcb_dhcsr);

	return retval;
}

static int cortex_m_examine_debug_reason(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);

	/* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
	 * only check the debug reason if we don't know it already */

	if ((target->debug_reason != DBG_REASON_DBGRQ)
		&& (target->debug_reason != DBG_REASON_SINGLESTEP)) {
		if (cortex_m->nvic_dfsr & DFSR_BKPT) {
			target->debug_reason = DBG_REASON_BREAKPOINT;
			if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
				target->debug_reason = DBG_REASON_WPTANDBKPT;
		} else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
			target->debug_reason = DBG_REASON_WATCHPOINT;
		else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
			target->debug_reason = DBG_REASON_BREAKPOINT;
		else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
			target->debug_reason = DBG_REASON_DBGRQ;
		else	/* HALTED */
			target->debug_reason = DBG_REASON_UNDEFINED;
	}

	return ERROR_OK;
}

static int cortex_m_examine_exception_reason(struct target *target)
{
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	switch (armv7m->exception_number) {
		case 2:	/* NMI */
			break;
		case 3:	/* Hard Fault */
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			if (except_sr & 0x40000000) {
				retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
				if (retval != ERROR_OK)
					return retval;
			}
			break;
		case 4:	/* Memory Management */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 5:	/* Bus Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 6:	/* Usage Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 7:	/* Secure Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 11:	/* SVCall */
			break;
		case 12:	/* Debug Monitor */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 14:	/* PendSV */
			break;
		case 15:	/* SysTick */
			break;
		default:
			except_sr = 0;
			break;
	}
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}

static int cortex_m_debug_entry(struct target *target)
{
	int i;
	uint32_t xPSR;
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct arm *arm = &armv7m->arm;
	struct reg *r;

	LOG_DEBUG(" ");

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	cortex_m_clear_halt(target);
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &cortex_m->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* examine PE security state */
	bool secure_state = false;
	if (armv7m->arm.arch == ARM_ARCH_V8M) {
		uint32_t dscsr;

		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
		if (retval != ERROR_OK)
			return retval;

		secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
	}

	/* Examine target state and mode
	 * First load register accessible through core debug port */
	int num_regs = arm->core_cache->num_regs;

	for (i = 0; i < num_regs; i++) {
		r = &armv7m->arm.core_cache->reg_list[i];
		if (r->exist && !r->valid)
			arm->read_core_reg(target, r, i, ARM_MODE_ANY);
	}

	r = arm->cpsr;
	xPSR = buf_get_u32(r->value, 0, 32);

	/* Are we in an exception handler */
	if (xPSR & 0x1FF) {
		armv7m->exception_number = (xPSR & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		unsigned control = buf_get_u32(arm->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 3);

		/* is this thread privileged? */
		arm->core_mode = control & 1
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using? */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m_examine_exception_reason(target);

	LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", cpu in %s state, target->state: %s",
		arm_mode_name(arm->core_mode),
		buf_get_u32(arm->pc->value, 0, 32),
		secure_state ? "Secure" : "Non-Secure",
		target_state_name(target));

	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}

static int cortex_m_poll(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* Read from Debug Halting Control and Status Register */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &cortex_m->dcb_dhcsr);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup. See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m->dcb_dhcsr & S_LOCKUP) {
		LOG_ERROR("%s -- clearing lockup after double fault",
			target_name(target));
		cortex_m_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (cortex_m->dcb_dhcsr & S_RESET_ST) {
		if (target->state != TARGET_RESET) {
			target->state = TARGET_RESET;
			LOG_INFO("%s: external reset detected", target_name(target));
		}
		return ERROR_OK;
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m->dcb_dhcsr);
		retval = cortex_m_endreset_event(target);
		if (retval != ERROR_OK) {
			target->state = TARGET_UNKNOWN;
			return retval;
		}
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (arm_semihosting(target, &retval) != 0)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			LOG_DEBUG(" ");
			retval = cortex_m_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
	}

	if (target->state == TARGET_UNKNOWN) {
		/* check if processor is retiring instructions or sleeping */
		if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Check that target is truly halted, since the target could be resumed externally */
	if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
		/* registers are now invalid */
		register_cache_invalidate(armv7m->arm.core_cache);

		target->state = TARGET_RUNNING;
		LOG_WARNING("%s: external resume detected", target_name(target));
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		retval = ERROR_OK;
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}

static int cortex_m_halt(struct target *target)
{
	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	if (target->state == TARGET_HALTED) {
		LOG_DEBUG("target was already halted");
		return ERROR_OK;
	}

	if (target->state == TARGET_UNKNOWN)
		LOG_WARNING("target was in unknown state when halt was requested");

	if (target->state == TARGET_RESET) {
		if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
			LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
			return ERROR_TARGET_FAILURE;
		} else {
			/* we came here in a reset_halt or reset_init sequence
			 * debug entry was already prepared in cortex_m3_assert_reset()
			 */
			target->debug_reason = DBG_REASON_DBGRQ;

			return ERROR_OK;
		}
	}

	/* Write to Debug Halting Control and Status Register */
	cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}

static int cortex_m_soft_reset_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	uint32_t dcb_dhcsr = 0;
	int retval, timeout = 0;

	/* On a single cortex_m MCU soft_reset_halt should be avoided, as the same
	 * functionality can be obtained by using 'reset halt' and
	 * 'cortex_m reset_config vectreset'.
	 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
	 * core, not the peripherals */
	LOG_DEBUG("soft_reset_halt is discouraged, please use 'reset halt' instead.");

	if (!cortex_m->vectreset_supported) {
		LOG_ERROR("VECTRESET is not supported on this Cortex-M core");
		return ERROR_FAIL;
	}

	/* Set C_DEBUGEN */
	retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
	if (retval != ERROR_OK)
		return retval;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	while (timeout < 100) {
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &dcb_dhcsr);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
					&cortex_m->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			if ((dcb_dhcsr & S_HALT)
				&& (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
				LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
					"DFSR 0x%08x",
					(unsigned) dcb_dhcsr,
					(unsigned) cortex_m->nvic_dfsr);
				cortex_m_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else
				LOG_DEBUG("waiting for system reset-halt, "
					"DHCSR 0x%08x, %d ms",
					(unsigned) dcb_dhcsr, timeout);
		}
		timeout++;
		alive_sleep(1);
	}

	return ERROR_OK;
}

void cortex_m_enable_breakpoints(struct target *target)
{
	struct breakpoint *breakpoint = target->breakpoints;

	/* set any pending breakpoints */
	while (breakpoint) {
		if (!breakpoint->set)
			cortex_m_set_breakpoint(target, breakpoint);
		breakpoint = breakpoint->next;
	}
}

static int cortex_m_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution) {
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * This breaks non-debug (application) execution if not
		 * called from armv7m_start_algorithm() which saves registers.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode, set xPSR.T bit */
		/* armv7m_start_algorithm() initializes entire xPSR register.
		 * This duplicity handles the case when cortex_m_resume()
		 * is used with the debug_execution flag directly,
		 * not called through armv7m_start_algorithm().
		 */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);

	armv7m_restore_context(target);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
				breakpoint->address,
				breakpoint->unique_id);
			cortex_m_unset_breakpoint(target, breakpoint);
			cortex_m_single_step_core(target);
			cortex_m_set_breakpoint(target, breakpoint);
		}
	}

	/* Restart core */
	cortex_m_set_maskints_for_run(target);
	cortex_m_write_debug_halt_mask(target, 0, C_HALT);

	target->debug_reason = DBG_REASON_NOTHALTED;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
	}

	return ERROR_OK;
}

/* int irqstepcount = 0; */
static int cortex_m_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(pc->value, 0, 32, address);

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
			/* Automatic ISR masking mode off: Just step over the next
			 * instruction, with interrupts on or off as appropriate. */
			cortex_m_set_maskints_for_step(target);
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		} else {
			/* Process interrupts during stepping in a way they don't interfere
			 * with debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_DEBUG("Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts if appropriate */
				cortex_m_write_debug_halt_mask(target, C_HALT, 0);
				cortex_m_set_maskints_for_halt(target);
			} else {

				/* Set a temporary break point */
				if (breakpoint) {
					retval = cortex_m_set_breakpoint(target, breakpoint);
				} else {
					enum breakpoint_type type = BKPT_HARD;
					if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
						/* FPB rev.1 cannot handle such addr, try BKPT instr */
						type = BKPT_SOFT;
					}
					retval = breakpoint_add(target, pc_value, 2, type);
				}

				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set) {
					cortex_m_set_maskints_for_step(target);
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts if appropriate */
					cortex_m_write_debug_halt_mask(target, C_HALT, 0);
					cortex_m_set_maskints_for_halt(target);
				} else {
					/* Start the core */
					LOG_DEBUG("Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_set_maskints_for_run(target);
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = mem_ap_read_atomic_u32(armv7m->debug_ap,
								DCB_DHCSR,
								&cortex_m->dcb_dhcsr);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_DEBUG("Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_set_maskints_for_step(target);
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts if appropriate */
						cortex_m_write_debug_halt_mask(target, C_HALT, 0);
						cortex_m_set_maskints_for_halt(target);
					}
				}
			}
		}
	}

	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &cortex_m->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
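
/* Reset strategy: when the adapter provides SRST it is asserted directly;
 * otherwise the reset is requested through the AIRCR register, either as a
 * full SYSRESETREQ or as a core-only VECTRESET where the core supports it.
 * The choice between the two software variants is made with the command
 * already referenced in the warnings below, typically:
 *
 *   cortex_m reset_config sysresetreq
 */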

static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode if it has been configured */

	bool srst_asserted = false;

	if (!target_was_examined(target)) {
		if (jtag_reset_config & RESET_HAS_SRST) {
			adapter_assert_reset();
			if (target->reset_halt)
				LOG_ERROR("Target not examined, will not halt after reset!");
			return ERROR_OK;
		} else {
			LOG_ERROR("Target not examined, reset NOT asserted!");
			return ERROR_FAIL;
		}
	}

	if ((jtag_reset_config & RESET_HAS_SRST) &&
		(jtag_reset_config & RESET_SRST_NO_GATING)) {
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* Enable debug requests */
	int retval;
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR, &cortex_m->dcb_dhcsr);
	/* Store important errors instead of failing and proceed to reset assert */

	if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
		retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	/* Ignore less important errors */

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		cortex_m_set_maskints_for_run(target);

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries. Should this include MMERR or
		 * other flags too?
		 */
		int retval2;
		retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK || retval2 != ERROR_OK)
			LOG_INFO("AP write error, reset will not halt");
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();

		/* srst is asserted, ignore AP access errors */
		retval = ERROR_OK;
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECTRESET as it is supported on all current cores
		 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		if (!cortex_m->vectreset_supported
				&& reset_config == CORTEX_M_RESET_VECTRESET) {
			reset_config = CORTEX_M_RESET_SYSRESETREQ;
			LOG_WARNING("VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
			LOG_WARNING("Set 'cortex_m reset_config sysresetreq'.");
		}
		LOG_DEBUG("Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_WARNING("Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		int retval3;
		retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval3 != ERROR_OK)
			LOG_DEBUG("Ignoring AP write error right after reset");

		retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval3 != ERROR_OK) {
			LOG_ERROR("DP initialisation failed");
			/* The error return value must not be propagated in this case.
			 * SYSRESETREQ or VECTRESET have been possibly triggered
			 * so reset processing should continue */
		} else {
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
		}
	}

	target->state = TARGET_RESET;
	jtag_sleep(50000);

	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* now return stored error code if any */
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}

static int cortex_m_deassert_reset(struct target *target)
{
	struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	/* deassert reset lines */
	adapter_deassert_reset();

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if ((jtag_reset_config & RESET_HAS_SRST) &&
		!(jtag_reset_config & RESET_SRST_NO_GATING) &&
		target_was_examined(target)) {

		int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval != ERROR_OK) {
			LOG_ERROR("DP initialisation failed");
			return retval;
		}
	}

	return ERROR_OK;
}
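
/* Hardware breakpoints use the Flash Patch and Breakpoint (FPB) comparators.
 * An FPB revision 1 comparator can only cover the code region below
 * 0x20000000 and must encode which halfword of the matched word gets the
 * BKPT substitution; revision 2 comparators take the full breakpoint address
 * with bit 0 as the enable. Both cases are handled in
 * cortex_m_set_breakpoint() below. */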

int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	unsigned int fp_num = 0;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		uint32_t fpcr_value;
		while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
			fp_num++;
		if (fp_num >= cortex_m->fp_num_code) {
			LOG_ERROR("Can not find free FPB Comparator!");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		breakpoint->set = fp_num + 1;
		fpcr_value = breakpoint->address | 1;
		if (cortex_m->fp_rev == 0) {
			if (breakpoint->address > 0x1FFFFFFF) {
				LOG_ERROR("Cortex-M Flash Patch Breakpoint rev.1 cannot handle HW breakpoint above address 0x1FFFFFFE");
				return ERROR_FAIL;
			}
			uint32_t hilo;
			hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
			fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
		} else if (cortex_m->fp_rev > 1) {
			LOG_ERROR("Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
			return ERROR_FAIL;
		}
		comparator_list[fp_num].used = true;
		comparator_list[fp_num].fpcr_value = fpcr_value;
		target_write_u32(target, comparator_list[fp_num].fpcr_address,
			comparator_list[fp_num].fpcr_value);
		LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
			fp_num,
			comparator_list[fp_num].fpcr_value);
		if (!cortex_m->fpb_enabled) {
			LOG_DEBUG("FPB wasn't enabled, do it now");
			retval = cortex_m_enable_fpb(target);
			if (retval != ERROR_OK) {
				LOG_ERROR("Failed to enable the FPB");
				return retval;
			}

			cortex_m->fpb_enabled = true;
		}
	} else if (breakpoint->type == BKPT_SOFT) {
		uint8_t code[4];

		/* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
		 * semihosting; don't use that. Otherwise the BKPT
		 * parameter is arbitrary.
		 */
		buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
		retval = target_read_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1,
				code);
		if (retval != ERROR_OK)
			return retval;
		breakpoint->set = true;
	}

	LOG_DEBUG("BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (set=%d)",
		breakpoint->unique_id,
		(int)(breakpoint->type),
		breakpoint->address,
		breakpoint->length,
		breakpoint->set);

	return ERROR_OK;
}

int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;

	if (breakpoint->set <= 0) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	LOG_DEBUG("BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (set=%d)",
		breakpoint->unique_id,
		(int)(breakpoint->type),
		breakpoint->address,
		breakpoint->length,
		breakpoint->set);

	if (breakpoint->type == BKPT_HARD) {
		unsigned int fp_num = breakpoint->set - 1;
		if (fp_num >= cortex_m->fp_num_code) {
			LOG_DEBUG("Invalid FP Comparator number in breakpoint");
			return ERROR_OK;
		}
		comparator_list[fp_num].used = false;
		comparator_list[fp_num].fpcr_value = 0;
		target_write_u32(target, comparator_list[fp_num].fpcr_address,
			comparator_list[fp_num].fpcr_value);
	} else {
		/* restore original instruction (kept in target endianness) */
		retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;
	}
	breakpoint->set = false;

	return ERROR_OK;
}

int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	if (breakpoint->length == 3) {
		LOG_DEBUG("Using a two byte breakpoint for 32bit Thumb-2 request");
		breakpoint->length = 2;
	}

	if ((breakpoint->length != 2)) {
		LOG_INFO("only breakpoints of two bytes length supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	return cortex_m_set_breakpoint(target, breakpoint);
}

int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	if (!breakpoint->set)
		return ERROR_OK;

	return cortex_m_unset_breakpoint(target, breakpoint);
}
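
/* Watchpoints are implemented with DWT comparators. The FUNCTION encoding
 * differs between generations: the ARMv6-M/ARMv7-M DWT uses the values 5, 6
 * and 7 to match reads, writes and any access, combined with a MASK register
 * that gives the match range, while the ARMv8-M DWT (identified via DEVARCH
 * below) uses MATCH values 4/5/6 plus an ACTION bit and a DATAVSIZE field
 * encoded directly into FUNCTION. */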

static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	unsigned int dwt_num = 0;
	struct cortex_m_common *cortex_m = target_to_cm(target);

	/* REVISIT Don't fully trust these "not used" records ... users
	 * may set up breakpoints by hand, e.g. dual-address data value
	 * watchpoint using comparator #1; comparator #0 matching cycle
	 * count; send data trace info through ITM and TPIU; etc
	 */
	struct cortex_m_dwt_comparator *comparator;

	for (comparator = cortex_m->dwt_comparator_list;
		comparator->used && dwt_num < cortex_m->dwt_num_comp;
		comparator++, dwt_num++)
		continue;
	if (dwt_num >= cortex_m->dwt_num_comp) {
		LOG_ERROR("Can not find free DWT Comparator");
		return ERROR_FAIL;
	}
	comparator->used = true;
	watchpoint->set = dwt_num + 1;

	comparator->comp = watchpoint->address;
	target_write_u32(target, comparator->dwt_comparator_address + 0,
		comparator->comp);

	if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
		uint32_t mask = 0, temp;

		/* watchpoint params were validated earlier */
		temp = watchpoint->length;
		while (temp) {
			temp >>= 1;
			mask++;
		}
		mask--;

		comparator->mask = mask;
		target_write_u32(target, comparator->dwt_comparator_address + 4,
			comparator->mask);

		switch (watchpoint->rw) {
		case WPT_READ:
			comparator->function = 5;
			break;
		case WPT_WRITE:
			comparator->function = 6;
			break;
		case WPT_ACCESS:
			comparator->function = 7;
			break;
		}
	} else {
		uint32_t data_size = watchpoint->length >> 1;
		comparator->mask = (watchpoint->length >> 1) | 1;

		switch (watchpoint->rw) {
		case WPT_ACCESS:
			comparator->function = 4;
			break;
		case WPT_WRITE:
			comparator->function = 5;
			break;
		case WPT_READ:
			comparator->function = 6;
			break;
		}
		comparator->function = comparator->function | (1 << 4) |
				(data_size << 10);
	}

	target_write_u32(target, comparator->dwt_comparator_address + 8,
		comparator->function);

	LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
		watchpoint->unique_id, dwt_num,
		(unsigned) comparator->comp,
		(unsigned) comparator->mask,
		(unsigned) comparator->function);
	return ERROR_OK;
}

static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct cortex_m_dwt_comparator *comparator;

	if (watchpoint->set <= 0) {
		LOG_WARNING("watchpoint (wpid: %d) not set",
			watchpoint->unique_id);
		return ERROR_OK;
	}

	unsigned int dwt_num = watchpoint->set - 1;

	LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
		watchpoint->unique_id, dwt_num,
		(unsigned) watchpoint->address);

	if (dwt_num >= cortex_m->dwt_num_comp) {
		LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
		return ERROR_OK;
	}

	comparator = cortex_m->dwt_comparator_list + dwt_num;
	comparator->used = false;
	comparator->function = 0;
	target_write_u32(target, comparator->dwt_comparator_address + 8,
		comparator->function);

	watchpoint->set = false;

	return ERROR_OK;
}

int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);

	if (cortex_m->dwt_comp_available < 1) {
		LOG_DEBUG("no comparators?");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* hardware doesn't support data value masking */
	if (watchpoint->mask != ~(uint32_t)0) {
		LOG_DEBUG("watchpoint value masks not supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* hardware allows address masks of up to 32K */
	unsigned mask;

	for (mask = 0; mask < 16; mask++) {
		if ((1u << mask) == watchpoint->length)
			break;
	}
	if (mask == 16) {
		LOG_DEBUG("unsupported watchpoint length");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}
	if (watchpoint->address & ((1 << mask) - 1)) {
		LOG_DEBUG("watchpoint address is unaligned");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* Caller doesn't seem to be able to describe watching for data
	 * values of zero; that flags "no value".
	 *
	 * REVISIT This DWT may well be able to watch for specific data
	 * values. Requires comparator #1 to set DATAVMATCH and match
	 * the data, and another comparator (DATAVADDR0) matching addr.
	 */
	if (watchpoint->value) {
		LOG_DEBUG("data value watchpoint not YET supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	cortex_m->dwt_comp_available--;
	LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);

	return ERROR_OK;
}

int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);

	/* REVISIT why check? DWT can be updated with core running ... */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (watchpoint->set)
		cortex_m_unset_watchpoint(target, watchpoint);

	cortex_m->dwt_comp_available++;
	LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);

	return ERROR_OK;
}

int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
{
	if (target->debug_reason != DBG_REASON_WATCHPOINT)
		return ERROR_FAIL;

	struct cortex_m_common *cortex_m = target_to_cm(target);

	for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
		if (!wp->set)
			continue;

		unsigned int dwt_num = wp->set - 1;
		struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;

		uint32_t dwt_function;
		int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
		if (retval != ERROR_OK)
			return ERROR_FAIL;

		/* check the MATCHED bit */
		if (dwt_function & BIT(24)) {
			*hit_watchpoint = wp;
			return ERROR_OK;
		}
	}

	return ERROR_FAIL;
}

void cortex_m_enable_watchpoints(struct target *target)
{
	struct watchpoint *watchpoint = target->watchpoints;

	/* set any pending watchpoints */
	while (watchpoint) {
		if (!watchpoint->set)
			cortex_m_set_watchpoint(target, watchpoint);
		watchpoint = watchpoint->next;
	}
}

static int cortex_m_read_memory(struct target *target, target_addr_t address,
	uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);

	if (armv7m->arm.arch == ARM_ARCH_V6M) {
		/* armv6m does not handle unaligned memory access */
		if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
			return ERROR_TARGET_UNALIGNED_ACCESS;
	}

	return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
}

static int cortex_m_write_memory(struct target *target, target_addr_t address,
	uint32_t size, uint32_t count, const uint8_t *buffer)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);

	if (armv7m->arm.arch == ARM_ARCH_V6M) {
		/* armv6m does not handle unaligned memory access */
		if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
			return ERROR_TARGET_UNALIGNED_ACCESS;
	}

	return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
}

static int cortex_m_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	armv7m_build_reg_cache(target);
	arm_semihosting_init(target);
	return ERROR_OK;
}

void cortex_m_deinit_target(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);

	free(cortex_m->fp_comparator_list);

	cortex_m_dwt_free(target);
	armv7m_free_reg_cache(target);

	free(target->private_config);
	free(cortex_m);
}

int cortex_m_profiling(struct target *target, uint32_t *samples,
	uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
  1486. {
  1487. struct timeval timeout, now;
  1488. struct armv7m_common *armv7m = target_to_armv7m(target);
  1489. uint32_t reg_value;
  1490. int retval;
  1491. retval = target_read_u32(target, DWT_PCSR, &reg_value);
  1492. if (retval != ERROR_OK) {
  1493. LOG_ERROR("Error while reading PCSR");
  1494. return retval;
  1495. }
  1496. if (reg_value == 0) {
  1497. LOG_INFO("PCSR sampling not supported on this processor.");
  1498. return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
  1499. }
  1500. gettimeofday(&timeout, NULL);
  1501. timeval_add_time(&timeout, seconds, 0);
  1502. LOG_INFO("Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
  1503. /* Make sure the target is running */
  1504. target_poll(target);
  1505. if (target->state == TARGET_HALTED)
  1506. retval = target_resume(target, 1, 0, 0, 0);
  1507. if (retval != ERROR_OK) {
  1508. LOG_ERROR("Error while resuming target");
  1509. return retval;
  1510. }
  1511. uint32_t sample_count = 0;
  1512. for (;;) {
  1513. if (armv7m && armv7m->debug_ap) {
  1514. uint32_t read_count = max_num_samples - sample_count;
  1515. if (read_count > 1024)
  1516. read_count = 1024;
  1517. retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
  1518. (void *)&samples[sample_count],
  1519. 4, read_count, DWT_PCSR);
  1520. sample_count += read_count;
  1521. } else {
1522. retval = target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
  1523. }
  1524. if (retval != ERROR_OK) {
  1525. LOG_ERROR("Error while reading PCSR");
  1526. return retval;
  1527. }
  1528. gettimeofday(&now, NULL);
  1529. if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
  1530. LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
  1531. break;
  1532. }
  1533. }
  1534. *num_samples = sample_count;
  1535. return retval;
  1536. }
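/* The buffer filled above holds raw PC values sampled from DWT_PCSR.  A
 * minimal post-processing sketch (bucket_size, image_base, NUM_BUCKETS and
 * hist[] are illustrative names, not part of this driver; OpenOCD's
 * 'profile' command does its own conversion to gmon format elsewhere):
 *
 *   unsigned hist[NUM_BUCKETS] = { 0 };
 *   for (uint32_t i = 0; i < *num_samples; i++) {
 *       uint32_t bucket = (samples[i] - image_base) / bucket_size;
 *       if (bucket < NUM_BUCKETS)
 *           hist[bucket]++;
 *   }
 *
 * Hot spots then show up as the buckets with the highest counts.
 */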
  1537. /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
  1538. * on r/w if the core is not running, and clear on resume or reset ... or
  1539. * at least, in a post_restore_context() method.
  1540. */
  1541. struct dwt_reg_state {
  1542. struct target *target;
  1543. uint32_t addr;
  1544. uint8_t value[4]; /* scratch/cache */
  1545. };
  1546. static int cortex_m_dwt_get_reg(struct reg *reg)
  1547. {
  1548. struct dwt_reg_state *state = reg->arch_info;
  1549. uint32_t tmp;
  1550. int retval = target_read_u32(state->target, state->addr, &tmp);
  1551. if (retval != ERROR_OK)
  1552. return retval;
  1553. buf_set_u32(state->value, 0, 32, tmp);
  1554. return ERROR_OK;
  1555. }
  1556. static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
  1557. {
  1558. struct dwt_reg_state *state = reg->arch_info;
  1559. return target_write_u32(state->target, state->addr,
  1560. buf_get_u32(buf, 0, reg->size));
  1561. }
  1562. struct dwt_reg {
  1563. uint32_t addr;
  1564. const char *name;
  1565. unsigned size;
  1566. };
  1567. static const struct dwt_reg dwt_base_regs[] = {
  1568. { DWT_CTRL, "dwt_ctrl", 32, },
  1569. /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
  1570. * increments while the core is asleep.
  1571. */
  1572. { DWT_CYCCNT, "dwt_cyccnt", 32, },
  1573. /* plus some 8 bit counters, useful for profiling with TPIU */
  1574. };
  1575. static const struct dwt_reg dwt_comp[] = {
  1576. #define DWT_COMPARATOR(i) \
  1577. { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
  1578. { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
  1579. { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
  1580. DWT_COMPARATOR(0),
  1581. DWT_COMPARATOR(1),
  1582. DWT_COMPARATOR(2),
  1583. DWT_COMPARATOR(3),
  1584. DWT_COMPARATOR(4),
  1585. DWT_COMPARATOR(5),
  1586. DWT_COMPARATOR(6),
  1587. DWT_COMPARATOR(7),
  1588. DWT_COMPARATOR(8),
  1589. DWT_COMPARATOR(9),
  1590. DWT_COMPARATOR(10),
  1591. DWT_COMPARATOR(11),
  1592. DWT_COMPARATOR(12),
  1593. DWT_COMPARATOR(13),
  1594. DWT_COMPARATOR(14),
  1595. DWT_COMPARATOR(15),
  1596. #undef DWT_COMPARATOR
  1597. };
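/* Each DWT comparator bank occupies 16 bytes, so DWT_COMPARATOR(i) above
 * describes three registers at COMP0/MASK0/FUNCTION0 + 0x10 * i.  Within a
 * bank the layout is COMP at +0x0, MASK at +0x4 and FUNCTION at +0x8, which
 * is why the watchpoint code writes FUNCTION at dwt_comparator_address + 8.
 */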
  1598. static const struct reg_arch_type dwt_reg_type = {
  1599. .get = cortex_m_dwt_get_reg,
  1600. .set = cortex_m_dwt_set_reg,
  1601. };
  1602. static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
  1603. {
  1604. struct dwt_reg_state *state;
  1605. state = calloc(1, sizeof(*state));
  1606. if (!state)
  1607. return;
  1608. state->addr = d->addr;
  1609. state->target = t;
  1610. r->name = d->name;
  1611. r->size = d->size;
  1612. r->value = state->value;
  1613. r->arch_info = state;
  1614. r->type = &dwt_reg_type;
  1615. }
  1616. static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
  1617. {
  1618. uint32_t dwtcr;
  1619. struct reg_cache *cache;
  1620. struct cortex_m_dwt_comparator *comparator;
  1621. int reg;
  1622. target_read_u32(target, DWT_CTRL, &dwtcr);
  1623. LOG_DEBUG("DWT_CTRL: 0x%" PRIx32, dwtcr);
  1624. if (!dwtcr) {
  1625. LOG_DEBUG("no DWT");
  1626. return;
  1627. }
  1628. target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
  1629. LOG_DEBUG("DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);
  1630. cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
  1631. cm->dwt_comp_available = cm->dwt_num_comp;
  1632. cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
  1633. sizeof(struct cortex_m_dwt_comparator));
  1634. if (!cm->dwt_comparator_list) {
  1635. fail0:
  1636. cm->dwt_num_comp = 0;
  1637. LOG_ERROR("out of mem");
  1638. return;
  1639. }
  1640. cache = calloc(1, sizeof(*cache));
  1641. if (!cache) {
  1642. fail1:
  1643. free(cm->dwt_comparator_list);
  1644. goto fail0;
  1645. }
  1646. cache->name = "Cortex-M DWT registers";
  1647. cache->num_regs = 2 + cm->dwt_num_comp * 3;
  1648. cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
  1649. if (!cache->reg_list) {
  1650. free(cache);
  1651. goto fail1;
  1652. }
  1653. for (reg = 0; reg < 2; reg++)
  1654. cortex_m_dwt_addreg(target, cache->reg_list + reg,
  1655. dwt_base_regs + reg);
  1656. comparator = cm->dwt_comparator_list;
  1657. for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
  1658. int j;
  1659. comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
  1660. for (j = 0; j < 3; j++, reg++)
  1661. cortex_m_dwt_addreg(target, cache->reg_list + reg,
  1662. dwt_comp + 3 * i + j);
  1663. /* make sure we clear any watchpoints enabled on the target */
  1664. target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
  1665. }
  1666. *register_get_last_cache_p(&target->reg_cache) = cache;
  1667. cm->dwt_cache = cache;
  1668. LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
  1669. dwtcr, cm->dwt_num_comp,
  1670. (dwtcr & (0xf << 24)) ? " only" : "/trigger");
  1671. /* REVISIT: if num_comp > 1, check whether comparator #1 can
  1672. * implement single-address data value watchpoints ... so we
  1673. * won't need to check it later, when asked to set one up.
  1674. */
  1675. }
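/* DWT_CTRL fields consulted above (standard ARMv7-M/ARMv8-M layout):
 *
 *   NUMCOMP   [31:28]  number of comparators (0 means no usable DWT)
 *   NOTRCPKT  [27]     trace sampling and exception tracing not supported
 *   NOEXTTRIG [26]     external match signals not supported
 *   NOCYCCNT  [25]     cycle counter not supported
 *   NOPRFCNT  [24]     profiling counters not supported
 *
 * So (dwtcr >> 28) & 0xF yields the comparator count, and a non-zero
 * (dwtcr & (0xf << 24)) means some trigger/trace features are absent,
 * which the log message above reports as "watch only".
 */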
  1676. static void cortex_m_dwt_free(struct target *target)
  1677. {
  1678. struct cortex_m_common *cm = target_to_cm(target);
  1679. struct reg_cache *cache = cm->dwt_cache;
  1680. free(cm->dwt_comparator_list);
  1681. cm->dwt_comparator_list = NULL;
  1682. cm->dwt_num_comp = 0;
  1683. if (cache) {
  1684. register_unlink_cache(&target->reg_cache, cache);
  1685. if (cache->reg_list) {
  1686. for (size_t i = 0; i < cache->num_regs; i++)
  1687. free(cache->reg_list[i].arch_info);
  1688. free(cache->reg_list);
  1689. }
  1690. free(cache);
  1691. }
  1692. cm->dwt_cache = NULL;
  1693. }
  1694. #define MVFR0 0xe000ef40
  1695. #define MVFR1 0xe000ef44
  1696. #define MVFR0_DEFAULT_M4 0x10110021
  1697. #define MVFR1_DEFAULT_M4 0x11000011
  1698. #define MVFR0_DEFAULT_M7_SP 0x10110021
  1699. #define MVFR0_DEFAULT_M7_DP 0x10110221
  1700. #define MVFR1_DEFAULT_M7_SP 0x11000011
  1701. #define MVFR1_DEFAULT_M7_DP 0x12000011
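/* MVFR0/MVFR1 are the Media and VFP Feature Registers.  The values above
 * are the expected contents for the FPv4-SP FPU of the Cortex-M4 and for
 * the single- and double-precision FPv5 variants of the Cortex-M7; they
 * are compared verbatim in cortex_m_examine() below to decide which
 * fp_feature (if any) the target provides. */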
  1702. static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
  1703. struct adiv5_ap **debug_ap)
  1704. {
  1705. if (dap_find_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
  1706. return ERROR_OK;
  1707. return dap_find_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
  1708. }
  1709. int cortex_m_examine(struct target *target)
  1710. {
  1711. int retval;
  1712. uint32_t cpuid, fpcr, mvfr0, mvfr1;
  1713. struct cortex_m_common *cortex_m = target_to_cm(target);
  1714. struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
  1715. struct armv7m_common *armv7m = target_to_armv7m(target);
  1716. /* hla_target shares the examine handler but does not support
  1717. * all its calls */
  1718. if (!armv7m->is_hla_target) {
  1719. if (cortex_m->apsel == DP_APSEL_INVALID) {
  1720. /* Search for the MEM-AP */
  1721. retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
  1722. if (retval != ERROR_OK) {
  1723. LOG_ERROR("Could not find MEM-AP to control the core");
  1724. return retval;
  1725. }
  1726. } else {
  1727. armv7m->debug_ap = dap_ap(swjdp, cortex_m->apsel);
  1728. }
  1729. /* Leave (only) generic DAP stuff for debugport_init(); */
  1730. armv7m->debug_ap->memaccess_tck = 8;
  1731. retval = mem_ap_init(armv7m->debug_ap);
  1732. if (retval != ERROR_OK)
  1733. return retval;
  1734. }
  1735. if (!target_was_examined(target)) {
  1736. target_set_examined(target);
  1737. /* Read from Device Identification Registers */
  1738. retval = target_read_u32(target, CPUID, &cpuid);
  1739. if (retval != ERROR_OK)
  1740. return retval;
  1741. /* Get ARCH and CPU types */
  1742. const enum cortex_m_partno core_partno = (cpuid & ARM_CPUID_PARTNO_MASK) >> ARM_CPUID_PARTNO_POS;
  1743. for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
  1744. if (core_partno == cortex_m_parts[n].partno) {
  1745. cortex_m->core_info = &cortex_m_parts[n];
  1746. break;
  1747. }
  1748. }
  1749. if (!cortex_m->core_info) {
  1750. LOG_ERROR("Cortex-M PARTNO 0x%x is unrecognized", core_partno);
  1751. return ERROR_FAIL;
  1752. }
  1753. armv7m->arm.arch = cortex_m->core_info->arch;
  1754. LOG_DEBUG("%s r%" PRId8 "p%" PRId8 " processor detected",
  1755. cortex_m->core_info->name, (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
  1756. cortex_m->maskints_erratum = false;
  1757. if (core_partno == CORTEX_M7_PARTNO) {
  1758. uint8_t rev, patch;
  1759. rev = (cpuid >> 20) & 0xf;
  1760. patch = (cpuid >> 0) & 0xf;
  1761. if ((rev == 0) && (patch < 2)) {
  1762. LOG_WARNING("Silicon bug: single stepping may enter pending exception handler!");
  1763. cortex_m->maskints_erratum = true;
  1764. }
  1765. }
  1766. LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);
  1767. if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
  1768. target_read_u32(target, MVFR0, &mvfr0);
  1769. target_read_u32(target, MVFR1, &mvfr1);
  1770. /* test for floating point feature on Cortex-M4 */
  1771. if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
  1772. LOG_DEBUG("%s floating point feature FPv4_SP found", cortex_m->core_info->name);
  1773. armv7m->fp_feature = FPV4_SP;
  1774. }
  1775. } else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
  1776. target_read_u32(target, MVFR0, &mvfr0);
  1777. target_read_u32(target, MVFR1, &mvfr1);
  1778. /* test for floating point features on Cortex-M7 */
  1779. if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
  1780. LOG_DEBUG("%s floating point feature FPv5_SP found", cortex_m->core_info->name);
  1781. armv7m->fp_feature = FPV5_SP;
  1782. } else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
  1783. LOG_DEBUG("%s floating point feature FPv5_DP found", cortex_m->core_info->name);
  1784. armv7m->fp_feature = FPV5_DP;
  1785. }
  1786. }
  1787. /* VECTRESET is supported only on ARMv7-M cores */
  1788. cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;
  1789. /* Check for FPU, otherwise mark FPU register as non-existent */
  1790. if (armv7m->fp_feature == FP_NONE)
  1791. for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
  1792. armv7m->arm.core_cache->reg_list[idx].exist = false;
  1793. if (armv7m->arm.arch != ARM_ARCH_V8M)
  1794. for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
  1795. armv7m->arm.core_cache->reg_list[idx].exist = false;
  1796. if (!armv7m->is_hla_target) {
  1797. if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
1798. /* Cortex-M3/M4 have a 4096 byte TAR autoincrement range,
1799. * see ARM IHI 0031C: MEM-AP 7.2.2 */
  1800. armv7m->debug_ap->tar_autoincr_block = (1 << 12);
  1801. }
  1802. /* Enable debug requests */
  1803. retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
  1804. if (retval != ERROR_OK)
  1805. return retval;
  1806. if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
  1807. uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);
  1808. retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
  1809. if (retval != ERROR_OK)
  1810. return retval;
  1811. cortex_m->dcb_dhcsr = dhcsr;
  1812. }
  1813. /* Configure trace modules */
  1814. retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
  1815. if (retval != ERROR_OK)
  1816. return retval;
  1817. if (armv7m->trace_config.itm_deferred_config)
  1818. armv7m_trace_itm_config(target);
  1819. /* NOTE: FPB and DWT are both optional. */
  1820. /* Setup FPB */
  1821. target_read_u32(target, FP_CTRL, &fpcr);
1822. /* NUM_CODE is split across FP_CTRL bits [14:12] (upper 3 bits) and [7:4] (lower 4 bits) */
  1823. cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
  1824. cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
1825. /* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
1826. Revision is zero-based: fp_rev == 1 means FPB revision 2. */
  1827. cortex_m->fp_rev = (fpcr >> 28) & 0xf;
  1828. free(cortex_m->fp_comparator_list);
  1829. cortex_m->fp_comparator_list = calloc(
  1830. cortex_m->fp_num_code + cortex_m->fp_num_lit,
  1831. sizeof(struct cortex_m_fp_comparator));
  1832. cortex_m->fpb_enabled = fpcr & 1;
  1833. for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
  1834. cortex_m->fp_comparator_list[i].type =
  1835. (i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
  1836. cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
  1837. /* make sure we clear any breakpoints enabled on the target */
  1838. target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
  1839. }
  1840. LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
  1841. fpcr,
  1842. cortex_m->fp_num_code,
  1843. cortex_m->fp_num_lit);
  1844. /* Setup DWT */
  1845. cortex_m_dwt_free(target);
  1846. cortex_m_dwt_setup(cortex_m, target);
  1847. /* These hardware breakpoints only work for code in flash! */
  1848. LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
  1849. target_name(target),
  1850. cortex_m->fp_num_code,
  1851. cortex_m->dwt_num_comp);
  1852. }
  1853. return ERROR_OK;
  1854. }
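/* For reference, the CPUID fields decoded in cortex_m_examine() above use
 * the standard ARM layout (a minimal sketch of the relevant bits):
 *
 *   IMPLEMENTER [31:24]  0x41 for ARM
 *   VARIANT     [23:20]  major revision, the "rN" part
 *   PARTNO      [15:4]   matched against cortex_m_parts[]
 *   REVISION    [3:0]    patch level, the "pN" part
 *
 * e.g. CPUID = 0x410FC241 decodes as an ARM Cortex-M4 r0p1.
 */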
  1855. static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
  1856. {
  1857. struct armv7m_common *armv7m = target_to_armv7m(target);
  1858. uint16_t dcrdr;
  1859. uint8_t buf[2];
  1860. int retval;
  1861. retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
  1862. if (retval != ERROR_OK)
  1863. return retval;
  1864. dcrdr = target_buffer_get_u16(target, buf);
  1865. *ctrl = (uint8_t)dcrdr;
  1866. *value = (uint8_t)(dcrdr >> 8);
  1867. LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1868. /* write an ack back to the software DCC register
1869. * to signify that we have read the data */
  1870. if (dcrdr & (1 << 0)) {
  1871. target_buffer_set_u16(target, buf, 0);
  1872. retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
  1873. if (retval != ERROR_OK)
  1874. return retval;
  1875. }
  1876. return ERROR_OK;
  1877. }
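/* Host-side view of the DCC mailbox used above: the low halfword of
 * DCB_DCRDR carries a control byte (bit 0 set means "data ready") in its
 * low byte and the data byte in its high byte; writing zero back releases
 * the mailbox for the target firmware.  A sketch of draining one 32-bit
 * word with this helper (error handling omitted):
 *
 *   uint8_t byte, ctrl;
 *   uint32_t word = 0;
 *   for (int i = 0; i < 4; i++) {
 *       cortex_m_dcc_read(target, &byte, &ctrl);
 *       word |= (uint32_t)byte << (8 * i);
 *   }
 *
 * which mirrors what cortex_m_handle_target_request() below does once it
 * sees the ready flag.
 */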
  1878. static int cortex_m_target_request_data(struct target *target,
  1879. uint32_t size, uint8_t *buffer)
  1880. {
  1881. uint8_t data;
  1882. uint8_t ctrl;
  1883. uint32_t i;
  1884. for (i = 0; i < (size * 4); i++) {
  1885. int retval = cortex_m_dcc_read(target, &data, &ctrl);
  1886. if (retval != ERROR_OK)
  1887. return retval;
  1888. buffer[i] = data;
  1889. }
  1890. return ERROR_OK;
  1891. }
  1892. static int cortex_m_handle_target_request(void *priv)
  1893. {
  1894. struct target *target = priv;
  1895. if (!target_was_examined(target))
  1896. return ERROR_OK;
  1897. if (!target->dbg_msg_enabled)
  1898. return ERROR_OK;
  1899. if (target->state == TARGET_RUNNING) {
  1900. uint8_t data;
  1901. uint8_t ctrl;
  1902. int retval;
  1903. retval = cortex_m_dcc_read(target, &data, &ctrl);
  1904. if (retval != ERROR_OK)
  1905. return retval;
  1906. /* check if we have data */
  1907. if (ctrl & (1 << 0)) {
  1908. uint32_t request;
1909. /* we assume the target is quick enough to supply the remaining bytes */
  1910. request = data;
  1911. for (int i = 1; i <= 3; i++) {
  1912. retval = cortex_m_dcc_read(target, &data, &ctrl);
  1913. if (retval != ERROR_OK)
  1914. return retval;
  1915. request |= ((uint32_t)data << (i * 8));
  1916. }
  1917. target_request(target, request);
  1918. }
  1919. }
  1920. return ERROR_OK;
  1921. }
  1922. static int cortex_m_init_arch_info(struct target *target,
  1923. struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
  1924. {
  1925. struct armv7m_common *armv7m = &cortex_m->armv7m;
  1926. armv7m_init_arch_info(target, armv7m);
1927. /* default reset mode is to use srst if fitted;
1928. * if not, it will use CORTEX_M_RESET_VECTRESET */
  1929. cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
  1930. armv7m->arm.dap = dap;
  1931. /* register arch-specific functions */
  1932. armv7m->examine_debug_reason = cortex_m_examine_debug_reason;
  1933. armv7m->post_debug_entry = NULL;
  1934. armv7m->pre_restore_context = NULL;
  1935. armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
  1936. armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;
  1937. target_register_timer_callback(cortex_m_handle_target_request, 1,
  1938. TARGET_TIMER_TYPE_PERIODIC, target);
  1939. return ERROR_OK;
  1940. }
  1941. static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
  1942. {
  1943. struct adiv5_private_config *pc;
  1944. pc = (struct adiv5_private_config *)target->private_config;
  1945. if (adiv5_verify_config(pc) != ERROR_OK)
  1946. return ERROR_FAIL;
  1947. struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
  1948. if (!cortex_m) {
  1949. LOG_ERROR("No memory creating target");
  1950. return ERROR_FAIL;
  1951. }
  1952. cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
  1953. cortex_m->apsel = pc->ap_num;
  1954. cortex_m_init_arch_info(target, cortex_m, pc->dap);
  1955. return ERROR_OK;
  1956. }
  1957. /*--------------------------------------------------------------------------*/
  1958. static int cortex_m_verify_pointer(struct command_invocation *cmd,
  1959. struct cortex_m_common *cm)
  1960. {
  1961. if (cm->common_magic != CORTEX_M_COMMON_MAGIC) {
  1962. command_print(cmd, "target is not a Cortex-M");
  1963. return ERROR_TARGET_INVALID;
  1964. }
  1965. return ERROR_OK;
  1966. }
1967. /*
1968. * Only the code below this line should need to verify that its target
1969. * is a Cortex-M. Everything else should have indirected through the
1970. * cortexm_target structure, which is only used with Cortex-M targets.
1971. */
  1972. COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
  1973. {
  1974. struct target *target = get_current_target(CMD_CTX);
  1975. struct cortex_m_common *cortex_m = target_to_cm(target);
  1976. struct armv7m_common *armv7m = &cortex_m->armv7m;
  1977. uint32_t demcr = 0;
  1978. int retval;
  1979. static const struct {
  1980. char name[10];
  1981. unsigned mask;
  1982. } vec_ids[] = {
  1983. { "hard_err", VC_HARDERR, },
  1984. { "int_err", VC_INTERR, },
  1985. { "bus_err", VC_BUSERR, },
  1986. { "state_err", VC_STATERR, },
  1987. { "chk_err", VC_CHKERR, },
  1988. { "nocp_err", VC_NOCPERR, },
  1989. { "mm_err", VC_MMERR, },
  1990. { "reset", VC_CORERESET, },
  1991. };
  1992. retval = cortex_m_verify_pointer(CMD, cortex_m);
  1993. if (retval != ERROR_OK)
  1994. return retval;
  1995. if (!target_was_examined(target)) {
  1996. LOG_ERROR("Target not examined yet");
  1997. return ERROR_FAIL;
  1998. }
  1999. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
  2000. if (retval != ERROR_OK)
  2001. return retval;
  2002. if (CMD_ARGC > 0) {
  2003. unsigned catch = 0;
  2004. if (CMD_ARGC == 1) {
  2005. if (strcmp(CMD_ARGV[0], "all") == 0) {
  2006. catch = VC_HARDERR | VC_INTERR | VC_BUSERR
  2007. | VC_STATERR | VC_CHKERR | VC_NOCPERR
  2008. | VC_MMERR | VC_CORERESET;
  2009. goto write;
  2010. } else if (strcmp(CMD_ARGV[0], "none") == 0)
  2011. goto write;
  2012. }
  2013. while (CMD_ARGC-- > 0) {
  2014. unsigned i;
  2015. for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
  2016. if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
  2017. continue;
  2018. catch |= vec_ids[i].mask;
  2019. break;
  2020. }
  2021. if (i == ARRAY_SIZE(vec_ids)) {
  2022. LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
  2023. return ERROR_COMMAND_SYNTAX_ERROR;
  2024. }
  2025. }
  2026. write:
  2027. /* For now, armv7m->demcr only stores vector catch flags. */
  2028. armv7m->demcr = catch;
  2029. demcr &= ~0xffff;
  2030. demcr |= catch;
  2031. /* write, but don't assume it stuck (why not??) */
  2032. retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
  2033. if (retval != ERROR_OK)
  2034. return retval;
  2035. retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
  2036. if (retval != ERROR_OK)
  2037. return retval;
  2038. /* FIXME be sure to clear DEMCR on clean server shutdown.
  2039. * Otherwise the vector catch hardware could fire when there's
  2040. * no debugger hooked up, causing much confusion...
  2041. */
  2042. }
  2043. for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
  2044. command_print(CMD, "%9s: %s", vec_ids[i].name,
  2045. (demcr & vec_ids[i].mask) ? "catch" : "ignore");
  2046. }
  2047. return ERROR_OK;
  2048. }
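/* Typical invocations of the handler above from the OpenOCD command line:
 *
 *   cortex_m vector_catch all
 *   cortex_m vector_catch hard_err bus_err reset
 *   cortex_m vector_catch none
 *
 * With no arguments the command only prints the current catch/ignore state
 * of each vector.
 */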
  2049. COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
  2050. {
  2051. struct target *target = get_current_target(CMD_CTX);
  2052. struct cortex_m_common *cortex_m = target_to_cm(target);
  2053. int retval;
  2054. static const struct jim_nvp nvp_maskisr_modes[] = {
  2055. { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
  2056. { .name = "off", .value = CORTEX_M_ISRMASK_OFF },
  2057. { .name = "on", .value = CORTEX_M_ISRMASK_ON },
  2058. { .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
  2059. { .name = NULL, .value = -1 },
  2060. };
  2061. const struct jim_nvp *n;
  2062. retval = cortex_m_verify_pointer(CMD, cortex_m);
  2063. if (retval != ERROR_OK)
  2064. return retval;
  2065. if (target->state != TARGET_HALTED) {
  2066. command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
  2067. return ERROR_OK;
  2068. }
  2069. if (CMD_ARGC > 0) {
  2070. n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
  2071. if (!n->name)
  2072. return ERROR_COMMAND_SYNTAX_ERROR;
  2073. cortex_m->isrmasking_mode = n->value;
  2074. cortex_m_set_maskints_for_halt(target);
  2075. }
  2076. n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_m->isrmasking_mode);
  2077. command_print(CMD, "cortex_m interrupt mask %s", n->name);
  2078. return ERROR_OK;
  2079. }
  2080. COMMAND_HANDLER(handle_cortex_m_reset_config_command)
  2081. {
  2082. struct target *target = get_current_target(CMD_CTX);
  2083. struct cortex_m_common *cortex_m = target_to_cm(target);
  2084. int retval;
  2085. char *reset_config;
  2086. retval = cortex_m_verify_pointer(CMD, cortex_m);
  2087. if (retval != ERROR_OK)
  2088. return retval;
  2089. if (CMD_ARGC > 0) {
  2090. if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
  2091. cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
  2092. else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
  2093. if (target_was_examined(target)
  2094. && !cortex_m->vectreset_supported)
  2095. LOG_WARNING("VECTRESET is not supported on your Cortex-M core!");
  2096. else
  2097. cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
  2098. } else
  2099. return ERROR_COMMAND_SYNTAX_ERROR;
  2100. }
  2101. switch (cortex_m->soft_reset_config) {
  2102. case CORTEX_M_RESET_SYSRESETREQ:
  2103. reset_config = "sysresetreq";
  2104. break;
  2105. case CORTEX_M_RESET_VECTRESET:
  2106. reset_config = "vectreset";
  2107. break;
  2108. default:
  2109. reset_config = "unknown";
  2110. break;
  2111. }
  2112. command_print(CMD, "cortex_m reset_config %s", reset_config);
  2113. return ERROR_OK;
  2114. }
  2115. static const struct command_registration cortex_m_exec_command_handlers[] = {
  2116. {
  2117. .name = "maskisr",
  2118. .handler = handle_cortex_m_mask_interrupts_command,
  2119. .mode = COMMAND_EXEC,
  2120. .help = "mask cortex_m interrupts",
  2121. .usage = "['auto'|'on'|'off'|'steponly']",
  2122. },
  2123. {
  2124. .name = "vector_catch",
  2125. .handler = handle_cortex_m_vector_catch_command,
  2126. .mode = COMMAND_EXEC,
  2127. .help = "configure hardware vectors to trigger debug entry",
  2128. .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
  2129. },
  2130. {
  2131. .name = "reset_config",
  2132. .handler = handle_cortex_m_reset_config_command,
  2133. .mode = COMMAND_ANY,
  2134. .help = "configure software reset handling",
  2135. .usage = "['sysresetreq'|'vectreset']",
  2136. },
  2137. COMMAND_REGISTRATION_DONE
  2138. };
  2139. static const struct command_registration cortex_m_command_handlers[] = {
  2140. {
  2141. .chain = armv7m_command_handlers,
  2142. },
  2143. {
  2144. .chain = armv7m_trace_command_handlers,
  2145. },
  2146. /* START_DEPRECATED_TPIU */
  2147. {
  2148. .chain = arm_tpiu_deprecated_command_handlers,
  2149. },
  2150. /* END_DEPRECATED_TPIU */
  2151. {
  2152. .name = "cortex_m",
  2153. .mode = COMMAND_EXEC,
  2154. .help = "Cortex-M command group",
  2155. .usage = "",
  2156. .chain = cortex_m_exec_command_handlers,
  2157. },
  2158. {
  2159. .chain = rtt_target_command_handlers,
  2160. },
  2161. COMMAND_REGISTRATION_DONE
  2162. };
  2163. struct target_type cortexm_target = {
  2164. .name = "cortex_m",
  2165. .poll = cortex_m_poll,
  2166. .arch_state = armv7m_arch_state,
  2167. .target_request_data = cortex_m_target_request_data,
  2168. .halt = cortex_m_halt,
  2169. .resume = cortex_m_resume,
  2170. .step = cortex_m_step,
  2171. .assert_reset = cortex_m_assert_reset,
  2172. .deassert_reset = cortex_m_deassert_reset,
  2173. .soft_reset_halt = cortex_m_soft_reset_halt,
  2174. .get_gdb_arch = arm_get_gdb_arch,
  2175. .get_gdb_reg_list = armv7m_get_gdb_reg_list,
  2176. .read_memory = cortex_m_read_memory,
  2177. .write_memory = cortex_m_write_memory,
  2178. .checksum_memory = armv7m_checksum_memory,
  2179. .blank_check_memory = armv7m_blank_check_memory,
  2180. .run_algorithm = armv7m_run_algorithm,
  2181. .start_algorithm = armv7m_start_algorithm,
  2182. .wait_algorithm = armv7m_wait_algorithm,
  2183. .add_breakpoint = cortex_m_add_breakpoint,
  2184. .remove_breakpoint = cortex_m_remove_breakpoint,
  2185. .add_watchpoint = cortex_m_add_watchpoint,
  2186. .remove_watchpoint = cortex_m_remove_watchpoint,
  2187. .hit_watchpoint = cortex_m_hit_watchpoint,
  2188. .commands = cortex_m_command_handlers,
  2189. .target_create = cortex_m_target_create,
  2190. .target_jim_configure = adiv5_jim_configure,
  2191. .init_target = cortex_m_init_target,
  2192. .examine = cortex_m_examine,
  2193. .deinit_target = cortex_m_deinit_target,
  2194. .profiling = cortex_m_profiling,
  2195. };