You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

1445 lines
39 KiB

  1. /***************************************************************************
  2. * Copyright (C) 2005 by Dominic Rath *
  3. * Dominic.Rath@gmx.de *
  4. * *
  5. * Copyright (C) 2008 by Spencer Oliver *
  6. * spen@spen-soft.co.uk *
  7. * *
  8. * Copyright (C) 2008 by Oyvind Harboe *
  9. * oyvind.harboe@zylin.com *
  10. * *
  11. * This program is free software; you can redistribute it and/or modify *
  12. * it under the terms of the GNU General Public License as published by *
  13. * the Free Software Foundation; either version 2 of the License, or *
  14. * (at your option) any later version. *
  15. * *
  16. * This program is distributed in the hope that it will be useful, *
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  19. * GNU General Public License for more details. *
  20. * *
  21. * You should have received a copy of the GNU General Public License *
  22. * along with this program; if not, write to the *
  23. * Free Software Foundation, Inc., *
  24. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
  25. ***************************************************************************/
  26. #ifdef HAVE_CONFIG_H
  27. #include "config.h"
  28. #endif
  29. #include "arm.h"
  30. #include "armv4_5.h"
  31. #include "arm_jtag.h"
  32. #include "breakpoints.h"
  33. #include "arm_disassembler.h"
  34. #include <helper/binarybuffer.h>
  35. #include "algorithm.h"
  36. #include "register.h"
/* Offsets into the armv4_5 core register cache.  These pick out the
 * banked SPSR entries; the values are coupled to arm_core_regs[] and
 * armv4_5_core_reg_map[] below.
 */
enum {
//	ARMV4_5_CPSR = 31,		/* defined elsewhere (shared header) */
	ARMV4_5_SPSR_FIQ = 32,
	ARMV4_5_SPSR_IRQ = 33,
	ARMV4_5_SPSR_SVC = 34,
	ARMV4_5_SPSR_ABT = 35,
	ARMV4_5_SPSR_UND = 36,
	ARM_SPSR_MON = 39,
};
/* Per-mode lists of register cache indices.  For User (and System,
 * which shares its registers) the list covers r0..r15 plus CPSR; for
 * the exception modes it covers only the shadowed registers plus that
 * mode's SPSR.  Referenced by the arm_mode_data[] table below.
 */
static const uint8_t arm_usr_indices[17] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ARMV4_5_CPSR,
};

/* FIQ shadows r8..r14 (seven registers) plus its SPSR */
static const uint8_t arm_fiq_indices[8] = {
	16, 17, 18, 19, 20, 21, 22, ARMV4_5_SPSR_FIQ,
};

/* The remaining exception modes shadow only SP and LR plus an SPSR */
static const uint8_t arm_irq_indices[3] = {
	23, 24, ARMV4_5_SPSR_IRQ,
};

static const uint8_t arm_svc_indices[3] = {
	25, 26, ARMV4_5_SPSR_SVC,
};

static const uint8_t arm_abt_indices[3] = {
	27, 28, ARMV4_5_SPSR_ABT,
};

static const uint8_t arm_und_indices[3] = {
	29, 30, ARMV4_5_SPSR_UND,
};

static const uint8_t arm_mon_indices[3] = {
	37, 38, ARM_SPSR_MON,
};
/* Describes each ARM operating mode: display name, the PSR mode bits
 * that select it, and the cache indices of the registers it exposes
 * (via the index arrays above).
 */
static const struct {
	const char *name;
	unsigned short psr;
	/* For user and system modes, these list indices for all registers.
	 * otherwise they're just indices for the shadow registers and SPSR.
	 */
	unsigned short n_indices;
	const uint8_t *indices;
} arm_mode_data[] = {
	/* Seven modes are standard from ARM7 on. "System" and "User" share
	 * the same registers; other modes shadow from 3 to 8 registers.
	 */
	{
		.name = "User",
		.psr = ARM_MODE_USR,
		.n_indices = ARRAY_SIZE(arm_usr_indices),
		.indices = arm_usr_indices,
	},
	{
		.name = "FIQ",
		.psr = ARM_MODE_FIQ,
		.n_indices = ARRAY_SIZE(arm_fiq_indices),
		.indices = arm_fiq_indices,
	},
	{
		.name = "Supervisor",
		.psr = ARM_MODE_SVC,
		.n_indices = ARRAY_SIZE(arm_svc_indices),
		.indices = arm_svc_indices,
	},
	{
		.name = "Abort",
		.psr = ARM_MODE_ABT,
		.n_indices = ARRAY_SIZE(arm_abt_indices),
		.indices = arm_abt_indices,
	},
	{
		.name = "IRQ",
		.psr = ARM_MODE_IRQ,
		.n_indices = ARRAY_SIZE(arm_irq_indices),
		.indices = arm_irq_indices,
	},
	{
		.name = "Undefined instruction",
		.psr = ARM_MODE_UND,
		.n_indices = ARRAY_SIZE(arm_und_indices),
		.indices = arm_und_indices,
	},
	{
		.name = "System",
		.psr = ARM_MODE_SYS,
		.n_indices = ARRAY_SIZE(arm_usr_indices),
		.indices = arm_usr_indices,
	},
	/* TrustZone "Security Extensions" add a secure monitor mode.
	 * This is distinct from a "debug monitor" which can support
	 * non-halting debug, in conjunction with some debuggers.
	 */
	{
		.name = "Secure Monitor",
		.psr = ARM_MODE_MON,
		.n_indices = ARRAY_SIZE(arm_mon_indices),
		.indices = arm_mon_indices,
	},
};
  133. /** Map PSR mode bits to the name of an ARM processor operating mode. */
  134. const char *arm_mode_name(unsigned psr_mode)
  135. {
  136. for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
  137. if (arm_mode_data[i].psr == psr_mode)
  138. return arm_mode_data[i].name;
  139. }
  140. LOG_ERROR("unrecognized psr mode: %#02x", psr_mode);
  141. return "UNRECOGNIZED";
  142. }
  143. /** Return true iff the parameter denotes a valid ARM processor mode. */
  144. bool is_arm_mode(unsigned psr_mode)
  145. {
  146. for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
  147. if (arm_mode_data[i].psr == psr_mode)
  148. return true;
  149. }
  150. return false;
  151. }
  152. /** Map PSR mode bits to linear number indexing armv4_5_core_reg_map */
  153. int arm_mode_to_number(enum arm_mode mode)
  154. {
  155. switch (mode) {
  156. case ARM_MODE_ANY:
  157. /* map MODE_ANY to user mode */
  158. case ARM_MODE_USR:
  159. return 0;
  160. case ARM_MODE_FIQ:
  161. return 1;
  162. case ARM_MODE_IRQ:
  163. return 2;
  164. case ARM_MODE_SVC:
  165. return 3;
  166. case ARM_MODE_ABT:
  167. return 4;
  168. case ARM_MODE_UND:
  169. return 5;
  170. case ARM_MODE_SYS:
  171. return 6;
  172. case ARM_MODE_MON:
  173. return 7;
  174. default:
  175. LOG_ERROR("invalid mode value encountered %d", mode);
  176. return -1;
  177. }
  178. }
  179. /** Map linear number indexing armv4_5_core_reg_map to PSR mode bits. */
  180. enum arm_mode armv4_5_number_to_mode(int number)
  181. {
  182. switch (number) {
  183. case 0:
  184. return ARM_MODE_USR;
  185. case 1:
  186. return ARM_MODE_FIQ;
  187. case 2:
  188. return ARM_MODE_IRQ;
  189. case 3:
  190. return ARM_MODE_SVC;
  191. case 4:
  192. return ARM_MODE_ABT;
  193. case 5:
  194. return ARM_MODE_UND;
  195. case 6:
  196. return ARM_MODE_SYS;
  197. case 7:
  198. return ARM_MODE_MON;
  199. default:
  200. LOG_ERROR("mode index out of bounds %d", number);
  201. return ARM_MODE_ANY;
  202. }
  203. }
/* Printable names for enum arm_state values, indexed by state. */
const char *arm_state_strings[] =
{
	"ARM", "Thumb", "Jazelle", "ThumbEE",
};
/* Templates for ARM core registers.
 *
 * NOTE: offsets in this table are coupled to the arm_mode_data
 * table above, the armv4_5_core_reg_map array below, and also to
 * the ARMV4_5_CPSR symbol (which should vanish after ARM11 updates).
 */
static const struct {
	/* The name is used for e.g. the "regs" command. */
	const char *name;

	/* The {cookie, mode} tuple uniquely identifies one register.
	 * In a given mode, cookies 0..15 map to registers R0..R15,
	 * with R13..R15 usually called SP, LR, PC.
	 *
	 * MODE_ANY is used as *input* to the mapping, and indicates
	 * various special cases (sigh) and errors.
	 *
	 * Cookie 16 is (currently) confusing, since it indicates
	 * CPSR -or- SPSR depending on whether 'mode' is MODE_ANY.
	 * (Exception modes have both CPSR and SPSR registers ...)
	 */
	unsigned cookie;
	enum arm_mode mode;
} arm_core_regs[] = {
	/* IMPORTANT: we guarantee that the first eight cached registers
	 * correspond to r0..r7, and the fifteenth to PC, so that callers
	 * don't need to map them.
	 */
	{ .name = "r0", .cookie = 0, .mode = ARM_MODE_ANY, },
	{ .name = "r1", .cookie = 1, .mode = ARM_MODE_ANY, },
	{ .name = "r2", .cookie = 2, .mode = ARM_MODE_ANY, },
	{ .name = "r3", .cookie = 3, .mode = ARM_MODE_ANY, },
	{ .name = "r4", .cookie = 4, .mode = ARM_MODE_ANY, },
	{ .name = "r5", .cookie = 5, .mode = ARM_MODE_ANY, },
	{ .name = "r6", .cookie = 6, .mode = ARM_MODE_ANY, },
	{ .name = "r7", .cookie = 7, .mode = ARM_MODE_ANY, },

	/* NOTE: regs 8..12 might be shadowed by FIQ ... flagging
	 * them as MODE_ANY creates special cases.  (ANY means
	 * "not mapped" elsewhere; here it's "everything but FIQ".)
	 */
	{ .name = "r8", .cookie = 8, .mode = ARM_MODE_ANY, },
	{ .name = "r9", .cookie = 9, .mode = ARM_MODE_ANY, },
	{ .name = "r10", .cookie = 10, .mode = ARM_MODE_ANY, },
	{ .name = "r11", .cookie = 11, .mode = ARM_MODE_ANY, },
	{ .name = "r12", .cookie = 12, .mode = ARM_MODE_ANY, },

	/* NOTE all MODE_USR registers are equivalent to MODE_SYS ones */
	{ .name = "sp_usr", .cookie = 13, .mode = ARM_MODE_USR, },
	{ .name = "lr_usr", .cookie = 14, .mode = ARM_MODE_USR, },

	/* guaranteed to be at index 15 */
	{ .name = "pc", .cookie = 15, .mode = ARM_MODE_ANY, },

	/* FIQ shadow bank: r8..r12 plus SP/LR */
	{ .name = "r8_fiq", .cookie = 8, .mode = ARM_MODE_FIQ, },
	{ .name = "r9_fiq", .cookie = 9, .mode = ARM_MODE_FIQ, },
	{ .name = "r10_fiq", .cookie = 10, .mode = ARM_MODE_FIQ, },
	{ .name = "r11_fiq", .cookie = 11, .mode = ARM_MODE_FIQ, },
	{ .name = "r12_fiq", .cookie = 12, .mode = ARM_MODE_FIQ, },

	{ .name = "sp_fiq", .cookie = 13, .mode = ARM_MODE_FIQ, },
	{ .name = "lr_fiq", .cookie = 14, .mode = ARM_MODE_FIQ, },

	/* other exception modes shadow only SP/LR (+ SPSR, below) */
	{ .name = "sp_irq", .cookie = 13, .mode = ARM_MODE_IRQ, },
	{ .name = "lr_irq", .cookie = 14, .mode = ARM_MODE_IRQ, },

	{ .name = "sp_svc", .cookie = 13, .mode = ARM_MODE_SVC, },
	{ .name = "lr_svc", .cookie = 14, .mode = ARM_MODE_SVC, },

	{ .name = "sp_abt", .cookie = 13, .mode = ARM_MODE_ABT, },
	{ .name = "lr_abt", .cookie = 14, .mode = ARM_MODE_ABT, },

	{ .name = "sp_und", .cookie = 13, .mode = ARM_MODE_UND, },
	{ .name = "lr_und", .cookie = 14, .mode = ARM_MODE_UND, },

	{ .name = "cpsr", .cookie = 16, .mode = ARM_MODE_ANY, },
	{ .name = "spsr_fiq", .cookie = 16, .mode = ARM_MODE_FIQ, },
	{ .name = "spsr_irq", .cookie = 16, .mode = ARM_MODE_IRQ, },
	{ .name = "spsr_svc", .cookie = 16, .mode = ARM_MODE_SVC, },
	{ .name = "spsr_abt", .cookie = 16, .mode = ARM_MODE_ABT, },
	{ .name = "spsr_und", .cookie = 16, .mode = ARM_MODE_UND, },

	/* TrustZone secure monitor mode (only on cores that have it) */
	{ .name = "sp_mon", .cookie = 13, .mode = ARM_MODE_MON, },
	{ .name = "lr_mon", .cookie = 14, .mode = ARM_MODE_MON, },
	{ .name = "spsr_mon", .cookie = 16, .mode = ARM_MODE_MON, },
};
/* map core mode (USR, FIQ, ...) and register number to
 * indices into the register cache
 *
 * Rows are indexed by the linear mode number from arm_mode_to_number();
 * columns 0..15 are R0..R15 and column 16 is CPSR-or-SPSR (entry 31 is
 * CPSR; 32..36 and 39 are the banked SPSRs, see the enum above).
 */
const int armv4_5_core_reg_map[8][17] =
{
	{	/* USR */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
	},
	{	/* FIQ (8 shadows of USR, vs normal 3) */
		0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 15, 32
	},
	{	/* IRQ */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 23, 24, 15, 33
	},
	{	/* SVC */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 25, 26, 15, 34
	},
	{	/* ABT */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 27, 28, 15, 35
	},
	{	/* UND */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 29, 30, 15, 36
	},
	{	/* SYS (same registers as USR) */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
	},
	{	/* MON */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 37, 38, 15, 39,
	}
};
/**
 * Configures host-side ARM records to reflect the specified CPSR.
 * Later, code can use arm_reg_current() to map register numbers
 * according to how they are exposed by this mode.
 *
 * Updates the cached CPSR value (if the cache exists yet), selects the
 * register map row and SPSR handle for the new mode, and derives the
 * core state (ARM/Thumb/Jazelle/ThumbEE) from the T and J bits.
 */
void arm_set_cpsr(struct arm *arm, uint32_t cpsr)
{
	/* The low five PSR bits select the operating mode. */
	enum arm_mode mode = cpsr & 0x1f;
	int num;

	/* NOTE: this may be called very early, before the register
	 * cache is set up.  We can't defend against many errors, in
	 * particular against CPSRs that aren't valid *here* ...
	 */
	if (arm->cpsr) {
		buf_set_u32(arm->cpsr->value, 0, 32, cpsr);
		arm->cpsr->valid = 1;
		arm->cpsr->dirty = 0;
	}

	arm->core_mode = mode;

	/* mode_to_number() warned; set up a somewhat-sane mapping */
	num = arm_mode_to_number(mode);
	if (num < 0) {
		mode = ARM_MODE_USR;
		num = 0;
	}

	arm->map = &armv4_5_core_reg_map[num][0];
	/* User and System modes have no SPSR; all others bank one
	 * at map slot 16.
	 */
	arm->spsr = (mode == ARM_MODE_USR || mode == ARM_MODE_SYS)
			? NULL
			: arm->core_cache->reg_list + arm->map[16];

	/* Older ARMs won't have the J bit */
	enum arm_state state;

	if (cpsr & (1 << 5)) {	/* T */
		if (cpsr & (1 << 24)) {	/* J */
			LOG_WARNING("ThumbEE -- incomplete support");
			state = ARM_STATE_THUMB_EE;
		} else
			state = ARM_STATE_THUMB;
	} else {
		if (cpsr & (1 << 24)) {	/* J */
			LOG_ERROR("Jazelle state handling is BROKEN!");
			state = ARM_STATE_JAZELLE;
		} else
			state = ARM_STATE_ARM;
	}
	arm->core_state = state;

	LOG_DEBUG("set CPSR %#8.8x: %s mode, %s state", (unsigned) cpsr,
			arm_mode_name(mode),
			arm_state_strings[arm->core_state]);
}
/**
 * Returns handle to the register currently mapped to a given number.
 * Someone must have called arm_set_cpsr() before.
 *
 * \param arm This core's state and registers are used.
 * \param regnum From 0..15 corresponding to R0..R14 and PC.
 * Note that R0..R7 don't require mapping; you may access those
 * as the first eight entries in the register cache.  Likewise
 * R15 (PC) doesn't need mapping; you may also access it directly.
 * However, R8..R14, and SPSR (arm->spsr) *must* be mapped.
 * CPSR (arm->cpsr) is also not mapped.
 */
struct reg *arm_reg_current(struct arm *arm, unsigned regnum)
{
	struct reg *r;

	/* regnum 16 is allowed here: map slot 16 is CPSR-or-SPSR */
	if (regnum > 16)
		return NULL;

	r = arm->core_cache->reg_list + arm->map[regnum];

	/* e.g. invalid CPSR said "secure monitor" mode on a core
	 * that doesn't support it...
	 *
	 * NOTE(review): this guard is dead code -- pointer arithmetic on
	 * the non-null reg_list base can never produce NULL, so the
	 * fallback below is unreachable.  A meaningful guard would have
	 * to validate the map index itself; confirm intent upstream.
	 */
	if (!r) {
		LOG_ERROR("Invalid CPSR mode");
		r = arm->core_cache->reg_list + regnum;
	}

	return r;
}
/* Backing store for the dummy FPA register value (always reads as 0). */
static const uint8_t arm_gdb_dummy_fp_value[12];

/**
 * Dummy FPA registers are required to support GDB on ARM.
 * Register packets require eight obsolete FPA register values.
 * Modern ARM cores use Vector Floating Point (VFP), if they
 * have any floating point support.  VFP is not FPA-compatible.
 */
struct reg arm_gdb_dummy_fp_reg =
{
	.name = "GDB dummy FPA register",
	.value = (uint8_t *) arm_gdb_dummy_fp_value,
	.valid = 1,
	.size = 96,
};

/* Backing store for the dummy FPA status register value. */
static const uint8_t arm_gdb_dummy_fps_value[4];

/**
 * Dummy FPA status registers are required to support GDB on ARM.
 * Register packets require an obsolete FPA status register.
 */
struct reg arm_gdb_dummy_fps_reg =
{
	.name = "GDB dummy FPA status register",
	.value = (uint8_t *) arm_gdb_dummy_fps_value,
	.valid = 1,
	.size = 32,
};

/* Register the dummies before main() runs (GCC constructor attribute). */
static void arm_gdb_dummy_init(void) __attribute__ ((constructor));
static void arm_gdb_dummy_init(void)
{
	register_init_dummy(&arm_gdb_dummy_fp_reg);
	register_init_dummy(&arm_gdb_dummy_fps_reg);
}
  420. static int armv4_5_get_core_reg(struct reg *reg)
  421. {
  422. int retval;
  423. struct arm_reg *armv4_5 = reg->arch_info;
  424. struct target *target = armv4_5->target;
  425. if (target->state != TARGET_HALTED)
  426. {
  427. LOG_ERROR("Target not halted");
  428. return ERROR_TARGET_NOT_HALTED;
  429. }
  430. retval = armv4_5->armv4_5_common->read_core_reg(target, reg, armv4_5->num, armv4_5->mode);
  431. if (retval == ERROR_OK) {
  432. reg->valid = 1;
  433. reg->dirty = 0;
  434. }
  435. return retval;
  436. }
/* reg->set handler: update the cached value of one core register.
 * Plain registers use a writeback model (cached now, flushed later);
 * CPSR is special-cased because writing it can switch the core mode.
 */
static int armv4_5_set_core_reg(struct reg *reg, uint8_t *buf)
{
	struct arm_reg *armv4_5 = reg->arch_info;
	struct target *target = armv4_5->target;
	struct arm *armv4_5_target = target_to_arm(target);
	uint32_t value = buf_get_u32(buf, 0, 32);

	if (target->state != TARGET_HALTED)
	{
		LOG_ERROR("Target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Except for CPSR, the "reg" command exposes a writeback model
	 * for the register cache.
	 */
	if (reg == armv4_5_target->cpsr) {
		/* Re-derive mode/state bookkeeping from the new CPSR. */
		arm_set_cpsr(armv4_5_target, value);

		/* Older cores need help to be in ARM mode during halt
		 * mode debug, so we clear the J and T bits if we flush.
		 * For newer cores (v6/v7a/v7r) we don't need that, but
		 * it won't hurt since CPSR is always flushed anyway.
		 */
		if (armv4_5_target->core_mode !=
				(enum arm_mode)(value & 0x1f)) {
			LOG_DEBUG("changing ARM core mode to '%s'",
					arm_mode_name(value & 0x1f));
			/* clear J (bit 24) and T (bit 5) before flushing */
			value &= ~((1 << 24) | (1 << 5));
			armv4_5_target->write_core_reg(target, reg,
					16, ARM_MODE_ANY, value);
		}
	} else {
		buf_set_u32(reg->value, 0, 32, value);
		reg->valid = 1;
	}
	/* mark for later flush to the target in both cases */
	reg->dirty = 1;

	return ERROR_OK;
}
/* get/set callbacks shared by every entry in the ARM register cache */
static const struct reg_arch_type arm_reg_type = {
	.get = armv4_5_get_core_reg,
	.set = armv4_5_set_core_reg,
};
  477. struct reg_cache *arm_build_reg_cache(struct target *target, struct arm *arm)
  478. {
  479. int num_regs = ARRAY_SIZE(arm_core_regs);
  480. struct reg_cache *cache = malloc(sizeof(struct reg_cache));
  481. struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
  482. struct arm_reg *arch_info = calloc(num_regs, sizeof(struct arm_reg));
  483. int i;
  484. if (!cache || !reg_list || !arch_info) {
  485. free(cache);
  486. free(reg_list);
  487. free(arch_info);
  488. return NULL;
  489. }
  490. cache->name = "ARM registers";
  491. cache->next = NULL;
  492. cache->reg_list = reg_list;
  493. cache->num_regs = 0;
  494. for (i = 0; i < num_regs; i++)
  495. {
  496. /* Skip registers this core doesn't expose */
  497. if (arm_core_regs[i].mode == ARM_MODE_MON
  498. && arm->core_type != ARM_MODE_MON)
  499. continue;
  500. /* REVISIT handle Cortex-M, which only shadows R13/SP */
  501. arch_info[i].num = arm_core_regs[i].cookie;
  502. arch_info[i].mode = arm_core_regs[i].mode;
  503. arch_info[i].target = target;
  504. arch_info[i].armv4_5_common = arm;
  505. reg_list[i].name = (char *) arm_core_regs[i].name;
  506. reg_list[i].size = 32;
  507. reg_list[i].value = &arch_info[i].value;
  508. reg_list[i].type = &arm_reg_type;
  509. reg_list[i].arch_info = &arch_info[i];
  510. cache->num_regs++;
  511. }
  512. arm->cpsr = reg_list + ARMV4_5_CPSR;
  513. arm->core_cache = cache;
  514. return cache;
  515. }
  516. int arm_arch_state(struct target *target)
  517. {
  518. struct arm *armv4_5 = target_to_arm(target);
  519. if (armv4_5->common_magic != ARM_COMMON_MAGIC)
  520. {
  521. LOG_ERROR("BUG: called for a non-ARM target");
  522. return ERROR_FAIL;
  523. }
  524. LOG_USER("target halted in %s state due to %s, current mode: %s\n"
  525. "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
  526. arm_state_strings[armv4_5->core_state],
  527. debug_reason_name(target),
  528. arm_mode_name(armv4_5->core_mode),
  529. buf_get_u32(armv4_5->cpsr->value, 0, 32),
  530. buf_get_u32(armv4_5->core_cache->reg_list[15].value,
  531. 0, 32),
  532. armv4_5->is_semihosting ? ", semihosting" : "");
  533. return ERROR_OK;
  534. }
/* Fetch, from 'cache', the register numbered 'num' for the mode whose
 * linear index is 'mode' (see arm_mode_to_number()).
 */
#define ARMV4_5_CORE_REG_MODENUM(cache, mode, num) \
		cache->reg_list[armv4_5_core_reg_map[mode][num]]
/* "arm reg" command: dump every core register, bank by bank, four per
 * output row.  Requires a halted ARM target that implements
 * full_context() (used to fault in any stale cache entries).
 */
COMMAND_HANDLER(handle_armv4_5_reg_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct arm *armv4_5 = target_to_arm(target);
	unsigned num_regs;	/* NOTE(review): assigned below but never read */
	struct reg *regs;

	if (!is_arm(armv4_5))
	{
		command_print(CMD_CTX, "current target isn't an ARM");
		return ERROR_FAIL;
	}

	if (target->state != TARGET_HALTED)
	{
		command_print(CMD_CTX, "error: target must be halted for register accesses");
		return ERROR_FAIL;
	}

	if (!is_arm_mode(armv4_5->core_mode))
		return ERROR_FAIL;

	/* full_context() is the hook used to refresh invalid entries */
	if (!armv4_5->full_context) {
		command_print(CMD_CTX, "error: target doesn't support %s",
				CMD_NAME);
		return ERROR_FAIL;
	}

	num_regs = armv4_5->core_cache->num_regs;
	regs = armv4_5->core_cache->reg_list;

	for (unsigned mode = 0; mode < ARRAY_SIZE(arm_mode_data); mode++) {
		const char *name;
		char *sep = "\n";
		char *shadow = "";

		/* label this bank of registers (or shadows) */
		switch (arm_mode_data[mode].psr) {
		case ARM_MODE_SYS:
			/* System shares User's registers; skip the duplicate */
			continue;
		case ARM_MODE_USR:
			name = "System and User";
			sep = "";
			break;
		case ARM_MODE_MON:
			/* only shown on cores with TrustZone */
			if (armv4_5->core_type != ARM_MODE_MON)
				continue;
			/* FALLTHROUGH */
		default:
			name = arm_mode_data[mode].name;
			shadow = "shadow ";
			break;
		}
		command_print(CMD_CTX, "%s%s mode %sregisters",
				sep, name, shadow);

		/* display N rows of up to 4 registers each */
		for (unsigned i = 0; i < arm_mode_data[mode].n_indices;) {
			char output[80];
			int output_len = 0;

			for (unsigned j = 0; j < 4; j++, i++) {
				uint32_t value;
				struct reg *reg = regs;

				if (i >= arm_mode_data[mode].n_indices)
					break;

				reg += arm_mode_data[mode].indices[i];

				/* REVISIT be smarter about faults... */
				if (!reg->valid)
					armv4_5->full_context(target);

				value = buf_get_u32(reg->value, 0, 32);
				output_len += snprintf(output + output_len,
						sizeof(output) - output_len,
						"%8s: %8.8" PRIx32 " ",
						reg->name, value);
			}
			command_print(CMD_CTX, "%s", output);
		}
	}

	return ERROR_OK;
}
  609. COMMAND_HANDLER(handle_armv4_5_core_state_command)
  610. {
  611. struct target *target = get_current_target(CMD_CTX);
  612. struct arm *armv4_5 = target_to_arm(target);
  613. if (!is_arm(armv4_5))
  614. {
  615. command_print(CMD_CTX, "current target isn't an ARM");
  616. return ERROR_FAIL;
  617. }
  618. if (CMD_ARGC > 0)
  619. {
  620. if (strcmp(CMD_ARGV[0], "arm") == 0)
  621. {
  622. armv4_5->core_state = ARM_STATE_ARM;
  623. }
  624. if (strcmp(CMD_ARGV[0], "thumb") == 0)
  625. {
  626. armv4_5->core_state = ARM_STATE_THUMB;
  627. }
  628. }
  629. command_print(CMD_CTX, "core state: %s", arm_state_strings[armv4_5->core_state]);
  630. return ERROR_OK;
  631. }
  632. COMMAND_HANDLER(handle_armv4_5_disassemble_command)
  633. {
  634. int retval = ERROR_OK;
  635. struct target *target = get_current_target(CMD_CTX);
  636. struct arm *arm = target ? target_to_arm(target) : NULL;
  637. uint32_t address;
  638. int count = 1;
  639. int thumb = 0;
  640. if (!is_arm(arm)) {
  641. command_print(CMD_CTX, "current target isn't an ARM");
  642. return ERROR_FAIL;
  643. }
  644. switch (CMD_ARGC) {
  645. case 3:
  646. if (strcmp(CMD_ARGV[2], "thumb") != 0)
  647. goto usage;
  648. thumb = 1;
  649. /* FALL THROUGH */
  650. case 2:
  651. COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
  652. /* FALL THROUGH */
  653. case 1:
  654. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
  655. if (address & 0x01) {
  656. if (!thumb) {
  657. command_print(CMD_CTX, "Disassemble as Thumb");
  658. thumb = 1;
  659. }
  660. address &= ~1;
  661. }
  662. break;
  663. default:
  664. usage:
  665. command_print(CMD_CTX,
  666. "usage: arm disassemble <address> [<count> ['thumb']]");
  667. count = 0;
  668. retval = ERROR_FAIL;
  669. }
  670. while (count-- > 0) {
  671. struct arm_instruction cur_instruction;
  672. if (thumb) {
  673. /* Always use Thumb2 disassembly for best handling
  674. * of 32-bit BL/BLX, and to work with newer cores
  675. * (some ARMv6, all ARMv7) that use Thumb2.
  676. */
  677. retval = thumb2_opcode(target, address,
  678. &cur_instruction);
  679. if (retval != ERROR_OK)
  680. break;
  681. } else {
  682. uint32_t opcode;
  683. retval = target_read_u32(target, address, &opcode);
  684. if (retval != ERROR_OK)
  685. break;
  686. retval = arm_evaluate_opcode(opcode, address,
  687. &cur_instruction) != ERROR_OK;
  688. if (retval != ERROR_OK)
  689. break;
  690. }
  691. command_print(CMD_CTX, "%s", cur_instruction.text);
  692. address += cur_instruction.instruction_size;
  693. }
  694. return retval;
  695. }
  696. static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  697. {
  698. struct command_context *context;
  699. struct target *target;
  700. struct arm *arm;
  701. int retval;
  702. context = Jim_GetAssocData(interp, "context");
  703. if (context == NULL) {
  704. LOG_ERROR("%s: no command context", __func__);
  705. return JIM_ERR;
  706. }
  707. target = get_current_target(context);
  708. if (target == NULL) {
  709. LOG_ERROR("%s: no current target", __func__);
  710. return JIM_ERR;
  711. }
  712. if (!target_was_examined(target)) {
  713. LOG_ERROR("%s: not yet examined", target_name(target));
  714. return JIM_ERR;
  715. }
  716. arm = target_to_arm(target);
  717. if (!is_arm(arm)) {
  718. LOG_ERROR("%s: not an ARM", target_name(target));
  719. return JIM_ERR;
  720. }
  721. if ((argc < 6) || (argc > 7)) {
  722. /* FIXME use the command name to verify # params... */
  723. LOG_ERROR("%s: wrong number of arguments", __func__);
  724. return JIM_ERR;
  725. }
  726. int cpnum;
  727. uint32_t op1;
  728. uint32_t op2;
  729. uint32_t CRn;
  730. uint32_t CRm;
  731. uint32_t value;
  732. long l;
  733. /* NOTE: parameter sequence matches ARM instruction set usage:
  734. * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
  735. * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
  736. * The "rX" is necessarily omitted; it uses Tcl mechanisms.
  737. */
  738. retval = Jim_GetLong(interp, argv[1], &l);
  739. if (retval != JIM_OK)
  740. return retval;
  741. if (l & ~0xf) {
  742. LOG_ERROR("%s: %s %d out of range", __func__,
  743. "coprocessor", (int) l);
  744. return JIM_ERR;
  745. }
  746. cpnum = l;
  747. retval = Jim_GetLong(interp, argv[2], &l);
  748. if (retval != JIM_OK)
  749. return retval;
  750. if (l & ~0x7) {
  751. LOG_ERROR("%s: %s %d out of range", __func__,
  752. "op1", (int) l);
  753. return JIM_ERR;
  754. }
  755. op1 = l;
  756. retval = Jim_GetLong(interp, argv[3], &l);
  757. if (retval != JIM_OK)
  758. return retval;
  759. if (l & ~0xf) {
  760. LOG_ERROR("%s: %s %d out of range", __func__,
  761. "CRn", (int) l);
  762. return JIM_ERR;
  763. }
  764. CRn = l;
  765. retval = Jim_GetLong(interp, argv[4], &l);
  766. if (retval != JIM_OK)
  767. return retval;
  768. if (l & ~0xf) {
  769. LOG_ERROR("%s: %s %d out of range", __func__,
  770. "CRm", (int) l);
  771. return JIM_ERR;
  772. }
  773. CRm = l;
  774. retval = Jim_GetLong(interp, argv[5], &l);
  775. if (retval != JIM_OK)
  776. return retval;
  777. if (l & ~0x7) {
  778. LOG_ERROR("%s: %s %d out of range", __func__,
  779. "op2", (int) l);
  780. return JIM_ERR;
  781. }
  782. op2 = l;
  783. value = 0;
  784. /* FIXME don't assume "mrc" vs "mcr" from the number of params;
  785. * that could easily be a typo! Check both...
  786. *
  787. * FIXME change the call syntax here ... simplest to just pass
  788. * the MRC() or MCR() instruction to be executed. That will also
  789. * let us support the "mrc2" and "mcr2" opcodes (toggling one bit)
  790. * if that's ever needed.
  791. */
  792. if (argc == 7) {
  793. retval = Jim_GetLong(interp, argv[6], &l);
  794. if (retval != JIM_OK) {
  795. return retval;
  796. }
  797. value = l;
  798. /* NOTE: parameters reordered! */
  799. // ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2)
  800. retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
  801. if (retval != ERROR_OK)
  802. return JIM_ERR;
  803. } else {
  804. /* NOTE: parameters reordered! */
  805. // ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2)
  806. retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
  807. if (retval != ERROR_OK)
  808. return JIM_ERR;
  809. Jim_SetResult(interp, Jim_NewIntObj(interp, value));
  810. }
  811. return JIM_OK;
  812. }
  813. static const struct command_registration arm_exec_command_handlers[] = {
  814. {
  815. .name = "reg",
  816. .handler = handle_armv4_5_reg_command,
  817. .mode = COMMAND_EXEC,
  818. .help = "display ARM core registers",
  819. },
  820. {
  821. .name = "core_state",
  822. .handler = handle_armv4_5_core_state_command,
  823. .mode = COMMAND_EXEC,
  824. .usage = "['arm'|'thumb']",
  825. .help = "display/change ARM core state",
  826. },
  827. {
  828. .name = "disassemble",
  829. .handler = handle_armv4_5_disassemble_command,
  830. .mode = COMMAND_EXEC,
  831. .usage = "address [count ['thumb']]",
  832. .help = "disassemble instructions ",
  833. },
  834. {
  835. .name = "mcr",
  836. .mode = COMMAND_EXEC,
  837. .jim_handler = &jim_mcrmrc,
  838. .help = "write coprocessor register",
  839. .usage = "cpnum op1 CRn op2 CRm value",
  840. },
  841. {
  842. .name = "mrc",
  843. .jim_handler = &jim_mcrmrc,
  844. .help = "read coprocessor register",
  845. .usage = "cpnum op1 CRn op2 CRm",
  846. },
  847. COMMAND_REGISTRATION_DONE
  848. };
/* Top-level "arm" command group; the subcommands are registered via
 * the chained arm_exec_command_handlers table.
 */
const struct command_registration arm_command_handlers[] = {
	{
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM command group",
		.chain = arm_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
  858. int arm_get_gdb_reg_list(struct target *target,
  859. struct reg **reg_list[], int *reg_list_size)
  860. {
  861. struct arm *armv4_5 = target_to_arm(target);
  862. int i;
  863. if (!is_arm_mode(armv4_5->core_mode))
  864. return ERROR_FAIL;
  865. *reg_list_size = 26;
  866. *reg_list = malloc(sizeof(struct reg*) * (*reg_list_size));
  867. for (i = 0; i < 16; i++)
  868. (*reg_list)[i] = arm_reg_current(armv4_5, i);
  869. for (i = 16; i < 24; i++)
  870. (*reg_list)[i] = &arm_gdb_dummy_fp_reg;
  871. (*reg_list)[24] = &arm_gdb_dummy_fps_reg;
  872. (*reg_list)[25] = armv4_5->cpsr;
  873. return ERROR_OK;
  874. }
  875. /* wait for execution to complete and check exit point */
  876. static int armv4_5_run_algorithm_completion(struct target *target, uint32_t exit_point, int timeout_ms, void *arch_info)
  877. {
  878. int retval;
  879. struct arm *armv4_5 = target_to_arm(target);
  880. if ((retval = target_wait_state(target, TARGET_HALTED, timeout_ms)) != ERROR_OK)
  881. {
  882. return retval;
  883. }
  884. if (target->state != TARGET_HALTED)
  885. {
  886. if ((retval = target_halt(target)) != ERROR_OK)
  887. return retval;
  888. if ((retval = target_wait_state(target, TARGET_HALTED, 500)) != ERROR_OK)
  889. {
  890. return retval;
  891. }
  892. return ERROR_TARGET_TIMEOUT;
  893. }
  894. /* fast exit: ARMv5+ code can use BKPT */
  895. if (exit_point && buf_get_u32(armv4_5->core_cache->reg_list[15].value,
  896. 0, 32) != exit_point)
  897. {
  898. LOG_WARNING("target reentered debug state, but not at the desired exit point: 0x%4.4" PRIx32 "",
  899. buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
  900. return ERROR_TARGET_TIMEOUT;
  901. }
  902. return ERROR_OK;
  903. }
/**
 * Core algorithm runner: saves CPU context, loads the caller-supplied
 * register/memory parameters, resumes the target at entry_point, waits
 * for completion via run_it(), reads back output parameters, and
 * restores the saved context.
 *
 * @param exit_point  address where execution must stop; may be 0 on
 *                    ARMv5+ targets whose algorithm ends with BKPT
 * @param arch_info   must be a struct arm_algorithm describing the
 *                    mode/state the algorithm expects
 * @param run_it      completion hook (e.g. armv4_5_run_algorithm_completion)
 *
 * NOTE(review): early returns after target_resume() or a parameter-write
 * failure leave the exit breakpoint installed and the saved context
 * unrestored — callers should treat such errors as fatal to the session.
 */
int armv4_5_run_algorithm_inner(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info,
		int (*run_it)(struct target *target, uint32_t exit_point,
				int timeout_ms, void *arch_info))
{
	struct arm *armv4_5 = target_to_arm(target);
	struct arm_algorithm *arm_algorithm_info = arch_info;
	enum arm_state core_state = armv4_5->core_state;
	uint32_t context[17];	/* r0..r15 plus mode's CPSR-or-SPSR */
	uint32_t cpsr;
	int exit_breakpoint_size = 0;
	int i;
	int retval = ERROR_OK;

	LOG_DEBUG("Running algorithm");

	if (arm_algorithm_info->common_magic != ARM_COMMON_MAGIC)
	{
		LOG_ERROR("current target isn't an ARMV4/5 target");
		return ERROR_TARGET_INVALID;
	}

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!is_arm_mode(armv4_5->core_mode))
		return ERROR_FAIL;

	/* armv5 and later can terminate with BKPT instruction; less overhead */
	if (!exit_point && armv4_5->is_armv4)
	{
		LOG_ERROR("ARMv4 target needs HW breakpoint location");
		return ERROR_FAIL;
	}

	/* save r0..pc, cpsr-or-spsr, and then cpsr-for-sure;
	 * they'll be restored later.
	 */
	for (i = 0; i <= 16; i++)
	{
		struct reg *r;

		r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
				arm_algorithm_info->core_mode, i);
		/* fetch from the target anything not already cached */
		if (!r->valid)
			armv4_5->read_core_reg(target, r, i,
					arm_algorithm_info->core_mode);
		context[i] = buf_get_u32(r->value, 0, 32);
	}
	cpsr = buf_get_u32(armv4_5->cpsr->value, 0, 32);

	/* download memory parameters into the working area */
	for (i = 0; i < num_mem_params; i++)
	{
		if ((retval = target_write_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
		{
			return retval;
		}
	}

	/* load input register parameters; size mismatches are caller bugs */
	for (i = 0; i < num_reg_params; i++)
	{
		struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
		if (!reg)
		{
			LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
			return ERROR_INVALID_ARGUMENTS;
		}

		if (reg->size != reg_params[i].size)
		{
			LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
			return ERROR_INVALID_ARGUMENTS;
		}

		if ((retval = armv4_5_set_core_reg(reg, reg_params[i].value)) != ERROR_OK)
		{
			return retval;
		}
	}

	/* breakpoint width depends on the ISA the algorithm executes in */
	armv4_5->core_state = arm_algorithm_info->core_state;
	if (armv4_5->core_state == ARM_STATE_ARM)
		exit_breakpoint_size = 4;
	else if (armv4_5->core_state == ARM_STATE_THUMB)
		exit_breakpoint_size = 2;
	else
	{
		LOG_ERROR("BUG: can't execute algorithms when not in ARM or Thumb state");
		return ERROR_INVALID_ARGUMENTS;
	}

	/* switch processor mode via the low 5 CPSR bits, if requested */
	if (arm_algorithm_info->core_mode != ARM_MODE_ANY)
	{
		LOG_DEBUG("setting core_mode: 0x%2.2x",
				arm_algorithm_info->core_mode);
		buf_set_u32(armv4_5->cpsr->value, 0, 5,
				arm_algorithm_info->core_mode);
		armv4_5->cpsr->dirty = 1;
		armv4_5->cpsr->valid = 1;
	}

	/* terminate using a hardware or (ARMv5+) software breakpoint */
	if (exit_point && (retval = breakpoint_add(target, exit_point,
				exit_breakpoint_size, BKPT_HARD)) != ERROR_OK)
	{
		LOG_ERROR("can't add HW breakpoint to terminate algorithm");
		return ERROR_TARGET_FAILURE;
	}

	if ((retval = target_resume(target, 0, entry_point, 1, 1)) != ERROR_OK)
	{
		return retval;
	}
	int retvaltemp;
	retval = run_it(target, exit_point, timeout_ms, arch_info);

	/* remove the exit breakpoint even if run_it failed */
	if (exit_point)
		breakpoint_remove(target, exit_point);

	if (retval != ERROR_OK)
		return retval;

	/* read back output memory parameters (keep last error, keep going) */
	for (i = 0; i < num_mem_params; i++)
	{
		if (mem_params[i].direction != PARAM_OUT)
			if ((retvaltemp = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
			{
				retval = retvaltemp;
			}
	}

	/* read back output register parameters */
	for (i = 0; i < num_reg_params; i++)
	{
		if (reg_params[i].direction != PARAM_OUT)
		{
			struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
			if (!reg)
			{
				LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
				retval = ERROR_INVALID_ARGUMENTS;
				continue;
			}

			if (reg->size != reg_params[i].size)
			{
				LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
				retval = ERROR_INVALID_ARGUMENTS;
				continue;
			}

			buf_set_u32(reg_params[i].value, 0, 32, buf_get_u32(reg->value, 0, 32));
		}
	}

	/* restore everything we saved before (17 or 18 registers) */
	for (i = 0; i <= 16; i++)
	{
		uint32_t regvalue;

		regvalue = buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).value, 0, 32);
		/* only mark dirty the registers the algorithm changed */
		if (regvalue != context[i])
		{
			LOG_DEBUG("restoring register %s with value 0x%8.8" PRIx32 "", ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).name, context[i]);
			buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).value, 0, 32, context[i]);
			ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).valid = 1;
			ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).dirty = 1;
		}
	}

	/* restore the original CPSR and ARM/Thumb state */
	arm_set_cpsr(armv4_5, cpsr);
	armv4_5->cpsr->dirty = 1;

	armv4_5->core_state = core_state;

	return retval;
}
  1060. int armv4_5_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info)
  1061. {
  1062. return armv4_5_run_algorithm_inner(target, num_mem_params, mem_params, num_reg_params, reg_params, entry_point, exit_point, timeout_ms, arch_info, armv4_5_run_algorithm_completion);
  1063. }
  1064. /**
  1065. * Runs ARM code in the target to calculate a CRC32 checksum.
  1066. *
  1067. * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
  1068. */
  1069. int arm_checksum_memory(struct target *target,
  1070. uint32_t address, uint32_t count, uint32_t *checksum)
  1071. {
  1072. struct working_area *crc_algorithm;
  1073. struct arm_algorithm armv4_5_info;
  1074. struct reg_param reg_params[2];
  1075. int retval;
  1076. uint32_t i;
  1077. static const uint32_t arm_crc_code[] = {
  1078. 0xE1A02000, /* mov r2, r0 */
  1079. 0xE3E00000, /* mov r0, #0xffffffff */
  1080. 0xE1A03001, /* mov r3, r1 */
  1081. 0xE3A04000, /* mov r4, #0 */
  1082. 0xEA00000B, /* b ncomp */
  1083. /* nbyte: */
  1084. 0xE7D21004, /* ldrb r1, [r2, r4] */
  1085. 0xE59F7030, /* ldr r7, CRC32XOR */
  1086. 0xE0200C01, /* eor r0, r0, r1, asl 24 */
  1087. 0xE3A05000, /* mov r5, #0 */
  1088. /* loop: */
  1089. 0xE3500000, /* cmp r0, #0 */
  1090. 0xE1A06080, /* mov r6, r0, asl #1 */
  1091. 0xE2855001, /* add r5, r5, #1 */
  1092. 0xE1A00006, /* mov r0, r6 */
  1093. 0xB0260007, /* eorlt r0, r6, r7 */
  1094. 0xE3550008, /* cmp r5, #8 */
  1095. 0x1AFFFFF8, /* bne loop */
  1096. 0xE2844001, /* add r4, r4, #1 */
  1097. /* ncomp: */
  1098. 0xE1540003, /* cmp r4, r3 */
  1099. 0x1AFFFFF1, /* bne nbyte */
  1100. /* end: */
  1101. 0xEAFFFFFE, /* b end */
  1102. /* CRC32XOR: */
  1103. 0x04C11DB7 /* .word 0x04C11DB7 */
  1104. };
  1105. retval = target_alloc_working_area(target,
  1106. sizeof(arm_crc_code), &crc_algorithm);
  1107. if (retval != ERROR_OK)
  1108. return retval;
  1109. /* convert code into a buffer in target endianness */
  1110. for (i = 0; i < ARRAY_SIZE(arm_crc_code); i++) {
  1111. retval = target_write_u32(target,
  1112. crc_algorithm->address + i * sizeof(uint32_t),
  1113. arm_crc_code[i]);
  1114. if (retval != ERROR_OK)
  1115. return retval;
  1116. }
  1117. armv4_5_info.common_magic = ARM_COMMON_MAGIC;
  1118. armv4_5_info.core_mode = ARM_MODE_SVC;
  1119. armv4_5_info.core_state = ARM_STATE_ARM;
  1120. init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
  1121. init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
  1122. buf_set_u32(reg_params[0].value, 0, 32, address);
  1123. buf_set_u32(reg_params[1].value, 0, 32, count);
  1124. /* 20 second timeout/megabyte */
  1125. int timeout = 20000 * (1 + (count / (1024 * 1024)));
  1126. retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
  1127. crc_algorithm->address,
  1128. crc_algorithm->address + sizeof(arm_crc_code) - 8,
  1129. timeout, &armv4_5_info);
  1130. if (retval != ERROR_OK) {
  1131. LOG_ERROR("error executing ARM crc algorithm");
  1132. destroy_reg_param(&reg_params[0]);
  1133. destroy_reg_param(&reg_params[1]);
  1134. target_free_working_area(target, crc_algorithm);
  1135. return retval;
  1136. }
  1137. *checksum = buf_get_u32(reg_params[0].value, 0, 32);
  1138. destroy_reg_param(&reg_params[0]);
  1139. destroy_reg_param(&reg_params[1]);
  1140. target_free_working_area(target, crc_algorithm);
  1141. return ERROR_OK;
  1142. }
  1143. /**
  1144. * Runs ARM code in the target to check whether a memory block holds
  1145. * all ones. NOR flash which has been erased, and thus may be written,
  1146. * holds all ones.
  1147. *
  1148. * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
  1149. */
  1150. int arm_blank_check_memory(struct target *target,
  1151. uint32_t address, uint32_t count, uint32_t *blank)
  1152. {
  1153. struct working_area *check_algorithm;
  1154. struct reg_param reg_params[3];
  1155. struct arm_algorithm armv4_5_info;
  1156. int retval;
  1157. uint32_t i;
  1158. static const uint32_t check_code[] = {
  1159. /* loop: */
  1160. 0xe4d03001, /* ldrb r3, [r0], #1 */
  1161. 0xe0022003, /* and r2, r2, r3 */
  1162. 0xe2511001, /* subs r1, r1, #1 */
  1163. 0x1afffffb, /* bne loop */
  1164. /* end: */
  1165. 0xeafffffe /* b end */
  1166. };
  1167. /* make sure we have a working area */
  1168. retval = target_alloc_working_area(target,
  1169. sizeof(check_code), &check_algorithm);
  1170. if (retval != ERROR_OK)
  1171. return retval;
  1172. /* convert code into a buffer in target endianness */
  1173. for (i = 0; i < ARRAY_SIZE(check_code); i++) {
  1174. retval = target_write_u32(target,
  1175. check_algorithm->address
  1176. + i * sizeof(uint32_t),
  1177. check_code[i]);
  1178. if (retval != ERROR_OK)
  1179. return retval;
  1180. }
  1181. armv4_5_info.common_magic = ARM_COMMON_MAGIC;
  1182. armv4_5_info.core_mode = ARM_MODE_SVC;
  1183. armv4_5_info.core_state = ARM_STATE_ARM;
  1184. init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
  1185. buf_set_u32(reg_params[0].value, 0, 32, address);
  1186. init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
  1187. buf_set_u32(reg_params[1].value, 0, 32, count);
  1188. init_reg_param(&reg_params[2], "r2", 32, PARAM_IN_OUT);
  1189. buf_set_u32(reg_params[2].value, 0, 32, 0xff);
  1190. retval = target_run_algorithm(target, 0, NULL, 3, reg_params,
  1191. check_algorithm->address,
  1192. check_algorithm->address + sizeof(check_code) - 4,
  1193. 10000, &armv4_5_info);
  1194. if (retval != ERROR_OK) {
  1195. destroy_reg_param(&reg_params[0]);
  1196. destroy_reg_param(&reg_params[1]);
  1197. destroy_reg_param(&reg_params[2]);
  1198. target_free_working_area(target, check_algorithm);
  1199. return retval;
  1200. }
  1201. *blank = buf_get_u32(reg_params[2].value, 0, 32);
  1202. destroy_reg_param(&reg_params[0]);
  1203. destroy_reg_param(&reg_params[1]);
  1204. destroy_reg_param(&reg_params[2]);
  1205. target_free_working_area(target, check_algorithm);
  1206. return ERROR_OK;
  1207. }
  1208. static int arm_full_context(struct target *target)
  1209. {
  1210. struct arm *armv4_5 = target_to_arm(target);
  1211. unsigned num_regs = armv4_5->core_cache->num_regs;
  1212. struct reg *reg = armv4_5->core_cache->reg_list;
  1213. int retval = ERROR_OK;
  1214. for (; num_regs && retval == ERROR_OK; num_regs--, reg++) {
  1215. if (reg->valid)
  1216. continue;
  1217. retval = armv4_5_get_core_reg(reg);
  1218. }
  1219. return retval;
  1220. }
  1221. static int arm_default_mrc(struct target *target, int cpnum,
  1222. uint32_t op1, uint32_t op2,
  1223. uint32_t CRn, uint32_t CRm,
  1224. uint32_t *value)
  1225. {
  1226. LOG_ERROR("%s doesn't implement MRC", target_type_name(target));
  1227. return ERROR_FAIL;
  1228. }
  1229. static int arm_default_mcr(struct target *target, int cpnum,
  1230. uint32_t op1, uint32_t op2,
  1231. uint32_t CRn, uint32_t CRm,
  1232. uint32_t value)
  1233. {
  1234. LOG_ERROR("%s doesn't implement MCR", target_type_name(target));
  1235. return ERROR_FAIL;
  1236. }
  1237. int arm_init_arch_info(struct target *target, struct arm *armv4_5)
  1238. {
  1239. target->arch_info = armv4_5;
  1240. armv4_5->target = target;
  1241. armv4_5->common_magic = ARM_COMMON_MAGIC;
  1242. arm_set_cpsr(armv4_5, ARM_MODE_USR);
  1243. /* core_type may be overridden by subtype logic */
  1244. armv4_5->core_type = ARM_MODE_ANY;
  1245. /* default full_context() has no core-specific optimizations */
  1246. if (!armv4_5->full_context && armv4_5->read_core_reg)
  1247. armv4_5->full_context = arm_full_context;
  1248. if (!armv4_5->mrc)
  1249. armv4_5->mrc = arm_default_mrc;
  1250. if (!armv4_5->mcr)
  1251. armv4_5->mcr = arm_default_mcr;
  1252. return ERROR_OK;
  1253. }