You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

805 lines
23 KiB

  1. /***************************************************************************
  2. * Copyright (C) 2009 by David Brownell *
  3. * *
  4. * Copyright (C) ST-Ericsson SA 2011 michel.jaouen@stericsson.com *
  5. * *
  6. * This program is free software; you can redistribute it and/or modify *
  7. * it under the terms of the GNU General Public License as published by *
  8. * the Free Software Foundation; either version 2 of the License, or *
  9. * (at your option) any later version. *
  10. * *
  11. * This program is distributed in the hope that it will be useful, *
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  14. * GNU General Public License for more details. *
  15. * *
  16. * You should have received a copy of the GNU General Public License *
  17. * along with this program; if not, write to the *
  18. * Free Software Foundation, Inc., *
  19. * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
  20. ***************************************************************************/
  21. #ifdef HAVE_CONFIG_H
  22. #include "config.h"
  23. #endif
  24. #include <helper/replacements.h>
  25. #include "armv7a.h"
  26. #include "arm_disassembler.h"
  27. #include "register.h"
  28. #include <helper/binarybuffer.h>
  29. #include <helper/command.h>
  30. #include <stdlib.h>
  31. #include <string.h>
  32. #include <unistd.h>
  33. #include "arm_opcodes.h"
  34. #include "target.h"
  35. #include "target_type.h"
  36. static void armv7a_show_fault_registers(struct target *target)
  37. {
  38. uint32_t dfsr, ifsr, dfar, ifar;
  39. struct armv7a_common *armv7a = target_to_armv7a(target);
  40. struct arm_dpm *dpm = armv7a->arm.dpm;
  41. int retval;
  42. retval = dpm->prepare(dpm);
  43. if (retval != ERROR_OK)
  44. return;
  45. /* ARMV4_5_MRC(cpnum, op1, r0, CRn, CRm, op2) */
  46. /* c5/c0 - {data, instruction} fault status registers */
  47. retval = dpm->instr_read_data_r0(dpm,
  48. ARMV4_5_MRC(15, 0, 0, 5, 0, 0),
  49. &dfsr);
  50. if (retval != ERROR_OK)
  51. goto done;
  52. retval = dpm->instr_read_data_r0(dpm,
  53. ARMV4_5_MRC(15, 0, 0, 5, 0, 1),
  54. &ifsr);
  55. if (retval != ERROR_OK)
  56. goto done;
  57. /* c6/c0 - {data, instruction} fault address registers */
  58. retval = dpm->instr_read_data_r0(dpm,
  59. ARMV4_5_MRC(15, 0, 0, 6, 0, 0),
  60. &dfar);
  61. if (retval != ERROR_OK)
  62. goto done;
  63. retval = dpm->instr_read_data_r0(dpm,
  64. ARMV4_5_MRC(15, 0, 0, 6, 0, 2),
  65. &ifar);
  66. if (retval != ERROR_OK)
  67. goto done;
  68. LOG_USER("Data fault registers DFSR: %8.8" PRIx32
  69. ", DFAR: %8.8" PRIx32, dfsr, dfar);
  70. LOG_USER("Instruction fault registers IFSR: %8.8" PRIx32
  71. ", IFAR: %8.8" PRIx32, ifsr, ifar);
  72. done:
  73. /* (void) */ dpm->finish(dpm);
  74. }
  75. static int armv7a_read_ttbcr(struct target *target)
  76. {
  77. struct armv7a_common *armv7a = target_to_armv7a(target);
  78. struct arm_dpm *dpm = armv7a->arm.dpm;
  79. uint32_t ttbcr;
  80. int retval = dpm->prepare(dpm);
  81. if (retval != ERROR_OK)
  82. goto done;
  83. /* MRC p15,0,<Rt>,c2,c0,2 ; Read CP15 Translation Table Base Control Register*/
  84. retval = dpm->instr_read_data_r0(dpm,
  85. ARMV4_5_MRC(15, 0, 0, 2, 0, 2),
  86. &ttbcr);
  87. if (retval != ERROR_OK)
  88. goto done;
  89. armv7a->armv7a_mmu.ttbr1_used = ((ttbcr & 0x7) != 0) ? 1 : 0;
  90. armv7a->armv7a_mmu.ttbr0_mask = 7 << (32 - ((ttbcr & 0x7)));
  91. #if 0
  92. LOG_INFO("ttb1 %s ,ttb0_mask %x",
  93. armv7a->armv7a_mmu.ttbr1_used ? "used" : "not used",
  94. armv7a->armv7a_mmu.ttbr0_mask);
  95. #endif
  96. if (armv7a->armv7a_mmu.ttbr1_used == 1) {
  97. LOG_INFO("SVC access above %" PRIx32,
  98. (uint32_t)(0xffffffff & armv7a->armv7a_mmu.ttbr0_mask));
  99. armv7a->armv7a_mmu.os_border = 0xffffffff & armv7a->armv7a_mmu.ttbr0_mask;
  100. } else {
  101. /* fix me , default is hard coded LINUX border */
  102. armv7a->armv7a_mmu.os_border = 0xc0000000;
  103. }
  104. done:
  105. dpm->finish(dpm);
  106. return retval;
  107. }
  108. /* method adapted to cortex A : reused arm v4 v5 method*/
  109. int armv7a_mmu_translate_va(struct target *target, uint32_t va, uint32_t *val)
  110. {
  111. uint32_t first_lvl_descriptor = 0x0;
  112. uint32_t second_lvl_descriptor = 0x0;
  113. int retval;
  114. struct armv7a_common *armv7a = target_to_armv7a(target);
  115. struct arm_dpm *dpm = armv7a->arm.dpm;
  116. uint32_t ttb = 0; /* default ttb0 */
  117. if (armv7a->armv7a_mmu.ttbr1_used == -1)
  118. armv7a_read_ttbcr(target);
  119. if ((armv7a->armv7a_mmu.ttbr1_used) &&
  120. (va > (0xffffffff & armv7a->armv7a_mmu.ttbr0_mask))) {
  121. /* select ttb 1 */
  122. ttb = 1;
  123. }
  124. retval = dpm->prepare(dpm);
  125. if (retval != ERROR_OK)
  126. goto done;
  127. /* MRC p15,0,<Rt>,c2,c0,ttb */
  128. retval = dpm->instr_read_data_r0(dpm,
  129. ARMV4_5_MRC(15, 0, 0, 2, 0, ttb),
  130. &ttb);
  131. if (retval != ERROR_OK)
  132. return retval;
  133. retval = armv7a->armv7a_mmu.read_physical_memory(target,
  134. (ttb & 0xffffc000) | ((va & 0xfff00000) >> 18),
  135. 4, 1, (uint8_t *)&first_lvl_descriptor);
  136. if (retval != ERROR_OK)
  137. return retval;
  138. first_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
  139. &first_lvl_descriptor);
  140. /* reuse armv4_5 piece of code, specific armv7a changes may come later */
  141. LOG_DEBUG("1st lvl desc: %8.8" PRIx32 "", first_lvl_descriptor);
  142. if ((first_lvl_descriptor & 0x3) == 0) {
  143. LOG_ERROR("Address translation failure");
  144. return ERROR_TARGET_TRANSLATION_FAULT;
  145. }
  146. if ((first_lvl_descriptor & 0x3) == 2) {
  147. /* section descriptor */
  148. *val = (first_lvl_descriptor & 0xfff00000) | (va & 0x000fffff);
  149. return ERROR_OK;
  150. }
  151. if ((first_lvl_descriptor & 0x3) == 1) {
  152. /* coarse page table */
  153. retval = armv7a->armv7a_mmu.read_physical_memory(target,
  154. (first_lvl_descriptor & 0xfffffc00) | ((va & 0x000ff000) >> 10),
  155. 4, 1, (uint8_t *)&second_lvl_descriptor);
  156. if (retval != ERROR_OK)
  157. return retval;
  158. } else if ((first_lvl_descriptor & 0x3) == 3) {
  159. /* fine page table */
  160. retval = armv7a->armv7a_mmu.read_physical_memory(target,
  161. (first_lvl_descriptor & 0xfffff000) | ((va & 0x000ffc00) >> 8),
  162. 4, 1, (uint8_t *)&second_lvl_descriptor);
  163. if (retval != ERROR_OK)
  164. return retval;
  165. }
  166. second_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
  167. &second_lvl_descriptor);
  168. LOG_DEBUG("2nd lvl desc: %8.8" PRIx32 "", second_lvl_descriptor);
  169. if ((second_lvl_descriptor & 0x3) == 0) {
  170. LOG_ERROR("Address translation failure");
  171. return ERROR_TARGET_TRANSLATION_FAULT;
  172. }
  173. if ((second_lvl_descriptor & 0x3) == 1) {
  174. /* large page descriptor */
  175. *val = (second_lvl_descriptor & 0xffff0000) | (va & 0x0000ffff);
  176. return ERROR_OK;
  177. }
  178. if ((second_lvl_descriptor & 0x3) == 2) {
  179. /* small page descriptor */
  180. *val = (second_lvl_descriptor & 0xfffff000) | (va & 0x00000fff);
  181. return ERROR_OK;
  182. }
  183. if ((second_lvl_descriptor & 0x3) == 3) {
  184. *val = (second_lvl_descriptor & 0xfffffc00) | (va & 0x000003ff);
  185. return ERROR_OK;
  186. }
  187. /* should not happen */
  188. LOG_ERROR("Address translation failure");
  189. return ERROR_TARGET_TRANSLATION_FAULT;
  190. done:
  191. return retval;
  192. }
  193. /* V7 method VA TO PA */
  194. int armv7a_mmu_translate_va_pa(struct target *target, uint32_t va,
  195. uint32_t *val, int meminfo)
  196. {
  197. int retval = ERROR_FAIL;
  198. struct armv7a_common *armv7a = target_to_armv7a(target);
  199. struct arm_dpm *dpm = armv7a->arm.dpm;
  200. uint32_t virt = va & ~0xfff;
  201. uint32_t NOS, NS, INNER, OUTER;
  202. *val = 0xdeadbeef;
  203. retval = dpm->prepare(dpm);
  204. if (retval != ERROR_OK)
  205. goto done;
  206. /* mmu must be enable in order to get a correct translation
  207. * use VA to PA CP15 register for conversion */
  208. retval = dpm->instr_write_data_r0(dpm,
  209. ARMV4_5_MCR(15, 0, 0, 7, 8, 0),
  210. virt);
  211. if (retval != ERROR_OK)
  212. goto done;
  213. retval = dpm->instr_read_data_r0(dpm,
  214. ARMV4_5_MRC(15, 0, 0, 7, 4, 0),
  215. val);
  216. /* decode memory attribute */
  217. NOS = (*val >> 10) & 1; /* Not Outer shareable */
  218. NS = (*val >> 9) & 1; /* Non secure */
  219. INNER = (*val >> 4) & 0x7;
  220. OUTER = (*val >> 2) & 0x3;
  221. if (retval != ERROR_OK)
  222. goto done;
  223. *val = (*val & ~0xfff) + (va & 0xfff);
  224. if (*val == va)
  225. LOG_WARNING("virt = phys : MMU disable !!");
  226. if (meminfo) {
  227. LOG_INFO("%" PRIx32 " : %" PRIx32 " %s outer shareable %s secured",
  228. va, *val,
  229. NOS == 1 ? "not" : " ",
  230. NS == 1 ? "not" : "");
  231. switch (OUTER) {
  232. case 0:
  233. LOG_INFO("outer: Non-Cacheable");
  234. break;
  235. case 1:
  236. LOG_INFO("outer: Write-Back, Write-Allocate");
  237. break;
  238. case 2:
  239. LOG_INFO("outer: Write-Through, No Write-Allocate");
  240. break;
  241. case 3:
  242. LOG_INFO("outer: Write-Back, no Write-Allocate");
  243. break;
  244. }
  245. switch (INNER) {
  246. case 0:
  247. LOG_INFO("inner: Non-Cacheable");
  248. break;
  249. case 1:
  250. LOG_INFO("inner: Strongly-ordered");
  251. break;
  252. case 3:
  253. LOG_INFO("inner: Device");
  254. break;
  255. case 5:
  256. LOG_INFO("inner: Write-Back, Write-Allocate");
  257. break;
  258. case 6:
  259. LOG_INFO("inner: Write-Through");
  260. break;
  261. case 7:
  262. LOG_INFO("inner: Write-Back, no Write-Allocate");
  263. default:
  264. LOG_INFO("inner: %" PRIx32 " ???", INNER);
  265. }
  266. }
  267. done:
  268. dpm->finish(dpm);
  269. return retval;
  270. }
  271. static int armv7a_handle_inner_cache_info_command(struct command_context *cmd_ctx,
  272. struct armv7a_cache_common *armv7a_cache)
  273. {
  274. if (armv7a_cache->ctype == -1) {
  275. command_print(cmd_ctx, "cache not yet identified");
  276. return ERROR_OK;
  277. }
  278. command_print(cmd_ctx,
  279. "D-Cache: linelen %" PRIi32 ", associativity %" PRIi32 ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
  280. armv7a_cache->d_u_size.linelen,
  281. armv7a_cache->d_u_size.associativity,
  282. armv7a_cache->d_u_size.nsets,
  283. armv7a_cache->d_u_size.cachesize);
  284. command_print(cmd_ctx,
  285. "I-Cache: linelen %" PRIi32 ", associativity %" PRIi32 ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
  286. armv7a_cache->i_size.linelen,
  287. armv7a_cache->i_size.associativity,
  288. armv7a_cache->i_size.nsets,
  289. armv7a_cache->i_size.cachesize);
  290. return ERROR_OK;
  291. }
  292. static int _armv7a_flush_all_data(struct target *target)
  293. {
  294. struct armv7a_common *armv7a = target_to_armv7a(target);
  295. struct arm_dpm *dpm = armv7a->arm.dpm;
  296. struct armv7a_cachesize *d_u_size =
  297. &(armv7a->armv7a_mmu.armv7a_cache.d_u_size);
  298. int32_t c_way, c_index = d_u_size->index;
  299. int retval;
  300. /* check that cache data is on at target halt */
  301. if (!armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
  302. LOG_INFO("flushed not performed :cache not on at target halt");
  303. return ERROR_OK;
  304. }
  305. retval = dpm->prepare(dpm);
  306. if (retval != ERROR_OK)
  307. goto done;
  308. do {
  309. c_way = d_u_size->way;
  310. do {
  311. uint32_t value = (c_index << d_u_size->index_shift)
  312. | (c_way << d_u_size->way_shift);
  313. /* DCCISW */
  314. /* LOG_INFO ("%d %d %x",c_way,c_index,value); */
  315. retval = dpm->instr_write_data_r0(dpm,
  316. ARMV4_5_MCR(15, 0, 0, 7, 14, 2),
  317. value);
  318. if (retval != ERROR_OK)
  319. goto done;
  320. c_way -= 1;
  321. } while (c_way >= 0);
  322. c_index -= 1;
  323. } while (c_index >= 0);
  324. return retval;
  325. done:
  326. LOG_ERROR("flushed failed");
  327. dpm->finish(dpm);
  328. return retval;
  329. }
  330. static int armv7a_flush_all_data(struct target *target)
  331. {
  332. int retval = ERROR_FAIL;
  333. /* check that armv7a_cache is correctly identify */
  334. struct armv7a_common *armv7a = target_to_armv7a(target);
  335. if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1) {
  336. LOG_ERROR("trying to flush un-identified cache");
  337. return retval;
  338. }
  339. if (target->smp) {
  340. /* look if all the other target have been flushed in order to flush level
  341. * 2 */
  342. struct target_list *head;
  343. struct target *curr;
  344. head = target->head;
  345. while (head != (struct target_list *)NULL) {
  346. curr = head->target;
  347. if (curr->state == TARGET_HALTED) {
  348. LOG_INFO("Wait flushing data l1 on core %" PRId32, curr->coreid);
  349. retval = _armv7a_flush_all_data(curr);
  350. }
  351. head = head->next;
  352. }
  353. } else
  354. retval = _armv7a_flush_all_data(target);
  355. return retval;
  356. }
  357. /* L2 is not specific to armv7a a specific file is needed */
  358. static int armv7a_l2x_flush_all_data(struct target *target)
  359. {
  360. #define L2X0_CLEAN_INV_WAY 0x7FC
  361. int retval = ERROR_FAIL;
  362. struct armv7a_common *armv7a = target_to_armv7a(target);
  363. struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
  364. (armv7a->armv7a_mmu.armv7a_cache.l2_cache);
  365. uint32_t base = l2x_cache->base;
  366. uint32_t l2_way = l2x_cache->way;
  367. uint32_t l2_way_val = (1 << l2_way) - 1;
  368. retval = armv7a_flush_all_data(target);
  369. if (retval != ERROR_OK)
  370. return retval;
  371. retval = target->type->write_phys_memory(target,
  372. (uint32_t)(base+(uint32_t)L2X0_CLEAN_INV_WAY),
  373. (uint32_t)4,
  374. (uint32_t)1,
  375. (uint8_t *)&l2_way_val);
  376. return retval;
  377. }
  378. static int armv7a_handle_l2x_cache_info_command(struct command_context *cmd_ctx,
  379. struct armv7a_cache_common *armv7a_cache)
  380. {
  381. struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
  382. (armv7a_cache->l2_cache);
  383. if (armv7a_cache->ctype == -1) {
  384. command_print(cmd_ctx, "cache not yet identified");
  385. return ERROR_OK;
  386. }
  387. command_print(cmd_ctx,
  388. "L1 D-Cache: linelen %" PRIi32 ", associativity %" PRIi32 ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
  389. armv7a_cache->d_u_size.linelen,
  390. armv7a_cache->d_u_size.associativity,
  391. armv7a_cache->d_u_size.nsets,
  392. armv7a_cache->d_u_size.cachesize);
  393. command_print(cmd_ctx,
  394. "L1 I-Cache: linelen %" PRIi32 ", associativity %" PRIi32 ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
  395. armv7a_cache->i_size.linelen,
  396. armv7a_cache->i_size.associativity,
  397. armv7a_cache->i_size.nsets,
  398. armv7a_cache->i_size.cachesize);
  399. command_print(cmd_ctx, "L2 unified cache Base Address 0x%" PRIx32 ", %" PRId32 " ways",
  400. l2x_cache->base, l2x_cache->way);
  401. return ERROR_OK;
  402. }
  403. static int armv7a_l2x_cache_init(struct target *target, uint32_t base, uint32_t way)
  404. {
  405. struct armv7a_l2x_cache *l2x_cache;
  406. struct target_list *head = target->head;
  407. struct target *curr;
  408. struct armv7a_common *armv7a = target_to_armv7a(target);
  409. l2x_cache = calloc(1, sizeof(struct armv7a_l2x_cache));
  410. l2x_cache->base = base;
  411. l2x_cache->way = way;
  412. /*LOG_INFO("cache l2 initialized base %x way %d",
  413. l2x_cache->base,l2x_cache->way);*/
  414. if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
  415. LOG_INFO("cache l2 already initialized\n");
  416. armv7a->armv7a_mmu.armv7a_cache.l2_cache = l2x_cache;
  417. /* initialize l1 / l2x cache function */
  418. armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache
  419. = armv7a_l2x_flush_all_data;
  420. armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
  421. armv7a_handle_l2x_cache_info_command;
  422. /* initialize all target in this cluster (smp target)
  423. * l2 cache must be configured after smp declaration */
  424. while (head != (struct target_list *)NULL) {
  425. curr = head->target;
  426. if (curr != target) {
  427. armv7a = target_to_armv7a(curr);
  428. if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
  429. LOG_ERROR("smp target : cache l2 already initialized\n");
  430. armv7a->armv7a_mmu.armv7a_cache.l2_cache = l2x_cache;
  431. armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
  432. armv7a_l2x_flush_all_data;
  433. armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
  434. armv7a_handle_l2x_cache_info_command;
  435. }
  436. head = head->next;
  437. }
  438. return JIM_OK;
  439. }
  440. COMMAND_HANDLER(handle_cache_l2x)
  441. {
  442. struct target *target = get_current_target(CMD_CTX);
  443. uint32_t base, way;
  444. switch (CMD_ARGC) {
  445. case 0:
  446. return ERROR_COMMAND_SYNTAX_ERROR;
  447. break;
  448. case 2:
  449. /* command_print(CMD_CTX, "%s %s", CMD_ARGV[0], CMD_ARGV[1]); */
  450. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], base);
  451. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], way);
  452. /* AP address is in bits 31:24 of DP_SELECT */
  453. armv7a_l2x_cache_init(target, base, way);
  454. break;
  455. default:
  456. return ERROR_COMMAND_SYNTAX_ERROR;
  457. }
  458. return ERROR_OK;
  459. }
  460. int armv7a_handle_cache_info_command(struct command_context *cmd_ctx,
  461. struct armv7a_cache_common *armv7a_cache)
  462. {
  463. if (armv7a_cache->ctype == -1) {
  464. command_print(cmd_ctx, "cache not yet identified");
  465. return ERROR_OK;
  466. }
  467. if (armv7a_cache->display_cache_info)
  468. armv7a_cache->display_cache_info(cmd_ctx, armv7a_cache);
  469. return ERROR_OK;
  470. }
  471. /* retrieve core id cluster id */
  472. static int armv7a_read_mpidr(struct target *target)
  473. {
  474. int retval = ERROR_FAIL;
  475. struct armv7a_common *armv7a = target_to_armv7a(target);
  476. struct arm_dpm *dpm = armv7a->arm.dpm;
  477. uint32_t mpidr;
  478. retval = dpm->prepare(dpm);
  479. if (retval != ERROR_OK)
  480. goto done;
  481. /* MRC p15,0,<Rd>,c0,c0,5; read Multiprocessor ID register*/
  482. retval = dpm->instr_read_data_r0(dpm,
  483. ARMV4_5_MRC(15, 0, 0, 0, 0, 5),
  484. &mpidr);
  485. if (retval != ERROR_OK)
  486. goto done;
  487. if (mpidr & 1<<31) {
  488. armv7a->multi_processor_system = (mpidr >> 30) & 1;
  489. armv7a->cluster_id = (mpidr >> 8) & 0xf;
  490. armv7a->cpu_id = mpidr & 0x3;
  491. LOG_INFO("%s cluster %x core %x %s", target_name(target),
  492. armv7a->cluster_id,
  493. armv7a->cpu_id,
  494. armv7a->multi_processor_system == 0 ? "multi core" : "mono core");
  495. } else
  496. LOG_ERROR("mpdir not in multiprocessor format");
  497. done:
  498. dpm->finish(dpm);
  499. return retval;
  500. }
  501. int armv7a_identify_cache(struct target *target)
  502. {
  503. /* read cache descriptor */
  504. int retval = ERROR_FAIL;
  505. struct armv7a_common *armv7a = target_to_armv7a(target);
  506. struct arm_dpm *dpm = armv7a->arm.dpm;
  507. uint32_t cache_selected, clidr;
  508. uint32_t cache_i_reg, cache_d_reg;
  509. struct armv7a_cache_common *cache = &(armv7a->armv7a_mmu.armv7a_cache);
  510. if (!armv7a->is_armv7r)
  511. armv7a_read_ttbcr(target);
  512. retval = dpm->prepare(dpm);
  513. if (retval != ERROR_OK)
  514. goto done;
  515. /* retrieve CLIDR
  516. * mrc p15, 1, r0, c0, c0, 1 @ read clidr */
  517. retval = dpm->instr_read_data_r0(dpm,
  518. ARMV4_5_MRC(15, 1, 0, 0, 0, 1),
  519. &clidr);
  520. if (retval != ERROR_OK)
  521. goto done;
  522. clidr = (clidr & 0x7000000) >> 23;
  523. LOG_INFO("number of cache level %" PRIx32, (uint32_t)(clidr / 2));
  524. if ((clidr / 2) > 1) {
  525. /* FIXME not supported present in cortex A8 and later */
  526. /* in cortex A7, A15 */
  527. LOG_ERROR("cache l2 present :not supported");
  528. }
  529. /* retrieve selected cache
  530. * MRC p15, 2,<Rd>, c0, c0, 0; Read CSSELR */
  531. retval = dpm->instr_read_data_r0(dpm,
  532. ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
  533. &cache_selected);
  534. if (retval != ERROR_OK)
  535. goto done;
  536. retval = armv7a->arm.mrc(target, 15,
  537. 2, 0, /* op1, op2 */
  538. 0, 0, /* CRn, CRm */
  539. &cache_selected);
  540. if (retval != ERROR_OK)
  541. goto done;
  542. /* select instruction cache
  543. * MCR p15, 2,<Rd>, c0, c0, 0; Write CSSELR
  544. * [0] : 1 instruction cache selection , 0 data cache selection */
  545. retval = dpm->instr_write_data_r0(dpm,
  546. ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
  547. 1);
  548. if (retval != ERROR_OK)
  549. goto done;
  550. /* read CCSIDR
  551. * MRC P15,1,<RT>,C0, C0,0 ;on cortex A9 read CCSIDR
  552. * [2:0] line size 001 eight word per line
  553. * [27:13] NumSet 0x7f 16KB, 0xff 32Kbytes, 0x1ff 64Kbytes */
  554. retval = dpm->instr_read_data_r0(dpm,
  555. ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
  556. &cache_i_reg);
  557. if (retval != ERROR_OK)
  558. goto done;
  559. /* select data cache*/
  560. retval = dpm->instr_write_data_r0(dpm,
  561. ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
  562. 0);
  563. if (retval != ERROR_OK)
  564. goto done;
  565. retval = dpm->instr_read_data_r0(dpm,
  566. ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
  567. &cache_d_reg);
  568. if (retval != ERROR_OK)
  569. goto done;
  570. /* restore selected cache */
  571. dpm->instr_write_data_r0(dpm,
  572. ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
  573. cache_selected);
  574. if (retval != ERROR_OK)
  575. goto done;
  576. dpm->finish(dpm);
  577. /* put fake type */
  578. cache->d_u_size.linelen = 16 << (cache_d_reg & 0x7);
  579. cache->d_u_size.cachesize = (((cache_d_reg >> 13) & 0x7fff)+1)/8;
  580. cache->d_u_size.nsets = (cache_d_reg >> 13) & 0x7fff;
  581. cache->d_u_size.associativity = ((cache_d_reg >> 3) & 0x3ff) + 1;
  582. /* compute info for set way operation on cache */
  583. cache->d_u_size.index_shift = (cache_d_reg & 0x7) + 4;
  584. cache->d_u_size.index = (cache_d_reg >> 13) & 0x7fff;
  585. cache->d_u_size.way = ((cache_d_reg >> 3) & 0x3ff);
  586. cache->d_u_size.way_shift = cache->d_u_size.way + 1;
  587. {
  588. int i = 0;
  589. while (((cache->d_u_size.way_shift >> i) & 1) != 1)
  590. i++;
  591. cache->d_u_size.way_shift = 32-i;
  592. }
  593. #if 0
  594. LOG_INFO("data cache index %d << %d, way %d << %d",
  595. cache->d_u_size.index, cache->d_u_size.index_shift,
  596. cache->d_u_size.way,
  597. cache->d_u_size.way_shift);
  598. LOG_INFO("data cache %d bytes %d KBytes asso %d ways",
  599. cache->d_u_size.linelen,
  600. cache->d_u_size.cachesize,
  601. cache->d_u_size.associativity);
  602. #endif
  603. cache->i_size.linelen = 16 << (cache_i_reg & 0x7);
  604. cache->i_size.associativity = ((cache_i_reg >> 3) & 0x3ff) + 1;
  605. cache->i_size.nsets = (cache_i_reg >> 13) & 0x7fff;
  606. cache->i_size.cachesize = (((cache_i_reg >> 13) & 0x7fff)+1)/8;
  607. /* compute info for set way operation on cache */
  608. cache->i_size.index_shift = (cache_i_reg & 0x7) + 4;
  609. cache->i_size.index = (cache_i_reg >> 13) & 0x7fff;
  610. cache->i_size.way = ((cache_i_reg >> 3) & 0x3ff);
  611. cache->i_size.way_shift = cache->i_size.way + 1;
  612. {
  613. int i = 0;
  614. while (((cache->i_size.way_shift >> i) & 1) != 1)
  615. i++;
  616. cache->i_size.way_shift = 32-i;
  617. }
  618. #if 0
  619. LOG_INFO("instruction cache index %d << %d, way %d << %d",
  620. cache->i_size.index, cache->i_size.index_shift,
  621. cache->i_size.way, cache->i_size.way_shift);
  622. LOG_INFO("instruction cache %d bytes %d KBytes asso %d ways",
  623. cache->i_size.linelen,
  624. cache->i_size.cachesize,
  625. cache->i_size.associativity);
  626. #endif
  627. /* if no l2 cache initialize l1 data cache flush function function */
  628. if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache == NULL) {
  629. armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
  630. armv7a_handle_inner_cache_info_command;
  631. armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
  632. armv7a_flush_all_data;
  633. }
  634. armv7a->armv7a_mmu.armv7a_cache.ctype = 0;
  635. done:
  636. dpm->finish(dpm);
  637. armv7a_read_mpidr(target);
  638. return retval;
  639. }
  640. int armv7a_init_arch_info(struct target *target, struct armv7a_common *armv7a)
  641. {
  642. struct arm *arm = &armv7a->arm;
  643. arm->arch_info = armv7a;
  644. target->arch_info = &armv7a->arm;
  645. /* target is useful in all function arm v4 5 compatible */
  646. armv7a->arm.target = target;
  647. armv7a->arm.common_magic = ARM_COMMON_MAGIC;
  648. armv7a->common_magic = ARMV7_COMMON_MAGIC;
  649. armv7a->armv7a_mmu.armv7a_cache.l2_cache = NULL;
  650. armv7a->armv7a_mmu.armv7a_cache.ctype = -1;
  651. armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache = NULL;
  652. armv7a->armv7a_mmu.armv7a_cache.display_cache_info = NULL;
  653. return ERROR_OK;
  654. }
  655. int armv7a_arch_state(struct target *target)
  656. {
  657. static const char *state[] = {
  658. "disabled", "enabled"
  659. };
  660. struct armv7a_common *armv7a = target_to_armv7a(target);
  661. struct arm *arm = &armv7a->arm;
  662. if (armv7a->common_magic != ARMV7_COMMON_MAGIC) {
  663. LOG_ERROR("BUG: called for a non-ARMv7A target");
  664. return ERROR_COMMAND_SYNTAX_ERROR;
  665. }
  666. arm_arch_state(target);
  667. if (armv7a->is_armv7r) {
  668. LOG_USER("D-Cache: %s, I-Cache: %s",
  669. state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
  670. state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
  671. } else {
  672. LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s",
  673. state[armv7a->armv7a_mmu.mmu_enabled],
  674. state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
  675. state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
  676. }
  677. if (arm->core_mode == ARM_MODE_ABT)
  678. armv7a_show_fault_registers(target);
  679. if (target->debug_reason == DBG_REASON_WATCHPOINT)
  680. LOG_USER("Watchpoint triggered at PC %#08x",
  681. (unsigned) armv7a->dpm.wp_pc);
  682. return ERROR_OK;
  683. }
/* "l2x" subcommand: record the outer L2X0 controller's base address and
 * way count (handled by handle_cache_l2x above). */
static const struct command_registration l2_cache_commands[] = {
	{
		.name = "l2x",
		.handler = handle_cache_l2x,
		.mode = COMMAND_EXEC,
		.help = "configure l2x cache "
			"",
		.usage = "[base_addr] [number_of_way]",
	},
	COMMAND_REGISTRATION_DONE
};
  695. const struct command_registration l2x_cache_command_handlers[] = {
  696. {
  697. .name = "cache_config",
  698. .mode = COMMAND_EXEC,
  699. .help = "cache configuation for a target",
  700. .usage = "",
  701. .chain = l2_cache_commands,
  702. },
  703. COMMAND_REGISTRATION_DONE
  704. };
/* Top-level ARMv7-A command table: chains the DAP commands and the
 * L2X cache-configuration commands registered above. */
const struct command_registration armv7a_command_handlers[] = {
	{
		.chain = dap_command_handlers,
	},
	{
		.chain = l2x_cache_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
  713. };