You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

1546 lines
46 KiB

/*
 * Copyright(c) 2013 Intel Corporation.
 *
 * Adrian Burns (adrian.burns@intel.com)
 * Thomas Faust (thomas.faust@intel.com)
 * Ivan De Cesaris (ivan.de.cesaris@intel.com)
 * Julien Carreno (julien.carreno@intel.com)
 * Jeffrey Maxwell (jeffrey.r.maxwell@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Contact Information:
 * Intel Corporation
 */

/*
 * @file
 * This implements generic x86 32 bit memory and breakpoint operations.
 */
  30. #ifdef HAVE_CONFIG_H
  31. #include "config.h"
  32. #endif
  33. #include <helper/log.h>
  34. #include "target.h"
  35. #include "target_type.h"
  36. #include "register.h"
  37. #include "breakpoints.h"
  38. #include "x86_32_common.h"
/* Forward declarations of the file-local helpers implemented below. */
static int set_debug_regs(struct target *t, uint32_t address,
		uint8_t bp_num, uint8_t bp_type, uint8_t bp_length);
static int unset_debug_regs(struct target *t, uint8_t bp_num);
static int read_mem(struct target *t, uint32_t size,
		uint32_t addr, uint8_t *buf);
static int write_mem(struct target *t, uint32_t size,
		uint32_t addr, const uint8_t *buf);
static int calcaddr_physfromlin(struct target *t, target_addr_t addr,
		target_addr_t *physaddr);
static int read_phys_mem(struct target *t, uint32_t phys_address,
		uint32_t size, uint32_t count, uint8_t *buffer);
static int write_phys_mem(struct target *t, uint32_t phys_address,
		uint32_t size, uint32_t count, const uint8_t *buffer);
static int set_breakpoint(struct target *target,
		struct breakpoint *breakpoint);
static int unset_breakpoint(struct target *target,
		struct breakpoint *breakpoint);
static int set_watchpoint(struct target *target,
		struct watchpoint *watchpoint);
static int unset_watchpoint(struct target *target,
		struct watchpoint *watchpoint);
static int read_hw_reg_to_cache(struct target *t, int num);
static int write_hw_reg_from_cache(struct target *t, int num);
  62. int x86_32_get_gdb_reg_list(struct target *t,
  63. struct reg **reg_list[], int *reg_list_size,
  64. enum target_register_class reg_class)
  65. {
  66. struct x86_32_common *x86_32 = target_to_x86_32(t);
  67. int i;
  68. *reg_list_size = x86_32->cache->num_regs;
  69. LOG_DEBUG("num_regs=%d, reg_class=%d", (*reg_list_size), reg_class);
  70. *reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));
  71. if (!*reg_list) {
  72. LOG_ERROR("%s out of memory", __func__);
  73. return ERROR_FAIL;
  74. }
  75. /* this will copy the values from our reg list to gdbs */
  76. for (i = 0; i < (*reg_list_size); i++) {
  77. (*reg_list)[i] = &x86_32->cache->reg_list[i];
  78. LOG_DEBUG("value %s = %08" PRIx32, x86_32->cache->reg_list[i].name,
  79. buf_get_u32(x86_32->cache->reg_list[i].value, 0, 32));
  80. }
  81. return ERROR_OK;
  82. }
  83. int x86_32_common_init_arch_info(struct target *t, struct x86_32_common *x86_32)
  84. {
  85. t->arch_info = x86_32;
  86. x86_32->common_magic = X86_32_COMMON_MAGIC;
  87. x86_32->num_hw_bpoints = MAX_DEBUG_REGS;
  88. x86_32->hw_break_list = calloc(x86_32->num_hw_bpoints,
  89. sizeof(struct x86_32_dbg_reg));
  90. if (!x86_32->hw_break_list) {
  91. LOG_ERROR("%s out of memory", __func__);
  92. return ERROR_FAIL;
  93. }
  94. x86_32->curr_tap = t->tap;
  95. x86_32->fast_data_area = NULL;
  96. x86_32->flush = 1;
  97. x86_32->read_hw_reg_to_cache = read_hw_reg_to_cache;
  98. x86_32->write_hw_reg_from_cache = write_hw_reg_from_cache;
  99. return ERROR_OK;
  100. }
  101. int x86_32_common_mmu(struct target *t, int *enabled)
  102. {
  103. *enabled = true;
  104. return ERROR_OK;
  105. }
/*
 * Translate a linear (virtual) address to a physical one.
 * In real mode (CR0.PG clear) the cached DS base is simply added; in
 * protected mode with paging the page tables are walked via
 * calcaddr_physfromlin().
 */
int x86_32_common_virt2phys(struct target *t, target_addr_t address, target_addr_t *physical)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	/*
	 * We need to ignore 'segmentation' for now, as OpenOCD can't handle
	 * segmented addresses.
	 * In protected mode that is almost OK, as (almost) any known OS is using
	 * flat segmentation. In real mode we use the base of the DS segment,
	 * as we don't know better ...
	 */
	uint32_t cr0 = buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32);
	if (!(cr0 & CR0_PG)) {
		/* target halted in real mode */
		/* TODO: needs validation !!! */
		uint32_t dsb = buf_get_u32(x86_32->cache->reg_list[DSB].value, 0, 32);
		*physical = dsb + address;
	} else {
		/* target halted in protected mode */
		if (calcaddr_physfromlin(t, address, physical) != ERROR_OK) {
			LOG_ERROR("%s failed to calculate physical address from " TARGET_ADDR_FMT,
					__func__, address);
			return ERROR_FAIL;
		}
	}
	return ERROR_OK;
}
  132. int x86_32_common_read_phys_mem(struct target *t, target_addr_t phys_address,
  133. uint32_t size, uint32_t count, uint8_t *buffer)
  134. {
  135. struct x86_32_common *x86_32 = target_to_x86_32(t);
  136. int error;
  137. error = read_phys_mem(t, phys_address, size, count, buffer);
  138. if (error != ERROR_OK)
  139. return error;
  140. /* After reading memory from target, we must replace software breakpoints
  141. * with the original instructions again.
  142. */
  143. struct swbp_mem_patch *iter = x86_32->swbbp_mem_patch_list;
  144. while (iter) {
  145. if (iter->physaddr >= phys_address && iter->physaddr < phys_address+(size*count)) {
  146. uint32_t offset = iter->physaddr - phys_address;
  147. buffer[offset] = iter->orig_byte;
  148. }
  149. iter = iter->next;
  150. }
  151. return ERROR_OK;
  152. }
  153. static int read_phys_mem(struct target *t, uint32_t phys_address,
  154. uint32_t size, uint32_t count, uint8_t *buffer)
  155. {
  156. int retval = ERROR_OK;
  157. bool pg_disabled = false;
  158. LOG_DEBUG("addr=0x%08" PRIx32 ", size=%" PRIu32 ", count=0x%" PRIx32 ", buf=%p",
  159. phys_address, size, count, buffer);
  160. struct x86_32_common *x86_32 = target_to_x86_32(t);
  161. if (check_not_halted(t))
  162. return ERROR_TARGET_NOT_HALTED;
  163. if (!count || !buffer || !phys_address) {
  164. LOG_ERROR("%s invalid params count=0x%" PRIx32 ", buf=%p, addr=0x%08" PRIx32,
  165. __func__, count, buffer, phys_address);
  166. return ERROR_COMMAND_ARGUMENT_INVALID;
  167. }
  168. /* to access physical memory, switch off the CR0.PG bit */
  169. if (x86_32->is_paging_enabled(t)) {
  170. retval = x86_32->disable_paging(t);
  171. if (retval != ERROR_OK) {
  172. LOG_ERROR("%s could not disable paging", __func__);
  173. return retval;
  174. }
  175. pg_disabled = true;
  176. }
  177. for (uint32_t i = 0; i < count; i++) {
  178. switch (size) {
  179. case BYTE:
  180. retval = read_mem(t, size, phys_address + i, buffer + i);
  181. break;
  182. case WORD:
  183. retval = read_mem(t, size, phys_address + i * 2, buffer + i * 2);
  184. break;
  185. case DWORD:
  186. retval = read_mem(t, size, phys_address + i * 4, buffer + i * 4);
  187. break;
  188. default:
  189. LOG_ERROR("%s invalid read size", __func__);
  190. break;
  191. }
  192. if (retval != ERROR_OK)
  193. break;
  194. }
  195. /* restore CR0.PG bit if needed (regardless of retval) */
  196. if (pg_disabled) {
  197. int retval2 = x86_32->enable_paging(t);
  198. if (retval2 != ERROR_OK) {
  199. LOG_ERROR("%s could not enable paging", __func__);
  200. return retval2;
  201. }
  202. }
  203. /* TODO: After reading memory from target, we must replace
  204. * software breakpoints with the original instructions again.
  205. * Solve this with the breakpoint fix
  206. */
  207. return retval;
  208. }
  209. int x86_32_common_write_phys_mem(struct target *t, target_addr_t phys_address,
  210. uint32_t size, uint32_t count, const uint8_t *buffer)
  211. {
  212. struct x86_32_common *x86_32 = target_to_x86_32(t);
  213. int error = ERROR_OK;
  214. uint8_t *newbuffer = NULL;
  215. check_not_halted(t);
  216. if (!count || !buffer || !phys_address) {
  217. LOG_ERROR("%s invalid params count=0x%" PRIx32 ", buf=%p, addr=" TARGET_ADDR_FMT,
  218. __func__, count, buffer, phys_address);
  219. return ERROR_COMMAND_ARGUMENT_INVALID;
  220. }
  221. /* Before writing memory to target, we must update software breakpoints
  222. * with the new instructions and patch the memory buffer with the
  223. * breakpoint instruction.
  224. */
  225. newbuffer = malloc(size*count);
  226. if (!newbuffer) {
  227. LOG_ERROR("%s out of memory", __func__);
  228. return ERROR_FAIL;
  229. }
  230. memcpy(newbuffer, buffer, size*count);
  231. struct swbp_mem_patch *iter = x86_32->swbbp_mem_patch_list;
  232. while (iter) {
  233. if (iter->physaddr >= phys_address && iter->physaddr < phys_address+(size*count)) {
  234. uint32_t offset = iter->physaddr - phys_address;
  235. newbuffer[offset] = SW_BP_OPCODE;
  236. /* update the breakpoint */
  237. struct breakpoint *pbiter = t->breakpoints;
  238. while (pbiter && pbiter->unique_id != iter->swbp_unique_id)
  239. pbiter = pbiter->next;
  240. if (pbiter)
  241. pbiter->orig_instr[0] = buffer[offset];
  242. }
  243. iter = iter->next;
  244. }
  245. error = write_phys_mem(t, phys_address, size, count, newbuffer);
  246. free(newbuffer);
  247. return error;
  248. }
  249. static int write_phys_mem(struct target *t, uint32_t phys_address,
  250. uint32_t size, uint32_t count, const uint8_t *buffer)
  251. {
  252. int retval = ERROR_OK;
  253. bool pg_disabled = false;
  254. struct x86_32_common *x86_32 = target_to_x86_32(t);
  255. LOG_DEBUG("addr=0x%08" PRIx32 ", size=%" PRIu32 ", count=0x%" PRIx32 ", buf=%p",
  256. phys_address, size, count, buffer);
  257. check_not_halted(t);
  258. if (!count || !buffer || !phys_address) {
  259. LOG_ERROR("%s invalid params count=0x%" PRIx32 ", buf=%p, addr=0x%08" PRIx32,
  260. __func__, count, buffer, phys_address);
  261. return ERROR_COMMAND_ARGUMENT_INVALID;
  262. }
  263. /* TODO: Before writing memory to target, we must update
  264. * software breakpoints with the new instructions and
  265. * patch the memory buffer with the breakpoint instruction.
  266. * Solve this with the breakpoint fix
  267. */
  268. /* to access physical memory, switch off the CR0.PG bit */
  269. if (x86_32->is_paging_enabled(t)) {
  270. retval = x86_32->disable_paging(t);
  271. if (retval != ERROR_OK) {
  272. LOG_ERROR("%s could not disable paging", __func__);
  273. return retval;
  274. }
  275. pg_disabled = true;
  276. }
  277. for (uint32_t i = 0; i < count; i++) {
  278. switch (size) {
  279. case BYTE:
  280. retval = write_mem(t, size, phys_address + i, buffer + i);
  281. break;
  282. case WORD:
  283. retval = write_mem(t, size, phys_address + i * 2, buffer + i * 2);
  284. break;
  285. case DWORD:
  286. retval = write_mem(t, size, phys_address + i * 4, buffer + i * 4);
  287. break;
  288. default:
  289. LOG_DEBUG("invalid read size");
  290. break;
  291. }
  292. }
  293. /* restore CR0.PG bit if needed (regardless of retval) */
  294. if (pg_disabled) {
  295. retval = x86_32->enable_paging(t);
  296. if (retval != ERROR_OK) {
  297. LOG_ERROR("%s could not enable paging", __func__);
  298. return retval;
  299. }
  300. }
  301. return retval;
  302. }
  303. static int read_mem(struct target *t, uint32_t size,
  304. uint32_t addr, uint8_t *buf)
  305. {
  306. struct x86_32_common *x86_32 = target_to_x86_32(t);
  307. /* if CS.D bit=1 then its a 32 bit code segment, else 16 */
  308. bool use32 = (buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32)) & CSAR_D;
  309. int retval = x86_32->write_hw_reg(t, EAX, addr, 0);
  310. if (retval != ERROR_OK) {
  311. LOG_ERROR("%s error write EAX", __func__);
  312. return retval;
  313. }
  314. switch (size) {
  315. case BYTE:
  316. if (use32)
  317. retval = x86_32->submit_instruction(t, MEMRDB32);
  318. else
  319. retval = x86_32->submit_instruction(t, MEMRDB16);
  320. break;
  321. case WORD:
  322. if (use32)
  323. retval = x86_32->submit_instruction(t, MEMRDH32);
  324. else
  325. retval = x86_32->submit_instruction(t, MEMRDH16);
  326. break;
  327. case DWORD:
  328. if (use32)
  329. retval = x86_32->submit_instruction(t, MEMRDW32);
  330. else
  331. retval = x86_32->submit_instruction(t, MEMRDW16);
  332. break;
  333. default:
  334. LOG_ERROR("%s invalid read mem size", __func__);
  335. break;
  336. }
  337. if (retval != ERROR_OK)
  338. return retval;
  339. /* read_hw_reg() will write to 4 bytes (uint32_t)
  340. * Watch out, the buffer passed into read_mem() might be 1 or 2 bytes.
  341. */
  342. uint32_t regval;
  343. retval = x86_32->read_hw_reg(t, EDX, &regval, 0);
  344. if (retval != ERROR_OK) {
  345. LOG_ERROR("%s error read EDX", __func__);
  346. return retval;
  347. }
  348. for (uint8_t i = 0; i < size; i++)
  349. buf[i] = (regval >> (i*8)) & 0x000000FF;
  350. retval = x86_32->transaction_status(t);
  351. if (retval != ERROR_OK) {
  352. LOG_ERROR("%s error on mem read", __func__);
  353. return retval;
  354. }
  355. return retval;
  356. }
  357. static int write_mem(struct target *t, uint32_t size,
  358. uint32_t addr, const uint8_t *buf)
  359. {
  360. uint32_t i = 0;
  361. uint32_t buf4bytes = 0;
  362. int retval = ERROR_OK;
  363. struct x86_32_common *x86_32 = target_to_x86_32(t);
  364. for (i = 0; i < size; ++i) {
  365. buf4bytes = buf4bytes << 8; /* first time we only shift 0s */
  366. buf4bytes += buf[(size-1)-i]; /* it was hard to write, should be hard to read! */
  367. }
  368. /* if CS.D bit=1 then its a 32 bit code segment, else 16 */
  369. bool use32 = (buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32)) & CSAR_D;
  370. retval = x86_32->write_hw_reg(t, EAX, addr, 0);
  371. if (retval != ERROR_OK) {
  372. LOG_ERROR("%s error write EAX", __func__);
  373. return retval;
  374. }
  375. /* write_hw_reg() will write to 4 bytes (uint32_t)
  376. * Watch out, the buffer passed into write_mem() might be 1 or 2 bytes.
  377. */
  378. retval = x86_32->write_hw_reg(t, EDX, buf4bytes, 0);
  379. if (retval != ERROR_OK) {
  380. LOG_ERROR("%s error write EDX", __func__);
  381. return retval;
  382. }
  383. switch (size) {
  384. case BYTE:
  385. if (use32)
  386. retval = x86_32->submit_instruction(t, MEMWRB32);
  387. else
  388. retval = x86_32->submit_instruction(t, MEMWRB16);
  389. break;
  390. case WORD:
  391. if (use32)
  392. retval = x86_32->submit_instruction(t, MEMWRH32);
  393. else
  394. retval = x86_32->submit_instruction(t, MEMWRH16);
  395. break;
  396. case DWORD:
  397. if (use32)
  398. retval = x86_32->submit_instruction(t, MEMWRW32);
  399. else
  400. retval = x86_32->submit_instruction(t, MEMWRW16);
  401. break;
  402. default:
  403. LOG_ERROR("%s invalid write mem size", __func__);
  404. return ERROR_FAIL;
  405. }
  406. if (retval != ERROR_OK)
  407. return retval;
  408. retval = x86_32->transaction_status(t);
  409. if (retval != ERROR_OK) {
  410. LOG_ERROR("%s error on mem write", __func__);
  411. return retval;
  412. }
  413. return retval;
  414. }
/*
 * Walk the page tables to translate a linear address into a physical one.
 * Handles both classic 32-bit paging (4 KiB / 4 MiB pages) and PAE paging
 * (4 KiB / 2 MiB pages), selected by the cached CR4.PAE bit.  Table entries
 * are fetched through x86_32_common_read_phys_mem(), i.e. with CR0.PG
 * temporarily off.  Returns ERROR_FAIL when an entry is not present.
 * NOTE(review): with PAE, entries above 4 GiB are truncated to 32 bits by
 * the uint32_t casts below - confirm targets never map above 4 GiB.
 */
int calcaddr_physfromlin(struct target *t, target_addr_t addr, target_addr_t *physaddr)
{
	uint8_t entry_buffer[8];
	if (!physaddr || !t)
		return ERROR_FAIL;
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	/* The 'user-visible' CR0.PG should be set - otherwise the function shouldn't be called
	 * (Don't check the CR0.PG on the target, this might be temporally disabled at this point)
	 */
	uint32_t cr0 = buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32);
	if (!(cr0 & CR0_PG)) {
		/* you are wrong in this function, never mind */
		*physaddr = addr;
		return ERROR_OK;
	}
	uint32_t cr4 = buf_get_u32(x86_32->cache->reg_list[CR4].value, 0, 32);
	bool is_pae = cr4 & 0x00000020; /* PAE - Physical Address Extension */
	uint32_t cr3 = buf_get_u32(x86_32->cache->reg_list[CR3].value, 0, 32);
	if (is_pae) {
		/* PAE: 3-level walk - PDPT -> PD -> (2 MiB page | PT -> 4 KiB page),
		 * all entries are 8 bytes wide (hence the 4-byte x 2-count reads)
		 */
		uint32_t pdpt_base = cr3 & 0xFFFFF000; /* lower 12 bits of CR3 must always be 0 */
		uint32_t pdpt_index = (addr & 0xC0000000) >> 30; /* A[31:30] index to PDPT */
		uint32_t pdpt_addr = pdpt_base + (8 * pdpt_index);
		if (x86_32_common_read_phys_mem(t, pdpt_addr, 4, 2, entry_buffer) != ERROR_OK) {
			LOG_ERROR("%s couldn't read page directory pointer table entry at 0x%08" PRIx32,
					__func__, pdpt_addr);
			return ERROR_FAIL;
		}
		uint64_t pdpt_entry = target_buffer_get_u64(t, entry_buffer);
		if (!(pdpt_entry & 0x0000000000000001)) { /* P (present) bit */
			LOG_ERROR("%s page directory pointer table entry at 0x%08" PRIx32 " is not present",
					__func__, pdpt_addr);
			return ERROR_FAIL;
		}
		uint32_t pd_base = pdpt_entry & 0xFFFFF000; /* A[31:12] is PageTable/Page Base Address */
		uint32_t pd_index = (addr & 0x3FE00000) >> 21; /* A[29:21] index to PD entry with PAE */
		uint32_t pd_addr = pd_base + (8 * pd_index);
		if (x86_32_common_read_phys_mem(t, pd_addr, 4, 2, entry_buffer) != ERROR_OK) {
			LOG_ERROR("%s couldn't read page directory entry at 0x%08" PRIx32,
					__func__, pd_addr);
			return ERROR_FAIL;
		}
		uint64_t pd_entry = target_buffer_get_u64(t, entry_buffer);
		if (!(pd_entry & 0x0000000000000001)) { /* P (present) bit */
			LOG_ERROR("%s page directory entry at 0x%08" PRIx32 " is not present",
					__func__, pd_addr);
			return ERROR_FAIL;
		}
		/* PS bit in PD entry is indicating 4KB or 2MB page size */
		if (pd_entry & 0x0000000000000080) {
			/* 2 MiB page: PD entry maps the page directly */
			uint32_t page_base = (uint32_t)(pd_entry & 0x00000000FFE00000); /* [31:21] */
			uint32_t offset = addr & 0x001FFFFF; /* [20:0] */
			*physaddr = page_base + offset;
			return ERROR_OK;
		} else {
			/* 4 KiB page: one more level through the page table */
			uint32_t pt_base = (uint32_t)(pd_entry & 0x00000000FFFFF000); /*[31:12]*/
			uint32_t pt_index = (addr & 0x001FF000) >> 12; /*[20:12]*/
			uint32_t pt_addr = pt_base + (8 * pt_index);
			if (x86_32_common_read_phys_mem(t, pt_addr, 4, 2, entry_buffer) != ERROR_OK) {
				LOG_ERROR("%s couldn't read page table entry at 0x%08" PRIx32, __func__, pt_addr);
				return ERROR_FAIL;
			}
			uint64_t pt_entry = target_buffer_get_u64(t, entry_buffer);
			if (!(pt_entry & 0x0000000000000001)) { /* P (present) bit */
				LOG_ERROR("%s page table entry at 0x%08" PRIx32 " is not present", __func__, pt_addr);
				return ERROR_FAIL;
			}
			uint32_t page_base = (uint32_t)(pt_entry & 0x00000000FFFFF000); /*[31:12]*/
			uint32_t offset = addr & 0x00000FFF; /*[11:0]*/
			*physaddr = page_base + offset;
			return ERROR_OK;
		}
	} else {
		/* classic 32-bit paging: 2-level walk with 4-byte entries */
		uint32_t pd_base = cr3 & 0xFFFFF000; /* lower 12 bits of CR3 must always be 0 */
		uint32_t pd_index = (addr & 0xFFC00000) >> 22; /* A[31:22] index to PD entry */
		uint32_t pd_addr = pd_base + (4 * pd_index);
		if (x86_32_common_read_phys_mem(t, pd_addr, 4, 1, entry_buffer) != ERROR_OK) {
			LOG_ERROR("%s couldn't read page directory entry at 0x%08" PRIx32, __func__, pd_addr);
			return ERROR_FAIL;
		}
		uint32_t pd_entry = target_buffer_get_u32(t, entry_buffer);
		if (!(pd_entry & 0x00000001)) { /* P (present) bit */
			LOG_ERROR("%s page directory entry at 0x%08" PRIx32 " is not present", __func__, pd_addr);
			return ERROR_FAIL;
		}
		/* Bit 7 in page directory entry is page size. */
		if (pd_entry & 0x00000080) {
			/* 4MB pages */
			uint32_t page_base = pd_entry & 0xFFC00000;
			*physaddr = page_base + (addr & 0x003FFFFF);
		} else {
			/* 4KB pages */
			uint32_t pt_base = pd_entry & 0xFFFFF000; /* A[31:12] is PageTable/Page Base Address */
			uint32_t pt_index = (addr & 0x003FF000) >> 12; /* A[21:12] index to page table entry */
			uint32_t pt_addr = pt_base + (4 * pt_index);
			if (x86_32_common_read_phys_mem(t, pt_addr, 4, 1, entry_buffer) != ERROR_OK) {
				LOG_ERROR("%s couldn't read page table entry at 0x%08" PRIx32, __func__, pt_addr);
				return ERROR_FAIL;
			}
			uint32_t pt_entry = target_buffer_get_u32(t, entry_buffer);
			if (!(pt_entry & 0x00000001)) { /* P (present) bit */
				LOG_ERROR("%s page table entry at 0x%08" PRIx32 " is not present", __func__, pt_addr);
				return ERROR_FAIL;
			}
			uint32_t page_base = pt_entry & 0xFFFFF000; /* A[31:12] is PageTable/Page Base Address */
			*physaddr = page_base + (addr & 0x00000FFF); /* A[11:0] offset to 4KB page in linear address */
		}
	}
	return ERROR_OK;
}
  525. int x86_32_common_read_memory(struct target *t, target_addr_t addr,
  526. uint32_t size, uint32_t count, uint8_t *buf)
  527. {
  528. int retval = ERROR_OK;
  529. struct x86_32_common *x86_32 = target_to_x86_32(t);
  530. LOG_DEBUG("addr=" TARGET_ADDR_FMT ", size=%" PRIu32 ", count=0x%" PRIx32 ", buf=%p",
  531. addr, size, count, buf);
  532. check_not_halted(t);
  533. if (!count || !buf || !addr) {
  534. LOG_ERROR("%s invalid params count=0x%" PRIx32 ", buf=%p, addr=" TARGET_ADDR_FMT,
  535. __func__, count, buf, addr);
  536. return ERROR_COMMAND_ARGUMENT_INVALID;
  537. }
  538. if (x86_32->is_paging_enabled(t)) {
  539. /* all memory accesses from debugger must be physical (CR0.PG == 0)
  540. * conversion to physical address space needed
  541. */
  542. retval = x86_32->disable_paging(t);
  543. if (retval != ERROR_OK) {
  544. LOG_ERROR("%s could not disable paging", __func__);
  545. return retval;
  546. }
  547. target_addr_t physaddr = 0;
  548. if (calcaddr_physfromlin(t, addr, &physaddr) != ERROR_OK) {
  549. LOG_ERROR("%s failed to calculate physical address from " TARGET_ADDR_FMT,
  550. __func__, addr);
  551. retval = ERROR_FAIL;
  552. }
  553. /* TODO: !!! Watch out for page boundaries
  554. * for every 4kB, the physical address has to be re-calculated
  555. * This should be fixed together with bulk memory reads
  556. */
  557. if (retval == ERROR_OK
  558. && x86_32_common_read_phys_mem(t, physaddr, size, count, buf) != ERROR_OK) {
  559. LOG_ERROR("%s failed to read memory from physical address " TARGET_ADDR_FMT,
  560. __func__, physaddr);
  561. }
  562. /* restore PG bit if it was cleared prior (regardless of retval) */
  563. retval = x86_32->enable_paging(t);
  564. if (retval != ERROR_OK) {
  565. LOG_ERROR("%s could not enable paging", __func__);
  566. return retval;
  567. }
  568. } else {
  569. /* paging is off - linear address is physical address */
  570. if (x86_32_common_read_phys_mem(t, addr, size, count, buf) != ERROR_OK) {
  571. LOG_ERROR("%s failed to read memory from address " TARGET_ADDR_FMT,
  572. __func__, addr);
  573. retval = ERROR_FAIL;
  574. }
  575. }
  576. return retval;
  577. }
  578. int x86_32_common_write_memory(struct target *t, target_addr_t addr,
  579. uint32_t size, uint32_t count, const uint8_t *buf)
  580. {
  581. int retval = ERROR_OK;
  582. struct x86_32_common *x86_32 = target_to_x86_32(t);
  583. LOG_DEBUG("addr=" TARGET_ADDR_FMT ", size=%" PRIu32 ", count=0x%" PRIx32 ", buf=%p",
  584. addr, size, count, buf);
  585. check_not_halted(t);
  586. if (!count || !buf || !addr) {
  587. LOG_ERROR("%s invalid params count=0x%" PRIx32 ", buf=%p, addr=" TARGET_ADDR_FMT,
  588. __func__, count, buf, addr);
  589. return ERROR_COMMAND_ARGUMENT_INVALID;
  590. }
  591. if (x86_32->is_paging_enabled(t)) {
  592. /* all memory accesses from debugger must be physical (CR0.PG == 0)
  593. * conversion to physical address space needed
  594. */
  595. retval = x86_32->disable_paging(t);
  596. if (retval != ERROR_OK) {
  597. LOG_ERROR("%s could not disable paging", __func__);
  598. return retval;
  599. }
  600. target_addr_t physaddr = 0;
  601. if (calcaddr_physfromlin(t, addr, &physaddr) != ERROR_OK) {
  602. LOG_ERROR("%s failed to calculate physical address from " TARGET_ADDR_FMT,
  603. __func__, addr);
  604. retval = ERROR_FAIL;
  605. }
  606. /* TODO: !!! Watch out for page boundaries
  607. * for every 4kB, the physical address has to be re-calculated
  608. * This should be fixed together with bulk memory reads
  609. */
  610. if (retval == ERROR_OK
  611. && x86_32_common_write_phys_mem(t, physaddr, size, count, buf) != ERROR_OK) {
  612. LOG_ERROR("%s failed to write memory to physical address " TARGET_ADDR_FMT,
  613. __func__, physaddr);
  614. }
  615. /* restore PG bit if it was cleared prior (regardless of retval) */
  616. retval = x86_32->enable_paging(t);
  617. if (retval != ERROR_OK) {
  618. LOG_ERROR("%s could not enable paging", __func__);
  619. return retval;
  620. }
  621. } else {
  622. /* paging is off - linear address is physical address */
  623. if (x86_32_common_write_phys_mem(t, addr, size, count, buf) != ERROR_OK) {
  624. LOG_ERROR("%s failed to write memory to address " TARGET_ADDR_FMT,
  625. __func__, addr);
  626. retval = ERROR_FAIL;
  627. }
  628. }
  629. return retval;
  630. }
  631. int x86_32_common_read_io(struct target *t, uint32_t addr,
  632. uint32_t size, uint8_t *buf)
  633. {
  634. struct x86_32_common *x86_32 = target_to_x86_32(t);
  635. /* if CS.D bit=1 then its a 32 bit code segment, else 16 */
  636. bool use32 = (buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32)) & CSAR_D;
  637. int retval = ERROR_FAIL;
  638. bool pg_disabled = false;
  639. LOG_DEBUG("addr=0x%08" PRIx32 ", size=%" PRIu32 ", buf=%p", addr, size, buf);
  640. check_not_halted(t);
  641. if (!buf || !addr) {
  642. LOG_ERROR("%s invalid params buf=%p, addr=%08" PRIx32, __func__, buf, addr);
  643. return retval;
  644. }
  645. retval = x86_32->write_hw_reg(t, EDX, addr, 0);
  646. if (retval != ERROR_OK) {
  647. LOG_ERROR("%s error EDX write", __func__);
  648. return retval;
  649. }
  650. /* to access physical memory, switch off the CR0.PG bit */
  651. if (x86_32->is_paging_enabled(t)) {
  652. retval = x86_32->disable_paging(t);
  653. if (retval != ERROR_OK) {
  654. LOG_ERROR("%s could not disable paging", __func__);
  655. return retval;
  656. }
  657. pg_disabled = true;
  658. }
  659. switch (size) {
  660. case BYTE:
  661. if (use32)
  662. retval = x86_32->submit_instruction(t, IORDB32);
  663. else
  664. retval = x86_32->submit_instruction(t, IORDB16);
  665. break;
  666. case WORD:
  667. if (use32)
  668. retval = x86_32->submit_instruction(t, IORDH32);
  669. else
  670. retval = x86_32->submit_instruction(t, IORDH16);
  671. break;
  672. case DWORD:
  673. if (use32)
  674. retval = x86_32->submit_instruction(t, IORDW32);
  675. else
  676. retval = x86_32->submit_instruction(t, IORDW16);
  677. break;
  678. default:
  679. LOG_ERROR("%s invalid read io size", __func__);
  680. return ERROR_FAIL;
  681. }
  682. /* restore CR0.PG bit if needed */
  683. if (pg_disabled) {
  684. int retval2 = x86_32->enable_paging(t);
  685. if (retval2 != ERROR_OK) {
  686. LOG_ERROR("%s could not enable paging", __func__);
  687. return retval2;
  688. }
  689. }
  690. if (retval != ERROR_OK)
  691. return retval;
  692. uint32_t regval = 0;
  693. retval = x86_32->read_hw_reg(t, EAX, &regval, 0);
  694. if (retval != ERROR_OK) {
  695. LOG_ERROR("%s error on read EAX", __func__);
  696. return retval;
  697. }
  698. for (uint8_t i = 0; i < size; i++)
  699. buf[i] = (regval >> (i*8)) & 0x000000FF;
  700. retval = x86_32->transaction_status(t);
  701. if (retval != ERROR_OK) {
  702. LOG_ERROR("%s error on io read", __func__);
  703. return retval;
  704. }
  705. return retval;
  706. }
  707. int x86_32_common_write_io(struct target *t, uint32_t addr,
  708. uint32_t size, const uint8_t *buf)
  709. {
  710. struct x86_32_common *x86_32 = target_to_x86_32(t);
  711. /* if CS.D bit=1 then its a 32 bit code segment, else 16 */
  712. bool use32 = (buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32)) & CSAR_D;
  713. LOG_DEBUG("addr=0x%08" PRIx32 ", size=%" PRIu32 ", buf=%p", addr, size, buf);
  714. check_not_halted(t);
  715. int retval = ERROR_FAIL;
  716. bool pg_disabled = false;
  717. if (!buf || !addr) {
  718. LOG_ERROR("%s invalid params buf=%p, addr=0x%08" PRIx32, __func__, buf, addr);
  719. return retval;
  720. }
  721. /* no do the write */
  722. retval = x86_32->write_hw_reg(t, EDX, addr, 0);
  723. if (retval != ERROR_OK) {
  724. LOG_ERROR("%s error on EDX write", __func__);
  725. return retval;
  726. }
  727. uint32_t regval = 0;
  728. for (uint8_t i = 0; i < size; i++)
  729. regval += (buf[i] << (i*8));
  730. retval = x86_32->write_hw_reg(t, EAX, regval, 0);
  731. if (retval != ERROR_OK) {
  732. LOG_ERROR("%s error on EAX write", __func__);
  733. return retval;
  734. }
  735. /* to access physical memory, switch off the CR0.PG bit */
  736. if (x86_32->is_paging_enabled(t)) {
  737. retval = x86_32->disable_paging(t);
  738. if (retval != ERROR_OK) {
  739. LOG_ERROR("%s could not disable paging", __func__);
  740. return retval;
  741. }
  742. pg_disabled = true;
  743. }
  744. switch (size) {
  745. case BYTE:
  746. if (use32)
  747. retval = x86_32->submit_instruction(t, IOWRB32);
  748. else
  749. retval = x86_32->submit_instruction(t, IOWRB16);
  750. break;
  751. case WORD:
  752. if (use32)
  753. retval = x86_32->submit_instruction(t, IOWRH32);
  754. else
  755. retval = x86_32->submit_instruction(t, IOWRH16);
  756. break;
  757. case DWORD:
  758. if (use32)
  759. retval = x86_32->submit_instruction(t, IOWRW32);
  760. else
  761. retval = x86_32->submit_instruction(t, IOWRW16);
  762. break;
  763. default:
  764. LOG_ERROR("%s invalid write io size", __func__);
  765. return ERROR_FAIL;
  766. }
  767. /* restore CR0.PG bit if needed */
  768. if (pg_disabled) {
  769. int retval2 = x86_32->enable_paging(t);
  770. if (retval2 != ERROR_OK) {
  771. LOG_ERROR("%s could not enable paging", __func__);
  772. return retval2;
  773. }
  774. }
  775. if (retval != ERROR_OK)
  776. return retval;
  777. retval = x86_32->transaction_status(t);
  778. if (retval != ERROR_OK) {
  779. LOG_ERROR("%s error on io write", __func__);
  780. return retval;
  781. }
  782. return retval;
  783. }
  784. int x86_32_common_add_watchpoint(struct target *t, struct watchpoint *wp)
  785. {
  786. check_not_halted(t);
  787. /* set_watchpoint() will return ERROR_TARGET_RESOURCE_NOT_AVAILABLE if all
  788. * hardware registers are gone
  789. */
  790. return set_watchpoint(t, wp);
  791. }
/* Remove a watchpoint; quietly succeeds if it was never installed. */
int x86_32_common_remove_watchpoint(struct target *t, struct watchpoint *wp)
{
	if (check_not_halted(t))
		return ERROR_TARGET_NOT_HALTED;
	/* only touch the debug registers if the watchpoint is really set */
	if (wp->set)
		unset_watchpoint(t, wp);
	return ERROR_OK;
}
/* Install a software or hardware breakpoint (chosen by bp->type). */
int x86_32_common_add_breakpoint(struct target *t, struct breakpoint *bp)
{
	LOG_DEBUG("type=%d, addr=" TARGET_ADDR_FMT, bp->type, bp->address);
	if (check_not_halted(t))
		return ERROR_TARGET_NOT_HALTED;
	/* set_breakpoint() will return ERROR_TARGET_RESOURCE_NOT_AVAILABLE if all
	 * hardware registers are gone (for hardware breakpoints)
	 */
	return set_breakpoint(t, bp);
}
/* Remove a breakpoint; quietly succeeds if it was never installed. */
int x86_32_common_remove_breakpoint(struct target *t, struct breakpoint *bp)
{
	LOG_DEBUG("type=%d, addr=" TARGET_ADDR_FMT, bp->type, bp->address);
	if (check_not_halted(t))
		return ERROR_TARGET_NOT_HALTED;
	/* only undo the patch/debug register if the breakpoint is really set */
	if (bp->set)
		unset_breakpoint(t, bp);
	return ERROR_OK;
}
/* Program one x86 debug register slot in the register cache: the address goes
 * into DR0+bp_num and the matching enable/type/length bits are set in DR7.
 * Values are only written to the cache here; they reach the hardware when the
 * cache is flushed on resume (see the comment below).
 * bp_type encodes the DR7 R/W field: 0 = instruction execution, 1 = data
 * write, 3 = data read or write; 2 (I/O break) is rejected as unsupported.
 * bp_length must be 1, 2 or 4 bytes.
 * Returns ERROR_OK, ERROR_FAIL on bad arguments, or
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE if the slot is already enabled.
 */
static int set_debug_regs(struct target *t, uint32_t address,
		uint8_t bp_num, uint8_t bp_type, uint8_t bp_length)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	LOG_DEBUG("addr=0x%08" PRIx32 ", bp_num=%" PRIu8 ", bp_type=%" PRIu8 ", pb_length=%" PRIu8,
			address, bp_num, bp_type, bp_length);
	/* DR7 - set global enable */
	uint32_t dr7 = buf_get_u32(x86_32->cache->reg_list[DR7].value, 0, 32);
	if (bp_length != 1 && bp_length != 2 && bp_length != 4)
		return ERROR_FAIL;
	/* the slot must currently be free; enabling twice indicates a
	 * bookkeeping bug in the caller */
	if (DR7_BP_FREE(dr7, bp_num))
		DR7_GLOBAL_ENABLE(dr7, bp_num);
	else {
		LOG_ERROR("%s dr7 error, already enabled, val=%08" PRIx32, __func__, dr7);
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}
	switch (bp_type) {
	case 0:
		/* 00 - only on instruction execution */
		DR7_SET_EXE(dr7, bp_num);
		DR7_SET_LENGTH(dr7, bp_num, bp_length);
		break;
	case 1:
		/* 01 - only on data writes */
		DR7_SET_WRITE(dr7, bp_num);
		DR7_SET_LENGTH(dr7, bp_num, bp_length);
		break;
	case 2:
		/* 10 UNSUPPORTED - an I/O read and I/O write */
		LOG_ERROR("%s unsupported feature bp_type=%d", __func__, bp_type);
		return ERROR_FAIL;
		break;
	case 3:
		/* on data read or data write */
		DR7_SET_ACCESS(dr7, bp_num);
		DR7_SET_LENGTH(dr7, bp_num, bp_length);
		break;
	default:
		LOG_ERROR("%s invalid request [only 0-3] bp_type=%d", __func__, bp_type);
		return ERROR_FAIL;
	}
	/* update regs in the reg cache ready to be written to hardware
	 * when we exit PM
	 */
	buf_set_u32(x86_32->cache->reg_list[bp_num+DR0].value, 0, 32, address);
	x86_32->cache->reg_list[bp_num+DR0].dirty = true;
	x86_32->cache->reg_list[bp_num+DR0].valid = true;
	/* DR6 is reset to its inactive value so stale hit bits don't linger */
	buf_set_u32(x86_32->cache->reg_list[DR6].value, 0, 32, PM_DR6);
	x86_32->cache->reg_list[DR6].dirty = true;
	x86_32->cache->reg_list[DR6].valid = true;
	buf_set_u32(x86_32->cache->reg_list[DR7].value, 0, 32, dr7);
	x86_32->cache->reg_list[DR7].dirty = true;
	x86_32->cache->reg_list[DR7].valid = true;
	return ERROR_OK;
}
/* Release one debug register slot: clear the global-enable and R/W-length
 * bits for bp_num in DR7 and zero the matching DR0+bp_num address register,
 * all in the register cache (flushed to hardware on resume).
 * Returns ERROR_TARGET_RESOURCE_NOT_AVAILABLE if the slot was not enabled,
 * which indicates a caller bookkeeping bug.
 */
static int unset_debug_regs(struct target *t, uint8_t bp_num)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	LOG_DEBUG("bp_num=%" PRIu8, bp_num);
	uint32_t dr7 = buf_get_u32(x86_32->cache->reg_list[DR7].value, 0, 32);
	if (!(DR7_BP_FREE(dr7, bp_num))) {
		DR7_GLOBAL_DISABLE(dr7, bp_num);
	} else {
		LOG_ERROR("%s dr7 error, not enabled, val=0x%08" PRIx32, __func__, dr7);
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}
	/* this will clear rw and len bits */
	DR7_RESET_RWLEN_BITS(dr7, bp_num);
	/* update regs in the reg cache ready to be written to hardware
	 * when we exit PM
	 */
	buf_set_u32(x86_32->cache->reg_list[bp_num+DR0].value, 0, 32, 0);
	x86_32->cache->reg_list[bp_num+DR0].dirty = true;
	x86_32->cache->reg_list[bp_num+DR0].valid = true;
	/* DR6 is reset so stale debug-status hit bits don't linger */
	buf_set_u32(x86_32->cache->reg_list[DR6].value, 0, 32, PM_DR6);
	x86_32->cache->reg_list[DR6].dirty = true;
	x86_32->cache->reg_list[DR6].valid = true;
	buf_set_u32(x86_32->cache->reg_list[DR7].value, 0, 32, dr7);
	x86_32->cache->reg_list[DR7].dirty = true;
	x86_32->cache->reg_list[DR7].valid = true;
	return ERROR_OK;
}
  901. static int set_hwbp(struct target *t, struct breakpoint *bp)
  902. {
  903. struct x86_32_common *x86_32 = target_to_x86_32(t);
  904. struct x86_32_dbg_reg *debug_reg_list = x86_32->hw_break_list;
  905. uint8_t hwbp_num = 0;
  906. while (debug_reg_list[hwbp_num].used && (hwbp_num < x86_32->num_hw_bpoints))
  907. hwbp_num++;
  908. if (hwbp_num >= x86_32->num_hw_bpoints) {
  909. LOG_ERROR("%s no free hw breakpoint bpid=0x%" PRIx32, __func__, bp->unique_id);
  910. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  911. }
  912. if (set_debug_regs(t, bp->address, hwbp_num, DR7_BP_EXECUTE, 1) != ERROR_OK)
  913. return ERROR_FAIL;
  914. bp->set = hwbp_num + 1;
  915. debug_reg_list[hwbp_num].used = 1;
  916. debug_reg_list[hwbp_num].bp_value = bp->address;
  917. LOG_USER("%s hardware breakpoint %" PRIu32 " set at 0x%08" PRIx32 " (hwreg=%" PRIu8 ")", __func__,
  918. bp->unique_id, debug_reg_list[hwbp_num].bp_value, hwbp_num);
  919. return ERROR_OK;
  920. }
  921. static int unset_hwbp(struct target *t, struct breakpoint *bp)
  922. {
  923. struct x86_32_common *x86_32 = target_to_x86_32(t);
  924. struct x86_32_dbg_reg *debug_reg_list = x86_32->hw_break_list;
  925. int hwbp_num = bp->set - 1;
  926. if ((hwbp_num < 0) || (hwbp_num >= x86_32->num_hw_bpoints)) {
  927. LOG_ERROR("%s invalid breakpoint number=%d, bpid=%" PRIu32,
  928. __func__, hwbp_num, bp->unique_id);
  929. return ERROR_OK;
  930. }
  931. if (unset_debug_regs(t, hwbp_num) != ERROR_OK)
  932. return ERROR_FAIL;
  933. debug_reg_list[hwbp_num].used = 0;
  934. debug_reg_list[hwbp_num].bp_value = 0;
  935. LOG_USER("%s hardware breakpoint %" PRIu32 " removed from " TARGET_ADDR_FMT " (hwreg=%d)",
  936. __func__, bp->unique_id, bp->address, hwbp_num);
  937. return ERROR_OK;
  938. }
/* Arm a software breakpoint: save the original byte at the breakpoint's
 * physical address, overwrite it with the trap opcode, read it back to verify
 * the memory is writable, and record the patch in the swbbp_mem_patch_list
 * so memory reads can present the original byte to the user.
 * Returns ERROR_FAIL on any address-translation, memory, or allocation error.
 */
static int set_swbp(struct target *t, struct breakpoint *bp)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	LOG_DEBUG("id %" PRIx32, bp->unique_id);
	target_addr_t physaddr;
	uint8_t opcode = SW_BP_OPCODE;
	uint8_t readback;
	/* breakpoints are set on the linear address; translate to physical */
	if (calcaddr_physfromlin(t, bp->address, &physaddr) != ERROR_OK)
		return ERROR_FAIL;
	/* save the original byte so it can be restored on removal */
	if (read_phys_mem(t, physaddr, 1, 1, bp->orig_instr))
		return ERROR_FAIL;
	LOG_DEBUG("set software breakpoint - orig byte=0x%02" PRIx8 "", *bp->orig_instr);
	/* just write the instruction trap byte */
	if (write_phys_mem(t, physaddr, 1, 1, &opcode))
		return ERROR_FAIL;
	/* verify that this is not invalid/read-only memory */
	if (read_phys_mem(t, physaddr, 1, 1, &readback))
		return ERROR_FAIL;
	if (readback != SW_BP_OPCODE) {
		LOG_ERROR("%s software breakpoint error at " TARGET_ADDR_FMT ", check memory",
				__func__, bp->address);
		LOG_ERROR("%s readback=0x%02" PRIx8 " orig=0x%02" PRIx8 "",
				__func__, readback, *bp->orig_instr);
		return ERROR_FAIL;
	}
	bp->set = SW_BP_OPCODE; /* just non 0 */
	/* add the memory patch */
	struct swbp_mem_patch *new_patch = malloc(sizeof(struct swbp_mem_patch));
	if (!new_patch) {
		LOG_ERROR("%s out of memory", __func__);
		return ERROR_FAIL;
	}
	new_patch->next = NULL;
	new_patch->orig_byte = *bp->orig_instr;
	new_patch->physaddr = physaddr;
	new_patch->swbp_unique_id = bp->unique_id;
	/* append to the tail of the singly-linked patch list */
	struct swbp_mem_patch *addto = x86_32->swbbp_mem_patch_list;
	if (!addto)
		x86_32->swbbp_mem_patch_list = new_patch;
	else {
		while (addto->next)
			addto = addto->next;
		addto->next = new_patch;
	}
	LOG_USER("%s software breakpoint %" PRIu32 " set at " TARGET_ADDR_FMT,
			__func__, bp->unique_id, bp->address);
	return ERROR_OK;
}
/* Disarm a software breakpoint: restore the saved original byte (refusing if
 * the trap opcode has been overwritten by the target program) and unlink the
 * corresponding entry from the swbbp_mem_patch_list.
 * Returns ERROR_FAIL on translation/memory errors or a modified breakpoint.
 */
static int unset_swbp(struct target *t, struct breakpoint *bp)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	LOG_DEBUG("id %" PRIx32, bp->unique_id);
	target_addr_t physaddr;
	uint8_t current_instr;
	/* check that user program has not modified breakpoint instruction */
	if (calcaddr_physfromlin(t, bp->address, &physaddr) != ERROR_OK)
		return ERROR_FAIL;
	if (read_phys_mem(t, physaddr, 1, 1, &current_instr))
		return ERROR_FAIL;
	if (current_instr == SW_BP_OPCODE) {
		if (write_phys_mem(t, physaddr, 1, 1, bp->orig_instr))
			return ERROR_FAIL;
	} else {
		LOG_ERROR("%s software breakpoint remove error at " TARGET_ADDR_FMT ", check memory",
				__func__, bp->address);
		LOG_ERROR("%s current=0x%02" PRIx8 " orig=0x%02" PRIx8 "",
				__func__, current_instr, *bp->orig_instr);
		return ERROR_FAIL;
	}
	/* remove from patch */
	struct swbp_mem_patch *iter = x86_32->swbbp_mem_patch_list;
	if (iter) {
		if (iter->swbp_unique_id == bp->unique_id) {
			/* it's the first item */
			x86_32->swbbp_mem_patch_list = iter->next;
			free(iter);
		} else {
			/* walk until iter->next is the matching node, then splice
			 * it out; a missing id is silently ignored */
			while (iter->next && iter->next->swbp_unique_id != bp->unique_id)
				iter = iter->next;
			if (iter->next) {
				/* it's the next one */
				struct swbp_mem_patch *freeme = iter->next;
				iter->next = iter->next->next;
				free(freeme);
			}
		}
	}
	LOG_USER("%s software breakpoint %" PRIu32 " removed from " TARGET_ADDR_FMT,
			__func__, bp->unique_id, bp->address);
	return ERROR_OK;
}
  1030. static int set_breakpoint(struct target *t, struct breakpoint *bp)
  1031. {
  1032. int error = ERROR_OK;
  1033. struct x86_32_common *x86_32 = target_to_x86_32(t);
  1034. LOG_DEBUG("type=%d, addr=" TARGET_ADDR_FMT, bp->type, bp->address);
  1035. if (bp->set) {
  1036. LOG_ERROR("breakpoint already set");
  1037. return error;
  1038. }
  1039. if (bp->type == BKPT_HARD) {
  1040. error = set_hwbp(t, bp);
  1041. if (error != ERROR_OK) {
  1042. LOG_ERROR("%s error setting hardware breakpoint at " TARGET_ADDR_FMT,
  1043. __func__, bp->address);
  1044. return error;
  1045. }
  1046. } else {
  1047. if (x86_32->sw_bpts_supported(t)) {
  1048. error = set_swbp(t, bp);
  1049. if (error != ERROR_OK) {
  1050. LOG_ERROR("%s error setting software breakpoint at " TARGET_ADDR_FMT,
  1051. __func__, bp->address);
  1052. return error;
  1053. }
  1054. } else {
  1055. LOG_ERROR("%s core doesn't support SW breakpoints", __func__);
  1056. return ERROR_FAIL;
  1057. }
  1058. }
  1059. return error;
  1060. }
  1061. static int unset_breakpoint(struct target *t, struct breakpoint *bp)
  1062. {
  1063. LOG_DEBUG("type=%d, addr=" TARGET_ADDR_FMT, bp->type, bp->address);
  1064. if (!bp->set) {
  1065. LOG_WARNING("breakpoint not set");
  1066. return ERROR_OK;
  1067. }
  1068. if (bp->type == BKPT_HARD) {
  1069. if (unset_hwbp(t, bp) != ERROR_OK) {
  1070. LOG_ERROR("%s error removing hardware breakpoint at " TARGET_ADDR_FMT,
  1071. __func__, bp->address);
  1072. return ERROR_FAIL;
  1073. }
  1074. } else {
  1075. if (unset_swbp(t, bp) != ERROR_OK) {
  1076. LOG_ERROR("%s error removing software breakpoint at " TARGET_ADDR_FMT,
  1077. __func__, bp->address);
  1078. return ERROR_FAIL;
  1079. }
  1080. }
  1081. bp->set = 0;
  1082. return ERROR_OK;
  1083. }
  1084. static int set_watchpoint(struct target *t, struct watchpoint *wp)
  1085. {
  1086. struct x86_32_common *x86_32 = target_to_x86_32(t);
  1087. struct x86_32_dbg_reg *debug_reg_list = x86_32->hw_break_list;
  1088. int wp_num = 0;
  1089. LOG_DEBUG("type=%d, addr=" TARGET_ADDR_FMT, wp->rw, wp->address);
  1090. if (wp->set) {
  1091. LOG_ERROR("%s watchpoint already set", __func__);
  1092. return ERROR_OK;
  1093. }
  1094. if (wp->rw == WPT_READ) {
  1095. LOG_ERROR("%s no support for 'read' watchpoints, use 'access' or 'write'"
  1096. , __func__);
  1097. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1098. }
  1099. while (debug_reg_list[wp_num].used && (wp_num < x86_32->num_hw_bpoints))
  1100. wp_num++;
  1101. if (wp_num >= x86_32->num_hw_bpoints) {
  1102. LOG_ERROR("%s no debug registers left", __func__);
  1103. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1104. }
  1105. if (wp->length != 4 && wp->length != 2 && wp->length != 1) {
  1106. LOG_ERROR("%s only watchpoints of length 1, 2 or 4 are supported", __func__);
  1107. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1108. }
  1109. switch (wp->rw) {
  1110. case WPT_WRITE:
  1111. if (set_debug_regs(t, wp->address, wp_num,
  1112. DR7_BP_WRITE, wp->length) != ERROR_OK) {
  1113. return ERROR_FAIL;
  1114. }
  1115. break;
  1116. case WPT_ACCESS:
  1117. if (set_debug_regs(t, wp->address, wp_num, DR7_BP_READWRITE,
  1118. wp->length) != ERROR_OK) {
  1119. return ERROR_FAIL;
  1120. }
  1121. break;
  1122. default:
  1123. LOG_ERROR("%s only 'access' or 'write' watchpoints are supported", __func__);
  1124. break;
  1125. }
  1126. wp->set = wp_num + 1;
  1127. debug_reg_list[wp_num].used = 1;
  1128. debug_reg_list[wp_num].bp_value = wp->address;
  1129. LOG_USER("'%s' watchpoint %d set at " TARGET_ADDR_FMT " with length %" PRIu32 " (hwreg=%d)",
  1130. wp->rw == WPT_READ ? "read" : wp->rw == WPT_WRITE ?
  1131. "write" : wp->rw == WPT_ACCESS ? "access" : "?",
  1132. wp->unique_id, wp->address, wp->length, wp_num);
  1133. return ERROR_OK;
  1134. }
  1135. static int unset_watchpoint(struct target *t, struct watchpoint *wp)
  1136. {
  1137. struct x86_32_common *x86_32 = target_to_x86_32(t);
  1138. struct x86_32_dbg_reg *debug_reg_list = x86_32->hw_break_list;
  1139. LOG_DEBUG("type=%d, addr=" TARGET_ADDR_FMT, wp->rw, wp->address);
  1140. if (!wp->set) {
  1141. LOG_WARNING("watchpoint not set");
  1142. return ERROR_OK;
  1143. }
  1144. int wp_num = wp->set - 1;
  1145. if ((wp_num < 0) || (wp_num >= x86_32->num_hw_bpoints)) {
  1146. LOG_DEBUG("Invalid FP Comparator number in watchpoint");
  1147. return ERROR_OK;
  1148. }
  1149. if (unset_debug_regs(t, wp_num) != ERROR_OK)
  1150. return ERROR_FAIL;
  1151. debug_reg_list[wp_num].used = 0;
  1152. debug_reg_list[wp_num].bp_value = 0;
  1153. wp->set = 0;
  1154. LOG_USER("'%s' watchpoint %d removed from " TARGET_ADDR_FMT " with length %" PRIu32 " (hwreg=%d)",
  1155. wp->rw == WPT_READ ? "read" : wp->rw == WPT_WRITE ?
  1156. "write" : wp->rw == WPT_ACCESS ? "access" : "?",
  1157. wp->unique_id, wp->address, wp->length, wp_num);
  1158. return ERROR_OK;
  1159. }
  1160. /* after reset breakpoints and watchpoints in memory are not valid anymore and
  1161. * debug registers are cleared.
  1162. * we can't afford to remove sw breakpoints using the default methods as the
  1163. * memory doesn't have the same layout yet and an access might crash the target,
  1164. * so we just clear the openocd breakpoints structures.
  1165. */
  1166. void x86_32_common_reset_breakpoints_watchpoints(struct target *t)
  1167. {
  1168. struct x86_32_common *x86_32 = target_to_x86_32(t);
  1169. struct x86_32_dbg_reg *debug_reg_list = x86_32->hw_break_list;
  1170. struct breakpoint *next_b;
  1171. struct watchpoint *next_w;
  1172. while (t->breakpoints) {
  1173. next_b = t->breakpoints->next;
  1174. free(t->breakpoints->orig_instr);
  1175. free(t->breakpoints);
  1176. t->breakpoints = next_b;
  1177. }
  1178. while (t->watchpoints) {
  1179. next_w = t->watchpoints->next;
  1180. free(t->watchpoints);
  1181. t->watchpoints = next_w;
  1182. }
  1183. for (int i = 0; i < x86_32->num_hw_bpoints; i++) {
  1184. debug_reg_list[i].used = 0;
  1185. debug_reg_list[i].bp_value = 0;
  1186. }
  1187. }
  1188. static int read_hw_reg_to_cache(struct target *t, int num)
  1189. {
  1190. uint32_t reg_value;
  1191. struct x86_32_common *x86_32 = target_to_x86_32(t);
  1192. if (check_not_halted(t))
  1193. return ERROR_TARGET_NOT_HALTED;
  1194. if ((num < 0) || (num >= x86_32->get_num_user_regs(t)))
  1195. return ERROR_COMMAND_SYNTAX_ERROR;
  1196. if (x86_32->read_hw_reg(t, num, &reg_value, 1) != ERROR_OK) {
  1197. LOG_ERROR("%s fail for %s", x86_32->cache->reg_list[num].name, __func__);
  1198. return ERROR_FAIL;
  1199. }
  1200. LOG_DEBUG("reg %s value 0x%08" PRIx32,
  1201. x86_32->cache->reg_list[num].name, reg_value);
  1202. return ERROR_OK;
  1203. }
  1204. static int write_hw_reg_from_cache(struct target *t, int num)
  1205. {
  1206. struct x86_32_common *x86_32 = target_to_x86_32(t);
  1207. if (check_not_halted(t))
  1208. return ERROR_TARGET_NOT_HALTED;
  1209. if ((num < 0) || (num >= x86_32->get_num_user_regs(t)))
  1210. return ERROR_COMMAND_SYNTAX_ERROR;
  1211. if (x86_32->write_hw_reg(t, num, 0, 1) != ERROR_OK) {
  1212. LOG_ERROR("%s fail for %s", x86_32->cache->reg_list[num].name, __func__);
  1213. return ERROR_FAIL;
  1214. }
  1215. LOG_DEBUG("reg %s value 0x%08" PRIx32, x86_32->cache->reg_list[num].name,
  1216. buf_get_u32(x86_32->cache->reg_list[num].value, 0, 32));
  1217. return ERROR_OK;
  1218. }
  1219. /* x86 32 commands */
  1220. static void handle_iod_output(struct command_invocation *cmd,
  1221. struct target *target, uint32_t address, unsigned size,
  1222. unsigned count, const uint8_t *buffer)
  1223. {
  1224. const unsigned line_bytecnt = 32;
  1225. unsigned line_modulo = line_bytecnt / size;
  1226. char output[line_bytecnt * 4 + 1];
  1227. unsigned output_len = 0;
  1228. const char *value_fmt;
  1229. switch (size) {
  1230. case 4:
  1231. value_fmt = "%8.8x ";
  1232. break;
  1233. case 2:
  1234. value_fmt = "%4.4x ";
  1235. break;
  1236. case 1:
  1237. value_fmt = "%2.2x ";
  1238. break;
  1239. default:
  1240. /* "can't happen", caller checked */
  1241. LOG_ERROR("%s invalid memory read size: %u", __func__, size);
  1242. return;
  1243. }
  1244. for (unsigned i = 0; i < count; i++) {
  1245. if (i % line_modulo == 0) {
  1246. output_len += snprintf(output + output_len,
  1247. sizeof(output) - output_len,
  1248. "0x%8.8x: ",
  1249. (unsigned)(address + (i*size)));
  1250. }
  1251. uint32_t value = 0;
  1252. const uint8_t *value_ptr = buffer + i * size;
  1253. switch (size) {
  1254. case 4:
  1255. value = target_buffer_get_u32(target, value_ptr);
  1256. break;
  1257. case 2:
  1258. value = target_buffer_get_u16(target, value_ptr);
  1259. break;
  1260. case 1:
  1261. value = *value_ptr;
  1262. }
  1263. output_len += snprintf(output + output_len,
  1264. sizeof(output) - output_len,
  1265. value_fmt, value);
  1266. if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
  1267. command_print(cmd, "%s", output);
  1268. output_len = 0;
  1269. }
  1270. }
  1271. }
  1272. COMMAND_HANDLER(handle_iod_command)
  1273. {
  1274. if (CMD_ARGC != 1)
  1275. return ERROR_COMMAND_SYNTAX_ERROR;
  1276. uint32_t address;
  1277. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
  1278. if (address > 0xffff) {
  1279. LOG_ERROR("%s IA-32 I/O space is 2^16, 0x%08" PRIx32 " exceeds max", __func__, address);
  1280. return ERROR_COMMAND_SYNTAX_ERROR;
  1281. }
  1282. unsigned size = 0;
  1283. switch (CMD_NAME[2]) {
  1284. case 'w':
  1285. size = 4;
  1286. break;
  1287. case 'h':
  1288. size = 2;
  1289. break;
  1290. case 'b':
  1291. size = 1;
  1292. break;
  1293. default:
  1294. return ERROR_COMMAND_SYNTAX_ERROR;
  1295. }
  1296. unsigned count = 1;
  1297. uint8_t *buffer = calloc(count, size);
  1298. struct target *target = get_current_target(CMD_CTX);
  1299. int retval = x86_32_common_read_io(target, address, size, buffer);
  1300. if (retval == ERROR_OK)
  1301. handle_iod_output(CMD, target, address, size, count, buffer);
  1302. free(buffer);
  1303. return retval;
  1304. }
  1305. static int target_fill_io(struct target *target,
  1306. uint32_t address,
  1307. unsigned data_size,
  1308. /* value */
  1309. uint32_t b)
  1310. {
  1311. LOG_DEBUG("address=0x%08" PRIx32 ", data_size=%u, b=0x%08" PRIx32,
  1312. address, data_size, b);
  1313. uint8_t target_buf[data_size];
  1314. switch (data_size) {
  1315. case 4:
  1316. target_buffer_set_u32(target, target_buf, b);
  1317. break;
  1318. case 2:
  1319. target_buffer_set_u16(target, target_buf, b);
  1320. break;
  1321. case 1:
  1322. target_buf[0] = (b & 0x0ff);
  1323. break;
  1324. default:
  1325. exit(-1);
  1326. }
  1327. return x86_32_common_write_io(target, address, data_size, target_buf);
  1328. }
  1329. COMMAND_HANDLER(handle_iow_command)
  1330. {
  1331. if (CMD_ARGC != 2)
  1332. return ERROR_COMMAND_SYNTAX_ERROR;
  1333. uint32_t address;
  1334. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
  1335. uint32_t value;
  1336. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
  1337. struct target *target = get_current_target(CMD_CTX);
  1338. unsigned wordsize;
  1339. switch (CMD_NAME[2]) {
  1340. case 'w':
  1341. wordsize = 4;
  1342. break;
  1343. case 'h':
  1344. wordsize = 2;
  1345. break;
  1346. case 'b':
  1347. wordsize = 1;
  1348. break;
  1349. default:
  1350. return ERROR_COMMAND_SYNTAX_ERROR;
  1351. }
  1352. return target_fill_io(target, address, wordsize, value);
  1353. }
/* EXEC-mode shell commands for raw I/O port access: iw{w,h,b} write a
 * word/halfword/byte, id{w,h,b} display one. All six dispatch to the two
 * handlers above, which key off the third letter of the command name.
 */
static const struct command_registration x86_32_exec_command_handlers[] = {
	{
		.name = "iww",
		.mode = COMMAND_EXEC,
		.handler = handle_iow_command,
		.help = "write I/O port word",
		.usage = "port data[word]",
	},
	{
		.name = "iwh",
		.mode = COMMAND_EXEC,
		.handler = handle_iow_command,
		.help = "write I/O port halfword",
		.usage = "port data[halfword]",
	},
	{
		.name = "iwb",
		.mode = COMMAND_EXEC,
		.handler = handle_iow_command,
		.help = "write I/O port byte",
		.usage = "port data[byte]",
	},
	{
		.name = "idw",
		.mode = COMMAND_EXEC,
		.handler = handle_iod_command,
		.help = "display I/O port word",
		.usage = "port",
	},
	{
		.name = "idh",
		.mode = COMMAND_EXEC,
		.handler = handle_iod_command,
		.help = "display I/O port halfword",
		.usage = "port",
	},
	{
		.name = "idb",
		.mode = COMMAND_EXEC,
		.handler = handle_iod_command,
		.help = "display I/O port byte",
		.usage = "port",
	},
	COMMAND_REGISTRATION_DONE
};
/* Public "x86_32" command group, exported for target registration; chains
 * the I/O port exec commands defined in x86_32_exec_command_handlers.
 */
const struct command_registration x86_32_command_handlers[] = {
	{
		.name = "x86_32",
		.mode = COMMAND_ANY,
		.help = "x86_32 target commands",
		.usage = "",
		.chain = x86_32_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};