Last active
July 6, 2016 15:42
-
-
Save sdamashek/abd1dc20d063528fa6db65f266011495 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
From f1dd0199cd7bc2e3b5b579852d96d505f731cb7a Mon Sep 17 00:00:00 2001 | |
From: Samuel Damashek <[email protected]> | |
Date: Wed, 6 Jul 2016 11:41:52 -0400 | |
Subject: [PATCH] Fix for self-modifying writes across page boundaries. | |
As it currently stands, QEMU does not properly handle self-modifying code | |
when the write is unaligned and crosses a page boundary. The procedure | |
for handling a write to the current translation block is to write-protect | |
the current translation block, catch the write, split up the translation | |
block into the current instruction (which remains write-protected so that | |
the current instruction is not modified) and the remaining instructions | |
in the translation block, and then restore the CPU state to before the | |
write occurred so the write will be retried and successfully executed. | |
However, since unaligned writes across pages are split into one-byte | |
writes for simplicity, writes to the second page (which is not the | |
current TB) may succeed before a write to the current TB is attempted, | |
and since these writes are not invalidated before resuming state after | |
splitting the TB, these writes will be performed a second time, thus | |
corrupting the second page. Credit goes to Patrick Hulin for | |
discovering this. | |
In recent 64-bit versions of Windows running in emulated mode, this | |
results in either severe instability (a BSOD after a couple of minutes | |
of uptime), or a complete failure to boot. Windows performs one or more | |
8-byte unaligned self-modifying writes (xors) which intersect the end | |
of the current TB and the beginning of the next TB, which runs into the | |
aforementioned issue. This commit fixes that issue by making the | |
unaligned write loop perform the writes in forwards order, instead of | |
reverse order. This way, QEMU immediately tries to write to the current | |
TB, and splits the TB before any write to the second page is executed. | |
The write then proceeds as intended. With this patch applied, I am able | |
to boot and use Windows 7 64-bit and Windows 10 64-bit in QEMU without | |
KVM. | |
Per Richard Henderson's input, this patch also ensures the second page | |
is in the TLB before executing the write loop, to ensure the second | |
page is mapped. | |
The VICTIM_TLB_HIT macro was also updated to accept a second argument, | |
the name of the addr variable being used, so that I could add an | |
additional addr variable (addr_page2) instead of temporarily setting addr | |
to the second page's address. | |
More discussion of the issue and patch is located at | |
https://lists.nongnu.org/archive/html/qemu-devel/2014-08/msg02161.html. | |
Signed-off-by: Samuel Damashek <[email protected]> | |
--- | |
softmmu_template.h | 61 ++++++++++++++++++++++++++++++++++++++++-------------- | |
1 file changed, 46 insertions(+), 15 deletions(-) | |
diff --git a/softmmu_template.h b/softmmu_template.h | |
index 208f808..0d173a0 100644 | |
--- a/softmmu_template.h | |
+++ b/softmmu_template.h | |
@@ -117,7 +117,7 @@ | |
#endif | |
/* macro to check the victim tlb */ | |
-#define VICTIM_TLB_HIT(ty) \ | |
+#define VICTIM_TLB_HIT(ty, addr_nm) \ | |
({ \ | |
/* we are about to do a page table walk. our last hope is the \ | |
* victim tlb. try to refill from the victim tlb before walking the \ | |
@@ -126,7 +126,8 @@ | |
CPUIOTLBEntry tmpiotlb; \ | |
CPUTLBEntry tmptlb; \ | |
for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \ | |
- if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\ | |
+ if (env->tlb_v_table[mmu_idx][vidx].ty \ | |
+ == (addr_nm & TARGET_PAGE_MASK)) { \ | |
/* found entry in victim tlb, swap tlb and iotlb */ \ | |
tmptlb = env->tlb_table[mmu_idx][index]; \ | |
env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \ | |
@@ -185,7 +186,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, | |
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, | |
mmu_idx, retaddr); | |
} | |
- if (!VICTIM_TLB_HIT(ADDR_READ)) { | |
+ if (!VICTIM_TLB_HIT(ADDR_READ, addr)) { | |
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, | |
mmu_idx, retaddr); | |
} | |
@@ -269,7 +270,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, | |
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, | |
mmu_idx, retaddr); | |
} | |
- if (!VICTIM_TLB_HIT(ADDR_READ)) { | |
+ if (!VICTIM_TLB_HIT(ADDR_READ, addr)) { | |
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, | |
mmu_idx, retaddr); | |
} | |
@@ -389,7 +390,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, | |
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, | |
mmu_idx, retaddr); | |
} | |
- if (!VICTIM_TLB_HIT(addr_write)) { | |
+ if (!VICTIM_TLB_HIT(addr_write, addr)) { | |
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); | |
} | |
tlb_addr = env->tlb_table[mmu_idx][index].addr_write; | |
@@ -414,16 +415,31 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, | |
if (DATA_SIZE > 1 | |
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 | |
>= TARGET_PAGE_SIZE)) { | |
- int i; | |
+ int i, index_page2; | |
+ target_ulong addr_page2, tlb_addr_page2; | |
do_unaligned_access: | |
if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) { | |
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, | |
mmu_idx, retaddr); | |
} | |
+ | |
+ /* Ensure the second page is in the TLB. Note that the first page | |
+ * is already guaranteed to be filled into the TLB. */ | |
+ addr_page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK; | |
+ index_page2 = (addr_page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); | |
+ tlb_addr_page2 = env->tlb_table[mmu_idx][index_page2].addr_write; | |
+ | |
+ if (((addr_page2 & TARGET_PAGE_MASK) | |
+ != (tlb_addr_page2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) | |
+ && !VICTIM_TLB_HIT(addr_write, addr_page2)) { | |
+ tlb_fill(ENV_GET_CPU(env), addr_page2, MMU_DATA_STORE, | |
+ mmu_idx, retaddr); | |
+ } | |
+ | |
/* XXX: not efficient, but simple */ | |
- /* Note: relies on the fact that tlb_fill() does not remove the | |
- * previous page from the TLB cache. */ | |
- for (i = DATA_SIZE - 1; i >= 0; i--) { | |
+ /* This loop must go in the forward direction to avoid issues with | |
+ * self-modifying code in Windows 64-bit. */ | |
+ for (i = 0; i < DATA_SIZE; i++) { | |
/* Little-endian extract. */ | |
uint8_t val8 = val >> (i * 8); | |
/* Note the adjustment at the beginning of the function. | |
@@ -469,7 +485,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, | |
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, | |
mmu_idx, retaddr); | |
} | |
- if (!VICTIM_TLB_HIT(addr_write)) { | |
+ if (!VICTIM_TLB_HIT(addr_write, addr)) { | |
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); | |
} | |
tlb_addr = env->tlb_table[mmu_idx][index].addr_write; | |
@@ -494,16 +510,31 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, | |
if (DATA_SIZE > 1 | |
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 | |
>= TARGET_PAGE_SIZE)) { | |
- int i; | |
+ int i, index_page2; | |
+ target_ulong addr_page2, tlb_addr_page2; | |
do_unaligned_access: | |
if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) { | |
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, | |
mmu_idx, retaddr); | |
} | |
+ | |
+ /* Ensure the second page is in the TLB. Note that the first page | |
+ * is already guaranteed to be filled into the TLB. */ | |
+ addr_page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK; | |
+ index_page2 = (addr_page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); | |
+ tlb_addr_page2 = env->tlb_table[mmu_idx][index_page2].addr_write; | |
+ | |
+ if (((addr_page2 & TARGET_PAGE_MASK) | |
+ != (tlb_addr_page2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) | |
+ && !VICTIM_TLB_HIT(addr_write, addr_page2)) { | |
+ tlb_fill(ENV_GET_CPU(env), addr_page2, MMU_DATA_STORE, | |
+ mmu_idx, retaddr); | |
+ } | |
+ | |
/* XXX: not efficient, but simple */ | |
- /* Note: relies on the fact that tlb_fill() does not remove the | |
- * previous page from the TLB cache. */ | |
- for (i = DATA_SIZE - 1; i >= 0; i--) { | |
+ /* This loop must go in the forward direction to avoid issues with | |
+ * self-modifying code in Windows 64-bit. */ | |
+ for (i = 0; i < DATA_SIZE; i++) { | |
/* Big-endian extract. */ | |
uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8)); | |
/* Note the adjustment at the beginning of the function. | |
@@ -542,7 +573,7 @@ void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx, | |
if ((addr & TARGET_PAGE_MASK) | |
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { | |
/* TLB entry is for a different page */ | |
- if (!VICTIM_TLB_HIT(addr_write)) { | |
+ if (!VICTIM_TLB_HIT(addr_write, addr)) { | |
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); | |
} | |
} | |
-- | |
2.9.0 |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment