The binary encrypts the string by applying a function to each character that produces an int (as seen in sub_80488E0, sub_804868B).
This encryption is not chained, so we can pass every character to the binary, get each one encrypted, and use the results as a
reference to decode our file.
//////////////////////// | |
// USER DEFINED RULES // | |
//////////////////////// | |
// MFC/ATL strings | |
R9000 : { ptn : "CStringT<char,\\s*StrTraitMFC(_DLL)?<char,\\s*(ATL::)?ChTraits(CRT|OS)<char>>>", rpl : "CStringA" }, | |
R9001 : { ptn : "CStringT<wchar_t,\\s*StrTraitMFC(_DLL)?<wchar_t,\\s*(ATL::)?ChTraits(CRT|OS)<wchar_t>>>", rpl : "CStringW" }, | |
R9002 : { ptn : "CStringT<char,\\s*StrTraitATL(_DLL)?<char,\\s*(ATL::)?ChTraits(CRT|OS)<char>>>", rpl : "CAtlStringA" }, | |
R9003 : { ptn : "CStringT<wchar_t,\\s*StrTraitATL(_DLL)?<wchar_t,\\s*(ATL::)?ChTraits(CRT|OS)<wchar_t>>>", rpl : "CAtlStringW" }, |
#include <iostream> | |
int main() { | |
for (size_t i = 1; i < 0x100000000ull; ++i) { | |
bool ok = true; | |
bool was[32] = { false }; | |
for (int j = 1; j <= 32 && ok; ++j) { | |
auto x = (1ull << j) - 1; | |
auto res = static_cast<uint32_t>(x * i) >> 27; | |
ok &= !was[res]; |
#!/usr/bin/env python2 | |
from pwn import * | |
import os | |
class Room: | |
def __init__(self, data=None): | |
self.dimX = None | |
self.dimY = None | |
self.player = None | |
self.flag = None |
#include <cstdio> | |
#include <cstring> | |
#include <cstdlib> | |
#include <vector> | |
#include <string> | |
#include <iostream> | |
#include <iomanip> | |
uint64_t arr0[] = { | |
0xFA730603, 0xF8084C29, 0xF4290A55, 0xF17A02CD, |
#!/usr/bin/env python | |
import sys | |
import struct | |
from Crypto.PublicKey import RSA | |
from Crypto.Cipher import PKCS1_OAEP | |
from pwn import * | |
#!/usr/bin/env python | |
import sys | |
import struct | |
from Crypto.PublicKey import RSA | |
from Crypto.Cipher import PKCS1_OAEP | |
from pwn import * | |
import sys | |
import pprint | |
import struct | |
TABLE_SIZE = 4000 | |
table = [[]] | |
iterators = [] | |
locks = set() | |
def request(cur=0, path_diff=2**64): |
#!/usr/bin/env python2 | |
from pwn import * | |
from heapq import * | |
PRIMES = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251) | |
def gen_byte_generators(): | |
res = {} |
The binary reads format strings one by one from the provided file and prints them to /dev/null.
This fprintf
receives a lot of parameters, which are actually 16 bytes of memory, 16 bytes of the flag, and pointers to said bytes — 64 parameters in total. Because %hhn
specifiers are used, format strings can write to the provided memory addresses, so we can easily perform additions with them.
Since the given "virtual program" was pretty big — almost 3400 lines — I wrote a parser to make the "virtual instructions" (format strings) more human-readable. For example, %2$*36$s%2$*41$s%4$hhn
becomes mem[3] = mem[3] + mem[8].
After parsing into human-readable form, the patterns in the code became more obvious, so the next things I wrote were two "optimizing" passes that folded additions into multiplications and then multiplications into one big sum.
After these passes we are left with a pretty simple program. It is clear that the flag is checked using a linear system, so we can use z3 to solve it easily.