Last active
August 17, 2024 20:56
-
-
Save Tetralux/08e58a61efbd6c1d7414409fa1656492 to your computer and use it in GitHub Desktop.
Allocator which maps each allocation to a separate page, and decommits that page immediately on free so any later access faults
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
package util | |
import "base:runtime" | |
import "base:intrinsics" | |
import "core:mem" | |
import "core:mem/virtual" | |
import "core:os" | |
import "core:fmt" | |
// Ensures that freeing or resizing renders the original pointer invalid, such that UAFs are guaranteed to crash.
// This is achieved by making a separate allocation for each and every allocation request.
// Tracks every live allocation and the source location of every past free,
// so foreign pointers, double frees, and wrong-size frees can be diagnosed.
Strict_Allocator :: struct {
	pointers: map[rawptr]^Block, // user pointer -> its Block header (live allocations only)
	frees: map[rawptr]runtime.Source_Code_Location, // user pointer -> where it was freed
}
// Wraps `state` in a runtime.Allocator value that dispatches to
// strict_allocator_proc. The state must outlive the returned allocator.
strict_allocator :: proc(state: ^Strict_Allocator) -> runtime.Allocator {
	ally: runtime.Allocator
	ally.procedure = strict_allocator_proc
	ally.data = state
	return ally
}
// Resets `state` to empty and backs both bookkeeping maps with `ally`.
// Must be called before the allocator is used.
strict_allocator_init :: proc(state: ^Strict_Allocator, ally: mem.Allocator) {
	fresh: Strict_Allocator
	fresh.pointers.allocator = ally
	fresh.frees.allocator = ally
	state^ = fresh
}
// Tears the allocator down: decommits every live block via free_all, then
// frees the bookkeeping maps themselves. The state is unusable afterwards
// until re-initialized.
strict_allocator_destroy :: proc(state: ^Strict_Allocator) {
	free_all(strict_allocator(state))
	delete(state.pointers)
	delete(state.frees)
}
// OS page size, queried once at program start; every allocation's mapping is
// rounded up to a multiple of this so each allocation gets its own page(s).
page_size := i64(os.get_page_size())
// Allocator procedure for Strict_Allocator.
//
// Every allocation lives in its own page-granular mapping (see block_init),
// and freed pages are decommitted rather than reused, so any use-after-free
// faults immediately.
//
// NOTE(review): the `alignment` argument is not honoured — user data lands
// wherever block_init places it. Confirm callers only need the alignment
// that placement happens to provide.
strict_allocator_proc :: proc(allocator_data: rawptr, mode: runtime.Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr, old_size: int, loc := #caller_location) -> (memory: []byte, err: runtime.Allocator_Error) {
	state := (^Strict_Allocator)(allocator_data)
	// NOTE: On freeing, don't unmap the old memory pages; this means that it won't be remapped again.
	// This will help to catch invalid accesses on long-dead memory, as accessing them will immediately crash.
	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		block := strict_allocator_alloc(state, size, loc) or_return
		return block_user_data(block), nil
	case .Free:
		// Freeing nil is a no-op, per the conventional allocator contract;
		// previously this panicked as a foreign pointer.
		if old_memory == nil {
			return nil, nil
		}
		strict_allocator_free(state, old_memory, old_size, loc)
		return nil, nil
	case .Free_All:
		// Decommit every live block. Pointers released this way are not
		// recorded in `frees`, so a later explicit free of one panics as
		// "does not belong" rather than "already freed".
		for _, block in state.pointers {
			if block == nil { continue }
			block_destroy(block)
		}
		clear(&state.frees)
		clear(&state.pointers)
	case .Resize, .Resize_Non_Zeroed:
		// Resizing a live pointer to zero bytes is a free; previously this
		// leaked a fresh zero-sized page-mapping instead.
		if size == 0 && old_memory != nil {
			strict_allocator_free(state, old_memory, old_size, loc)
			return nil, nil
		}
		// Always move: allocate fresh pages, copy the payload over, then
		// kill the old block so the stale pointer can never be used again.
		new_block := strict_allocator_alloc(state, size, loc) or_return
		if old_memory != nil {
			old_block, belongs_to_us := state.pointers[old_memory]
			if belongs_to_us {
				// copy() clamps to the shorter of the two slices.
				copy(block_user_data(new_block), block_user_data(old_block))
			}
			// Panics if old_memory does not belong to this allocator.
			strict_allocator_free(state, old_memory, old_size, loc)
		}
		return block_user_data(new_block), nil
	case .Query_Features, .Query_Info:
		return nil, .Mode_Not_Implemented
	}
	return nil, nil
}
// Creates a fresh page-backed block holding `n` user bytes and registers
// its user pointer in the live-allocation table.
strict_allocator_alloc :: proc(state: ^Strict_Allocator, n: int, loc: runtime.Source_Code_Location) -> (block: ^Block, err: mem.Allocator_Error) {
	new_block := block_init(n, loc) or_return
	state.pointers[new_block.user_data] = new_block
	return new_block, nil
}
// Validates and releases a single allocation.
//
// Panics (rather than corrupting state) when the pointer was already freed,
// was never handed out by this allocator, or is freed with the wrong size.
// Pages are decommitted but never unmapped, so the range is never reused
// and any later access faults.
strict_allocator_free :: proc(state: ^Strict_Allocator, user_data: rawptr, user_size: int, loc: runtime.Source_Code_Location) {
	// Check for a double free FIRST: a freed pointer has already been removed
	// from `pointers`, so checking `pointers` first would misreport a double
	// free as a foreign pointer and the informative message below would be
	// unreachable. Safe because decommitted ranges are never handed out again,
	// so a pointer in `frees` can never become valid.
	freed_loc, has_been_freed := state.frees[user_data]
	if has_been_freed {
		fmt.panicf("memory has already been freed (%p) at %v", user_data, freed_loc, loc = loc)
	}
	block, belongs_to_us := state.pointers[user_data]
	if !belongs_to_us {
		fmt.panicf("memory being freed does not belong to this allocator (%p)", user_data, loc = loc)
	}
	// Sanity-check the header; the magics catch writes that scribbled over it.
	assert(block.user_data == user_data)
	assert(block._magic1 == BLOCK_START_MAGIC)
	assert(block._magic2 == BLOCK_END_MAGIC)
	// The user region runs from user_data to the end of the mapping, so its
	// size is recoverable from the block geometry; it must match what the
	// caller claims to be freeing.
	user_data_offset := mem.ptr_sub((^byte)(user_data), (^byte)(block.block_data))
	user_data_size := block.rounded_size - user_data_offset
	fmt.assertf(user_data_size == user_size, "attempt to free with wrong size: %v, but should be %v (%p)", user_size, user_data_size, user_data)
	delete_key(&state.pointers, user_data)
	state.frees[user_data] = loc
	block_destroy(block)
	// NOTE: don't release, only decommit so that this page isn't handed out again.
}
// Per-allocation header, stored immediately before the user region inside
// the same page-granular mapping.
Block :: struct {
	_magic1: u64, // BLOCK_START_MAGIC; checked on free to detect header corruption
	block_data: rawptr, // start of the whole reserved/committed mapping
	rounded_size: int, // total mapping size, rounded up to a multiple of page_size
	user_data: rawptr, // start of the caller-visible region
	user_size: int, // caller-requested size in bytes
	allocated_at: runtime.Source_Code_Location, // where the allocation was requested
	_magic2: u64, // BLOCK_END_MAGIC; checked on free to detect header corruption
}
// Sentinel values bracketing the Block header; verified on free to detect
// writes that clobbered the header.
BLOCK_START_MAGIC :: 0xaaaabbbbccccdddd
BLOCK_END_MAGIC :: 0xeeeeffffaaaabbbb
// Returns the caller-visible region of the block as a byte slice of exactly
// the size the caller originally requested.
block_user_data :: proc(b: ^Block) -> []byte {
	return ([^]byte)(b.user_data)[:b.user_size]
}
// Reserves and commits a dedicated page-rounded mapping for a `size`-byte
// allocation. The user region is placed flush against the END of the mapping
// so that overruns fault on the next (unmapped) page; the Block header sits
// immediately before the user region.
//
// NOTE(review): the allocator's alignment argument is never consulted here;
// user_data lands at (end of mapping - size), so both it and the Block
// header are only as aligned as `size` happens to make them — confirm
// callers don't need stronger alignment guarantees.
block_init :: proc(size: int, loc: runtime.Source_Code_Location) -> (block: ^Block, err: mem.Allocator_Error) {
	// Room for the header plus the payload, rounded up to whole pages.
	rounded_size := align_forward_i64(i64(size) + size_of(Block), page_size)
	memory := virtual.reserve_and_commit(uint(rounded_size)) or_return
	// User bytes occupy the very end of the mapping; header sits just below.
	// strict_allocator_free relies on this exact layout to recompute the
	// user size from the block geometry.
	user_part := memory[len(memory) - size:]
	block = mem.ptr_offset((^Block)(raw_data(user_part)), -1)
	block^ = {
		_magic1 = BLOCK_START_MAGIC,
		_magic2 = BLOCK_END_MAGIC,
		block_data = raw_data(memory),
		rounded_size = int(rounded_size),
		user_data = raw_data(user_part),
		user_size = size,
		allocated_at = loc,
	}
	return block, nil
}
// Decommits the block's pages while keeping the address range reserved, so
// the range is never handed out again and any later access to it faults.
block_destroy :: proc(b: ^Block) {
	virtual.decommit(b.block_data, cast(uint) b.rounded_size)
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment