#load "./alloc/pool"
#load "./alloc/logging"
// Converts an allocator-like value into a generic Allocator.
// Concrete overloads are registered next to each allocator
// implementation via '#match (package core.alloc).as_allocator'.
as_allocator :: #match {
    // Identity overload: an Allocator needs no conversion.
    macro (a: Allocator) => a
}
+
// Capacity, in bytes, of the temporary allocator's backing buffer.
TEMPORARY_ALLOCATOR_SIZE :: 1 << 12; // 1 << 12 = 4096 bytes (4 KiB)
// The global heap allocator, set up upon program initialization.
};
}
#match (package core.alloc).as_allocator make_allocator
// Wraps an arena allocator's state in the generic Allocator interface.
make_allocator :: (rs: ^ArenaState) -> Allocator {
    return Allocator.{
        func = arena_alloc_proc,
        // FIX: the state pointer must travel with the allocator —
        // without it, arena_alloc_proc has no arena to allocate from.
        data = rs,
    };
}
#match (package core.alloc).as_allocator make_allocator
// Wraps a fixed-buffer allocator's state in the generic Allocator
// interface.
// NOTE(review): the original was truncated mid-definition; the closing
// of the struct literal and function is reconstructed from the sibling
// overloads — confirm against version control.
make_allocator :: (fa_data: ^FixedAllocatorData) -> Allocator {
    return Allocator.{
        func = fixed_allocator_proc,
        // State pointer handed back to fixed_allocator_proc on each call.
        data = fa_data,
    };
}
// Enable this to enable checking for invalid blocks and other corruptions
// that may happen on the heap, with the added overhead of performing that
// check on every alloc/resize/free.
Enable_Debug :: false

// This is the implementation for the general purpose heap allocator.
// It is a simple bump allocator, with a free list.

// Header flag bit marking a block as currently allocated.
// NOTE(review): usage is not visible in this excerpt — confirm.
Allocated_Flag :: 0x1

// Sentinel written into freed block headers so heap corruption can be
// detected (the split path in heap_alloc stamps it on new free blocks).
Free_Block_Magic_Number :: 0xdeadbeef

// A free block whose surplus over a request is at least this many bytes
// is split in two; smaller surpluses are handed out whole.
// (Unresolved diff residue fixed: 512 was the removed value, 256 the
// added one — the post-patch value is kept.)
Block_Split_Size :: 256
// FIX: This does not respect the choice of alignment
// NOTE(review): this region is truncated diff residue. The body of
// heap_alloc is missing between the '{' and '}' below, and the lines
// prefixed with '-'/'+' are unresolved patch lines, not valid code.
// Recover the full function from version control before editing further.
heap_alloc :: (size_: u32, align: u32) -> rawptr {
}
// Block-splitting fragment: when the found free block 'hb' exceeds the
// request by at least Block_Split_Size, the surplus is carved into a
// new free block placed 'size' bytes into 'hb'.
if hb.size - size >= Block_Split_Size {
// The removed ('-') and re-added ('+') 'hb.size = size;' lines record a
// fix: the assignment must happen AFTER 'new_block.size' is computed
// from the original hb.size — otherwise new_block.size would be 0.
- hb.size = size;
-
new_block := cast(^heap_freed_block) (cast(uintptr) hb + size);
new_block.size = hb.size - size;
new_block.next = hb.next;
new_block.prev = hb.prev;
// Stamp the carved-off block as free so corruption checks recognize it.
new_block.magic_number = Free_Block_Magic_Number;
+ hb.size = size;
// Relink the free list around the new, smaller free block.
if hb.next != null do hb.next.prev = new_block;
*prev = new_block;
};
}
#match (package core.alloc).as_allocator make_allocator
// Wraps a typed pool allocator in the generic Allocator interface.
make_allocator :: (pool: ^PoolAllocator($Elem)) -> Allocator {
    return Allocator.{
        // Bind the element type now so the polymorphic proc fits the
        // untyped 'func' slot.
        func = #solidify pool_allocator_proc { Elem = Elem },
        // FIX: the pool pointer must travel with the allocator —
        // without it, pool_allocator_proc has no pool to draw from.
        data = pool,
    };
}
#match (package core.alloc).as_allocator make_allocator
// Wraps a ring-buffer allocator's state in the generic Allocator
// interface.
// NOTE(review): the original was truncated after 'func = ...' (a stray
// '// }' followed); the closing is reconstructed from the sibling
// overloads — confirm against version control.
make_allocator :: (rs: ^RingState) -> Allocator {
    return .{
        func = ring_alloc_proc,
        // State pointer handed back to ring_alloc_proc on each call.
        data = rs,
    };
}
get_iterator :: (list: ^List($T)) -> Iterator(T) {
    // Cursor state for the iterator; heap-allocated below so it
    // outlives this call and can be released by 'close'.
    ListIterator :: struct (T: type_expr) {
        current: ^ListElem(T);
    }

    // Yields the current element, then advances the cursor.
    iterator_next :: (list_iter: ^ListIterator($T)) -> (T, bool) {
        use package core.intrinsics.onyx { __zero_value }
        if list_iter.current == null do return __zero_value(T), false;

        // FIX: advance after the return value is captured; without
        // this the iterator would yield the first element forever.
        defer list_iter.current = list_iter.current.next;
        return list_iter.current.data, true;
    }

    // NOTE(review): this allocation is restored from truncated
    // residue — 'list_iterator' was used below but never created.
    // Assumes the list head field is 'first' — TODO confirm against
    // the List definition.
    list_iterator := cast(^ListIterator(T)) calloc(sizeof ListIterator(T));
    list_iterator.current = list.first;

    return .{
        data = list_iterator,
        next = #solidify iterator_next { T = T },
        // The state came from the C-style heap above, so 'cfree' is
        // the correct close behavior (replaces the removed wrapper
        // that only forwarded to cfree).
        close = cfree,
    };
}