#!/bin/sh
-! grep -Rns nocheckin compiler interpretter runtime
\ No newline at end of file
+! grep -Rns nocheckin compiler interpretter runtime core
\ No newline at end of file
* Many math functions.
* Basics of reading environment variables.
* Relative file inclusion using "./" at the start of the path.
-* Basics of "notes" in the code: special comments that make it easy to search for a marker in the
-  source. This is not yet very useful in the language itself, but I wanted a way to write searchable
-  notes without having to parse comments. The syntax is "@Note" (a short sketch follows this list).
Removals:
* the "proc" keyword.
#load "./alloc/ring"
#load "./alloc/pool"
#load "./alloc/logging"
-#load "./alloc/auto_heap"
+#load "./alloc/gc"
as_allocator :: #match {
macro (a: Allocator) => a
+++ /dev/null
-package core.alloc.heap
-
-AutoHeapState :: struct {
- backing_allocator: Allocator;
- set := Set(rawptr).{};
-}
-
-#local auto_heap_alloc_proc :: (data: ^AutoHeapState, aa: AllocationAction, size: u32, align: u32, oldptr: rawptr) -> rawptr {
- newptr := data.backing_allocator.func(data.backing_allocator.data, aa, size, align, oldptr);
-
- switch aa {
- case .Alloc {
- if newptr != null {
- data.set->insert(newptr);
- }
- }
-
- case .Resize {
- data.set->remove(oldptr);
-
- if newptr != null {
- data.set->insert(newptr);
- }
- }
-
- case .Free {
- data.set->remove(oldptr);
- }
- }
-
- return newptr;
-}
-
-auto_heap_make :: (backing := context.allocator) -> AutoHeapState {
- hs: AutoHeapState;
- hs.backing_allocator = backing;
- hs.set->init(allocator = backing);
- return hs;
-}
-
-auto_heap_clear :: (hs: ^AutoHeapState) {
- for^ hs.set.entries {
- raw_free(hs.backing_allocator, it.value);
- }
-
- hs.set->clear();
-}
-
-auto_heap_free :: (hs: ^AutoHeapState) {
- auto_heap_clear(hs);
- hs.set->free();
-}
-
-#match core.alloc.as_allocator auto_heap_make_allocator
-auto_heap_make_allocator :: (hs: ^AutoHeapState) -> Allocator {
- return Allocator.{
- func = auto_heap_alloc_proc,
- data = hs
- };
-}
-
-auto :: #match {
- macro () {
- use core.alloc {heap}
-
- auto_heap := heap.auto_heap_make();
- old_allocator := context.allocator;
- context.allocator = heap.auto_heap_make_allocator(^auto_heap);
- defer {
- heap.auto_heap_free(^auto_heap);
- context.allocator = old_allocator;
- }
- },
-
- macro (body: Code) -> i32 {
- auto :: auto
-
- #context_scope {
- auto();
- #unquote body;
- }
-
- return 0;
- }
-}
--- /dev/null
+package core.alloc.gc
+
+GCState :: struct {
+ backing_allocator: Allocator;
+ first: ^GCLink;
+}
+
+#local
+GCLink :: struct {
+ prev: ^GCLink;
+ next: ^GCLink;
+ magic_number: u32;
+}
+
+make :: (backing := context.allocator) -> GCState {
+ hs: GCState;
+ hs.backing_allocator = backing;
+ return hs;
+}
+
+clear :: (hs: ^GCState) {
+ while l := hs.first; l != null {
+ n := l.next;
+ raw_free(hs.backing_allocator, l);
+ l = n;
+ }
+
+ hs.first = null;
+}
+
+#match core.alloc.as_allocator make_allocator
+make_allocator :: (hs: ^GCState) -> Allocator {
+ return Allocator.{
+ func = gc_alloc_proc,
+ data = hs
+ };
+}
+
+auto :: #match {
+ macro () {
+ use core.alloc {gc}
+
+ gcs := gc.make();
+ old_allocator := context.allocator;
+ context.allocator = core.alloc.as_allocator(^gcs);
+ defer {
+ gc.clear(^gcs);
+ context.allocator = old_allocator;
+ }
+ },
+
+ macro (body: Code) -> i32 {
+ auto :: auto
+
+ #context_scope {
+ auto();
+ #unquote body;
+ }
+
+ return 0;
+ }
+}
+
+
+#local
+GC_Link_Magic_Number :: 0x1337face
+
+#local gc_alloc_proc :: (data: ^GCState, aa: AllocationAction, size: u32, align: u32, oldptr: rawptr) -> rawptr {
+
+ old: ^GCLink;
+
+ if oldptr != null {
+ old = (cast(^GCLink) oldptr) - 1;
+
+ //
+    // If this allocation did not come from a GC allocator, forward
+    // the request straight to the backing allocator.
+ if old.magic_number != GC_Link_Magic_Number {
+ return data.backing_allocator.func(
+ data.backing_allocator.data, aa, size, align, oldptr
+ );
+ }
+ }
+
+ if aa == .Resize || aa == .Free {
+ if data.first == old {
+ data.first = data.first.next;
+
+ } else {
+            old.prev.next = old.next;
+
+            // The tail of the list has no successor to patch.
+            if old.next != null do old.next.prev = old.prev;
+ }
+ }
+
+ newptr: ^GCLink = data.backing_allocator.func(
+ data.backing_allocator.data, aa,
+ size + sizeof GCLink, align, old);
+
+ if aa == .Alloc || aa == .Resize {
+ if newptr != null {
+ newptr.magic_number = GC_Link_Magic_Number;
+ newptr.next = data.first;
+ newptr.prev = null;
+
+ if data.first != null {
+ data.first.prev = newptr;
+ }
+
+ data.first = newptr;
+ }
+ }
+
+    // If the backing allocator failed, do not offset past a missing link header.
+    if newptr == null do return null;
+
+    return newptr + 1;
+}
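For context, a usage sketch of the zero-argument "auto" overload defined above: it swaps
context.allocator for a collected allocator and releases everything when the enclosing scope
exits. The raw_alloc call is only illustrative; any allocation routed through context.allocator
is tracked the same way.

    package main

    use core.alloc {gc}

    scratch_work :: () {
        // From here to the end of the scope, context.allocator is the
        // collected allocator created by gc.auto().
        gc.auto();

        buffer := raw_alloc(context.allocator, 128);
        // ... use buffer; no explicit raw_free is needed ...
    }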
// that may happen on the heap, with the added overhead of checking that
// on every alloc/resize/free.
Enable_Debug :: #defined( runtime.vars.Enable_Heap_Debug )
+Enable_Clear_Freed_Memory :: #defined(runtime.vars.Enable_Heap_Clear_Freed_Memory)
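Both switches are plain existence checks on symbols in runtime.vars. One way to enable them from
user code, sketched here under the assumption that constants may be added to that package by
declaring it at the top of a file:

    package runtime.vars

    // Only the existence of these symbols matters; #defined() never
    // reads their values.
    Enable_Heap_Debug :: true
    Enable_Heap_Clear_Freed_Memory :: true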
// This is the implementation for the general purpose heap allocator.
// It is a simple bump allocator, with a free list. It is not very good
heap_block :: struct {
size : u32;
+ magic_number : u32;
}
heap_freed_block :: struct {
use base: heap_block;
- magic_number : u32;
next : ^heap_freed_block;
prev : ^heap_freed_block;
}
use base: heap_block;
}
- Allocated_Flag :: 0x1
- Free_Block_Magic_Number :: 0xdeadbeef
- Block_Split_Size :: 256
+ Allocated_Flag :: 0x1
+ Free_Block_Magic_Number :: 0xdeadbeef
+ Alloc_Block_Magic_Number :: 0xbabecafe
+ Block_Split_Size :: 256
// FIX: This does not respect the choice of alignment
heap_alloc :: (size_: u32, align: u32) -> rawptr {
best.prev = null;
best.magic_number = 0;
best.size |= Allocated_Flag;
+ best.magic_number = Alloc_Block_Magic_Number;
return cast(rawptr) (cast(uintptr) best + sizeof heap_allocated_block);
}
ret := cast(^heap_allocated_block) heap_state.next_alloc;
ret.size = size;
ret.size |= Allocated_Flag;
+ ret.magic_number = Alloc_Block_Magic_Number;
heap_state.next_alloc = cast(rawptr) (cast(uintptr) heap_state.next_alloc + size);
heap_state.remaining_space -= size;
ret := cast(^heap_allocated_block) heap_state.next_alloc;
ret.size = size;
ret.size |= Allocated_Flag;
+ ret.magic_number = Alloc_Block_Magic_Number;
heap_state.next_alloc = cast(rawptr) (cast(uintptr) heap_state.next_alloc + size);
heap_state.remaining_space -= size;
log("INVALID DOUBLE FREE");
return;
}
+
+ if hb_ptr.magic_number != Alloc_Block_Magic_Number {
+ log("FREEING INVALID BLOCK");
+ return;
+ }
}
hb_ptr.size &= ~Allocated_Flag;
orig_size := hb_ptr.size - sizeof heap_allocated_block;
- #if Enable_Debug do memory_fill(ptr, ~~0xcc, orig_size);
+ #if Enable_Debug && Enable_Clear_Freed_Memory {
+ memory_fill(ptr, ~~0xcc, orig_size);
+ }
if cast(uintptr) hb_ptr + hb_ptr.size < cast(uintptr) heap_state.next_alloc {
next_block := cast(^heap_freed_block) (cast(uintptr) hb_ptr + hb_ptr.size);
hb_ptr.size = new_size;
hb_ptr.size |= Allocated_Flag;
+ hb_ptr.magic_number = Alloc_Block_Magic_Number;
heap_state.next_alloc = cast(rawptr) (cast(uintptr) heap_state.next_alloc + needed_size);
heap_state.remaining_space -= needed_size;
return ptr;
close :: (use c: ^Context($T)) {
sync.mutex_destroy(^c.mutex);
- cfree(c);
+ delete(c);
}
// This iterator's context is allocated from the heap because
proc_reader := io.reader_make(^proc);
output := io.read_all(^proc_reader);
- defer memory.free_slice(^output);
+ defer delete(^output);
if exit := os.process_wait(^proc); exit != .Success {
// Error running the test case