initRuntime() {
printf "${green}Please choose a WebAssembly runtime to use with your Onyx installation.\n${reset}"
- echo "1) Wasmer: An industry standard WebAssembly runtime. Very fast."
+ echo "1) Wasmer: An industry standard WebAssembly runtime. Very fast. (default)"
echo "2) OVM: A custom, lightweight runtime made for Onyx. Supports debugging. Slower than Wasmer."
echo "3) None: Omit using a runtime and only use Onyx as a compiler to WebAssembly."
1) RUNTIME="wasmer"; break ;;
2) RUNTIME="ovm"; break ;;
3) RUNTIME="none"; break ;;
- *) echo "Invalid choice. Quitting."; exit 1 ;;
+ *) echo "Invalid choice. Defaulting to 'wasmer'."; RUNTIME="wasmer" ;;
esac
}
// 'use package' statements have to be reevaluated to pull in the new symbols.
bh_arr(Entity *) use_package_entities;
+ // NOTE: This tracks all #package_doc statements used for this package.
+ bh_arr(OnyxToken *) doc_strings;
+
// NOTE: These are entities that are stored in packages marked with `#allow_stale_code`.
// These entities are flushed to the entity heap when the package has been explicit used
// somewhere.
return;
case Ast_Kind_Typeof:
- bh_buffer_write_string(buffer, "_TYPEOF_");
+ bh_buffer_write_string(buffer, (char *) type_get_name(
+ type_build_from_ast(context.ast_alloc, (AstType *) node)
+ ));
return;
case Ast_Kind_Alias:
fori (j, 0, bh_arr_length(p->sub_packages)) {
bh_buffer_write_u32(&doc_buffer, (u32) p->sub_packages[j] - 1);
}
+
+ bh_buffer_write_u32(&doc_buffer, bh_arr_length(p->doc_strings));
+ bh_arr_each(OnyxToken *, ptkn, p->doc_strings) {
+ OnyxToken *tkn = *ptkn;
+ write_string(&doc_buffer, tkn->length, tkn->text);
+ }
}
//
bh_arr_push(parser->alternate_entity_placement_stack, &parser->package->buffered_entities);
}
+ while (parse_possible_directive(parser, "package_doc")) {
+ OnyxToken *doc_string = expect_token(parser, Token_Type_Literal_String);
+
+ bh_arr_push(parser->package->doc_strings, doc_string);
+ }
+
parse_top_level_statements_until(parser, Token_Type_End_Stream);
parser->current_scope = parser->current_scope->parent;
use core.memory
use core.intrinsics.types {type_is_function}
+#doc "Overloaded procedure for converting something to an Allocator."
as_allocator :: #match {
macro (a: Allocator) => a
}
#thread_local
temp_allocator : Allocator;
+#doc """
+ Initializes the thread-local temporary allocator.
+
+ You do not need to call this. It is called automatically on thread initialization.
+"""
init_temp_allocator :: () {
temp_state = arena.make(heap_allocator, TEMPORARY_ALLOCATOR_SIZE);
temp_allocator = as_allocator(&temp_state);
}
+#doc """
+ Resets the temporary allocator, effectively freeing all allocations made in the temporary allocator.
+"""
clear_temp_allocator :: () {
arena.clear(&temp_state);
}
package core.alloc.arena
+#package_doc """
+ This allocator is mostly used for making many fixed-size
+ allocation (i.e. allocations that will not need to change
+ in size, such as game entities or position structs). The
+ power of this allocator over the heap allocator for this
+ purpose is that it is much faster, since the logic is
+ simpler. Another power of this allocator over something
+ such as a dynamic array is that the dynamic array could
+ relocate and cause any pointers to the data inside to
+ become invalidated; this is definitely not behaviour you
+ want. This arena allocator can grow as large as needed,
+ while guaranteeing that the memory inside of it will
+ never move.
+"""
use core
-// This allocator is mostly used for making many fixed-size
-// allocation (i.e. allocations that will not need to change
-// in size, such as game entities or position structs). The
-// power of this allocator over the heap allocator for this
-// purpose is that it is much faster, since the logic is
-// simpler. Another power of this allocator over something
-// such as a dynamic array is that the dynamic array could
-// relocate and cause any pointers to the data inside to
-// become invalidated; this is definitely not behaviour you
-// want. This arena allocator can grow as large as needed,
-// while guaranteeing that the memory inside of it will
-// never move.
-
// Deprecated struct 'ArenaState'. Use 'Arena' instead.
ArenaState :: Arena
+#doc """
+ Stores internal details used during arena allocations.
+"""
Arena :: struct {
backing_allocator : Allocator;
arena_size : u32;
}
+#local
ArenaBlock :: struct { next : &ArenaBlock; }
#local
return null;
}
-// @Note // `arena_size` must be at least 4
+#doc """
+ Makes a new arena.
+
+    `arena_size` specifies the size of each individual arena page, which must be at least 4 bytes
+    in size (but should be quite a bit larger).
+"""
make :: (backing: Allocator, arena_size: u32) -> Arena {
assert(arena_size >= 4, "Arena size was expected to be at least 4 bytes.");
};
}
+#doc """
+ Frees all pages in an arena.
+"""
free :: (arena: &Arena) {
walker := arena.first_arena;
trailer := walker;
arena.size = 0;
}
-#doc "Clears and frees every page, except for first page."
+#doc """
+ Clears and frees every page, except for first page.
+"""
clear :: (arena: &Arena) {
walker := arena.first_arena.next;
arena.size = sizeof rawptr;
}
+#doc """
+ Returns the number of pages in the arena.
+"""
get_allocated_arenas :: (arena: &Arena) -> u32 {
arenas := 0;
walker := arena.first_arena;
return arenas;
}
+#doc """
+ Returns the number of bytes used by the arena.
+"""
get_allocated_bytes :: (arena: &Arena) -> u32 {
return get_allocated_arenas(arena) * (arena.arena_size - 1) + arena.size;
}
+#doc """
+ Creates an arena allocator and automatically applies it to the context's allocator
+ in the current scope.
+
+ foo :: () {
+ alloc.arena.auto();
+
+ // Lazily allocate everything, knowing that it will
+ // be freed when this function returns.
+ for 100 {
+ s := string.alloc_copy("Make a copy of me!");
+ }
+ }
+"""
auto :: #match {
macro (size := 32 * 1024, $dest: Code = [](context.allocator)) {
use core.alloc {arena, heap_allocator}
}
}
+#doc """
+ Creates an arena allocator to be used as the temporary allocator
+ in the code block.
+
+ foo :: () {
+ alloc.arena.auto_temp() {
+ for 1000 {
+ // Will be automatically freed
+ x := new_temp(i32);
+ }
+ }
+ }
+"""
auto_temp :: macro (body: Code) -> i32 {
use core.alloc {arena, heap_allocator}
a := arena.make(heap_allocator, 32 * 1024);
package core.alloc.atomic
+#package_doc """
+ AtomicAllocator wraps another allocator in a mutex,
+ ensuring that every allocation is thread-safe. This
+ is not needed for the general purpose heap allocator,
+ as that already has a thread-safe implementation.
+"""
// This can only be used when the core.sync package exists.
#if #defined(package core.sync) {
-//
-// AtomicAllocator wraps another allocator in a mutex,
-// ensuring that every allocation is thread-safe. This
-// is not needed for the general purpose heap allocator,
-// as that already has a thread-safe implementation.
use core.alloc
use core.sync
+#doc """
+ Stores internal details used by the atomic allocator.
+
+ Simply the wrapped allocator and the mutex.
+"""
AtomicAllocator :: struct {
a: Allocator;
m: sync.Mutex;
}
+#doc """
+ Creates a new AtomicAllocator over an existing allocator.
+"""
make :: (a: Allocator) -> AtomicAllocator {
atomic: AtomicAllocator = .{ a = a };
return atomic;
}
+#doc """
+ Makes an allocator out of the atomic allocator state.
+"""
make_allocator :: (atomic: &AtomicAllocator) =>
Allocator.{ atomic, atomic_alloc };
package core.alloc.fixed
+#package_doc """
+ This allocator is very simple. It is simply a bump allocator from
+ a fixed size buffer. It cannot free or resize, and will return null
+ when it has used all memory in the buffer given to it.
+
+ This kind of allocator is useful for temporary string building or
+ similar circumstances, where you know that the needed memory size
+    will not be exceeded, but you don't want to deal with potential
+ slowness of a general heap allocator. By using this allocator, you
+ can continue to use the same code that does allocations like normal,
+ but can get the speed increase of a simple allocation strategy.
+"""
use core
-// This allocator is very simple. It is simply a bump allocator from
-// a fixed size buffer. It cannot free or resize, and will return null
-// when it has used all memory in the buffer given to it.
-//
-// This kind of allocator is useful for temporary string building or
-// similar circumstances, where you know that the needed memory size
-// will not be exceeded, but you don't what to deal with potential
-// slowness of a general heap allocator. By using this allocator, you
-// can continue to use the same code that does allocations like normal,
-// but can get the speed increase of a simple allocation strategy.
FixedAllocator :: struct {
buffer: [] u8;
package core.alloc.gc
-
-//
-// "Garbage collection" is not somthing Onyx has. Even things
-// like reference counted pointers is not something Onyx can
-// do, because of Onyx's simpler semantics. That being said,
-// with custom allocators and some careful design, GC is
-// "achievable". This allocator wraps another allocator. With
-// each allocation, a little extra space is allocated to build
-// a linked list of all allocations made. This way, when the
-// memory is done being used, everything can be freed automatically.
-//
-// The `auto` macro makes this allocator very easy to use:
-//
-// core.alloc.gc.auto() {
-// // Every allocation here will automatically be freed
-// }
+#package_doc """
+    "Garbage collection" is not something Onyx has. Even things
+ like reference counted pointers is not something Onyx can
+ do, because of Onyx's simpler semantics. That being said,
+ with custom allocators and some careful design, GC is
+ "achievable". This allocator wraps another allocator. With
+ each allocation, a little extra space is allocated to build
+ a linked list of all allocations made. This way, when the
+ memory is done being used, everything can be freed automatically.
+
+ The `auto` macro makes this allocator very easy to use:
+ core.alloc.gc.auto() {
+ // Every allocation here will automatically be freed
+ }
+"""
use runtime
return cast([&] GCLink, newptr) + 1;
}
+#doc """
+    Removes an allocation from the garbage collector's tracking list,
+ so it will not be freed automatically.
+"""
untrack :: (ptr: rawptr) -> bool {
link: &GCLink = (cast([&] GCLink) ptr) - 1;
package core.alloc.log
+#package_doc """
+ This allocator simply wraps another allocator and
+ prints every allocation/deallocation made by that
+ allocator.
+"""
-// This allocator simply wraps another allocator and
-// prints every allocation/deallocation made by that
-// allocator.
use core
use runtime
package core.alloc.memdebug
#allow_stale_code
+#package_doc """
+ The memory debugger allocator wraps an existing allocator (normally the heap allocator),
+ and reports on a TCP socket all of the allocation operations done to the underlying
+ allocator. This listener on this socket can use this information to show useful information
+ about the memory usage in the program.
+
+ This is best used when it starts at the very beginning of the program.
+ The easiest way to use this is to define MEMDEBUG in runtime.vars,
+ or pass -DMEMDEBUG on the command line.
+"""
use core {Result}
use core.alloc
package core.alloc.pool
+#package_doc """
+ A pool allocator is an O(1) allocator that is capable of allocating and freeing.
+ It is able to do both in constant time because it maintains a linked list of all
+ the free elements in the pool. When an element is requested the first element of
+ linked list is returned and the list is updated. When an element is freed, it
+ becomes the first element. The catch with this strategy however, is that all of
+ the allocations must be of the same size. This would not be an allocator to use
+    when dealing with heterogeneous data, but when doing homogeneous data, such as
+ game entities, this allocator is great. It allows you to allocate and free as
+ many times as you want, without worrying about fragmentation or slow allocators.
+ Just make sure you don't allocate more than the pool can provide.
+"""
use core
-// A pool allocator is an O(1) allocator that is capable of allocating and freeing.
-// It is able to do both in constant time because it maintains a linked list of all
-// the free elements in the pool. When an element is requested the first element of
-// linked list is returned and the list is updated. When an element is freed, it
-// becomes the first element. The catch with this strategy however, is that all of
-// the allocations must be of the same size. This would not be an allocator to use
-// when dealing with heterogenous data, but when doing homogenous data, such as
-// game entities, this allocator is great. It allows you to allocate and free as
-// many times as you want, without worrying about fragmentation or slow allocators.
-// Just make sure you don't allocate more than the pool can provide.
-
PoolAllocator :: struct (Elem: type_expr) {
buffer : [] Elem;
first_free : &Elem;
package core.alloc.ring
+#package_doc """
+ This allocator is great for temporary memory, such as returning
+ a pointer from a function, or storing a formatted string. The
+ memory allocated using this allocator does not need to be freed.
+ The idea is that as you keep allocating you will "wrap around"
+ and start writing over memory that was allocated before. For this
+ reason, it is not safe to use this for any kind of permanent
+ allocation. Also, be wary that you provide this allocator with
+ a buffer big enough to store as much data as you are going to need
+ at any given time.
+"""
use core
-// This allocator is great for temporary memory, such as returning
-// a pointer from a function, or storing a formatted string. The
-// memory allocated using this allocator does not need to be freed.
-// The idea is that as you keep allocating you will "wrap around"
-// and start writing over memory that was allocated before. For this
-// reason, it is not safe to use this for any kind of permanent
-// allocation. Also, be wary that you provide this allocator with
-// a buffer big enough to store as much data as you are going to need
-// at any given time.
-
RingState :: struct {
base_ptr : rawptr;
size : u32;
parent: Id;
subpackages: [] Id;
+
+ notes: [] str;
}
Doc_Entity :: struct {