|| type->kind == Type_Kind_Slice
|| type->kind == Type_Kind_DynArray
|| type->kind == Type_Kind_Struct;
-
+ if (!strcmp(interface->name, "type_is_function")) return type->kind == Type_Kind_Function;
return 0;
}
case Type_Kind_Basic: return type->Basic.size;
case Type_Kind_MultiPointer:
case Type_Kind_Pointer: return POINTER_SIZE;
- case Type_Kind_Function: return 2 * POINTER_SIZE;
+ case Type_Kind_Function: return 4 + 4 + POINTER_SIZE;
case Type_Kind_Array: return type->Array.size;
case Type_Kind_Struct: return type->Struct.size;
case Type_Kind_Enum: return type_size_of(type->Enum.backing);
case Type_Kind_Slice: return POINTER_SIZE * 2; // HACK: These should not have to be 16 bytes in size, they should only have to be 12,
case Type_Kind_VarArgs: return POINTER_SIZE * 2; // but there are alignment issues right now with that so I decided to not fight it and just make them 16 bytes in size.
- case Type_Kind_DynArray: return POINTER_SIZE + 8 + 4 * POINTER_SIZE; // data (8), count (4), capacity (4), allocator { func (8), data (8) }
+ case Type_Kind_DynArray: return POINTER_SIZE + 8 + 8 + 2 * POINTER_SIZE; // data (8), count (4), capacity (4), allocator { func (4 + 4 + 8), data (8) }
case Type_Kind_Compound: return type->Compound.size;
case Type_Kind_Distinct: return type_size_of(type->Distinct.base_type);
default: return 0;
};
static const StructMember func_members[] = {
- { 0, 0, &basic_types[Basic_Kind_U32], "__funcidx", NULL, NULL, -1, 0, 0 },
- { POINTER_SIZE, 1, &basic_types[Basic_Kind_Rawptr], "data", NULL, NULL, -1, 0, 0 },
+ { 0, 0, &basic_types[Basic_Kind_U32], "__funcidx", NULL, NULL, -1, 0, 0 },
+ { POINTER_SIZE, 1, &basic_types[Basic_Kind_Rawptr], "closure", NULL, NULL, -1, 0, 0 },
+ { 2 * POINTER_SIZE, 2, &basic_types[Basic_Kind_U32], "closure_size", NULL, NULL, -1, 0, 0 },
};
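/*
 * Illustration only -- this struct does not exist in the compiler source.
 * It spells out the three-component function value that type_size_of and
 * func_members above describe: a function-table index, a pointer to the
 * captured-variable block, and that block's size. Field names mirror
 * func_members; any padding is whatever the compiler's offsets imply.
 */
#include <stdint.h>

typedef struct Illustrative_Func_Value {
    uint32_t funcidx;      /* index into the WebAssembly function table              */
    void    *closure;      /* captured-variable block, NULL when nothing is captured */
    uint32_t closure_size; /* size of the captured-variable block, in bytes          */
} Illustrative_Func_Value;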
b32 type_lookup_member(Type* type, char* member, StructMember* smem) {
}
case Type_Kind_Function: {
- if (idx > 2) return 0;
+ if (idx > 3) return 0;
*smem = func_members[idx];
return 1;
switch (type->kind) {
case Type_Kind_Slice:
case Type_Kind_VarArgs: return 2;
- case Type_Kind_Function: return 2;
+ case Type_Kind_Function: return 3;
case Type_Kind_Compound: return bh_arr_length(type->Compound.linear_members);
default: return 1;
}
two->type = &basic_types[Basic_Kind_Rawptr];
two->offset = POINTER_SIZE;
}
+ if (idx == 2) {
+ two->type = &basic_types[Basic_Kind_U32];
+ two->offset = 2 * POINTER_SIZE;
+ }
return 1;
default: {
case Type_Kind_Function: {
if (offset == 0) return 0;
if (offset == POINTER_SIZE) return 1;
+ if (offset == POINTER_SIZE * 2) return 2;
return -1;
}
default:
b32 type_results_in_void(Type* type) {
return (type == NULL)
|| (type->kind == Type_Kind_Basic && type->Basic.kind == Basic_Kind_Void);
- // || ( (type->kind == Type_Kind_Function)
- // && (type->Function.return_type->kind == Type_Kind_Basic)
- // && (type->Function.return_type->Basic.kind == Basic_Kind_Void));
}
b32 type_is_array_accessible(Type* type) {
case Type_Kind_Struct: return type->Struct.mem_count;
case Type_Kind_Slice: return 2;
case Type_Kind_VarArgs: return 2;
- case Type_Kind_Function: return 2;
+ case Type_Kind_Function: return 3;
case Type_Kind_DynArray: return 4;
default: return 0;
}
u64 iterator_remove_func = local_raw_allocate(mod->local_alloc, WASM_TYPE_FUNC);
u64 iterator_done_bool = local_raw_allocate(mod->local_alloc, WASM_TYPE_INT32);
WI(for_node->token, WI_DROP);
+ WI(for_node->token, WI_DROP);
WIL(for_node->token, WI_LOCAL_SET, iterator_remove_func);
WI(for_node->token, WI_DROP);
+ WI(for_node->token, WI_DROP);
WIL(for_node->token, WI_LOCAL_SET, iterator_close_func);
WI(for_node->token, WI_DROP);
+ WI(for_node->token, WI_DROP);
WIL(for_node->token, WI_LOCAL_SET, iterator_next_func);
WIL(for_node->token, WI_LOCAL_SET, iterator_data_ptr);
emit_expression(mod, &code, binop->left);
if (binop->left->type->kind == Type_Kind_Function) { // nocheckin
WI(NULL, WI_DROP);
+ WI(NULL, WI_DROP);
}
emit_expression(mod, &code, binop->right);
if (binop->right->type->kind == Type_Kind_Function) { // nocheckin
WI(NULL, WI_DROP);
+ WI(NULL, WI_DROP);
}
WI(binop->token, binop_instr);
emit_expression(mod, &code, call->callee);
u64 global_closure_base_idx = bh_imap_get(&mod->index_map, (u64) &builtin_closure_base);
+ WI(NULL, WI_DROP);
WIL(NULL, WI_GLOBAL_SET, global_closure_base_idx);
i32 type_idx = generate_type_idx(mod, call->callee->type);
WID(NULL, WI_I32_CONST, elemidx);
if (!func->captures) {
+ WIL(NULL, WI_PTR_CONST, 0);
WIL(NULL, WI_I32_CONST, 0);
break;
}
}
local_raw_free(mod->local_alloc, WASM_TYPE_PTR);
+
+ WIL(NULL, WI_I32_CONST, func->captures->total_size_in_bytes);
break;
}
}
if (to->kind == Type_Kind_Basic && from->kind == Type_Kind_Function) {
+ WI(NULL, WI_DROP);
WI(NULL, WI_DROP);
*pcode = code;
return;
} else if (type->kind == Type_Kind_Function) {
WID(NULL, WI_I32_CONST, mod->null_proc_func_idx);
WIL(NULL, WI_I32_CONST, 0);
+ WIL(NULL, WI_I32_CONST, 0);
} else {
if (type == &basic_types[Basic_Kind_Void]) {
// start of the main loop of your program.
temp_allocator : Allocator;
+ // The procedure to call when allocating space for a closure.
+ // The default is to allocate using the `temp_allocator`.
+ closure_allocate: (size: i32) -> rawptr = default_closure_allocate;
+
// Defines what happens when `log()` is called. Defaults to a
// logger that filters log messages by their severity.
logger : Logger = .{ default_logger_proc, &default_logger };
thread_id : i32;
// Allows you to place any data on the context that you want to.
- // Generally used in place a closure mechanism.
user_data: rawptr;
user_data_type: type_expr;
}
}
+#local default_closure_allocate :: (size: i32) -> rawptr {
+ return raw_alloc(context.temp_allocator, size);
+}
+
+
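// Sketch, not part of this change: because closure allocation now goes through
// `context.closure_allocate`, a program can swap the allocator used for capture
// blocks. `my_closure_allocate` is a hypothetical name and `context.allocator`
// is just one possible choice; `raw_alloc` is called the same way as in
// `default_closure_allocate` above.
my_closure_allocate :: (size: i32) -> rawptr {
    return raw_alloc(context.allocator, size);
}

main :: (args: [] cstr) {
    context.closure_allocate = my_closure_allocate;
    // Closures created after this point place their captured variables in
    // memory obtained from my_closure_allocate instead of the temp_allocator.
}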
//
// Basic allocation structures.
changed to a configurable way, but for now it simply allocates out of the heap allocator.
"""
__closure_block_allocate :: (size: i32) -> rawptr {
- return raw_alloc(context.temp_allocator, size);
+ return context.closure_allocate(size);
}
// Compound: array, slice, struct
// Array: [$N] $T
// Slice: [] $T
+// Function: () -> void
type_is_bool :: interface (t: $T) #intrinsic
type_is_int :: interface (t: $T) #intrinsic
type_is_slice :: interface (t: $T) #intrinsic
type_is_struct :: interface (t: $T) #intrinsic
type_is_compound :: interface (t: $T) #intrinsic
+type_is_function :: interface (t: $T) #intrinsic
i32 thread_id = thread->id;
{ // Call the _thread_start procedure
- wasm_val_t args[] = { WASM_I32_VAL(thread_id), WASM_I32_VAL(thread->tls_base), WASM_I32_VAL(thread->stack_base), WASM_I32_VAL(thread->funcidx), WASM_I32_VAL(thread->closureptr), WASM_I32_VAL(thread->dataptr) };
+ wasm_val_t args[] = {
+ WASM_I32_VAL(thread_id),
+ WASM_I32_VAL(thread->tls_base),
+ WASM_I32_VAL(thread->stack_base),
+ WASM_I32_VAL(thread->funcidx),
+ WASM_I32_VAL(thread->closureptr),
+ WASM_I32_VAL(0),
+ WASM_I32_VAL(thread->dataptr)
+ };
wasm_val_vec_t results = { 0, 0 };
wasm_val_vec_t args_array = WASM_ARRAY_VEC(args);
return 0;
}
-ONYX_DEF(__spawn_thread, (WASM_I32, WASM_I32, WASM_I32, WASM_I32, WASM_I32, WASM_I32), (WASM_I32)) {
+ONYX_DEF(__spawn_thread, (WASM_I32, WASM_I32, WASM_I32, WASM_I32, WASM_I32, WASM_I32, WASM_I32), (WASM_I32)) {
if (threads == NULL) bh_arr_new(bh_heap_allocator(), threads, 128);
bh_arr_insert_end(threads, 1);
OnyxThread *thread = &bh_arr_last(threads);
thread->stack_base = params->data[2].of.i32;
thread->funcidx = params->data[3].of.i32;
thread->closureptr = params->data[4].of.i32;
- thread->dataptr = params->data[5].of.i32;
+ thread->dataptr = params->data[6].of.i32;
#ifdef _BH_LINUX
pthread_create(&thread->thread, NULL, onyx_run_thread, thread);