func: alloc_proc;
}
-alloc :: proc (use a: ^Allocator, size: u32) -> rawptr {
+// Allocate `size` bytes (16-byte aligned) via the allocator's dispatch func.
+// Allocator is now taken by value instead of by pointer.
+alloc :: proc (use a: Allocator, size: u32) -> rawptr {
return func(data, AllocAction.Alloc, size, 16, null);
}
-resize :: proc (use a: ^Allocator, ptr: rawptr, size: u32) -> rawptr {
+// Resize the allocation at `ptr` to `size` bytes through the dispatch func.
+// Allocator is now taken by value instead of by pointer.
+resize :: proc (use a: Allocator, ptr: rawptr, size: u32) -> rawptr {
return func(data, AllocAction.Resize, size, 16, ptr);
}
-free :: proc (use a: ^Allocator, ptr: rawptr) {
+// Release the allocation at `ptr`; size/alignment are irrelevant for Free.
+// Allocator is now taken by value instead of by pointer.
+free :: proc (use a: Allocator, ptr: rawptr) {
func(data, AllocAction.Free, 0, 0, ptr);
}
return null;
}
-malloc :: proc (size: u32) -> rawptr do return alloc(^heap_allocator, size);
-mfree :: proc (ptr: rawptr) do free(^heap_allocator, ptr);
-mresize :: proc (ptr: rawptr, size: u32) -> rawptr do return resize(^heap_allocator, ptr, size);
+// Convenience wrappers over the global heap allocator. Since the allocator
+// procs now take Allocator by value, the '^' address-of is dropped here.
+malloc :: proc (size: u32) -> rawptr do return alloc(heap_allocator, size);
+mfree :: proc (ptr: rawptr) do free(heap_allocator, ptr);
+mresize :: proc (ptr: rawptr, size: u32) -> rawptr do return resize(heap_allocator, ptr, size);
}
-#private return_scratch_size :: 256
-#private return_scratch_buff : [return_scratch_size] u8
+// Scratch area used for returned values. The state variable is made
+// file-private (#private) so it sits alongside the buffer it manages.
+#private return_scratch_size :: 256
+#private return_scratch_buff : [return_scratch_size] u8
+#private return_scratch_state : ScratchState;
-return_scratch_state : ScratchState;
return_scratch_alloc : Allocator;
memory_init :: proc {
symbol_introduce(semstate.curr_scope, param->token, (AstNode *) param);
if (param->flags & Ast_Flag_Param_Use) {
- if (param->type->kind != Type_Kind_Pointer || param->type->Pointer.elem->kind != Type_Kind_Struct) {
- onyx_message_add(Msg_Type_Literal,
- param->token->pos,
- "can only 'use' pointers to structures.");
- } else {
- AstStructType* st = (AstStructType *) ((AstPointerType *) param->type_node)->elem;
+ if (type_is_struct(param->type)) {
+ AstStructType* st;
+ if (param->type->kind == Type_Kind_Struct) {
+ st = (AstStructType *) param->type_node;
+ } else {
+ st = (AstStructType *) ((AstPointerType *) param->type_node)->elem;
+ }
bh_arr_each(AstStructMember *, mem, st->members) {
AstFieldAccess* fa = onyx_ast_node_new(semstate.node_allocator, sizeof(AstFieldAccess), Ast_Kind_Field_Access);
(AstNode *) fa);
token_toggle_end((*mem)->token);
}
+
+ } else {
+ onyx_message_add(Msg_Type_Literal,
+ param->token->pos,
+ "can only 'use' structures or pointers to structures.");
}
}
}
} else {
if ((*memres)->type_node == NULL) return;
}
-
- (*memres)->type = type_build_from_ast(semstate.allocator, (*memres)->type_node);
-
- if (!type_is_compound((*memres)->type)) {
- Type* ptr_type = type_make_pointer(semstate.allocator, (*memres)->type);
- (*memres)->type = ptr_type;
-
- AstMemRes* new_memres = onyx_ast_node_new(semstate.node_allocator, sizeof(AstMemRes), Ast_Kind_Memres);
- memcpy(new_memres, (*memres), sizeof(AstMemRes));
-
- // BIG HACK: converting the (*memres) node to a dereference node to not break
- // already resolved symbols
- ((AstDereference *) (*memres))->kind = Ast_Kind_Dereference;
- ((AstDereference *) (*memres))->type_node = (*memres)->type_node;
- ((AstDereference *) (*memres))->type = (*memres)->type->Pointer.elem;
- ((AstDereference *) (*memres))->expr = (AstTyped *) new_memres;
-
- // BUT retain the 'old' memres in the entity list
- *memres = new_memres;
- }
}
void onyx_resolve_symbols() {
#define WIL(instr, data) bh_arr_push(code, ((WasmInstruction){ instr, { .l = data } }))
#define COMPILE_FUNC(kind, ...) static void compile_ ## kind (OnyxWasmModule* mod, bh_arr(WasmInstruction)* pcode, __VA_ARGS__)
-COMPILE_FUNC(function_body, AstFunction* fd);
-COMPILE_FUNC(block, AstBlock* block, b32 generate_block_headers);
-COMPILE_FUNC(statement, AstNode* stmt);
-COMPILE_FUNC(assignment, AstBinaryOp* assign);
-COMPILE_FUNC(store_instruction, Type* type, u32 offset);
-COMPILE_FUNC(load_instruction, Type* type, u32 offset);
-COMPILE_FUNC(if, AstIf* if_node);
-COMPILE_FUNC(while, AstWhile* while_node);
-COMPILE_FUNC(for, AstFor* for_node);
-COMPILE_FUNC(defer, AstDefer* defer);
-COMPILE_FUNC(deferred_stmts, AstNode* node);
-COMPILE_FUNC(binop, AstBinaryOp* binop);
-COMPILE_FUNC(unaryop, AstUnaryOp* unop);
-COMPILE_FUNC(call, AstCall* call);
-COMPILE_FUNC(intrinsic_call, AstIntrinsicCall* call);
-COMPILE_FUNC(array_access_location, AstArrayAccess* aa, u64* offset_return);
-COMPILE_FUNC(field_access_location, AstFieldAccess* field, u64* offset_return);
-COMPILE_FUNC(local_location, AstLocal* local, u64* offset_return);
-COMPILE_FUNC(struct_load, Type* type, u64 offset);
-COMPILE_FUNC(struct_store, AstTyped* lval);
-COMPILE_FUNC(expression, AstTyped* expr);
-COMPILE_FUNC(cast, AstUnaryOp* cast);
-COMPILE_FUNC(return, AstReturn* ret);
-COMPILE_FUNC(stack_enter, u64 stacksize);
-COMPILE_FUNC(stack_leave, u32 unused);
+// Forward declarations for all compile_* codegen helpers; the list is
+// re-emitted with memory_reservation_location added for global memres nodes.
+COMPILE_FUNC(function_body, AstFunction* fd);
+COMPILE_FUNC(block, AstBlock* block, b32 generate_block_headers);
+COMPILE_FUNC(statement, AstNode* stmt);
+COMPILE_FUNC(assignment, AstBinaryOp* assign);
+COMPILE_FUNC(store_instruction, Type* type, u32 offset);
+COMPILE_FUNC(load_instruction, Type* type, u32 offset);
+COMPILE_FUNC(if, AstIf* if_node);
+COMPILE_FUNC(while, AstWhile* while_node);
+COMPILE_FUNC(for, AstFor* for_node);
+COMPILE_FUNC(defer, AstDefer* defer);
+COMPILE_FUNC(deferred_stmts, AstNode* node);
+COMPILE_FUNC(binop, AstBinaryOp* binop);
+COMPILE_FUNC(unaryop, AstUnaryOp* unop);
+COMPILE_FUNC(call, AstCall* call);
+COMPILE_FUNC(intrinsic_call, AstIntrinsicCall* call);
+COMPILE_FUNC(array_access_location, AstArrayAccess* aa, u64* offset_return);
+COMPILE_FUNC(field_access_location, AstFieldAccess* field, u64* offset_return);
+COMPILE_FUNC(local_location, AstLocal* local, u64* offset_return);
+COMPILE_FUNC(memory_reservation_location, AstMemRes* memres);
+COMPILE_FUNC(struct_load, Type* type, u64 offset);
+COMPILE_FUNC(struct_store, AstTyped* lval);
+COMPILE_FUNC(expression, AstTyped* expr);
+COMPILE_FUNC(cast, AstUnaryOp* cast);
+COMPILE_FUNC(return, AstReturn* ret);
+COMPILE_FUNC(stack_enter, u64 stacksize);
+COMPILE_FUNC(stack_leave, u32 unused);
COMPILE_FUNC(function_body, AstFunction* fd) {
if (fd->body == NULL) return;
} else if (aa->addr->kind == Ast_Kind_Local
&& aa->addr->type->kind == Type_Kind_Array) {
compile_local_location(mod, &code, (AstLocal *) aa->addr, &offset);
+ } else if (aa->addr->kind == Ast_Kind_Memres
+ && aa->addr->type->kind != Type_Kind_Array) {
+ compile_memory_reservation_location(mod, &code, (AstMemRes *) aa->addr);
} else {
compile_expression(mod, &code, aa->addr);
}
u64 o2 = 0;
compile_local_location(mod, &code, (AstLocal *) source_expr, &o2);
offset += o2;
+ } else if (source_expr->kind == Ast_Kind_Memres
+ && source_expr->type->kind != Type_Kind_Pointer) {
+ compile_memory_reservation_location(mod, &code, (AstMemRes *) source_expr);
} else {
compile_expression(mod, &code, source_expr);
}
*pcode = code;
}
+// Push the address of a global memory reservation. The address is a
+// compile-time constant (memres->addr), so a single i32.const suffices —
+// no offset_return parameter is needed, unlike local/field locations.
+COMPILE_FUNC(memory_reservation_location, AstMemRes* memres) {
+ bh_arr(WasmInstruction) code = *pcode;
+
+ WID(WI_I32_CONST, memres->addr);
+
+ *pcode = code;
+}
+
COMPILE_FUNC(local_location, AstLocal* local, u64* offset_return) {
bh_arr(WasmInstruction) code = *pcode;
case Ast_Kind_Memres: {
AstMemRes* memres = (AstMemRes *) expr;
WID(WI_I32_CONST, memres->addr);
+ compile_load_instruction(mod, &code, memres->type, 0);
break;
}
mod->local_alloc = &wasm_func.locals;
mod->local_alloc->param_count = localidx;
- mod->stack_base_idx = local_raw_allocate(mod->local_alloc, WASM_TYPE_INT32);
-
mod->has_stack_locals = 0;
bh_arr_each(AstLocal *, local, fd->locals)
mod->has_stack_locals |= !local_is_wasm_local(*local);
- if (mod->has_stack_locals)
+ if (mod->has_stack_locals) {
// NOTE: '5' needs to match the number of instructions it takes
// to setup a stack frame
bh_arr_insert_end(wasm_func.code, 5);
+ mod->stack_base_idx = local_raw_allocate(mod->local_alloc, WASM_TYPE_INT32);
+ }
+
// Generate code
compile_function_body(mod, &wasm_func.code, fd);
static void compile_memory_reservation(OnyxWasmModule* mod, AstMemRes* memres) {
Type* effective_type = memres->type;
- if (!type_is_compound(effective_type)) effective_type = effective_type->Pointer.elem;
u64 alignment = type_alignment_of(effective_type);
u64 size = type_size_of(effective_type);