From: Brendan Hansen
Date: Fri, 14 Aug 2020 21:51:51 +0000 (-0500)
Subject: code cleanup and bug fixes
X-Git-Url: https://git.brendanfh.com/?a=commitdiff_plain;h=80955c6276b87195054ba0552c350c35c650a59c;p=onyx.git

code cleanup and bug fixes
---

diff --git a/core/alloc.onyx b/core/alloc.onyx
index fe975ed3..81a96dd2 100644
--- a/core/alloc.onyx
+++ b/core/alloc.onyx
@@ -162,3 +162,61 @@ heap_alloc_proc :: proc (data: rawptr, aa: AllocAction, size: u32, align: u32, o
 malloc :: proc (size: u32) -> rawptr do return alloc(^heap_allocator, size);
 mfree :: proc (ptr: rawptr) do free(^heap_allocator, ptr);
 mresize :: proc (ptr: rawptr, size: u32) -> rawptr do return resize(^heap_allocator, ptr, size);
+
+
+
+#private
+ScratchState :: struct {
+    base_ptr : rawptr;
+    size     : u32;
+    curr_ptr : rawptr;
+}
+
+#private
+scratch_alloc_proc :: proc (data: rawptr, aa: AllocAction, size: u32, align: u32, oldptr: rawptr) -> rawptr {
+    ss := cast(^ScratchState) data;
+
+    if aa == AllocAction.Alloc {
+        retval := null;
+        rem := ss.size - cast(u32) ss.curr_ptr + cast(u32) ss.base_ptr;
+
+        // Bump allocate while the request fits in the remaining space.
+        if size <= rem {
+            retval = ss.curr_ptr;
+            ss.curr_ptr = cast(rawptr) (cast(u32) ss.curr_ptr + size);
+        } else {
+            // Otherwise wrap around and start reusing the buffer.
+            retval = ss.base_ptr;
+            ss.curr_ptr = cast(rawptr) (cast(u32) ss.base_ptr + size);
+        }
+
+        return retval;
+    }
+
+    return null;
+}
+
+scratch_state_init :: proc (use ss: ^ScratchState, buffer: rawptr, length: u32) {
+    base_ptr = buffer;
+    curr_ptr = buffer;
+    size     = length;
+}
+
+scratch_alloc_init :: proc (a: ^Allocator, ss: ^ScratchState) {
+    a.func = scratch_alloc_proc;
+    a.data = ss;
+}
+
+
+#private return_scratch_size :: 256
+#private return_scratch_buff : [return_scratch_size] u8
+
+return_scratch_state : ScratchState;
+return_scratch_alloc : Allocator;
+
+memory_init :: proc {
+    heap_init();
+
+    scratch_state_init(^return_scratch_state, return_scratch_buff, return_scratch_size);
+    scratch_alloc_init(^return_scratch_alloc, ^return_scratch_state);
+}
\ No newline at end of file
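The scratch allocator added above is a bump allocator over a fixed buffer: allocations advance curr_ptr, and when a request no longer fits in the remaining space the cursor wraps back to base_ptr and the buffer is reused from the start. A minimal standalone C sketch of the same bump-and-wrap logic (the names and C types are illustrative, not part of the Onyx core library):

    #include <stdint.h>
    #include <stddef.h>

    typedef struct ScratchState {
        char    *base_ptr;   // start of the backing buffer
        uint32_t size;       // total buffer size in bytes
        char    *curr_ptr;   // next free byte
    } ScratchState;

    // Returned pointers stay valid only until the cursor wraps past them.
    static void *scratch_alloc(ScratchState *ss, uint32_t size) {
        if (size > ss->size) return NULL;                // can never fit

        uint32_t rem = ss->size - (uint32_t) (ss->curr_ptr - ss->base_ptr);

        if (size <= rem) {                               // bump within remaining space
            void *retval = ss->curr_ptr;
            ss->curr_ptr += size;
            return retval;
        }

        ss->curr_ptr = ss->base_ptr + size;              // wrap and reuse the buffer
        return ss->base_ptr;
    }

There is no free: old allocations are silently overwritten once the cursor wraps, which is exactly the property the 256-byte return buffer set up in memory_init relies on.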
diff --git a/docs/plan b/docs/plan
index d56405af..d35ece7a 100644
--- a/docs/plan
+++ b/docs/plan
@@ -179,8 +179,6 @@ HOW:
             b := 5
             a, b = b, a;
 
-    [ ] multiple return values
-
     [ ] All code paths return correct value
 
     [ ] Add slices
@@ -197,6 +195,16 @@ HOW:
         - Dynamic resizing?
        - They are just very hard to use at the moment
 
+    [ ] multiple return values
+        - THIS IS NOT GOING TO BE FUN.
+        - The Wasm multi-value proposal is not finalized, and nothing has implemented it yet
+        - This means other methods of returning multiple things would be needed:
+            - globals
+            - on the stack
+
+    [ ] returning structs
+        - This will put forward a lot of the work that will be done for multiple return values (see the out-pointer sketch after this diff)
+
     [ ] Type parameterized structs
 
     [ ] Array literals
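Until multi-value returns are available, the usual lowering for both of the plan items above is the "on the stack" option: the caller reserves space in linear memory and passes a hidden out-pointer, so only a single i32 crosses the Wasm function boundary. A hedged C sketch of that calling convention (divmod and Pair are invented for illustration; this is not the compiler's final scheme):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct Pair { int32_t quot; int32_t rem; } Pair;

    // Lowered form of a function with two results: the caller supplies
    // the destination instead of the callee returning two values.
    static void divmod(int32_t a, int32_t b, Pair *out) {
        out->quot = a / b;
        out->rem  = a % b;
    }

    int main(void) {
        Pair p;                            // caller-owned slot
        divmod(17, 5, &p);
        printf("%d %d\n", p.quot, p.rem);  // 3 2
        return 0;
    }

The alternative mentioned in the plan, a fixed set of globals, avoids the extra pointer argument but is not reentrant across nested calls.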
diff --git a/onyx b/onyx
index 301d6b49..9e1bc71e 100755
Binary files a/onyx and b/onyx differ
diff --git a/progs/stack_based.onyx b/progs/stack_based.onyx
index d3801d6c..dadd2ade 100644
--- a/progs/stack_based.onyx
+++ b/progs/stack_based.onyx
@@ -83,7 +83,7 @@ stupid_idea :: proc (n: i32) -> proc () -> i32 {
 some_value := 20 + 30 * 4 + 15 / 5;
 
 start :: proc #export {
-    heap_init();
+    memory_init();
 
     print("Hello, World!");
     print_hex(cast(u64) some_value);
@@ -142,22 +142,32 @@ start :: proc #export {
 
     stupid_idea(1234)() |> print();
 
-    varr : [5] Vec3;
-    varr[2].x = 4;
-    varr[2].y = 5;
-    varr[2].z = 6;
-    mag_squared(varr[2]) |> print();
-
-    v1 : Vec3;
-    v1.x = 1;
-    v1.y = 2;
-    v1.z = 4;
+    {
+        varr : [5] Vec3;
+        varr[2].x = 4;
+        varr[2].y = 5;
+        varr[2].z = 6;
+        mag_squared(varr[2]) |> print();
+
+        v1 : Vec3;
+        v1.x = 1;
+        v1.y = 2;
+        v1.z = 4;
+
+        v2 := v1;
+
+        v3 : Vec3 = *vadd(v1, v2);
+        print(v3.x);
+        print(v3.y);
+        print(v3.z);
+    }
+}
 
-    v2 := ^v1;
+vadd :: proc (v1: Vec3, v2: Vec3) -> ^Vec3 {
+    out := cast(^Vec3) alloc(^return_scratch_alloc, sizeof Vec3);
+    out.x = v1.x + v2.x;
+    out.y = v1.y + v2.y;
+    out.z = v1.z + v2.z;
 
-    v3 : Vec3;
-    vec_add(v1, *v2, ^v3);
-    print(v3.x);
-    print(v3.y);
-    print(v3.z);
+    return out;
 }
\ No newline at end of file
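Note that vadd now returns a pointer into return_scratch_buff, so the caller must copy the result out (as v3 : Vec3 = *vadd(v1, v2) does) before another scratch-returning call reuses the buffer. A small C sketch of the hazard, with a one-slot buffer standing in for the 256-byte scratch region (names are illustrative):

    #include <stdio.h>

    typedef struct Vec3 { float x, y, z; } Vec3;

    static Vec3 scratch;                 // stand-in for the return scratch buffer

    static Vec3 *vadd(Vec3 a, Vec3 b) {
        scratch = (Vec3) { a.x + b.x, a.y + b.y, a.z + b.z };
        return &scratch;                 // valid only until the next vadd call
    }

    int main(void) {
        Vec3 one = {1, 2, 4};

        Vec3  copied = *vadd(one, one);  // copied out immediately: stays {2, 4, 8}
        Vec3 *alias  =  vadd(one, one);  // still points into the scratch buffer

        vadd(copied, copied);            // reuses the buffer...
        printf("%g vs %g\n", copied.x, alias->x);  // 2 vs 4: the alias was clobbered
        return 0;
    }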
diff --git a/src/onyxwasm.c b/src/onyxwasm.c
index 34f0d74c..debefaae 100644
--- a/src/onyxwasm.c
+++ b/src/onyxwasm.c
@@ -344,7 +344,7 @@ static u64 local_lookup_idx(LocalAllocator* la, u64 value) {
 #define COMPILE_FUNC(kind, ...) static void compile_ ## kind (OnyxWasmModule* mod, bh_arr(WasmInstruction)* pcode, __VA_ARGS__)
 
 COMPILE_FUNC(function_body, AstFunction* fd);
-COMPILE_FUNC(block, AstBlock* block);
+COMPILE_FUNC(block, AstBlock* block, b32 generate_block_headers);
 COMPILE_FUNC(statement, AstNode* stmt);
 COMPILE_FUNC(assignment, AstBinaryOp* assign);
 COMPILE_FUNC(store_instruction, Type* type, u32 offset);
@@ -361,7 +361,7 @@ COMPILE_FUNC(intrinsic_call, AstIntrinsicCall* call);
 COMPILE_FUNC(array_access_location, AstArrayAccess* aa, u64* offset_return);
 COMPILE_FUNC(field_access_location, AstFieldAccess* field, u64* offset_return);
 COMPILE_FUNC(local_location, AstLocal* local, u64* offset_return);
-COMPILE_FUNC(struct_load, AstTyped* expr);
+COMPILE_FUNC(struct_load, Type* type, u64 offset);
 COMPILE_FUNC(struct_store, AstTyped* lval);
 COMPILE_FUNC(expression, AstTyped* expr);
 COMPILE_FUNC(cast, AstUnaryOp* cast);
@@ -372,44 +372,34 @@ COMPILE_FUNC(stack_leave, u32 unused);
 COMPILE_FUNC(function_body, AstFunction* fd) {
     if (fd->body == NULL) return;
 
-    bh_arr(WasmInstruction) code = *pcode;
-
-    bh_arr_each(AstLocal *, local, fd->body->locals)
-        bh_imap_put(&mod->local_map, (u64) *local, local_allocate(mod->local_alloc, *local));
-
-    forll (AstNode, stmt, fd->body->body, next) {
-        compile_statement(mod, &code, stmt);
-    }
-
-    compile_deferred_stmts(mod, &code, (AstNode *) fd);
-
-    bh_arr_each(AstLocal *, local, fd->body->locals)
-        local_free(mod->local_alloc, *local);
-
-    *pcode = code;
+    compile_block(mod, pcode, fd->body, 0);
 }
 
-COMPILE_FUNC(block, AstBlock* block) {
+COMPILE_FUNC(block, AstBlock* block, b32 generate_block_headers) {
     bh_arr(WasmInstruction) code = *pcode;
 
+    if (generate_block_headers) {
+        bh_arr_push(mod->structured_jump_target, 1);
+        WID(WI_BLOCK_START, 0x40);
+    }
+
     bh_arr_each(AstLocal *, local, block->locals)
         bh_imap_put(&mod->local_map, (u64) *local, local_allocate(mod->local_alloc, *local));
 
-    bh_arr_push(mod->structured_jump_target, 1);
-    WID(WI_BLOCK_START, 0x40);
-
     forll (AstNode, stmt, block->body, next) {
         compile_statement(mod, &code, stmt);
     }
 
     compile_deferred_stmts(mod, &code, (AstNode *) block);
 
-    WI(WI_BLOCK_END);
-    bh_arr_pop(mod->structured_jump_target);
-
     bh_arr_each(AstLocal *, local, block->locals)
         local_free(mod->local_alloc, *local);
 
+    if (generate_block_headers) {
+        WI(WI_BLOCK_END);
+        bh_arr_pop(mod->structured_jump_target);
+    }
+
     *pcode = code;
 }
 
@@ -452,7 +442,7 @@ COMPILE_FUNC(statement, AstNode* stmt) {
         case Ast_Kind_For:      compile_for(mod, &code, (AstFor *) stmt); break;
         case Ast_Kind_Break:    compile_structured_jump(mod, &code, ((AstBreak *) stmt)->count); break;
         case Ast_Kind_Continue: compile_structured_jump(mod, &code, -((AstContinue *) stmt)->count); break;
-        case Ast_Kind_Block:    compile_block(mod, &code, (AstBlock *) stmt); break;
+        case Ast_Kind_Block:    compile_block(mod, &code, (AstBlock *) stmt, 1); break;
         case Ast_Kind_Defer:    compile_defer(mod, &code, (AstDefer *) stmt); break;
         default:                compile_expression(mod, &code, (AstTyped *) stmt); break;
     }
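For reference, COMPILE_FUNC only stamps out a compile_* function whose first two parameters are the shared (mod, pcode) pair, so the new b32 flag on compile_block is an ordinary trailing argument. Expanding the declaration by hand with the macro defined at the top of this diff (an excerpt, not a standalone program):

    // COMPILE_FUNC(block, AstBlock* block, b32 generate_block_headers);
    // expands to:
    static void compile_block(OnyxWasmModule* mod,
                              bh_arr(WasmInstruction)* pcode,
                              AstBlock* block,
                              b32 generate_block_headers);

Passing 0 from compile_function_body (and, further down, from compile_if, compile_while, and compile_for) skips the WI_BLOCK_START/WI_BLOCK_END pair and the structured_jump_target push, because those callers either need no extra block scope or emit their own jump targets; standalone statement blocks pass 1 and keep the old behavior.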
@@ -566,7 +556,10 @@ COMPILE_FUNC(store_instruction, Type* type, u32 offset) {
 COMPILE_FUNC(load_instruction, Type* type, u32 offset) {
     bh_arr(WasmInstruction) code = *pcode;
 
-    assert(("Should use compile_struct_load instead", type->kind != Type_Kind_Struct));
+    if (type->kind == Type_Kind_Struct) {
+        compile_struct_load(mod, pcode, type, offset);
+        return;
+    }
 
     if (type->kind == Type_Kind_Array) {
         if (offset != 0) {
@@ -574,6 +567,7 @@
             WI(WI_I32_ADD);
         }
 
+        *pcode = code;
         return;
     }
 
@@ -630,17 +624,7 @@ COMPILE_FUNC(if, AstIf* if_node) {
 
     bh_arr_push(mod->structured_jump_target, 0);
 
-    if (if_node->true_stmt) {
-        bh_arr_each(AstLocal *, local, if_node->true_stmt->locals)
-            bh_imap_put(&mod->local_map, (u64) *local, local_allocate(mod->local_alloc, *local));
-
-        forll (AstNode, stmt, if_node->true_stmt->body, next) {
-            compile_statement(mod, &code, stmt);
-        }
-
-        bh_arr_each(AstLocal *, local, if_node->true_stmt->locals)
-            local_free(mod->local_alloc, *local);
-    }
+    if (if_node->true_stmt) compile_block(mod, &code, if_node->true_stmt, 0);
 
     if (if_node->false_stmt) {
         WI(WI_ELSE);
@@ -648,20 +632,10 @@
         if (if_node->false_stmt->kind == Ast_Kind_If) {
             compile_if(mod, &code, (AstIf *) if_node->false_stmt);
         } else {
-            bh_arr_each(AstLocal *, local, ((AstBlock *) if_node->false_stmt)->locals)
-                bh_imap_put(&mod->local_map, (u64) *local, local_allocate(mod->local_alloc, *local));
-
-            forll (AstNode, stmt, ((AstBlock *) if_node->false_stmt)->body, next) {
-                compile_statement(mod, &code, stmt);
-            }
-
-            bh_arr_each(AstLocal *, local, ((AstBlock *) if_node->false_stmt)->locals)
-                local_free(mod->local_alloc, *local);
+            compile_block(mod, &code, if_node->false_stmt, 0);
         }
     }
 
-    compile_deferred_stmts(mod, &code, (AstNode *) if_node);
-
     bh_arr_pop(mod->structured_jump_target);
 
     WI(WI_IF_END);
@@ -682,17 +656,7 @@ COMPILE_FUNC(while, AstWhile* while_node) {
     bh_arr_push(mod->structured_jump_target, 1);
     bh_arr_push(mod->structured_jump_target, 2);
 
-    bh_arr_each(AstLocal *, local, while_node->stmt->locals)
-        bh_imap_put(&mod->local_map, (u64) *local, local_allocate(mod->local_alloc, *local));
-
-    forll (AstNode, stmt, while_node->stmt->body, next) {
-        compile_statement(mod, &code, stmt);
-    }
-
-    bh_arr_each(AstLocal *, local, while_node->stmt->locals)
-        local_free(mod->local_alloc, *local);
-
-    compile_deferred_stmts(mod, &code, (AstNode *) while_node);
+    compile_block(mod, &code, while_node->stmt, 0);
 
     bh_arr_pop(mod->structured_jump_target);
     bh_arr_pop(mod->structured_jump_target);
@@ -746,15 +710,7 @@ COMPILE_FUNC(for, AstFor* for_node) {
     WI(ge_instr);
     WID(WI_COND_JUMP, 0x01);
 
-    bh_arr_each(AstLocal *, local, for_node->stmt->locals)
-        bh_imap_put(&mod->local_map, (u64) *local, local_allocate(mod->local_alloc, *local));
-
-    forll (AstNode, stmt, for_node->stmt->body, next) {
-        compile_statement(mod, &code, stmt);
-    }
-
-    bh_arr_each(AstLocal *, local, for_node->stmt->locals)
-        local_free(mod->local_alloc, *local);
+    compile_block(mod, &code, for_node->stmt, 0);
 
     if (it_is_local) {
         WIL(WI_LOCAL_GET, tmp);
@@ -772,8 +728,6 @@
         compile_store_instruction(mod, &code, var->type, offset);
     }
 
-    compile_deferred_stmts(mod, &code, (AstNode *) for_node);
-
     bh_arr_pop(mod->structured_jump_target);
     bh_arr_pop(mod->structured_jump_target);
@@ -1101,61 +1055,73 @@ COMPILE_FUNC(local_location, AstLocal* local, u64* offset_return) {
     *pcode = code;
 }
 
-COMPILE_FUNC(struct_load, AstTyped* expr) {
-    bh_arr(WasmInstruction) code = *pcode;
-
-    assert(expr->type->kind == Type_Kind_Struct);
+COMPILE_FUNC(struct_load, Type* type, u64 offset) {
+    // NOTE: Expects the stack to look like:
+    //      <the address of the struct>
+
+    bh_arr(WasmInstruction) code = *pcode;
 
-    u64 offset = 0;
-    bh_arr_each(StructMember *, smem, expr->type->Struct.memarr) {
-        offset = 0;
+    assert(type->kind == Type_Kind_Struct);
 
-        switch (expr->kind) {
-            case Ast_Kind_Local:        compile_local_location(mod, &code, (AstLocal *) expr, &offset); break;
-            case Ast_Kind_Dereference:  compile_expression(mod, &code, ((AstDereference *) expr)->expr); break;
-            case Ast_Kind_Array_Access: compile_array_access_location(mod, &code, (AstArrayAccess *) expr, &offset); break;
-            case Ast_Kind_Field_Access: compile_field_access_location(mod, &code, (AstFieldAccess *) expr, &offset); break;
+    if (type->Struct.mem_count == 1) {
+        compile_load_instruction(mod, &code, type->Struct.memarr[0]->type, 0);
+        *pcode = code;
+        return;
+    }
 
-            default: assert(0);
-        }
+    u64 tmp_idx = local_raw_allocate(mod->local_alloc, WASM_TYPE_INT32);
+    WIL(WI_LOCAL_SET, tmp_idx);
 
+    bh_arr_each(StructMember *, smem, type->Struct.memarr) {
+        WIL(WI_LOCAL_GET, tmp_idx);
         compile_load_instruction(mod, &code, (*smem)->type, offset + (*smem)->offset);
     }
 
+    local_raw_free(mod->local_alloc, WASM_TYPE_INT32);
+
     *pcode = code;
 }
 
 COMPILE_FUNC(struct_store, AstTyped* lval) {
+    // NOTE: Expects the stack to look like:
+    //      mem_1
+    //      mem_2
+    //      ...
+    //      mem_n
+
     bh_arr(WasmInstruction) code = *pcode;
 
     assert(lval->type->kind == Type_Kind_Struct);
 
     u64 offset = 0;
 
-    bh_arr_rev_each(StructMember *, smem, lval->type->Struct.memarr) {
-        offset = 0;
+    switch (lval->kind) {
+        case Ast_Kind_Local:        compile_local_location(mod, &code, (AstLocal *) lval, &offset); break;
+        case Ast_Kind_Dereference:  compile_expression(mod, &code, ((AstDereference *) lval)->expr); break;
+        case Ast_Kind_Array_Access: compile_array_access_location(mod, &code, (AstArrayAccess *) lval, &offset); break;
+        case Ast_Kind_Field_Access: compile_field_access_location(mod, &code, (AstFieldAccess *) lval, &offset); break;
 
-        WasmType wt = onyx_type_to_wasm_type((*smem)->type);
-        u64 localidx = local_raw_allocate(mod->local_alloc, wt);
+        default: assert(0);
+    }
 
-        WIL(WI_LOCAL_SET, localidx);
+    u64 loc_idx = local_raw_allocate(mod->local_alloc, WASM_TYPE_INT32);
+    WIL(WI_LOCAL_SET, loc_idx);
 
-        switch (lval->kind) {
-            case Ast_Kind_Local:        compile_local_location(mod, &code, (AstLocal *) lval, &offset); break;
-            case Ast_Kind_Dereference:  compile_expression(mod, &code, ((AstDereference *) lval)->expr); break;
-            case Ast_Kind_Array_Access: compile_array_access_location(mod, &code, (AstArrayAccess *) lval, &offset); break;
-            case Ast_Kind_Field_Access: compile_field_access_location(mod, &code, (AstFieldAccess *) lval, &offset); break;
+    bh_arr_rev_each(StructMember *, smem, lval->type->Struct.memarr) {
+        WasmType wt = onyx_type_to_wasm_type((*smem)->type);
+        u64 tmp_idx = local_raw_allocate(mod->local_alloc, wt);
 
-            default: assert(0);
-        }
-        WIL(WI_LOCAL_GET, localidx);
+        WIL(WI_LOCAL_SET, tmp_idx);
+        WIL(WI_LOCAL_GET, loc_idx);
+        WIL(WI_LOCAL_GET, tmp_idx);
 
         compile_store_instruction(mod, &code, (*smem)->type, offset + (*smem)->offset);
 
         local_raw_free(mod->local_alloc, wt);
     }
 
+    local_raw_free(mod->local_alloc, WASM_TYPE_INT32);
+
     *pcode = code;
 }
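The rewritten struct_store expects compile_expression to have left the members on the Wasm stack in field order, so mem_n, the member with the highest offset, is on top. Iterating the members in reverse (bh_arr_rev_each) therefore pairs each pop with the right field offset. A small C model of that stack discipline, using an explicit value stack (everything here is illustrative, not compiler code):

    #include <stdio.h>

    #define N 3

    static int  stack[16];
    static int  sp = 0;
    static void push(int v) { stack[sp++] = v; }
    static int  pop(void)   { return stack[--sp]; }

    int main(void) {
        int mem[N];   // stand-in for the struct's fields in linear memory

        // The members were pushed in field order: mem_1, mem_2, mem_3.
        push(10); push(20); push(30);

        // Walk the fields from the highest offset down, so each pop
        // lines up with the member that was pushed last.
        for (int field = N - 1; field >= 0; field--)
            mem[field] = pop();   // store at base + offset(field)

        printf("%d %d %d\n", mem[0], mem[1], mem[2]);   // 10 20 30
        return 0;
    }

struct_load is the mirror image: it saves the struct's address in a temporary local once, then loads each member through it in field order, leaving the members on the stack in the same order struct_store consumes them.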
@@ -1176,11 +1142,6 @@ COMPILE_FUNC(expression, AstTyped* expr) {
             WIL(WI_LOCAL_GET, tmp);
 
         } else {
-            if (expr->type->kind == Type_Kind_Struct) {
-                compile_struct_load(mod, &code, expr);
-                break;
-            }
-
             u64 offset = 0;
             compile_local_location(mod, &code, (AstLocal *) expr, &offset);
 
@@ -1236,7 +1197,7 @@
             break;
         }
 
-        case Ast_Kind_Block:          compile_block(mod, &code, (AstBlock *) expr); break;
+        case Ast_Kind_Block:          compile_block(mod, &code, (AstBlock *) expr, 1); break;
         case Ast_Kind_Call:           compile_call(mod, &code, (AstCall *) expr); break;
         case Ast_Kind_Intrinsic_Call: compile_intrinsic_call(mod, &code, (AstIntrinsicCall *) expr); break;
         case Ast_Kind_Binary_Op:      compile_binop(mod, &code, (AstBinaryOp *) expr); break;
@@ -1299,11 +1260,6 @@
         }
 
         case Ast_Kind_Dereference: {
-            if (expr->type->kind == Type_Kind_Struct) {
-                compile_struct_load(mod, &code, expr);
-                break;
-            }
-
             AstDereference* deref = (AstDereference *) expr;
             compile_expression(mod, &code, deref->expr);
             compile_load_instruction(mod, &code, deref->type, 0);
@@ -1311,11 +1267,6 @@
         }
 
         case Ast_Kind_Array_Access: {
-            if (expr->type->kind == Type_Kind_Struct) {
-                compile_struct_load(mod, &code, expr);
-                break;
-            }
-
             AstArrayAccess* aa = (AstArrayAccess *) expr;
             u64 offset = 0;
             compile_array_access_location(mod, &code, aa, &offset);
@@ -1324,11 +1275,6 @@
         }
 
         case Ast_Kind_Field_Access: {
-            if (expr->type->kind == Type_Kind_Struct) {
-                compile_struct_load(mod, &code, expr);
-                break;
-            }
-
             AstFieldAccess* field = (AstFieldAccess* ) expr;
 
             if (field->expr->kind == Ast_Kind_Param && field->expr->type->kind == Type_Kind_Struct) {