+++ /dev/null
-
-//
-// Polymorphic Procedures
-//
-
-// This flag is used by some of the procedures that try working with polymorphic things,
-// but need to wait until more information is known. Instead of passing a out parameter
-// into each of these procedures, a single global variable is used instead. If the type
-// checker ever gets multi-threaded, this would have to become a threadlocal variable.
-static b32 flag_to_yield = 0;
-
-// This flag is used in the very special case that you are passing a polymorphic procedure
-// to a polymorphic procedure, and you have enough information to instantiate said procedure
-// in order to resolve the type of one of the return values.
-static b32 doing_nested_polymorph_lookup = 0;
-
-// The name is pretty self-descriptive, but this is a node that is returned from things
-// like polymorphic_proc_lookup when it is determined that everything works so far, but
-// the caller must yield in order to finish checking this polymorphic procedure.
-AstTyped node_that_signals_a_yield = { Ast_Kind_Function, 0 };
-
-static void ensure_polyproc_cache_is_created(AstPolyProc* pp) {
- if (pp->concrete_funcs == NULL) bh_table_init(global_heap_allocator, pp->concrete_funcs, 16);
- if (pp->active_queries.hashes == NULL) bh_imap_init(&pp->active_queries, global_heap_allocator, 31);
-}
-
-void insert_poly_sln_into_scope(Scope* scope, AstPolySolution *sln) {
- AstNode *node = NULL;
-
- switch (sln->kind) {
- case PSK_Type:
- node = onyx_ast_node_new(context.ast_alloc, sizeof(AstTypeRawAlias), Ast_Kind_Type_Raw_Alias);
- ((AstTypeRawAlias *) node)->token = sln->poly_sym->token;
- ((AstTypeRawAlias *) node)->to = sln->type;
- break;
-
- case PSK_Value:
- // CLEANUP: Maybe clone this?
- assert(sln->value->flags & Ast_Flag_Comptime);
- node = (AstNode *) sln->value;
- break;
- }
-
- symbol_introduce(scope, sln->poly_sym->token, node);
-}
-
-static void insert_poly_slns_into_scope(Scope* scope, bh_arr(AstPolySolution) slns) {
- bh_arr_each(AstPolySolution, sln, slns) {
- insert_poly_sln_into_scope(scope, sln);
- }
-}
-
-// NOTE: This might return a volatile string. Do not store it without copying it.
-static char* build_poly_solution_key(AstPolySolution* sln) {
- if (sln->kind == PSK_Type) {
- return (char *) type_get_unique_name(sln->type);
-
- } else if (sln->kind == PSK_Value) {
- static char buffer[256];
-
- fori (i, 0, 256) buffer[i] = 0;
-
- if (sln->value->kind == Ast_Kind_NumLit) {
- strncat(buffer, "NUMLIT:", 127);
- strncat(buffer, bh_bprintf("%l", ((AstNumLit *) sln->value)->value.l), 127);
-
- } else {
- // HACK: For now, the value pointer is just used. This means that
- // sometimes, even through the solution is the same, it won't be
- // stored the same.
- bh_snprintf(buffer, 128, "%p", sln->value);
- }
-
- return buffer;
- }
-
- return NULL;
-}
-
-// NOTE: This returns a volatile string. Do not store it without copying it.
-static char* build_poly_slns_unique_key(bh_arr(AstPolySolution) slns) {
- static char key_buf[1024];
- fori (i, 0, 1024) key_buf[i] = 0;
-
- bh_arr_each(AstPolySolution, sln, slns) {
- token_toggle_end(sln->poly_sym->token);
-
- strncat(key_buf, sln->poly_sym->token->text, 1023);
- strncat(key_buf, "=", 1023);
- strncat(key_buf, build_poly_solution_key(sln), 1023);
- strncat(key_buf, ";", 1023);
-
- token_toggle_end(sln->poly_sym->token);
- }
-
- return key_buf;
-}
-
-// NOTE: This function adds a solidified function to the entity heap for it to be processed
-// later. It optionally can start the function header entity at the code generation state if
-// the header has already been processed.
-static b32 add_solidified_function_entities(AstSolidifiedFunction *solidified_func) {
- solidified_func->func->flags |= Ast_Flag_Function_Used;
- solidified_func->func->flags |= Ast_Flag_From_Polymorphism;
-
- Entity func_header_entity = {
- .state = Entity_State_Resolve_Symbols,
- .type = Entity_Type_Function_Header,
- .function = solidified_func->func,
- .package = NULL,
- .scope = solidified_func->func->poly_scope,
- };
-
- Entity func_entity = {
- .state = Entity_State_Resolve_Symbols,
- .type = Entity_Type_Function,
- .function = solidified_func->func,
- .package = NULL,
- .scope = solidified_func->func->poly_scope,
- };
-
- Entity* entity_header = entity_heap_insert(&context.entities, func_header_entity);
- Entity* entity_body = entity_heap_insert(&context.entities, func_entity);
-
- solidified_func->func_header_entity = entity_header;
- solidified_func->func->entity_header = entity_header;
- solidified_func->func->entity_body = entity_body;
- return 1;
-}
-
-// NOTE: This function is responsible for taking all of the information about generating
-// a new polymorphic variant, and producing a solidified function. It optionally can only
-// generate the header of the function, which is useful for cases such as checking if a
-// set of arguments works for a polymorphic overload option.
-static AstSolidifiedFunction generate_solidified_function(
- AstPolyProc* pp,
- bh_arr(AstPolySolution) slns,
- OnyxToken* tkn,
- b32 header_only) {
-
- AstSolidifiedFunction solidified_func;
- solidified_func.func_header_entity = NULL;
-
- // NOTE: Use the position of token if one was provided, otherwise just use NULL.
- OnyxFilePos poly_scope_pos = { 0 };
- if (tkn) poly_scope_pos = tkn->pos;
-
- if (header_only) {
- solidified_func.func = (AstFunction *) clone_function_header(context.ast_alloc, pp->base_func);
- solidified_func.func->flags |= Ast_Flag_Incomplete_Body;
-
- } else {
- solidified_func.func = (AstFunction *) ast_clone(context.ast_alloc, pp->base_func);
- }
-
- solidified_func.func->poly_scope = scope_create(context.ast_alloc, pp->poly_scope, poly_scope_pos);
- insert_poly_slns_into_scope(solidified_func.func->poly_scope, slns);
-
- solidified_func.func->flags |= Ast_Flag_From_Polymorphism;
- solidified_func.func->generated_from = tkn;
-
- // HACK: Remove the baked parameters from the function defintion so they can be
- // resolved in the poly scope above the function. This does feel kinda of gross
- // and I would love an alternative to tell it to just "skip" the parameter, but
- // that is liable to breaking because it is one more thing to remember.
- // - brendanfh 2021/01/18
- u32 removed_params = 0;
- bh_arr_each(AstPolyParam, param, pp->poly_params) {
- if (param->kind != PPK_Baked_Value) continue;
-
- bh_arr_deleten(solidified_func.func->params, param->idx - removed_params, 1);
- removed_params++;
- }
-
- return solidified_func;
-}
-
-static void ensure_solidified_function_has_body(AstPolyProc* pp, AstSolidifiedFunction *solidified_func) {
- if (solidified_func->func->flags & Ast_Flag_Incomplete_Body) {
- clone_function_body(context.ast_alloc, solidified_func->func, pp->base_func);
-
- // HACK: I'm asserting that this function should return without an error, because
- // the only case where it can return an error is if there was a problem with the
- // header. This should never be the case in this situation, since the header would
- // have to have successfully passed type checking before it would become a solidified
- // procedure.
- assert(add_solidified_function_entities(solidified_func));
-
- solidified_func->func->flags &= ~Ast_Flag_Incomplete_Body;
- }
-}
-
-// NOTE: These are temporary data structures used to represent the pattern matching system
-// of polymorphic type resolution.
-typedef struct PolySolveResult {
- PolySolutionKind kind;
- union {
- AstTyped* value;
- Type* actual;
- };
-} PolySolveResult;
-
-typedef struct PolySolveElem {
- AstType* type_expr;
-
- PolySolutionKind kind;
- union {
- AstTyped* value;
- Type* actual;
- };
-} PolySolveElem;
-
-// NOTE: The job of this function is to solve for the type/value that belongs in a
-// polymorphic variable. This function takes in three arguments:
-// * The symbol node of the polymorphic parameter being searched for
-// * The type expression that should contain the symbol node it is some where
-// * The actual type to pattern match against
-//
-// This function utilizes a basic breadth-first search of the type_expr and actual type
-// trees, always moving along them in parallel, so when the target is reached (if it is
-// ever reached), the "actual" is the matched type/value.
-static PolySolveResult solve_poly_type(AstNode* target, AstType* type_expr, Type* actual) {
- bh_arr(PolySolveElem) elem_queue = NULL;
- bh_arr_new(global_heap_allocator, elem_queue, 4);
-
- PolySolveResult result = { PSK_Undefined, { NULL } };
-
- bh_arr_push(elem_queue, ((PolySolveElem) {
- .type_expr = type_expr,
- .kind = PSK_Type,
- .actual = actual
- }));
-
- while (!bh_arr_is_empty(elem_queue)) {
- PolySolveElem elem = elem_queue[0];
- bh_arr_deleten(elem_queue, 0, 1);
-
- if (elem.type_expr == (AstType *) target) {
- result.kind = elem.kind;
-
- assert(elem.kind != PSK_Undefined);
- if (result.kind == PSK_Type) result.actual = elem.actual;
- if (result.kind == PSK_Value) result.value = elem.value;
- break;
- }
-
- if (elem.kind != PSK_Type) continue;
-
- switch (elem.type_expr->kind) {
- case Ast_Kind_Pointer_Type: {
- if (elem.actual->kind != Type_Kind_Pointer) break;
-
- bh_arr_push(elem_queue, ((PolySolveElem) {
- .type_expr = ((AstPointerType *) elem.type_expr)->elem,
- .kind = PSK_Type,
- .actual = elem.actual->Pointer.elem,
- }));
- break;
- }
-
- case Ast_Kind_Array_Type: {
- if (elem.actual->kind != Type_Kind_Array) break;
-
- bh_arr_push(elem_queue, ((PolySolveElem) {
- .type_expr = (AstType*) ((AstArrayType *) elem.type_expr)->count_expr,
- .kind = PSK_Value,
-
- // CLEANUP: Making an integer literal every time is very very very gross. This should
- // at least be cached or something.
- .value = (AstTyped *) make_int_literal(context.ast_alloc, elem.actual->Array.count)
- }));
-
- bh_arr_push(elem_queue, ((PolySolveElem) {
- .type_expr = ((AstArrayType *) elem.type_expr)->elem,
- .kind = PSK_Type,
- .actual = elem.actual->Array.elem,
- }));
- break;
- }
-
- case Ast_Kind_Slice_Type: {
- if (elem.actual->kind != Type_Kind_Slice && elem.actual->kind != Type_Kind_DynArray
- && elem.actual->kind != Type_Kind_VarArgs && elem.actual->kind != Type_Kind_Array) break;
-
- bh_arr_push(elem_queue, ((PolySolveElem) {
- .type_expr = ((AstSliceType *) elem.type_expr)->elem,
- .kind = PSK_Type,
-
- // HACK: This makes the assumption that arrays, slices, dynamic arrays and varargs have the same element type at the same location.
- .actual = elem.actual->Slice.elem,
- }));
- break;
- }
-
- case Ast_Kind_DynArr_Type: {
- if (elem.actual->kind != Type_Kind_DynArray) break;
-
- bh_arr_push(elem_queue, ((PolySolveElem) {
- .type_expr = ((AstDynArrType *) elem.type_expr)->elem,
- .kind = PSK_Type,
- .actual = elem.actual->DynArray.elem,
- }));
- break;
- }
-
- case Ast_Kind_VarArg_Type:
- bh_arr_push(elem_queue, ((PolySolveElem) {
- .type_expr = ((AstVarArgType *) elem.type_expr)->elem,
- .kind = PSK_Type,
- .actual = actual,
- }));
- break;
-
- case Ast_Kind_Function_Type: {
- if (elem.actual->kind != Type_Kind_Function) break;
-
- AstFunctionType* ft = (AstFunctionType *) elem.type_expr;
-
- fori (i, 0, (i64) ft->param_count) {
- bh_arr_push(elem_queue, ((PolySolveElem) {
- .type_expr = ft->params[i],
- .kind = PSK_Type,
- .actual = elem.actual->Function.params[i],
- }));
- }
-
- bh_arr_push(elem_queue, ((PolySolveElem) {
- .type_expr = ft->return_type,
- .kind = PSK_Type,
- .actual = elem.actual->Function.return_type,
- }));
-
- break;
- }
-
- case Ast_Kind_Poly_Call_Type: {
- if (elem.actual->kind != Type_Kind_Struct) break;
- if (bh_arr_length(elem.actual->Struct.poly_sln) != bh_arr_length(((AstPolyCallType *) elem.type_expr)->params)) break;
-
- AstPolyCallType* pt = (AstPolyCallType *) elem.type_expr;
-
- fori (i, 0, bh_arr_length(pt->params)) {
- PolySolutionKind kind = elem.actual->Struct.poly_sln[i].kind;
- if (kind == PSK_Type) {
- bh_arr_push(elem_queue, ((PolySolveElem) {
- .kind = kind,
- .type_expr = (AstType *) pt->params[i],
- .actual = elem.actual->Struct.poly_sln[i].type,
- }));
- } else {
- bh_arr_push(elem_queue, ((PolySolveElem) {
- .kind = kind,
- .type_expr = (AstType *) pt->params[i],
- .value = elem.actual->Struct.poly_sln[i].value,
- }));
- }
- }
-
- break;
- }
-
- case Ast_Kind_Type_Compound: {
- if (elem.actual->kind != Type_Kind_Compound) break;
- if (elem.actual->Compound.count != (u32) bh_arr_length(((AstCompoundType *) elem.type_expr)->types)) break;
-
- AstCompoundType* ct = (AstCompoundType *) elem.type_expr;
-
- fori (i, 0, bh_arr_length(ct->types)) {
- bh_arr_push(elem_queue, ((PolySolveElem) {
- .kind = PSK_Type,
- .type_expr = ct->types[i],
- .actual = elem.actual->Compound.types[i],
- }));
- }
-
- break;
- }
-
- default: break;
- }
- }
-
- bh_arr_free(elem_queue);
-
- return result;
-}
-
-// NOTE: The job of this function is to take a polymorphic parameter and a set of arguments
-// and solve for the argument that matches the parameter. This is needed because polymorphic
-// procedure resolution has to happen before the named arguments are placed in their correct
-// positions.
-static AstTyped* lookup_param_in_arguments(AstFunction* func, AstPolyParam* param, Arguments* args, char** err_msg) {
- bh_arr(AstTyped *) arg_arr = args->values;
- bh_arr(AstNamedValue *) named_values = args->named_values;
-
- // NOTE: This check is safe because currently the arguments given without a name
- // always map to the beginning indidies of the argument array.
- if (param->idx >= (u64) bh_arr_length(arg_arr)) {
- OnyxToken* param_name = func->params[param->idx].local->token;
-
- bh_arr_each(AstNamedValue *, named_value, named_values) {
- if (token_equals(param_name, (*named_value)->token)) {
- return (AstTyped *) (*named_value)->value;
- }
- }
-
- // CLEANUP
- if (err_msg) *err_msg = "Not enough arguments to polymorphic procedure. This error message may not be entirely right.";
-
- } else {
- return (AstTyped *) arg_arr[param->idx];
- }
-
- return NULL;
-}
-
-static AstTyped* try_lookup_based_on_partial_function_type(AstPolyProc *pp, AstFunctionType *ft) {
- if (ft->partial_function_type == NULL) {
- AstType *old_return_type = ft->return_type;
- ft->return_type = (AstType *) &basic_type_void;
- ft->partial_function_type = type_build_from_ast(context.ast_alloc, (AstType *) ft);
- if (!ft->partial_function_type) {
- doing_nested_polymorph_lookup = 1;
- return NULL;
- }
-
- assert(ft->partial_function_type);
- ft->return_type = old_return_type;
- }
-
- AstTyped *result = (AstTyped *) polymorphic_proc_lookup(pp, PPLM_By_Function_Type, ft->partial_function_type, pp->token);
- if (result && result->type == NULL) {
- doing_nested_polymorph_lookup = 1;
- result = NULL;
- }
- if (result == &node_that_signals_a_yield) {
- doing_nested_polymorph_lookup = 1;
- result = NULL;
- }
-
- return result;
-}
-
-// NOTE: The job of this function is to solve for type of AstPolySolution using the provided
-// information. It is asssumed that the "param" is of kind PPK_Poly_Type. This function uses
-// either the arguments provided, or a function type to compare against to pattern match for
-// the type that the parameter but be.
-static void solve_for_polymorphic_param_type(PolySolveResult* resolved, AstFunction* func, AstPolyParam* param, PolyProcLookupMethod pp_lookup, ptr actual, char** err_msg) {
- Type* actual_type = NULL;
-
- switch (pp_lookup) {
- case PPLM_By_Arguments: {
- Arguments* args = (Arguments *) actual;
-
- AstTyped* typed_param = lookup_param_in_arguments(func, param, args, err_msg);
- if (typed_param == NULL) return;
-
- // CLEANUP FIXME HACK TODO GROSS
- if (typed_param->kind == Ast_Kind_Argument) {
- AstTyped* potential = ((AstArgument *) typed_param)->value;
- if (potential->kind == Ast_Kind_Polymorphic_Proc) {
- if (param->idx < (u32) bh_arr_length(func->params)) {
- AstType *param_type = func->params[param->idx].local->type_node;
- if (param_type->kind == Ast_Kind_Function_Type) {
- AstFunctionType *ft = (AstFunctionType *) param_type;
- b32 all_types = 1;
- fori (i, 0, (i32) ft->param_count) {
- if (!node_is_type((AstNode *) ft->params[i])) {
- all_types = 0;
- break;
- }
- }
-
- if (all_types) {
- typed_param = try_lookup_based_on_partial_function_type((AstPolyProc *) potential, ft);
- }
- }
- }
- }
- }
-
- actual_type = resolve_expression_type(typed_param);
- if (actual_type == NULL) return;
-
- break;
- }
-
- case PPLM_By_Function_Type: {
- Type* ft = (Type *) actual;
- if (param->idx >= ft->Function.param_count) {
- if (err_msg) *err_msg = "Incompatible polymorphic argument to function parameter.";
- return;
- }
-
- actual_type = ft->Function.params[param->idx];
- break;
- }
-
- default: return;
- }
-
- *resolved = solve_poly_type(param->poly_sym, param->type_expr, actual_type);
-}
-
-
-// NOTE: The job of this function is to look through the arguments provided and find a matching
-// value that is to be baked into the polymorphic procedures poly-scope. It expected that param
-// will be of kind PPK_Baked_Value. In other words, this handles the ($Baked: type) case.
-// CLEANUP: This function is kind of gross at the moment, because it handles different cases for
-// the argument kind. When type expressions (type_expr) become first-class types in the type
-// system, this code should be able to be a lot cleaner.
-static void solve_for_polymorphic_param_value(PolySolveResult* resolved, AstFunction* func, AstPolyParam* param, PolyProcLookupMethod pp_lookup, ptr actual, char** err_msg) {
- if (pp_lookup != PPLM_By_Arguments) {
- *err_msg = "Function type cannot be used to solved for baked parameter value.";
- return;
- }
-
- Arguments* args = (Arguments *) actual;
- AstTyped* value = lookup_param_in_arguments(func, param, args, err_msg);
- if (value == NULL) return;
-
- // HACK: Storing the original value because if this was an AstArgument, we need to flag
- // it as baked if it is determined that the argument is of the correct kind and type.
- AstTyped* orig_value = value;
- if (value->kind == Ast_Kind_Argument) {
- ((AstArgument *) orig_value)->is_baked = 0;
- value = ((AstArgument *) value)->value;
- }
-
- Type* param_type = NULL;
- AstType *param_type_expr = func->params[param->idx].local->type_node;
- if (param_type_expr == (AstType *) &basic_type_type_expr) {
- if (!node_is_type((AstNode *) value)) {
- if (err_msg) *err_msg = "Expected type expression.";
- return;
- }
-
- Type* resolved_type = type_build_from_ast(context.ast_alloc, (AstType *) value);
- if (resolved_type == NULL) flag_to_yield = 1;
-
- *resolved = ((PolySolveResult) { PSK_Type, .actual = resolved_type });
-
- } else {
- resolve_expression_type(value);
-
- if ((value->flags & Ast_Flag_Comptime) == 0) {
- if (err_msg) *err_msg = "Expected compile-time known argument.";
- return;
- }
-
- param_type = type_build_from_ast(context.ast_alloc, param_type_expr);
- if (param_type == NULL) {
- flag_to_yield = 1;
- *err_msg = "Waiting to know type for polymorphic value.";
- return;
- }
-
- AstTyped* value_to_use = value;
- if (value->kind == Ast_Kind_Macro) {
- value_to_use = (AstTyped *) get_function_from_node((AstNode *) value);
- }
-
- TypeMatch tm = unify_node_and_type(&value_to_use, param_type);
- if (tm == TYPE_MATCH_FAILED) {
- if (err_msg) *err_msg = bh_aprintf(global_scratch_allocator,
- "The procedure '%s' expects a value of type '%s' for baked %d%s parameter, got '%s'.",
- get_function_name(func),
- type_get_name(param_type),
- param->idx + 1,
- bh_num_suffix(param->idx + 1),
- node_get_type_name(value_to_use));
- return;
- }
-
- if (tm == TYPE_MATCH_YIELD) flag_to_yield = 1;
-
- *resolved = ((PolySolveResult) { PSK_Value, value });
- }
-
- if (orig_value->kind == Ast_Kind_Argument) {
- ((AstArgument *) orig_value)->is_baked = 1;
- }
-}
-
-TypeMatch find_polymorphic_sln(AstPolySolution *out, AstPolyParam *param, AstFunction *func, PolyProcLookupMethod pp_lookup, ptr actual, char** err_msg) {
- // NOTE: Solve for the polymorphic parameter's value
- PolySolveResult resolved = { PSK_Undefined };
- switch (param->kind) {
- case PPK_Poly_Type: solve_for_polymorphic_param_type (&resolved, func, param, pp_lookup, actual, err_msg); break;
- case PPK_Baked_Value: solve_for_polymorphic_param_value(&resolved, func, param, pp_lookup, actual, err_msg); break;
-
- default: if (err_msg) *err_msg = "Invalid polymorphic parameter kind. This is a compiler bug.";
- }
-
- if (doing_nested_polymorph_lookup) {
- doing_nested_polymorph_lookup = 0;
- return TYPE_MATCH_SPECIAL;
- }
-
- if (flag_to_yield) {
- flag_to_yield = 0;
- return TYPE_MATCH_YIELD;
- }
-
- switch (resolved.kind) {
- case PSK_Type:
- out->kind = PSK_Type;
- out->poly_sym = param->poly_sym;
- out->type = resolved.actual;
- return TYPE_MATCH_SUCCESS;
-
- case PSK_Value:
- out->kind = PSK_Value;
- out->poly_sym = param->poly_sym;
- out->value = resolved.value;
- return TYPE_MATCH_SUCCESS;
-
- case PSK_Undefined:
- default:
- // NOTE: If no error message has been assigned to why this polymorphic parameter
- // resolution was unsuccessful, provide a basic dummy one.
- if (err_msg && *err_msg == NULL)
- *err_msg = bh_aprintf(global_scratch_allocator,
- "Unable to solve for polymorphic variable '%b'.",
- param->poly_sym->token->text,
- param->poly_sym->token->length);
-
- out->kind = PSK_Undefined;
- return TYPE_MATCH_FAILED;
- }
-}
-
-// NOTE: The job of this function is to take a polymorphic procedure, as well as a method of
-// solving for the polymorphic variables, in order to return an array of the solutions for all
-// of the polymorphic variables.
-static bh_arr(AstPolySolution) find_polymorphic_slns(AstPolyProc* pp, PolyProcLookupMethod pp_lookup, ptr actual, OnyxToken *tkn, b32 necessary) {
- ensure_polyproc_cache_is_created(pp);
- if (bh_imap_has(&pp->active_queries, (u64) actual)) {
- AstPolyQuery *query = (AstPolyQuery *) bh_imap_get(&pp->active_queries, (u64) actual);
- assert(query->kind == Ast_Kind_Polymorph_Query);
- assert(query->entity);
-
- if (query->entity->state == Entity_State_Finalized) return query->slns;
- if (query->entity->state == Entity_State_Failed) return NULL;
-
- flag_to_yield = 1;
- return NULL;
- }
-
- bh_arr(AstPolySolution) slns = NULL;
- bh_arr_new(global_heap_allocator, slns, bh_arr_length(pp->poly_params));
-
- // NOTE: "known solutions" are given through a '#solidify' directive. If this polymorphic
- // procedure is the result of a partially applied solidification, this array will be non-
- // empty and these solutions will be used.
- bh_arr_each(AstPolySolution, known_sln, pp->known_slns) bh_arr_push(slns, *known_sln);
-
- AstPolyQuery *query = onyx_ast_node_new(context.ast_alloc, sizeof(AstPolyQuery), Ast_Kind_Polymorph_Query);
- query->token = pp->token;
- query->proc = pp;
- query->pp_lookup = pp_lookup;
- query->given = actual;
- query->error_loc = tkn;
- query->slns = slns;
- query->function_header = clone_function_header(context.ast_alloc, pp->base_func);
- query->function_header->flags |= Ast_Flag_Header_Check_No_Error;
- query->function_header->scope = NULL;
- query->error_on_fail = necessary;
- query->successful_symres = 1;
-
- bh_imap_put(&pp->active_queries, (u64) actual, (u64) query);
- add_entities_for_node(NULL, (AstNode *) query, NULL, NULL);
-
- flag_to_yield = 1;
- return NULL;
-}
-
-// NOTE: The job of this function is to be a wrapper to other functions, providing an error
-// message if a solution could not be found. This can't be merged with polymorphic_proc_solidify
-// because polymorphic_proc_try_solidify uses the aforementioned function.
-AstFunction* polymorphic_proc_lookup(AstPolyProc* pp, PolyProcLookupMethod pp_lookup, ptr actual, OnyxToken* tkn) {
- ensure_polyproc_cache_is_created(pp);
-
- bh_arr(AstPolySolution) slns = find_polymorphic_slns(pp, pp_lookup, actual, tkn, 1);
- if (slns == NULL) {
- if (flag_to_yield) {
- flag_to_yield = 0;
- return (AstFunction *) &node_that_signals_a_yield;
- }
-
- return NULL;
- }
-
- AstFunction* result = polymorphic_proc_solidify(pp, slns, tkn);
- return result;
-}
-
-AstFunction* polymorphic_proc_solidify(AstPolyProc* pp, bh_arr(AstPolySolution) slns, OnyxToken* tkn) {
- ensure_polyproc_cache_is_created(pp);
-
- // NOTE: Check if a version of this polyproc has already been created.
- char* unique_key = build_poly_slns_unique_key(slns);
- if (bh_table_has(AstSolidifiedFunction, pp->concrete_funcs, unique_key)) {
- AstSolidifiedFunction solidified_func = bh_table_get(AstSolidifiedFunction, pp->concrete_funcs, unique_key);
-
- // NOTE: If this solution was originally created from a "build_only_header" call, then the body
- // will not have been or type checked, or anything. This ensures that the body is copied, the
- // entities are created and entered into the pipeline.
- ensure_solidified_function_has_body(pp, &solidified_func);
-
- // NOTE: Again, if this came from a "build_only_header" call, then there was no known token and
- // the "generated_from" member will be null. It is best to set it here so errors reported in that
- // function can report where the polymorphic instantiation occurred.
- if (solidified_func.func->generated_from == NULL)
- solidified_func.func->generated_from = tkn;
-
- return solidified_func.func;
- }
-
- AstSolidifiedFunction solidified_func = generate_solidified_function(pp, slns, tkn, 0);
- add_solidified_function_entities(&solidified_func);
-
- // NOTE: Cache the function for later use, reducing duplicate functions.
- bh_table_put(AstSolidifiedFunction, pp->concrete_funcs, unique_key, solidified_func);
-
- return (AstFunction *) &node_that_signals_a_yield;
-}
-
-// NOTE: This can return either a AstFunction or an AstPolyProc, depending if enough parameters were
-// supplied to remove all the polymorphic variables from the function.
-AstNode* polymorphic_proc_try_solidify(AstPolyProc* pp, bh_arr(AstPolySolution) slns, OnyxToken* tkn) {
- i32 valid_argument_count = 0;
-
- bh_arr_each(AstPolySolution, sln, slns) {
- b32 found_match = 0;
-
- bh_arr_each(AstPolyParam, param, pp->poly_params) {
- if (token_equals(sln->poly_sym->token, param->poly_sym->token)) {
- found_match = 1;
- break;
- }
- }
-
- if (found_match) {
- valid_argument_count++;
- } else {
- onyx_report_error(tkn->pos, "'%b' is not a type variable of '%b'.",
- sln->poly_sym->token->text, sln->poly_sym->token->length,
- pp->token->text, pp->token->length);
- return (AstNode *) pp;
- }
- }
-
- if (valid_argument_count == bh_arr_length(pp->poly_params)) {
- return (AstNode *) polymorphic_proc_solidify(pp, slns, tkn);
-
- } else {
- // HACK: Some of these initializations assume that the entity for this polyproc has
- // made it through the symbol resolution phase.
- // - brendanfh 2020/12/25
- AstPolyProc* new_pp = onyx_ast_node_new(context.ast_alloc, sizeof(AstPolyProc), Ast_Kind_Polymorphic_Proc);
- new_pp->token = tkn;
- new_pp->base_func = pp->base_func;
- new_pp->flags = pp->flags;
- new_pp->poly_params = pp->poly_params;
-
- ensure_polyproc_cache_is_created(pp);
- new_pp->concrete_funcs = pp->concrete_funcs;
-
- new_pp->known_slns = NULL;
- bh_arr_new(global_heap_allocator, new_pp->known_slns, bh_arr_length(pp->known_slns) + bh_arr_length(slns));
-
- bh_arr_each(AstPolySolution, sln, pp->known_slns) bh_arr_push(new_pp->known_slns, *sln);
- bh_arr_each(AstPolySolution, sln, slns) bh_arr_push(new_pp->known_slns, *sln);
-
- return (AstNode *) new_pp;
- }
-}
-
-AstFunction* polymorphic_proc_build_only_header(AstPolyProc* pp, PolyProcLookupMethod pp_lookup, ptr actual) {
- ensure_polyproc_cache_is_created(pp);
- bh_arr(AstPolySolution) slns = find_polymorphic_slns(pp, pp_lookup, actual, NULL, 0);
- if (flag_to_yield) {
- flag_to_yield = 0;
- return (AstFunction *) &node_that_signals_a_yield;
- }
- if (slns == NULL) return NULL;
-
- ensure_polyproc_cache_is_created(pp);
-
- return polymorphic_proc_build_only_header_with_slns(pp, slns, 0);
-}
-
-AstFunction* polymorphic_proc_build_only_header_with_slns(AstPolyProc* pp, bh_arr(AstPolySolution) slns, b32 error_if_failed) {
- AstSolidifiedFunction solidified_func;
-
- char* unique_key = build_poly_slns_unique_key(slns);
- if (bh_table_has(AstSolidifiedFunction, pp->concrete_funcs, unique_key)) {
- solidified_func = bh_table_get(AstSolidifiedFunction, pp->concrete_funcs, unique_key);
-
- } else {
- // NOTE: This function is only going to have the header of it correctly created.
- // Nothing should happen to this function's body or else the original will be corrupted.
- // - brendanfh 2021/01/10
- solidified_func = generate_solidified_function(pp, slns, NULL, 1);
- }
-
- if (solidified_func.func_header_entity) {
- if (solidified_func.func_header_entity->state == Entity_State_Finalized) return solidified_func.func;
- if (solidified_func.func_header_entity->state == Entity_State_Failed) return NULL;
-
- return (AstFunction *) &node_that_signals_a_yield;
- }
-
- BH_MASK_SET(solidified_func.func->flags, !error_if_failed, Ast_Flag_Header_Check_No_Error);
-
- Entity func_header_entity = {
- .state = Entity_State_Resolve_Symbols,
- .type = Entity_Type_Temp_Function_Header,
- .function = solidified_func.func,
- .package = NULL,
- .scope = solidified_func.func->poly_scope,
- };
-
- Entity* func_header_entity_ptr = entity_heap_insert(&context.entities, func_header_entity);
- solidified_func.func_header_entity = func_header_entity_ptr;
-
- // NOTE: Cache the function for later use.
- bh_table_put(AstSolidifiedFunction, pp->concrete_funcs, unique_key, solidified_func);
-
- return (AstFunction *) &node_that_signals_a_yield;
-}
-
-//
-// Polymorphic Structures
-//
-//
-// Currently, I am not very happy about how polymorphic structure generation works. My biggest problem
-// with it is that it is very different from the polymorhic procedure generation. Also, it needs to
-// completely generate and check the structure right away, which means there is a lot of up-front work
-// done here that could probably be done elsewhere. This really relates to a large problem in the compiler
-// that types need to be known completely by the time symbol resolution is done, even though that
-// information shouldn't need to be known until right before the types are checked.
-//
-// The above documentation is very incorrect but I don't want to fix it right now. Basically, polymorphic
-// structures now have a delay instantiation phase and are not forced to be completed immediately.
-
-char* build_poly_struct_name(AstPolyStructType* ps_type, Type* cs_type) {
- char name_buf[256];
- fori (i, 0, 256) name_buf[i] = 0;
-
- strncat(name_buf, ps_type->name, 255);
- strncat(name_buf, "(", 255);
- bh_arr_each(AstPolySolution, ptype, cs_type->Struct.poly_sln) {
- if (ptype != cs_type->Struct.poly_sln)
- strncat(name_buf, ", ", 255);
-
- // This logic will have to be other places as well.
-
- switch (ptype->kind) {
- case PSK_Undefined: assert(0); break;
- case PSK_Type: strncat(name_buf, type_get_name(ptype->type), 255); break;
- case PSK_Value: {
- // FIX
- AstNode* value = strip_aliases((AstNode *) ptype->value);
-
- if (value->kind == Ast_Kind_NumLit) {
- AstNumLit* nl = (AstNumLit *) value;
- if (type_is_integer(nl->type)) {
- strncat(name_buf, bh_bprintf("%l", nl->value.l), 127);
- } else {
- strncat(name_buf, "numlit (FIX ME)", 127);
- }
- } else if (value->kind == Ast_Kind_Code_Block) {
- AstCodeBlock* code = (AstCodeBlock *) value;
- OnyxFilePos code_loc = code->token->pos;
- strncat(name_buf, bh_bprintf("code at %s:%d,%d", code_loc.filename, code_loc.line, code_loc.column), 127);
- } else {
- strncat(name_buf, "<expr>", 127);
- }
-
- break;
- }
- }
- }
- strncat(name_buf, ")", 255);
-
- return bh_aprintf(global_heap_allocator, "%s", name_buf);
-}
-
-Type* polymorphic_struct_lookup(AstPolyStructType* ps_type, bh_arr(AstPolySolution) slns, OnyxFilePos pos) {
- // @Cleanup
- assert(ps_type->scope != NULL);
-
- if (ps_type->concrete_structs == NULL) {
- bh_table_init(global_heap_allocator, ps_type->concrete_structs, 16);
- }
-
- if (bh_arr_length(slns) != bh_arr_length(ps_type->poly_params)) {
- onyx_report_error(pos, "Wrong number of arguments for '%s'. Expected %d, got %d",
- ps_type->name,
- bh_arr_length(ps_type->poly_params),
- bh_arr_length(slns));
-
- return NULL;
- }
-
- i32 i = 0;
- bh_arr_each(AstPolySolution, sln, slns) {
- sln->poly_sym = (AstNode *) &ps_type->poly_params[i];
-
- PolySolutionKind expected_kind = PSK_Undefined;
- if ((AstNode *) ps_type->poly_params[i].type_node == (AstNode *) &basic_type_type_expr) {
- expected_kind = PSK_Type;
- } else {
- expected_kind = PSK_Value;
- }
-
- if (sln->kind != expected_kind) {
- if (expected_kind == PSK_Type)
- onyx_report_error(pos, "Expected type expression for %d%s argument.", i + 1, bh_num_suffix(i + 1));
-
- if (expected_kind == PSK_Value)
- onyx_report_error(pos, "Expected value expression of type '%s' for %d%s argument.",
- type_get_name(ps_type->poly_params[i].type),
- i + 1, bh_num_suffix(i + 1));
-
- return NULL;
- }
-
- if (sln->kind == PSK_Value) {
- resolve_expression_type(sln->value);
-
- if ((sln->value->flags & Ast_Flag_Comptime) == 0) {
- onyx_report_error(pos,
- "Expected compile-time known argument for '%b'.",
- sln->poly_sym->token->text,
- sln->poly_sym->token->length);
- return NULL;
- }
-
- if (!types_are_compatible(sln->value->type, ps_type->poly_params[i].type)) {
- onyx_report_error(pos, "Expected compile-time argument of type '%s', got '%s'.",
- type_get_name(ps_type->poly_params[i].type),
- type_get_name(sln->value->type));
- return NULL;
- }
- }
-
- i++;
- }
-
- char* unique_key = build_poly_slns_unique_key(slns);
- if (bh_table_has(AstStructType *, ps_type->concrete_structs, unique_key)) {
- AstStructType* concrete_struct = bh_table_get(AstStructType *, ps_type->concrete_structs, unique_key);
-
- if (concrete_struct->entity_type->state < Entity_State_Check_Types) {
- return NULL;
- }
-
- Type* cs_type = type_build_from_ast(context.ast_alloc, (AstType *) concrete_struct);
- if (!cs_type) return NULL;
-
- if (cs_type->Struct.poly_sln == NULL) cs_type->Struct.poly_sln = bh_arr_copy(global_heap_allocator, slns);
- if (cs_type->Struct.name == NULL) cs_type->Struct.name = build_poly_struct_name(ps_type, cs_type);
-
- return cs_type;
- }
-
- Scope* sln_scope = scope_create(context.ast_alloc, ps_type->scope, ps_type->token->pos);
- insert_poly_slns_into_scope(sln_scope, slns);
-
- AstStructType* concrete_struct = (AstStructType *) ast_clone(context.ast_alloc, ps_type->base_struct);
- bh_table_put(AstStructType *, ps_type->concrete_structs, unique_key, concrete_struct);
-
- add_entities_for_node(NULL, (AstNode *) concrete_struct, sln_scope, NULL);
- return NULL;
-}
--- /dev/null
+
+//
+// Polymorphic Procedures
+//
+
+// This flag is used by some of the procedures that try working with polymorphic things,
+// but need to wait until more information is known. Instead of passing an out parameter
+// into each of these procedures, a single global variable is used instead. If the type
+// checker ever gets multi-threaded, this would have to become a threadlocal variable.
+static b32 flag_to_yield = 0;
+
+// This flag is used in the very special case that you are passing a polymorphic procedure
+// to a polymorphic procedure, and you have enough information to instantiate said procedure
+// in order to resolve the type of one of the return values.
+static b32 doing_nested_polymorph_lookup = 0;
+
+// The name is pretty self-descriptive, but this is a node that is returned from things
+// like polymorphic_proc_lookup when it is determined that everything works so far, but
+// the caller must yield in order to finish checking this polymorphic procedure.
+AstTyped node_that_signals_a_yield = { Ast_Kind_Function, 0 };
+
+// Lazily initializes the two caches on a polymorphic procedure: the table of already
+// solidified concrete functions, and the map of in-flight solution queries.
+// Safe to call repeatedly; it only initializes structures that are still empty.
+static void ensure_polyproc_cache_is_created(AstPolyProc* pp) {
+    if (pp->concrete_funcs == NULL) bh_table_init(global_heap_allocator, pp->concrete_funcs, 16);
+    if (pp->active_queries.hashes == NULL) bh_imap_init(&pp->active_queries, global_heap_allocator, 31);
+}
+
+// Introduces the symbol for one polymorphic solution into the given scope.
+// For a type solution (PSK_Type), a raw type-alias node is created that points at the
+// solved type. For a value solution (PSK_Value), the compile-time-known value node is
+// introduced directly (it is not cloned; see the CLEANUP below).
+void insert_poly_sln_into_scope(Scope* scope, AstPolySolution *sln) {
+    AstNode *node = NULL;
+
+    switch (sln->kind) {
+        case PSK_Type:
+            node = onyx_ast_node_new(context.ast_alloc, sizeof(AstTypeRawAlias), Ast_Kind_Type_Raw_Alias);
+            ((AstTypeRawAlias *) node)->token = sln->poly_sym->token;
+            ((AstTypeRawAlias *) node)->to = sln->type;
+            break;
+
+        case PSK_Value:
+            // CLEANUP: Maybe clone this?
+            assert(sln->value->flags & Ast_Flag_Comptime);
+            node = (AstNode *) sln->value;
+            break;
+    }
+
+    // NOTE(review): if sln->kind is PSK_Undefined, node is still NULL here and a NULL
+    // binding is introduced — presumably callers never pass an undefined solution; verify.
+    symbol_introduce(scope, sln->poly_sym->token, node);
+}
+
+// Introduces every solution in the given array into the scope, one at a time.
+static void insert_poly_slns_into_scope(Scope* scope, bh_arr(AstPolySolution) slns) {
+    fori (i, 0, bh_arr_length(slns)) {
+        insert_poly_sln_into_scope(scope, &slns[i]);
+    }
+}
+
+// NOTE: This might return a volatile string. Do not store it without copying it.
+// Produces a short textual key identifying a single polymorphic solution, used as one
+// component of the cache key for solidified functions.
+//   - Type solutions use the type's unique name.
+//   - Integer-literal value solutions use "NUMLIT:<value>".
+//   - Any other value solution falls back to the node's pointer (see HACK below).
+// Returns NULL for PSK_Undefined, since no meaningful key can be built.
+static char* build_poly_solution_key(AstPolySolution* sln) {
+    if (sln->kind == PSK_Type) {
+        return (char *) type_get_unique_name(sln->type);
+
+    } else if (sln->kind == PSK_Value) {
+        // Static scratch buffer: the returned pointer is invalidated by the next call.
+        static char buffer[256];
+
+        fori (i, 0, 256) buffer[i] = 0;
+
+        if (sln->value->kind == Ast_Kind_NumLit) {
+            strncat(buffer, "NUMLIT:", 127);
+            strncat(buffer, bh_bprintf("%l", ((AstNumLit *) sln->value)->value.l), 127);
+
+        } else {
+            // HACK: For now, the value pointer is just used. This means that
+            // sometimes, even though the solution is the same, it won't be
+            // stored the same.
+            bh_snprintf(buffer, 128, "%p", sln->value);
+        }
+
+        return buffer;
+    }
+
+    return NULL;
+}
+
+// NOTE: This returns a volatile string. Do not store it without copying it.
+// Concatenates "name=key;" fragments for every solution into a single cache key that
+// uniquely identifies this set of polymorphic solutions.
+static char* build_poly_slns_unique_key(bh_arr(AstPolySolution) slns) {
+    static char key_buf[1024];
+    fori (i, 0, 1024) key_buf[i] = 0;
+
+    bh_arr_each(AstPolySolution, sln, slns) {
+        token_toggle_end(sln->poly_sym->token);
+
+        // BUGFIX: strncat's size argument is the maximum number of characters to
+        // APPEND, not the total buffer size. Passing a flat 1023 here allowed repeated
+        // appends to overflow key_buf; recompute the remaining space before each call.
+        strncat(key_buf, sln->poly_sym->token->text,   1023 - strlen(key_buf));
+        strncat(key_buf, "=",                          1023 - strlen(key_buf));
+        strncat(key_buf, build_poly_solution_key(sln), 1023 - strlen(key_buf));
+        strncat(key_buf, ";",                          1023 - strlen(key_buf));
+
+        token_toggle_end(sln->poly_sym->token);
+    }
+
+    return key_buf;
+}
+
+// NOTE: This function adds a solidified function to the entity heap for it to be processed
+// later. Two entities are inserted — one for the function header and one for the body —
+// both starting at the symbol-resolution state, and the resulting entity pointers are
+// linked back onto the function node. Always returns 1; the b32 return exists so that
+// callers may assert on the result.
+static b32 add_solidified_function_entities(AstSolidifiedFunction *solidified_func) {
+    solidified_func->func->flags |= Ast_Flag_Function_Used;
+    solidified_func->func->flags |= Ast_Flag_From_Polymorphism;
+
+    Entity func_header_entity = {
+        .state = Entity_State_Resolve_Symbols,
+        .type = Entity_Type_Function_Header,
+        .function = solidified_func->func,
+        .package = NULL,
+        // The poly scope carries the baked solutions; symbol resolution happens within it.
+        .scope = solidified_func->func->poly_scope,
+    };
+
+    Entity func_entity = {
+        .state = Entity_State_Resolve_Symbols,
+        .type = Entity_Type_Function,
+        .function = solidified_func->func,
+        .package = NULL,
+        .scope = solidified_func->func->poly_scope,
+    };
+
+    Entity* entity_header = entity_heap_insert(&context.entities, func_header_entity);
+    Entity* entity_body = entity_heap_insert(&context.entities, func_entity);
+
+    solidified_func->func_header_entity = entity_header;
+    solidified_func->func->entity_header = entity_header;
+    solidified_func->func->entity_body = entity_body;
+    return 1;
+}
+
+// NOTE: This function is responsible for taking all of the information about generating
+// a new polymorphic variant, and producing a solidified function. It optionally can only
+// generate the header of the function, which is useful for cases such as checking if a
+// set of arguments works for a polymorphic overload option.
+//
+//   pp          - the polymorphic procedure being instantiated
+//   slns        - the complete set of polymorphic solutions to bake in
+//   tkn         - token of the instantiation site for error reporting (may be NULL)
+//   header_only - when true, only the header is cloned and the body is flagged
+//                 incomplete (see ensure_solidified_function_has_body)
+static AstSolidifiedFunction generate_solidified_function(
+    AstPolyProc* pp,
+    bh_arr(AstPolySolution) slns,
+    OnyxToken* tkn,
+    b32 header_only) {
+
+    AstSolidifiedFunction solidified_func;
+    solidified_func.func_header_entity = NULL;
+
+    // NOTE: Use the position of token if one was provided, otherwise just use NULL.
+    OnyxFilePos poly_scope_pos = { 0 };
+    if (tkn) poly_scope_pos = tkn->pos;
+
+    if (header_only) {
+        solidified_func.func = (AstFunction *) clone_function_header(context.ast_alloc, pp->base_func);
+        solidified_func.func->flags |= Ast_Flag_Incomplete_Body;
+
+    } else {
+        solidified_func.func = (AstFunction *) ast_clone(context.ast_alloc, pp->base_func);
+    }
+
+    // The poly scope holds the baked solutions, sitting between the procedure's
+    // definition scope and the cloned function itself.
+    solidified_func.func->poly_scope = scope_create(context.ast_alloc, pp->poly_scope, poly_scope_pos);
+    insert_poly_slns_into_scope(solidified_func.func->poly_scope, slns);
+
+    solidified_func.func->flags |= Ast_Flag_From_Polymorphism;
+    solidified_func.func->generated_from = tkn;
+
+    // HACK: Remove the baked parameters from the function definition so they can be
+    // resolved in the poly scope above the function. This does feel kind of gross
+    // and I would love an alternative to tell it to just "skip" the parameter, but
+    // that is liable to breaking because it is one more thing to remember.
+    //   - brendanfh 2021/01/18
+    u32 removed_params = 0;
+    bh_arr_each(AstPolyParam, param, pp->poly_params) {
+        if (param->kind != PPK_Baked_Value) continue;
+
+        // Indices shift left as earlier parameters are removed, hence the offset.
+        bh_arr_deleten(solidified_func.func->params, param->idx - removed_params, 1);
+        removed_params++;
+    }
+
+    return solidified_func;
+}
+
+// Completes a solidified function that was originally generated "header only": clones
+// the body from the base procedure, enqueues the header/body entities, and clears the
+// incomplete-body flag. No-op when the body is already present.
+static void ensure_solidified_function_has_body(AstPolyProc* pp, AstSolidifiedFunction *solidified_func) {
+    if (solidified_func->func->flags & Ast_Flag_Incomplete_Body) {
+        clone_function_body(context.ast_alloc, solidified_func->func, pp->base_func);
+
+        // NOTE: This call should never fail here, because the only case where it can
+        // fail is if there was a problem with the header, and the header would have to
+        // have successfully passed type checking before this became a solidified procedure.
+        //
+        // BUGFIX: this call used to live *inside* assert(...), meaning it was compiled
+        // out entirely in NDEBUG builds and the entities were never created. Perform it
+        // unconditionally and only assert on the result.
+        b32 added = add_solidified_function_entities(solidified_func);
+        assert(added);
+        (void) added;
+
+        solidified_func->func->flags &= ~Ast_Flag_Incomplete_Body;
+    }
+}
+
+// NOTE: These are temporary data structures used to represent the pattern matching system
+// of polymorphic type resolution.
+
+// The outcome of solving one polymorphic variable: either a concrete type (PSK_Type),
+// a compile-time value (PSK_Value), or PSK_Undefined when no solution was found.
+typedef struct PolySolveResult {
+    PolySolutionKind kind;
+    union {
+        AstTyped* value;
+        Type* actual;
+    };
+} PolySolveResult;
+
+// One work item in the breadth-first walk performed by solve_poly_type: a type
+// expression still to be examined, paired with the actual type/value it lines up with.
+typedef struct PolySolveElem {
+    AstType* type_expr;
+
+    PolySolutionKind kind;
+    union {
+        AstTyped* value;
+        Type* actual;
+    };
+} PolySolveElem;
+
+// NOTE: The job of this function is to solve for the type/value that belongs in a
+// polymorphic variable. This function takes in three arguments:
+//   * The symbol node of the polymorphic parameter being searched for
+//   * The type expression that should contain the symbol node somewhere within it
+//   * The actual type to pattern match against
+//
+// This function utilizes a basic breadth-first search of the type_expr and actual type
+// trees, always moving along them in parallel, so when the target is reached (if it is
+// ever reached), the "actual" is the matched type/value. Returns PSK_Undefined in the
+// result when the target symbol is never found or the shapes do not match.
+static PolySolveResult solve_poly_type(AstNode* target, AstType* type_expr, Type* actual) {
+    bh_arr(PolySolveElem) elem_queue = NULL;
+    bh_arr_new(global_heap_allocator, elem_queue, 4);
+
+    PolySolveResult result = { PSK_Undefined, { NULL } };
+
+    // Seed the queue with the root pair: the whole type expression vs. the whole type.
+    bh_arr_push(elem_queue, ((PolySolveElem) {
+        .type_expr = type_expr,
+        .kind = PSK_Type,
+        .actual = actual
+    }));
+
+    while (!bh_arr_is_empty(elem_queue)) {
+        // Pop from the front to keep the search breadth-first.
+        PolySolveElem elem = elem_queue[0];
+        bh_arr_deleten(elem_queue, 0, 1);
+
+        // Found the polymorphic symbol: whatever "actual" we walked alongside is the answer.
+        if (elem.type_expr == (AstType *) target) {
+            result.kind = elem.kind;
+
+            assert(elem.kind != PSK_Undefined);
+            if (result.kind == PSK_Type)  result.actual = elem.actual;
+            if (result.kind == PSK_Value) result.value = elem.value;
+            break;
+        }
+
+        // Value elements are leaves; only type elements can be decomposed further.
+        if (elem.kind != PSK_Type) continue;
+
+        // Decompose the type expression and the actual type in lockstep, pushing each
+        // matched child pair. A kind mismatch simply drops this branch of the search.
+        switch (elem.type_expr->kind) {
+            case Ast_Kind_Pointer_Type: {
+                if (elem.actual->kind != Type_Kind_Pointer) break;
+
+                bh_arr_push(elem_queue, ((PolySolveElem) {
+                    .type_expr = ((AstPointerType *) elem.type_expr)->elem,
+                    .kind = PSK_Type,
+                    .actual = elem.actual->Pointer.elem,
+                }));
+                break;
+            }
+
+            case Ast_Kind_Array_Type: {
+                if (elem.actual->kind != Type_Kind_Array) break;
+
+                // Array types contribute two pairs: the count expression (a value) and
+                // the element type.
+                bh_arr_push(elem_queue, ((PolySolveElem) {
+                    .type_expr = (AstType*) ((AstArrayType *) elem.type_expr)->count_expr,
+                    .kind = PSK_Value,
+
+                    // CLEANUP: Making an integer literal every time is very very very gross. This should
+                    // at least be cached or something.
+                    .value = (AstTyped *) make_int_literal(context.ast_alloc, elem.actual->Array.count)
+                }));
+
+                bh_arr_push(elem_queue, ((PolySolveElem) {
+                    .type_expr = ((AstArrayType *) elem.type_expr)->elem,
+                    .kind = PSK_Type,
+                    .actual = elem.actual->Array.elem,
+                }));
+                break;
+            }
+
+            case Ast_Kind_Slice_Type: {
+                // A slice pattern matches any of the "sequence-like" actual types.
+                if (elem.actual->kind != Type_Kind_Slice && elem.actual->kind != Type_Kind_DynArray
+                    && elem.actual->kind != Type_Kind_VarArgs && elem.actual->kind != Type_Kind_Array) break;
+
+                bh_arr_push(elem_queue, ((PolySolveElem) {
+                    .type_expr = ((AstSliceType *) elem.type_expr)->elem,
+                    .kind = PSK_Type,
+
+                    // HACK: This makes the assumption that arrays, slices, dynamic arrays and varargs have the same element type at the same location.
+                    .actual = elem.actual->Slice.elem,
+                }));
+                break;
+            }
+
+            case Ast_Kind_DynArr_Type: {
+                if (elem.actual->kind != Type_Kind_DynArray) break;
+
+                bh_arr_push(elem_queue, ((PolySolveElem) {
+                    .type_expr = ((AstDynArrType *) elem.type_expr)->elem,
+                    .kind = PSK_Type,
+                    .actual = elem.actual->DynArray.elem,
+                }));
+                break;
+            }
+
+            case Ast_Kind_VarArg_Type:
+                // NOTE(review): this pushes the top-level 'actual', not 'elem.actual'.
+                // That is only equivalent when varargs appear at the root of the type
+                // expression — TODO confirm this is intentional.
+                bh_arr_push(elem_queue, ((PolySolveElem) {
+                    .type_expr = ((AstVarArgType *) elem.type_expr)->elem,
+                    .kind = PSK_Type,
+                    .actual = actual,
+                }));
+                break;
+
+            case Ast_Kind_Function_Type: {
+                if (elem.actual->kind != Type_Kind_Function) break;
+
+                AstFunctionType* ft = (AstFunctionType *) elem.type_expr;
+
+                // Pair each parameter type, then the return type.
+                fori (i, 0, (i64) ft->param_count) {
+                    bh_arr_push(elem_queue, ((PolySolveElem) {
+                        .type_expr = ft->params[i],
+                        .kind = PSK_Type,
+                        .actual = elem.actual->Function.params[i],
+                    }));
+                }
+
+                bh_arr_push(elem_queue, ((PolySolveElem) {
+                    .type_expr = ft->return_type,
+                    .kind = PSK_Type,
+                    .actual = elem.actual->Function.return_type,
+                }));
+
+                break;
+            }
+
+            case Ast_Kind_Poly_Call_Type: {
+                // Match a polymorphic-struct call pattern against an instantiated struct,
+                // pairing each pattern argument with the struct's recorded solution.
+                if (elem.actual->kind != Type_Kind_Struct) break;
+                if (bh_arr_length(elem.actual->Struct.poly_sln) != bh_arr_length(((AstPolyCallType *) elem.type_expr)->params)) break;
+
+                AstPolyCallType* pt = (AstPolyCallType *) elem.type_expr;
+
+                fori (i, 0, bh_arr_length(pt->params)) {
+                    PolySolutionKind kind = elem.actual->Struct.poly_sln[i].kind;
+                    if (kind == PSK_Type) {
+                        bh_arr_push(elem_queue, ((PolySolveElem) {
+                            .kind = kind,
+                            .type_expr = (AstType *) pt->params[i],
+                            .actual = elem.actual->Struct.poly_sln[i].type,
+                        }));
+                    } else {
+                        bh_arr_push(elem_queue, ((PolySolveElem) {
+                            .kind = kind,
+                            .type_expr = (AstType *) pt->params[i],
+                            .value = elem.actual->Struct.poly_sln[i].value,
+                        }));
+                    }
+                }
+
+                break;
+            }
+
+            case Ast_Kind_Type_Compound: {
+                if (elem.actual->kind != Type_Kind_Compound) break;
+                if (elem.actual->Compound.count != (u32) bh_arr_length(((AstCompoundType *) elem.type_expr)->types)) break;
+
+                AstCompoundType* ct = (AstCompoundType *) elem.type_expr;
+
+                fori (i, 0, bh_arr_length(ct->types)) {
+                    bh_arr_push(elem_queue, ((PolySolveElem) {
+                        .kind = PSK_Type,
+                        .type_expr = ct->types[i],
+                        .actual = elem.actual->Compound.types[i],
+                    }));
+                }
+
+                break;
+            }
+
+            default: break;
+        }
+    }
+
+    bh_arr_free(elem_queue);
+
+    return result;
+}
+
+// NOTE: The job of this function is to take a polymorphic parameter and a set of arguments
+// and solve for the argument that matches the parameter. This is needed because polymorphic
+// procedure resolution has to happen before the named arguments are placed in their correct
+// positions. Returns NULL (and optionally sets *err_msg) when no matching argument exists.
+static AstTyped* lookup_param_in_arguments(AstFunction* func, AstPolyParam* param, Arguments* args, char** err_msg) {
+    bh_arr(AstTyped *) arg_arr = args->values;
+    bh_arr(AstNamedValue *) named_values = args->named_values;
+
+    // NOTE: This check is safe because currently the arguments given without a name
+    // always map to the beginning indices of the argument array.
+    if (param->idx >= (u64) bh_arr_length(arg_arr)) {
+        // Not supplied positionally; search the named values for one whose name matches
+        // this parameter's declared name.
+        OnyxToken* param_name = func->params[param->idx].local->token;
+
+        bh_arr_each(AstNamedValue *, named_value, named_values) {
+            if (token_equals(param_name, (*named_value)->token)) {
+                return (AstTyped *) (*named_value)->value;
+            }
+        }
+
+        // CLEANUP
+        if (err_msg) *err_msg = "Not enough arguments to polymorphic procedure. This error message may not be entirely right.";
+
+    } else {
+        return (AstTyped *) arg_arr[param->idx];
+    }
+
+    return NULL;
+}
+
+// Attempts to instantiate the polymorphic procedure 'pp' using only the parameter types
+// of the function type 'ft'. The return type is temporarily forced to void so that a
+// "partial" function type can be built and matched against. Returns NULL and sets
+// doing_nested_polymorph_lookup when more information is needed before this can succeed.
+static AstTyped* try_lookup_based_on_partial_function_type(AstPolyProc *pp, AstFunctionType *ft) {
+    if (ft->partial_function_type == NULL) {
+        AstType *old_return_type = ft->return_type;
+        ft->return_type = (AstType *) &basic_type_void;
+        ft->partial_function_type = type_build_from_ast(context.ast_alloc, (AstType *) ft);
+
+        // BUGFIX: restore the real return type on *every* path. Previously the early
+        // return below left ft->return_type pointing at void, corrupting the type
+        // expression for any later retry. (The redundant assert after the NULL check
+        // was removed as well.)
+        ft->return_type = old_return_type;
+
+        if (!ft->partial_function_type) {
+            doing_nested_polymorph_lookup = 1;
+            return NULL;
+        }
+    }
+
+    AstTyped *result = (AstTyped *) polymorphic_proc_lookup(pp, PPLM_By_Function_Type, ft->partial_function_type, pp->token);
+
+    // A typeless result or the yield sentinel both mean "not ready yet"; record that a
+    // nested lookup is pending so the caller knows to come back later.
+    if (result && result->type == NULL) {
+        doing_nested_polymorph_lookup = 1;
+        result = NULL;
+    }
+    if (result == &node_that_signals_a_yield) {
+        doing_nested_polymorph_lookup = 1;
+        result = NULL;
+    }
+
+    return result;
+}
+
+// NOTE: The job of this function is to solve for the type of an AstPolySolution using the
+// provided information. It is assumed that "param" is of kind PPK_Poly_Type. This function
+// uses either the arguments provided, or a function type to compare against, to pattern
+// match for the type that the parameter must be. On failure, *resolved is left untouched
+// (PSK_Undefined) and *err_msg may be set.
+static void solve_for_polymorphic_param_type(PolySolveResult* resolved, AstFunction* func, AstPolyParam* param, PolyProcLookupMethod pp_lookup, ptr actual, char** err_msg) {
+    Type* actual_type = NULL;
+
+    switch (pp_lookup) {
+        case PPLM_By_Arguments: {
+            Arguments* args = (Arguments *) actual;
+
+            AstTyped* typed_param = lookup_param_in_arguments(func, param, args, err_msg);
+            if (typed_param == NULL) return;
+
+            // CLEANUP FIXME HACK TODO GROSS
+            // Special case: the argument is itself a polymorphic procedure. If the target
+            // parameter's declared type is a fully-concrete function type, attempt to
+            // instantiate the argument against that partial function type first, so its
+            // concrete type can be resolved below.
+            if (typed_param->kind == Ast_Kind_Argument) {
+                AstTyped* potential = ((AstArgument *) typed_param)->value;
+                if (potential->kind == Ast_Kind_Polymorphic_Proc) {
+                    if (param->idx < (u32) bh_arr_length(func->params)) {
+                        AstType *param_type = func->params[param->idx].local->type_node;
+                        if (param_type->kind == Ast_Kind_Function_Type) {
+                            AstFunctionType *ft = (AstFunctionType *) param_type;
+                            b32 all_types = 1;
+                            fori (i, 0, (i32) ft->param_count) {
+                                if (!node_is_type((AstNode *) ft->params[i])) {
+                                    all_types = 0;
+                                    break;
+                                }
+                            }
+
+                            if (all_types) {
+                                typed_param = try_lookup_based_on_partial_function_type((AstPolyProc *) potential, ft);
+                            }
+                        }
+                    }
+                }
+            }
+
+            actual_type = resolve_expression_type(typed_param);
+            if (actual_type == NULL) return;
+
+            break;
+        }
+
+        case PPLM_By_Function_Type: {
+            // Match directly against the corresponding parameter of the given function type.
+            Type* ft = (Type *) actual;
+            if (param->idx >= ft->Function.param_count) {
+                if (err_msg) *err_msg = "Incompatible polymorphic argument to function parameter.";
+                return;
+            }
+
+            actual_type = ft->Function.params[param->idx];
+            break;
+        }
+
+        default: return;
+    }
+
+    *resolved = solve_poly_type(param->poly_sym, param->type_expr, actual_type);
+}
+
+
+// NOTE: The job of this function is to look through the arguments provided and find a matching
+// value that is to be baked into the polymorphic procedure's poly-scope. It is expected that
+// param will be of kind PPK_Baked_Value. In other words, this handles the ($Baked: type) case.
+// On success, *resolved holds the solution and the source argument is flagged as baked;
+// on failure, *err_msg may be set; flag_to_yield is raised when type info is not ready yet.
+// CLEANUP: This function is kind of gross at the moment, because it handles different cases for
+// the argument kind. When type expressions (type_expr) become first-class types in the type
+// system, this code should be able to be a lot cleaner.
+static void solve_for_polymorphic_param_value(PolySolveResult* resolved, AstFunction* func, AstPolyParam* param, PolyProcLookupMethod pp_lookup, ptr actual, char** err_msg) {
+    if (pp_lookup != PPLM_By_Arguments) {
+        *err_msg = "Function type cannot be used to solved for baked parameter value.";
+        return;
+    }
+
+    Arguments* args = (Arguments *) actual;
+    AstTyped* value = lookup_param_in_arguments(func, param, args, err_msg);
+    if (value == NULL) return;
+
+    // HACK: Storing the original value because if this was an AstArgument, we need to flag
+    // it as baked if it is determined that the argument is of the correct kind and type.
+    AstTyped* orig_value = value;
+    if (value->kind == Ast_Kind_Argument) {
+        ((AstArgument *) orig_value)->is_baked = 0;
+        value = ((AstArgument *) value)->value;
+    }
+
+    Type* param_type = NULL;
+    AstType *param_type_expr = func->params[param->idx].local->type_node;
+    if (param_type_expr == (AstType *) &basic_type_type_expr) {
+        // ($Baked: type_expr) case: the baked value must itself be a type expression.
+        if (!node_is_type((AstNode *) value)) {
+            if (err_msg) *err_msg = "Expected type expression.";
+            return;
+        }
+
+        Type* resolved_type = type_build_from_ast(context.ast_alloc, (AstType *) value);
+        if (resolved_type == NULL) flag_to_yield = 1;
+
+        *resolved = ((PolySolveResult) { PSK_Type, .actual = resolved_type });
+
+    } else {
+        // Ordinary baked value: must be compile-time known and unify with the
+        // parameter's declared type.
+        resolve_expression_type(value);
+
+        if ((value->flags & Ast_Flag_Comptime) == 0) {
+            if (err_msg) *err_msg = "Expected compile-time known argument.";
+            return;
+        }
+
+        param_type = type_build_from_ast(context.ast_alloc, param_type_expr);
+        if (param_type == NULL) {
+            flag_to_yield = 1;
+            *err_msg = "Waiting to know type for polymorphic value.";
+            return;
+        }
+
+        // Macros are baked via their underlying function node.
+        AstTyped* value_to_use = value;
+        if (value->kind == Ast_Kind_Macro) {
+            value_to_use = (AstTyped *) get_function_from_node((AstNode *) value);
+        }
+
+        TypeMatch tm = unify_node_and_type(&value_to_use, param_type);
+        if (tm == TYPE_MATCH_FAILED) {
+            if (err_msg) *err_msg = bh_aprintf(global_scratch_allocator,
+                    "The procedure '%s' expects a value of type '%s' for baked %d%s parameter, got '%s'.",
+                    get_function_name(func),
+                    type_get_name(param_type),
+                    param->idx + 1,
+                    bh_num_suffix(param->idx + 1),
+                    node_get_type_name(value_to_use));
+            return;
+        }
+
+        if (tm == TYPE_MATCH_YIELD) flag_to_yield = 1;
+
+        *resolved = ((PolySolveResult) { PSK_Value, value });
+    }
+
+    if (orig_value->kind == Ast_Kind_Argument) {
+        ((AstArgument *) orig_value)->is_baked = 1;
+    }
+}
+
+// Solves a single polymorphic parameter, writing the solution into *out.
+// Returns TYPE_MATCH_SUCCESS when solved, TYPE_MATCH_YIELD / TYPE_MATCH_SPECIAL when
+// more information is needed first (the corresponding global flags are consumed here),
+// and TYPE_MATCH_FAILED with *err_msg populated otherwise.
+TypeMatch find_polymorphic_sln(AstPolySolution *out, AstPolyParam *param, AstFunction *func, PolyProcLookupMethod pp_lookup, ptr actual, char** err_msg) {
+    // NOTE: Solve for the polymorphic parameter's value
+    PolySolveResult resolved = { PSK_Undefined };
+    switch (param->kind) {
+        case PPK_Poly_Type:   solve_for_polymorphic_param_type (&resolved, func, param, pp_lookup, actual, err_msg); break;
+        case PPK_Baked_Value: solve_for_polymorphic_param_value(&resolved, func, param, pp_lookup, actual, err_msg); break;
+
+        default: if (err_msg) *err_msg = "Invalid polymorphic parameter kind. This is a compiler bug.";
+    }
+
+    // Consume (and reset) the global signal flags set by the solvers above.
+    if (doing_nested_polymorph_lookup) {
+        doing_nested_polymorph_lookup = 0;
+        return TYPE_MATCH_SPECIAL;
+    }
+
+    if (flag_to_yield) {
+        flag_to_yield = 0;
+        return TYPE_MATCH_YIELD;
+    }
+
+    switch (resolved.kind) {
+        case PSK_Type:
+            out->kind     = PSK_Type;
+            out->poly_sym = param->poly_sym;
+            out->type     = resolved.actual;
+            return TYPE_MATCH_SUCCESS;
+
+        case PSK_Value:
+            out->kind     = PSK_Value;
+            out->poly_sym = param->poly_sym;
+            out->value    = resolved.value;
+            return TYPE_MATCH_SUCCESS;
+
+        case PSK_Undefined:
+        default:
+            // NOTE: If no error message has been assigned to why this polymorphic parameter
+            // resolution was unsuccessful, provide a basic dummy one.
+            if (err_msg && *err_msg == NULL)
+                *err_msg = bh_aprintf(global_scratch_allocator,
+                    "Unable to solve for polymorphic variable '%b'.",
+                    param->poly_sym->token->text,
+                    param->poly_sym->token->length);
+
+            out->kind = PSK_Undefined;
+            return TYPE_MATCH_FAILED;
+    }
+}
+
+// NOTE: The job of this function is to take a polymorphic procedure, as well as a method of
+// solving for the polymorphic variables, in order to return an array of the solutions for all
+// of the polymorphic variables. Solving is done asynchronously through AstPolyQuery entities:
+// the first call enqueues a query and yields (flag_to_yield); subsequent calls return the
+// finished solutions once the query's entity is finalized, or NULL if it failed.
+static bh_arr(AstPolySolution) find_polymorphic_slns(AstPolyProc* pp, PolyProcLookupMethod pp_lookup, ptr actual, OnyxToken *tkn, b32 necessary) {
+    ensure_polyproc_cache_is_created(pp);
+
+    // If a query for this exact 'actual' is already in flight, report its status
+    // instead of creating a duplicate.
+    if (bh_imap_has(&pp->active_queries, (u64) actual)) {
+        AstPolyQuery *query = (AstPolyQuery *) bh_imap_get(&pp->active_queries, (u64) actual);
+        assert(query->kind == Ast_Kind_Polymorph_Query);
+        assert(query->entity);
+
+        if (query->entity->state == Entity_State_Finalized) return query->slns;
+        if (query->entity->state == Entity_State_Failed)    return NULL;
+
+        flag_to_yield = 1;
+        return NULL;
+    }
+
+    bh_arr(AstPolySolution) slns = NULL;
+    bh_arr_new(global_heap_allocator, slns, bh_arr_length(pp->poly_params));
+
+    // NOTE: "known solutions" are given through a '#solidify' directive. If this polymorphic
+    // procedure is the result of a partially applied solidification, this array will be non-
+    // empty and these solutions will be used.
+    bh_arr_each(AstPolySolution, known_sln, pp->known_slns) bh_arr_push(slns, *known_sln);
+
+    // Build and enqueue a new query entity to do the actual solving; this call then
+    // yields until the query completes.
+    AstPolyQuery *query = onyx_ast_node_new(context.ast_alloc, sizeof(AstPolyQuery), Ast_Kind_Polymorph_Query);
+    query->token = pp->token;
+    query->proc = pp;
+    query->pp_lookup = pp_lookup;
+    query->given = actual;
+    query->error_loc = tkn;
+    query->slns = slns;
+    query->function_header = clone_function_header(context.ast_alloc, pp->base_func);
+    query->function_header->flags |= Ast_Flag_Header_Check_No_Error;
+    query->function_header->scope = NULL;
+    query->error_on_fail = necessary;
+    query->successful_symres = 1;
+
+    bh_imap_put(&pp->active_queries, (u64) actual, (u64) query);
+    add_entities_for_node(NULL, (AstNode *) query, NULL, NULL);
+
+    flag_to_yield = 1;
+    return NULL;
+}
+
+// NOTE: The job of this function is to be a wrapper to other functions, providing an error
+// message if a solution could not be found. This can't be merged with polymorphic_proc_solidify
+// because polymorphic_proc_try_solidify uses the aforementioned function.
+// Returns the yield sentinel while solutions are still being queried, NULL on failure.
+AstFunction* polymorphic_proc_lookup(AstPolyProc* pp, PolyProcLookupMethod pp_lookup, ptr actual, OnyxToken* tkn) {
+    ensure_polyproc_cache_is_created(pp);
+
+    bh_arr(AstPolySolution) slns = find_polymorphic_slns(pp, pp_lookup, actual, tkn, 1);
+    if (slns == NULL) {
+        // Distinguish "try again later" (yield) from a hard failure.
+        if (flag_to_yield) {
+            flag_to_yield = 0;
+            return (AstFunction *) &node_that_signals_a_yield;
+        }
+
+        return NULL;
+    }
+
+    AstFunction* result = polymorphic_proc_solidify(pp, slns, tkn);
+    return result;
+}
+
+// Produces (or retrieves from cache) the concrete function for a fully-determined set of
+// polymorphic solutions. On a cache miss, the new function's entities are enqueued and the
+// yield sentinel is returned so the caller retries once the function has been processed.
+AstFunction* polymorphic_proc_solidify(AstPolyProc* pp, bh_arr(AstPolySolution) slns, OnyxToken* tkn) {
+    ensure_polyproc_cache_is_created(pp);
+
+    // NOTE: Check if a version of this polyproc has already been created.
+    char* unique_key = build_poly_slns_unique_key(slns);
+    if (bh_table_has(AstSolidifiedFunction, pp->concrete_funcs, unique_key)) {
+        AstSolidifiedFunction solidified_func = bh_table_get(AstSolidifiedFunction, pp->concrete_funcs, unique_key);
+
+        // NOTE: If this solution was originally created from a "build_only_header" call, then the body
+        // will not have been or type checked, or anything. This ensures that the body is copied, the
+        // entities are created and entered into the pipeline.
+        ensure_solidified_function_has_body(pp, &solidified_func);
+
+        // NOTE: Again, if this came from a "build_only_header" call, then there was no known token and
+        // the "generated_from" member will be null. It is best to set it here so errors reported in that
+        // function can report where the polymorphic instantiation occurred.
+        if (solidified_func.func->generated_from == NULL)
+            solidified_func.func->generated_from = tkn;
+
+        return solidified_func.func;
+    }
+
+    AstSolidifiedFunction solidified_func = generate_solidified_function(pp, slns, tkn, 0);
+    add_solidified_function_entities(&solidified_func);
+
+    // NOTE: Cache the function for later use, reducing duplicate functions.
+    bh_table_put(AstSolidifiedFunction, pp->concrete_funcs, unique_key, solidified_func);
+
+    // The freshly-created function has not been checked yet; yield until it has.
+    return (AstFunction *) &node_that_signals_a_yield;
+}
+
+// NOTE: This can return either a AstFunction or an AstPolyProc, depending if enough parameters were
+// supplied to remove all the polymorphic variables from the function. Used by the '#solidify'
+// directive: unknown solution names are reported as errors (returning the original polyproc),
+// a full solution set solidifies immediately, and a partial set produces a new polyproc that
+// shares the original's cache and carries the supplied solutions as "known".
+AstNode* polymorphic_proc_try_solidify(AstPolyProc* pp, bh_arr(AstPolySolution) slns, OnyxToken* tkn) {
+    i32 valid_argument_count = 0;
+
+    // Validate that every provided solution names an actual polymorphic variable of pp.
+    bh_arr_each(AstPolySolution, sln, slns) {
+        b32 found_match = 0;
+
+        bh_arr_each(AstPolyParam, param, pp->poly_params) {
+            if (token_equals(sln->poly_sym->token, param->poly_sym->token)) {
+                found_match = 1;
+                break;
+            }
+        }
+
+        if (found_match) {
+            valid_argument_count++;
+        } else {
+            onyx_report_error(tkn->pos, "'%b' is not a type variable of '%b'.",
+                sln->poly_sym->token->text, sln->poly_sym->token->length,
+                pp->token->text, pp->token->length);
+            return (AstNode *) pp;
+        }
+    }
+
+    if (valid_argument_count == bh_arr_length(pp->poly_params)) {
+        // Every variable solved: produce the concrete function directly.
+        return (AstNode *) polymorphic_proc_solidify(pp, slns, tkn);
+
+    } else {
+        // HACK: Some of these initializations assume that the entity for this polyproc has
+        // made it through the symbol resolution phase.
+        //   - brendanfh 2020/12/25
+        AstPolyProc* new_pp = onyx_ast_node_new(context.ast_alloc, sizeof(AstPolyProc), Ast_Kind_Polymorphic_Proc);
+        new_pp->token = tkn;
+        new_pp->base_func = pp->base_func;
+        new_pp->flags = pp->flags;
+        new_pp->poly_params = pp->poly_params;
+
+        // The partially-applied polyproc shares the original's solidification cache, so
+        // equivalent instantiations through either handle reuse the same functions.
+        ensure_polyproc_cache_is_created(pp);
+        new_pp->concrete_funcs = pp->concrete_funcs;
+
+        new_pp->known_slns = NULL;
+        bh_arr_new(global_heap_allocator, new_pp->known_slns, bh_arr_length(pp->known_slns) + bh_arr_length(slns));
+
+        bh_arr_each(AstPolySolution, sln, pp->known_slns) bh_arr_push(new_pp->known_slns, *sln);
+        bh_arr_each(AstPolySolution, sln, slns)           bh_arr_push(new_pp->known_slns, *sln);
+
+        return (AstNode *) new_pp;
+    }
+}
+
+// Builds (or retrieves) only the *header* of a concrete instance of 'pp', solving for the
+// polymorphic parameters from 'actual' as interpreted by pp_lookup. Returns the yield
+// sentinel while solutions are still being queried, NULL when solving failed.
+AstFunction* polymorphic_proc_build_only_header(AstPolyProc* pp, PolyProcLookupMethod pp_lookup, ptr actual) {
+    ensure_polyproc_cache_is_created(pp);
+
+    bh_arr(AstPolySolution) slns = find_polymorphic_slns(pp, pp_lookup, actual, NULL, 0);
+    if (flag_to_yield) {
+        flag_to_yield = 0;
+        return (AstFunction *) &node_that_signals_a_yield;
+    }
+    if (slns == NULL) return NULL;
+
+    // CLEANUP: a second ensure_polyproc_cache_is_created(pp) call used to sit here;
+    // the call at the top of the function already guarantees the cache exists.
+    return polymorphic_proc_build_only_header_with_slns(pp, slns, 0);
+}
+
+// Builds only the header of a concrete instance of 'pp' for an already-known solution set.
+// A temporary header entity is enqueued on first request and the yield sentinel returned;
+// once that entity finalizes, the completed (header-only) function is returned, or NULL if
+// header checking failed. Callers needing the body go through polymorphic_proc_solidify,
+// which fills it in via ensure_solidified_function_has_body.
+AstFunction* polymorphic_proc_build_only_header_with_slns(AstPolyProc* pp, bh_arr(AstPolySolution) slns, b32 error_if_failed) {
+    AstSolidifiedFunction solidified_func;
+
+    char* unique_key = build_poly_slns_unique_key(slns);
+    if (bh_table_has(AstSolidifiedFunction, pp->concrete_funcs, unique_key)) {
+        solidified_func = bh_table_get(AstSolidifiedFunction, pp->concrete_funcs, unique_key);
+
+    } else {
+        // NOTE: This function is only going to have the header of it correctly created.
+        // Nothing should happen to this function's body or else the original will be corrupted.
+        //   - brendanfh 2021/01/10
+        solidified_func = generate_solidified_function(pp, slns, NULL, 1);
+    }
+
+    // A non-NULL header entity means the header was already enqueued; report its status.
+    if (solidified_func.func_header_entity) {
+        if (solidified_func.func_header_entity->state == Entity_State_Finalized) return solidified_func.func;
+        if (solidified_func.func_header_entity->state == Entity_State_Failed)    return NULL;
+
+        return (AstFunction *) &node_that_signals_a_yield;
+    }
+
+    // Suppress header-check errors unless the caller explicitly wants them reported.
+    BH_MASK_SET(solidified_func.func->flags, !error_if_failed, Ast_Flag_Header_Check_No_Error);
+
+    Entity func_header_entity = {
+        .state = Entity_State_Resolve_Symbols,
+        .type = Entity_Type_Temp_Function_Header,
+        .function = solidified_func.func,
+        .package = NULL,
+        .scope = solidified_func.func->poly_scope,
+    };
+
+    Entity* func_header_entity_ptr = entity_heap_insert(&context.entities, func_header_entity);
+    solidified_func.func_header_entity = func_header_entity_ptr;
+
+    // NOTE: Cache the function for later use.
+    bh_table_put(AstSolidifiedFunction, pp->concrete_funcs, unique_key, solidified_func);
+
+    return (AstFunction *) &node_that_signals_a_yield;
+}
+
+//
+// Polymorphic Structures
+//
+//
+// Currently, I am not very happy about how polymorphic structure generation works. My biggest problem
+// with it is that it is very different from the polymorphic procedure generation. Also, it needs to
+// completely generate and check the structure right away, which means there is a lot of up-front work
+// done here that could probably be done elsewhere. This really relates to a large problem in the compiler
+// that types need to be known completely by the time symbol resolution is done, even though that
+// information shouldn't need to be known until right before the types are checked.
+//
+// The above documentation is very incorrect but I don't want to fix it right now. Basically, polymorphic
+// structures now have a delay instantiation phase and are not forced to be completed immediately.
+
+char* build_poly_struct_name(AstPolyStructType* ps_type, Type* cs_type) {
+ char name_buf[256];
+ fori (i, 0, 256) name_buf[i] = 0;
+
+ strncat(name_buf, ps_type->name, 255);
+ strncat(name_buf, "(", 255);
+ bh_arr_each(AstPolySolution, ptype, cs_type->Struct.poly_sln) {
+ if (ptype != cs_type->Struct.poly_sln)
+ strncat(name_buf, ", ", 255);
+
+        // This logic will have to be in other places as well.
+
+ switch (ptype->kind) {
+ case PSK_Undefined: assert(0); break;
+ case PSK_Type: strncat(name_buf, type_get_name(ptype->type), 255); break;
+ case PSK_Value: {
+ // FIX
+ AstNode* value = strip_aliases((AstNode *) ptype->value);
+
+ if (value->kind == Ast_Kind_NumLit) {
+ AstNumLit* nl = (AstNumLit *) value;
+ if (type_is_integer(nl->type)) {
+ strncat(name_buf, bh_bprintf("%l", nl->value.l), 127);
+ } else {
+ strncat(name_buf, "numlit (FIX ME)", 127);
+ }
+ } else if (value->kind == Ast_Kind_Code_Block) {
+ AstCodeBlock* code = (AstCodeBlock *) value;
+ OnyxFilePos code_loc = code->token->pos;
+ strncat(name_buf, bh_bprintf("code at %s:%d,%d", code_loc.filename, code_loc.line, code_loc.column), 127);
+ } else {
+ strncat(name_buf, "<expr>", 127);
+ }
+
+ break;
+ }
+ }
+ }
+ strncat(name_buf, ")", 255);
+
+ return bh_aprintf(global_heap_allocator, "%s", name_buf);
+}
+
+Type* polymorphic_struct_lookup(AstPolyStructType* ps_type, bh_arr(AstPolySolution) slns, OnyxFilePos pos) {
+ // @Cleanup
+ assert(ps_type->scope != NULL);
+
+ if (ps_type->concrete_structs == NULL) {
+ bh_table_init(global_heap_allocator, ps_type->concrete_structs, 16);
+ }
+
+ if (bh_arr_length(slns) != bh_arr_length(ps_type->poly_params)) {
+ onyx_report_error(pos, "Wrong number of arguments for '%s'. Expected %d, got %d",
+ ps_type->name,
+ bh_arr_length(ps_type->poly_params),
+ bh_arr_length(slns));
+
+ return NULL;
+ }
+
+ i32 i = 0;
+ bh_arr_each(AstPolySolution, sln, slns) {
+ sln->poly_sym = (AstNode *) &ps_type->poly_params[i];
+
+ PolySolutionKind expected_kind = PSK_Undefined;
+ if ((AstNode *) ps_type->poly_params[i].type_node == (AstNode *) &basic_type_type_expr) {
+ expected_kind = PSK_Type;
+ } else {
+ expected_kind = PSK_Value;
+ }
+
+ if (sln->kind != expected_kind) {
+ if (expected_kind == PSK_Type)
+ onyx_report_error(pos, "Expected type expression for %d%s argument.", i + 1, bh_num_suffix(i + 1));
+
+ if (expected_kind == PSK_Value)
+ onyx_report_error(pos, "Expected value expression of type '%s' for %d%s argument.",
+ type_get_name(ps_type->poly_params[i].type),
+ i + 1, bh_num_suffix(i + 1));
+
+ return NULL;
+ }
+
+ if (sln->kind == PSK_Value) {
+ resolve_expression_type(sln->value);
+
+ if ((sln->value->flags & Ast_Flag_Comptime) == 0) {
+ onyx_report_error(pos,
+ "Expected compile-time known argument for '%b'.",
+ sln->poly_sym->token->text,
+ sln->poly_sym->token->length);
+ return NULL;
+ }
+
+ if (!types_are_compatible(sln->value->type, ps_type->poly_params[i].type)) {
+ onyx_report_error(pos, "Expected compile-time argument of type '%s', got '%s'.",
+ type_get_name(ps_type->poly_params[i].type),
+ type_get_name(sln->value->type));
+ return NULL;
+ }
+ }
+
+ i++;
+ }
+
+ char* unique_key = build_poly_slns_unique_key(slns);
+ if (bh_table_has(AstStructType *, ps_type->concrete_structs, unique_key)) {
+ AstStructType* concrete_struct = bh_table_get(AstStructType *, ps_type->concrete_structs, unique_key);
+
+ if (concrete_struct->entity_type->state < Entity_State_Check_Types) {
+ return NULL;
+ }
+
+ Type* cs_type = type_build_from_ast(context.ast_alloc, (AstType *) concrete_struct);
+ if (!cs_type) return NULL;
+
+ if (cs_type->Struct.poly_sln == NULL) cs_type->Struct.poly_sln = bh_arr_copy(global_heap_allocator, slns);
+ if (cs_type->Struct.name == NULL) cs_type->Struct.name = build_poly_struct_name(ps_type, cs_type);
+
+ return cs_type;
+ }
+
+ Scope* sln_scope = scope_create(context.ast_alloc, ps_type->scope, ps_type->token->pos);
+ insert_poly_slns_into_scope(sln_scope, slns);
+
+ AstStructType* concrete_struct = (AstStructType *) ast_clone(context.ast_alloc, ps_type->base_struct);
+ bh_table_put(AstStructType *, ps_type->concrete_structs, unique_key, concrete_struct);
+
+ add_entities_for_node(NULL, (AstNode *) concrete_struct, sln_scope, NULL);
+ return NULL;
+}
}
// Polymorphic procedures are in their own file to clean up this file.
-#include "polymorph.c"
+#include "polymorph.h"
//
// Overloaded Procedures
static void emit_raw_data(OnyxWasmModule* mod, ptr data, AstTyped* node);
static b32 emit_raw_data_(OnyxWasmModule* mod, ptr data, AstTyped* node);
-#include "wasm_intrinsics.c"
-#include "wasm_type_table.c"
+#include "wasm_intrinsics.h"
+#include "wasm_type_table.h"
EMIT_FUNC(function_body, AstFunction* fd) {
if (fd->body == NULL) return;
}
-#include "wasm_output.c"
+#include "wasm_output.h"
+++ /dev/null
-// This file is directly included in src/onxywasm.c
-// It is here purely to decrease the amount of clutter in the main file.
-
-
-// IMPROVE: This implementation assumes that the source and destination buffers do not overlap.
-// The specification for memory.copy in WASM does work even if the buffers overlap.
-// Also, this implementation copies byte-by-byte, which is terrible. It should copy
-// quad word by quad word, and then the additional bytes if the count was not divisible by 8.
-// :32BitPointers
-EMIT_FUNC_NO_ARGS(intrinsic_memory_copy) {
- bh_arr(WasmInstruction) code = *pcode;
-
- // The stack should look like this:
- // <count>
- // <source>
- // <dest>
-
- u64 count_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_INT32);
- u64 source_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR);
- u64 dest_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR);
-
- WIL(WI_LOCAL_SET, count_local);
- WIL(WI_LOCAL_SET, source_local);
- WIL(WI_LOCAL_SET, dest_local);
-
- // count is greater than 0
- WIL(WI_LOCAL_GET, count_local);
- WID(WI_I32_CONST, 0);
- WI(WI_I32_GT_S);
-
- WID(WI_IF_START, 0x40);
- WID(WI_LOOP_START, 0x40);
-
- WIL(WI_LOCAL_GET, count_local);
- WID(WI_I32_CONST, 1);
- WI(WI_I32_SUB);
- WIL(WI_LOCAL_SET, count_local);
-
- WIL(WI_LOCAL_GET, dest_local);
- WIL(WI_LOCAL_GET, count_local);
- WI(WI_PTR_ADD);
-
- WIL(WI_LOCAL_GET, source_local);
- WIL(WI_LOCAL_GET, count_local);
- WI(WI_PTR_ADD);
-
- WID(WI_I32_LOAD_8_U, ((WasmInstructionData) { 0, 0 }));
- WID(WI_I32_STORE_8, ((WasmInstructionData) { 0, 0 }));
-
- WIL(WI_LOCAL_GET, count_local);
- WID(WI_I32_CONST, 0);
- WI(WI_I32_GT_S);
- WID(WI_COND_JUMP, 0x00);
-
- WI(WI_LOOP_END);
- WI(WI_IF_END);
-
- local_raw_free(mod->local_alloc, WASM_TYPE_INT32);
- local_raw_free(mod->local_alloc, WASM_TYPE_PTR);
- local_raw_free(mod->local_alloc, WASM_TYPE_PTR);
-
- *pcode = code;
-}
-
-EMIT_FUNC_NO_ARGS(intrinsic_memory_fill) {
- bh_arr(WasmInstruction) code = *pcode;
-
- // The stack should look like this:
- // <count>
- // <byte>
- // <dest>
-
- u64 count_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_INT32);
- u64 byte_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_INT32);
- u64 dest_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR);
-
- WIL(WI_LOCAL_SET, count_local);
- WIL(WI_LOCAL_SET, byte_local);
- WIL(WI_LOCAL_SET, dest_local);
-
- // count is greater than 0
- WIL(WI_LOCAL_GET, count_local);
- WID(WI_I32_CONST, 0);
- WI(WI_I32_GT_S);
-
- WID(WI_IF_START, 0x40);
- WID(WI_LOOP_START, 0x40);
-
- WIL(WI_LOCAL_GET, count_local);
- WID(WI_I32_CONST, 1);
- WI(WI_I32_SUB);
- WIL(WI_LOCAL_SET, count_local);
-
- WIL(WI_LOCAL_GET, dest_local);
- WIL(WI_LOCAL_GET, count_local);
- WI(WI_PTR_ADD);
-
- WIL(WI_LOCAL_GET, byte_local);
- WID(WI_I32_STORE_8, ((WasmInstructionData) { 0, 0 }));
-
- WIL(WI_LOCAL_GET, count_local);
- WID(WI_I32_CONST, 0);
- WI(WI_I32_GT_S);
- WID(WI_COND_JUMP, 0x00);
-
- WI(WI_LOOP_END);
- WI(WI_IF_END);
-
- local_raw_free(mod->local_alloc, WASM_TYPE_INT32);
- local_raw_free(mod->local_alloc, WASM_TYPE_INT32);
- local_raw_free(mod->local_alloc, WASM_TYPE_PTR);
-
- *pcode = code;
-}
-
-EMIT_FUNC(initialize_type, Type* type, OnyxToken* where) {
- bh_arr(WasmInstruction) code = *pcode;
-
- switch (type->kind) {
- case Type_Kind_Pointer:
- case Type_Kind_Basic: {
- WasmType basic_type = onyx_type_to_wasm_type(type);
- emit_zero_value(mod, &code, basic_type);
- emit_store_instruction(mod, &code, type, 0);
- break;
- }
-
- case Type_Kind_Struct: {
- u64 value_ptr = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR);
- WIL(WI_LOCAL_SET, value_ptr);
-
- bh_arr_each(StructMember *, psmem, type->Struct.memarr) {
- StructMember* smem = *psmem;
- if (smem->initial_value == NULL || *smem->initial_value == NULL) continue;
-
- WIL(WI_LOCAL_GET, value_ptr);
- emit_expression(mod, &code, *smem->initial_value);
- emit_store_instruction(mod, &code, smem->type, smem->offset);
- }
-
- local_raw_free(mod->local_alloc, WASM_TYPE_PTR);
- break;
- }
-
- default:
- onyx_report_error(where->pos,
- "Unable to initialize type, '%s'. The reason for this is largely due to the compiler not knowing what the initial value should be.",
- type_get_name(type));
- break;
- }
-
- *pcode = code;
-}
-
-EMIT_FUNC(intrinsic_atomic_wait, Type* type, OnyxToken* where) {
- if (type->kind != Type_Kind_Basic) goto bad_type;
-
- bh_arr(WasmInstruction) code = *pcode;
-
- switch (type->Basic.kind) {
- case Basic_Kind_I32:
- case Basic_Kind_U32: WID(WI_ATOMIC_WAIT32, ((WasmInstructionData) { 2, 0 })); break;
-
- case Basic_Kind_I64:
- case Basic_Kind_U64: WID(WI_ATOMIC_WAIT64, ((WasmInstructionData) { 3, 0 })); break;
-
- default: goto bad_type;
- }
-
- *pcode = code;
- return;
-
-bad_type:
- onyx_report_error(where->pos, "Bad type for atomic wait, '%s'. Only i32 and i64 are supported.", type_get_name(type));
-}
-
-EMIT_FUNC_NO_ARGS(intrinsic_atomic_notify) {
- bh_arr(WasmInstruction) code = *pcode;
- WID(WI_ATOMIC_NOTIFY, ((WasmInstructionData) { 2, 0 }));
- *pcode = code;
-}
-
-EMIT_FUNC_NO_ARGS(intrinsic_atomic_fence) {
- bh_arr(WasmInstruction) code = *pcode;
- WI(WI_ATOMIC_FENCE);
- *pcode = code;
-}
-
-EMIT_FUNC(intrinsic_atomic_load, Type* type, OnyxToken* where) {
- if (type->kind != Type_Kind_Basic) goto bad_type;
-
- bh_arr(WasmInstruction) code = *pcode;
-
- switch (type->Basic.kind) {
- case Basic_Kind_U8: WID(WI_ATOMIC_I32_LOAD8_U, ((WasmInstructionData) { 0, 0 })); break;
- case Basic_Kind_U16: WID(WI_ATOMIC_I32_LOAD16_U, ((WasmInstructionData) { 1, 0 })); break;
-
- case Basic_Kind_I32:
- case Basic_Kind_U32: WID(WI_ATOMIC_I32_LOAD, ((WasmInstructionData) { 2, 0 })); break;
-
- case Basic_Kind_I64:
- case Basic_Kind_U64: WID(WI_ATOMIC_I64_LOAD, ((WasmInstructionData) { 3, 0 })); break;
-
- default: goto bad_type;
- }
-
- *pcode = code;
- return;
-
-bad_type:
- onyx_report_error(where->pos, "Bad type for atomic load, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
-}
-
-EMIT_FUNC(intrinsic_atomic_store, Type* type, OnyxToken* where) {
- if (type->kind != Type_Kind_Basic) goto bad_type;
-
- bh_arr(WasmInstruction) code = *pcode;
-
- switch (type->Basic.kind) {
- case Basic_Kind_U8: WID(WI_ATOMIC_I32_STORE8, ((WasmInstructionData) { 0, 0 })); break;
- case Basic_Kind_U16: WID(WI_ATOMIC_I32_STORE16, ((WasmInstructionData) { 1, 0 })); break;
-
- case Basic_Kind_I32:
- case Basic_Kind_U32: WID(WI_ATOMIC_I32_STORE, ((WasmInstructionData) { 2, 0 })); break;
-
- case Basic_Kind_I64:
- case Basic_Kind_U64: WID(WI_ATOMIC_I64_STORE, ((WasmInstructionData) { 3, 0 })); break;
-
- default: goto bad_type;
- }
-
- *pcode = code;
- return;
-
-bad_type:
- onyx_report_error(where->pos, "Bad type for atomic store, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
-}
-
-EMIT_FUNC(intrinsic_atomic_add, Type* type, OnyxToken* where) {
- if (type->kind != Type_Kind_Basic) goto bad_type;
-
- bh_arr(WasmInstruction) code = *pcode;
-
- switch (type->Basic.kind) {
- case Basic_Kind_U8: WID(WI_ATOMIC_I32_ADD8_U, ((WasmInstructionData) { 0, 0 })); break;
- case Basic_Kind_U16: WID(WI_ATOMIC_I32_ADD16_U, ((WasmInstructionData) { 1, 0 })); break;
-
- case Basic_Kind_I32:
- case Basic_Kind_U32: WID(WI_ATOMIC_I32_ADD, ((WasmInstructionData) { 2, 0 })); break;
-
- case Basic_Kind_I64:
- case Basic_Kind_U64: WID(WI_ATOMIC_I64_ADD, ((WasmInstructionData) { 3, 0 })); break;
-
- default: goto bad_type;
- }
-
- *pcode = code;
- return;
-
-bad_type:
- onyx_report_error(where->pos, "Bad type for atomic add, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
-}
-
-EMIT_FUNC(intrinsic_atomic_sub, Type* type, OnyxToken* where) {
- if (type->kind != Type_Kind_Basic) goto bad_type;
-
- bh_arr(WasmInstruction) code = *pcode;
-
- switch (type->Basic.kind) {
- case Basic_Kind_U8: WID(WI_ATOMIC_I32_SUB8_U, ((WasmInstructionData) { 0, 0 })); break;
- case Basic_Kind_U16: WID(WI_ATOMIC_I32_SUB16_U, ((WasmInstructionData) { 1, 0 })); break;
-
- case Basic_Kind_I32:
- case Basic_Kind_U32: WID(WI_ATOMIC_I32_SUB, ((WasmInstructionData) { 2, 0 })); break;
-
- case Basic_Kind_I64:
- case Basic_Kind_U64: WID(WI_ATOMIC_I64_SUB, ((WasmInstructionData) { 3, 0 })); break;
-
- default: goto bad_type;
- }
-
- *pcode = code;
- return;
-
-bad_type:
- onyx_report_error(where->pos, "Bad type for atomic sub, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
-}
-
-EMIT_FUNC(intrinsic_atomic_and, Type* type, OnyxToken* where) {
- if (type->kind != Type_Kind_Basic) goto bad_type;
-
- bh_arr(WasmInstruction) code = *pcode;
-
- switch (type->Basic.kind) {
- case Basic_Kind_U8: WID(WI_ATOMIC_I32_AND8_U, ((WasmInstructionData) { 0, 0 })); break;
- case Basic_Kind_U16: WID(WI_ATOMIC_I32_AND16_U, ((WasmInstructionData) { 1, 0 })); break;
-
- case Basic_Kind_I32:
- case Basic_Kind_U32: WID(WI_ATOMIC_I32_AND, ((WasmInstructionData) { 2, 0 })); break;
-
- case Basic_Kind_I64:
- case Basic_Kind_U64: WID(WI_ATOMIC_I64_AND, ((WasmInstructionData) { 3, 0 })); break;
-
- default: goto bad_type;
- }
-
- *pcode = code;
- return;
-
-bad_type:
- onyx_report_error(where->pos, "Bad type for atomic and, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
-}
-
-EMIT_FUNC(intrinsic_atomic_or, Type* type, OnyxToken* where) {
- if (type->kind != Type_Kind_Basic) goto bad_type;
-
- bh_arr(WasmInstruction) code = *pcode;
-
- switch (type->Basic.kind) {
- case Basic_Kind_U8: WID(WI_ATOMIC_I32_OR8_U, ((WasmInstructionData) { 0, 0 })); break;
- case Basic_Kind_U16: WID(WI_ATOMIC_I32_OR16_U, ((WasmInstructionData) { 1, 0 })); break;
-
- case Basic_Kind_I32:
- case Basic_Kind_U32: WID(WI_ATOMIC_I32_OR, ((WasmInstructionData) { 2, 0 })); break;
-
- case Basic_Kind_I64:
- case Basic_Kind_U64: WID(WI_ATOMIC_I64_OR, ((WasmInstructionData) { 3, 0 })); break;
-
- default: goto bad_type;
- }
-
- *pcode = code;
- return;
-
-bad_type:
- onyx_report_error(where->pos, "Bad type for atomic or, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
-}
-
-EMIT_FUNC(intrinsic_atomic_xor, Type* type, OnyxToken* where) {
- if (type->kind != Type_Kind_Basic) goto bad_type;
-
- bh_arr(WasmInstruction) code = *pcode;
-
- switch (type->Basic.kind) {
- case Basic_Kind_U8: WID(WI_ATOMIC_I32_XOR8_U, ((WasmInstructionData) { 0, 0 })); break;
- case Basic_Kind_U16: WID(WI_ATOMIC_I32_XOR16_U, ((WasmInstructionData) { 1, 0 })); break;
-
- case Basic_Kind_I32:
- case Basic_Kind_U32: WID(WI_ATOMIC_I32_XOR, ((WasmInstructionData) { 2, 0 })); break;
-
- case Basic_Kind_I64:
- case Basic_Kind_U64: WID(WI_ATOMIC_I64_XOR, ((WasmInstructionData) { 3, 0 })); break;
-
- default: goto bad_type;
- }
-
- *pcode = code;
- return;
-
-bad_type:
- onyx_report_error(where->pos, "Bad type for atomic xor, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
-}
-
-EMIT_FUNC(intrinsic_atomic_xchg, Type* type, OnyxToken* where) {
- if (type->kind != Type_Kind_Basic) goto bad_type;
-
- bh_arr(WasmInstruction) code = *pcode;
-
- switch (type->Basic.kind) {
- case Basic_Kind_U8: WID(WI_ATOMIC_I32_XCHG8_U, ((WasmInstructionData) { 0, 0 })); break;
- case Basic_Kind_U16: WID(WI_ATOMIC_I32_XCHG16_U, ((WasmInstructionData) { 1, 0 })); break;
-
- case Basic_Kind_I32:
- case Basic_Kind_U32: WID(WI_ATOMIC_I32_XCHG, ((WasmInstructionData) { 2, 0 })); break;
-
- case Basic_Kind_I64:
- case Basic_Kind_U64: WID(WI_ATOMIC_I64_XCHG, ((WasmInstructionData) { 3, 0 })); break;
-
- default: goto bad_type;
- }
-
- *pcode = code;
- return;
-
-bad_type:
- onyx_report_error(where->pos, "Bad type for atomic xchg, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
-}
-
-EMIT_FUNC(intrinsic_atomic_cmpxchg, Type* type, OnyxToken* where) {
- if (type->kind != Type_Kind_Basic) goto bad_type;
-
- bh_arr(WasmInstruction) code = *pcode;
-
- switch (type->Basic.kind) {
- case Basic_Kind_U8: WID(WI_ATOMIC_I32_CMPXCHG8_U, ((WasmInstructionData) { 0, 0 })); break;
- case Basic_Kind_U16: WID(WI_ATOMIC_I32_CMPXCHG16_U, ((WasmInstructionData) { 1, 0 })); break;
-
- case Basic_Kind_I32:
- case Basic_Kind_U32: WID(WI_ATOMIC_I32_CMPXCHG, ((WasmInstructionData) { 2, 0 })); break;
-
- case Basic_Kind_I64:
- case Basic_Kind_U64: WID(WI_ATOMIC_I64_CMPXCHG, ((WasmInstructionData) { 3, 0 })); break;
-
- default: goto bad_type;
- }
-
- *pcode = code;
- return;
-
-bad_type:
- onyx_report_error(where->pos, "Bad type for atomic cmpxchg, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
-}
-
-EMIT_FUNC_NO_ARGS(initialize_data_segments_body) {
- if (!context.options->use_multi_threading || !context.options->use_post_mvp_features) return;
-
- bh_arr(WasmInstruction) code = *pcode;
-
- i32 index = 0;
- bh_arr_each(WasmDatum, datum, mod->data) {
- WID(WI_PTR_CONST, datum->offset);
- WID(WI_PTR_CONST, 0);
- WID(WI_I32_CONST, datum->length);
- WID(WI_MEMORY_INIT, ((WasmInstructionData) { index, 0 }));
-
- index += 1;
- }
-
- *pcode = code;
-}
-
-EMIT_FUNC_NO_ARGS(run_init_procedures) {
- bh_arr(WasmInstruction) code = *pcode;
-
- bh_arr_each(AstFunction *, func, init_procedures) {
- i32 func_idx = (i32) bh_imap_get(&mod->index_map, (u64) *func);
- bh_arr_push(code, ((WasmInstruction){ WI_CALL, func_idx }));
- }
-
- *pcode = code;
-}
--- /dev/null
+// This file is directly included in src/onyxwasm.c
+// It is here purely to decrease the amount of clutter in the main file.
+
+
+// IMPROVE: This implementation assumes that the source and destination buffers do not overlap.
+// The specification for memory.copy in WASM does work even if the buffers overlap.
+// Also, this implementation copies byte-by-byte, which is terrible. It should copy
+// quad word by quad word, and then the additional bytes if the count was not divisible by 8.
+// :32BitPointers
+EMIT_FUNC_NO_ARGS(intrinsic_memory_copy) {
+ bh_arr(WasmInstruction) code = *pcode;
+
+ // The stack should look like this:
+ // <count>
+ // <source>
+ // <dest>
+
+ u64 count_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_INT32);
+ u64 source_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR);
+ u64 dest_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR);
+
+ WIL(WI_LOCAL_SET, count_local);
+ WIL(WI_LOCAL_SET, source_local);
+ WIL(WI_LOCAL_SET, dest_local);
+
+ // count is greater than 0
+ WIL(WI_LOCAL_GET, count_local);
+ WID(WI_I32_CONST, 0);
+ WI(WI_I32_GT_S);
+
+ WID(WI_IF_START, 0x40);
+ WID(WI_LOOP_START, 0x40);
+
+ WIL(WI_LOCAL_GET, count_local);
+ WID(WI_I32_CONST, 1);
+ WI(WI_I32_SUB);
+ WIL(WI_LOCAL_SET, count_local);
+
+ WIL(WI_LOCAL_GET, dest_local);
+ WIL(WI_LOCAL_GET, count_local);
+ WI(WI_PTR_ADD);
+
+ WIL(WI_LOCAL_GET, source_local);
+ WIL(WI_LOCAL_GET, count_local);
+ WI(WI_PTR_ADD);
+
+ WID(WI_I32_LOAD_8_U, ((WasmInstructionData) { 0, 0 }));
+ WID(WI_I32_STORE_8, ((WasmInstructionData) { 0, 0 }));
+
+ WIL(WI_LOCAL_GET, count_local);
+ WID(WI_I32_CONST, 0);
+ WI(WI_I32_GT_S);
+ WID(WI_COND_JUMP, 0x00);
+
+ WI(WI_LOOP_END);
+ WI(WI_IF_END);
+
+ local_raw_free(mod->local_alloc, WASM_TYPE_INT32);
+ local_raw_free(mod->local_alloc, WASM_TYPE_PTR);
+ local_raw_free(mod->local_alloc, WASM_TYPE_PTR);
+
+ *pcode = code;
+}
+
+EMIT_FUNC_NO_ARGS(intrinsic_memory_fill) {
+ bh_arr(WasmInstruction) code = *pcode;
+
+ // The stack should look like this:
+ // <count>
+ // <byte>
+ // <dest>
+
+ u64 count_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_INT32);
+ u64 byte_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_INT32);
+ u64 dest_local = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR);
+
+ WIL(WI_LOCAL_SET, count_local);
+ WIL(WI_LOCAL_SET, byte_local);
+ WIL(WI_LOCAL_SET, dest_local);
+
+ // count is greater than 0
+ WIL(WI_LOCAL_GET, count_local);
+ WID(WI_I32_CONST, 0);
+ WI(WI_I32_GT_S);
+
+ WID(WI_IF_START, 0x40);
+ WID(WI_LOOP_START, 0x40);
+
+ WIL(WI_LOCAL_GET, count_local);
+ WID(WI_I32_CONST, 1);
+ WI(WI_I32_SUB);
+ WIL(WI_LOCAL_SET, count_local);
+
+ WIL(WI_LOCAL_GET, dest_local);
+ WIL(WI_LOCAL_GET, count_local);
+ WI(WI_PTR_ADD);
+
+ WIL(WI_LOCAL_GET, byte_local);
+ WID(WI_I32_STORE_8, ((WasmInstructionData) { 0, 0 }));
+
+ WIL(WI_LOCAL_GET, count_local);
+ WID(WI_I32_CONST, 0);
+ WI(WI_I32_GT_S);
+ WID(WI_COND_JUMP, 0x00);
+
+ WI(WI_LOOP_END);
+ WI(WI_IF_END);
+
+ local_raw_free(mod->local_alloc, WASM_TYPE_INT32);
+ local_raw_free(mod->local_alloc, WASM_TYPE_INT32);
+ local_raw_free(mod->local_alloc, WASM_TYPE_PTR);
+
+ *pcode = code;
+}
+
+EMIT_FUNC(initialize_type, Type* type, OnyxToken* where) {
+ bh_arr(WasmInstruction) code = *pcode;
+
+ switch (type->kind) {
+ case Type_Kind_Pointer:
+ case Type_Kind_Basic: {
+ WasmType basic_type = onyx_type_to_wasm_type(type);
+ emit_zero_value(mod, &code, basic_type);
+ emit_store_instruction(mod, &code, type, 0);
+ break;
+ }
+
+ case Type_Kind_Struct: {
+ u64 value_ptr = local_raw_allocate(mod->local_alloc, WASM_TYPE_PTR);
+ WIL(WI_LOCAL_SET, value_ptr);
+
+ bh_arr_each(StructMember *, psmem, type->Struct.memarr) {
+ StructMember* smem = *psmem;
+ if (smem->initial_value == NULL || *smem->initial_value == NULL) continue;
+
+ WIL(WI_LOCAL_GET, value_ptr);
+ emit_expression(mod, &code, *smem->initial_value);
+ emit_store_instruction(mod, &code, smem->type, smem->offset);
+ }
+
+ local_raw_free(mod->local_alloc, WASM_TYPE_PTR);
+ break;
+ }
+
+ default:
+ onyx_report_error(where->pos,
+ "Unable to initialize type, '%s'. The reason for this is largely due to the compiler not knowing what the initial value should be.",
+ type_get_name(type));
+ break;
+ }
+
+ *pcode = code;
+}
+
+EMIT_FUNC(intrinsic_atomic_wait, Type* type, OnyxToken* where) {
+ if (type->kind != Type_Kind_Basic) goto bad_type;
+
+ bh_arr(WasmInstruction) code = *pcode;
+
+ switch (type->Basic.kind) {
+ case Basic_Kind_I32:
+ case Basic_Kind_U32: WID(WI_ATOMIC_WAIT32, ((WasmInstructionData) { 2, 0 })); break;
+
+ case Basic_Kind_I64:
+ case Basic_Kind_U64: WID(WI_ATOMIC_WAIT64, ((WasmInstructionData) { 3, 0 })); break;
+
+ default: goto bad_type;
+ }
+
+ *pcode = code;
+ return;
+
+bad_type:
+ onyx_report_error(where->pos, "Bad type for atomic wait, '%s'. Only i32 and i64 are supported.", type_get_name(type));
+}
+
+EMIT_FUNC_NO_ARGS(intrinsic_atomic_notify) {
+ bh_arr(WasmInstruction) code = *pcode;
+ WID(WI_ATOMIC_NOTIFY, ((WasmInstructionData) { 2, 0 }));
+ *pcode = code;
+}
+
+EMIT_FUNC_NO_ARGS(intrinsic_atomic_fence) {
+ bh_arr(WasmInstruction) code = *pcode;
+ WI(WI_ATOMIC_FENCE);
+ *pcode = code;
+}
+
+EMIT_FUNC(intrinsic_atomic_load, Type* type, OnyxToken* where) {
+ if (type->kind != Type_Kind_Basic) goto bad_type;
+
+ bh_arr(WasmInstruction) code = *pcode;
+
+ switch (type->Basic.kind) {
+ case Basic_Kind_U8: WID(WI_ATOMIC_I32_LOAD8_U, ((WasmInstructionData) { 0, 0 })); break;
+ case Basic_Kind_U16: WID(WI_ATOMIC_I32_LOAD16_U, ((WasmInstructionData) { 1, 0 })); break;
+
+ case Basic_Kind_I32:
+ case Basic_Kind_U32: WID(WI_ATOMIC_I32_LOAD, ((WasmInstructionData) { 2, 0 })); break;
+
+ case Basic_Kind_I64:
+ case Basic_Kind_U64: WID(WI_ATOMIC_I64_LOAD, ((WasmInstructionData) { 3, 0 })); break;
+
+ default: goto bad_type;
+ }
+
+ *pcode = code;
+ return;
+
+bad_type:
+ onyx_report_error(where->pos, "Bad type for atomic load, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
+}
+
+EMIT_FUNC(intrinsic_atomic_store, Type* type, OnyxToken* where) {
+ if (type->kind != Type_Kind_Basic) goto bad_type;
+
+ bh_arr(WasmInstruction) code = *pcode;
+
+ switch (type->Basic.kind) {
+ case Basic_Kind_U8: WID(WI_ATOMIC_I32_STORE8, ((WasmInstructionData) { 0, 0 })); break;
+ case Basic_Kind_U16: WID(WI_ATOMIC_I32_STORE16, ((WasmInstructionData) { 1, 0 })); break;
+
+ case Basic_Kind_I32:
+ case Basic_Kind_U32: WID(WI_ATOMIC_I32_STORE, ((WasmInstructionData) { 2, 0 })); break;
+
+ case Basic_Kind_I64:
+ case Basic_Kind_U64: WID(WI_ATOMIC_I64_STORE, ((WasmInstructionData) { 3, 0 })); break;
+
+ default: goto bad_type;
+ }
+
+ *pcode = code;
+ return;
+
+bad_type:
+ onyx_report_error(where->pos, "Bad type for atomic store, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
+}
+
+EMIT_FUNC(intrinsic_atomic_add, Type* type, OnyxToken* where) {
+ if (type->kind != Type_Kind_Basic) goto bad_type;
+
+ bh_arr(WasmInstruction) code = *pcode;
+
+ switch (type->Basic.kind) {
+ case Basic_Kind_U8: WID(WI_ATOMIC_I32_ADD8_U, ((WasmInstructionData) { 0, 0 })); break;
+ case Basic_Kind_U16: WID(WI_ATOMIC_I32_ADD16_U, ((WasmInstructionData) { 1, 0 })); break;
+
+ case Basic_Kind_I32:
+ case Basic_Kind_U32: WID(WI_ATOMIC_I32_ADD, ((WasmInstructionData) { 2, 0 })); break;
+
+ case Basic_Kind_I64:
+ case Basic_Kind_U64: WID(WI_ATOMIC_I64_ADD, ((WasmInstructionData) { 3, 0 })); break;
+
+ default: goto bad_type;
+ }
+
+ *pcode = code;
+ return;
+
+bad_type:
+ onyx_report_error(where->pos, "Bad type for atomic add, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
+}
+
+EMIT_FUNC(intrinsic_atomic_sub, Type* type, OnyxToken* where) {
+ if (type->kind != Type_Kind_Basic) goto bad_type;
+
+ bh_arr(WasmInstruction) code = *pcode;
+
+ switch (type->Basic.kind) {
+ case Basic_Kind_U8: WID(WI_ATOMIC_I32_SUB8_U, ((WasmInstructionData) { 0, 0 })); break;
+ case Basic_Kind_U16: WID(WI_ATOMIC_I32_SUB16_U, ((WasmInstructionData) { 1, 0 })); break;
+
+ case Basic_Kind_I32:
+ case Basic_Kind_U32: WID(WI_ATOMIC_I32_SUB, ((WasmInstructionData) { 2, 0 })); break;
+
+ case Basic_Kind_I64:
+ case Basic_Kind_U64: WID(WI_ATOMIC_I64_SUB, ((WasmInstructionData) { 3, 0 })); break;
+
+ default: goto bad_type;
+ }
+
+ *pcode = code;
+ return;
+
+bad_type:
+ onyx_report_error(where->pos, "Bad type for atomic sub, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
+}
+
+EMIT_FUNC(intrinsic_atomic_and, Type* type, OnyxToken* where) {
+ if (type->kind != Type_Kind_Basic) goto bad_type;
+
+ bh_arr(WasmInstruction) code = *pcode;
+
+ switch (type->Basic.kind) {
+ case Basic_Kind_U8: WID(WI_ATOMIC_I32_AND8_U, ((WasmInstructionData) { 0, 0 })); break;
+ case Basic_Kind_U16: WID(WI_ATOMIC_I32_AND16_U, ((WasmInstructionData) { 1, 0 })); break;
+
+ case Basic_Kind_I32:
+ case Basic_Kind_U32: WID(WI_ATOMIC_I32_AND, ((WasmInstructionData) { 2, 0 })); break;
+
+ case Basic_Kind_I64:
+ case Basic_Kind_U64: WID(WI_ATOMIC_I64_AND, ((WasmInstructionData) { 3, 0 })); break;
+
+ default: goto bad_type;
+ }
+
+ *pcode = code;
+ return;
+
+bad_type:
+ onyx_report_error(where->pos, "Bad type for atomic and, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
+}
+
+EMIT_FUNC(intrinsic_atomic_or, Type* type, OnyxToken* where) {
+ if (type->kind != Type_Kind_Basic) goto bad_type;
+
+ bh_arr(WasmInstruction) code = *pcode;
+
+ switch (type->Basic.kind) {
+ case Basic_Kind_U8: WID(WI_ATOMIC_I32_OR8_U, ((WasmInstructionData) { 0, 0 })); break;
+ case Basic_Kind_U16: WID(WI_ATOMIC_I32_OR16_U, ((WasmInstructionData) { 1, 0 })); break;
+
+ case Basic_Kind_I32:
+ case Basic_Kind_U32: WID(WI_ATOMIC_I32_OR, ((WasmInstructionData) { 2, 0 })); break;
+
+ case Basic_Kind_I64:
+ case Basic_Kind_U64: WID(WI_ATOMIC_I64_OR, ((WasmInstructionData) { 3, 0 })); break;
+
+ default: goto bad_type;
+ }
+
+ *pcode = code;
+ return;
+
+bad_type:
+ onyx_report_error(where->pos, "Bad type for atomic or, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
+}
+
+EMIT_FUNC(intrinsic_atomic_xor, Type* type, OnyxToken* where) {
+ if (type->kind != Type_Kind_Basic) goto bad_type;
+
+ bh_arr(WasmInstruction) code = *pcode;
+
+ switch (type->Basic.kind) {
+ case Basic_Kind_U8: WID(WI_ATOMIC_I32_XOR8_U, ((WasmInstructionData) { 0, 0 })); break;
+ case Basic_Kind_U16: WID(WI_ATOMIC_I32_XOR16_U, ((WasmInstructionData) { 1, 0 })); break;
+
+ case Basic_Kind_I32:
+ case Basic_Kind_U32: WID(WI_ATOMIC_I32_XOR, ((WasmInstructionData) { 2, 0 })); break;
+
+ case Basic_Kind_I64:
+ case Basic_Kind_U64: WID(WI_ATOMIC_I64_XOR, ((WasmInstructionData) { 3, 0 })); break;
+
+ default: goto bad_type;
+ }
+
+ *pcode = code;
+ return;
+
+bad_type:
+ onyx_report_error(where->pos, "Bad type for atomic xor, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
+}
+
+EMIT_FUNC(intrinsic_atomic_xchg, Type* type, OnyxToken* where) {
+ if (type->kind != Type_Kind_Basic) goto bad_type;
+
+ bh_arr(WasmInstruction) code = *pcode;
+
+ switch (type->Basic.kind) {
+ case Basic_Kind_U8: WID(WI_ATOMIC_I32_XCHG8_U, ((WasmInstructionData) { 0, 0 })); break;
+ case Basic_Kind_U16: WID(WI_ATOMIC_I32_XCHG16_U, ((WasmInstructionData) { 1, 0 })); break;
+
+ case Basic_Kind_I32:
+ case Basic_Kind_U32: WID(WI_ATOMIC_I32_XCHG, ((WasmInstructionData) { 2, 0 })); break;
+
+ case Basic_Kind_I64:
+ case Basic_Kind_U64: WID(WI_ATOMIC_I64_XCHG, ((WasmInstructionData) { 3, 0 })); break;
+
+ default: goto bad_type;
+ }
+
+ *pcode = code;
+ return;
+
+bad_type:
+ onyx_report_error(where->pos, "Bad type for atomic xchg, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
+}
+
+EMIT_FUNC(intrinsic_atomic_cmpxchg, Type* type, OnyxToken* where) {
+ if (type->kind != Type_Kind_Basic) goto bad_type;
+
+ bh_arr(WasmInstruction) code = *pcode;
+
+ switch (type->Basic.kind) {
+ case Basic_Kind_U8: WID(WI_ATOMIC_I32_CMPXCHG8_U, ((WasmInstructionData) { 0, 0 })); break;
+ case Basic_Kind_U16: WID(WI_ATOMIC_I32_CMPXCHG16_U, ((WasmInstructionData) { 1, 0 })); break;
+
+ case Basic_Kind_I32:
+ case Basic_Kind_U32: WID(WI_ATOMIC_I32_CMPXCHG, ((WasmInstructionData) { 2, 0 })); break;
+
+ case Basic_Kind_I64:
+ case Basic_Kind_U64: WID(WI_ATOMIC_I64_CMPXCHG, ((WasmInstructionData) { 3, 0 })); break;
+
+ default: goto bad_type;
+ }
+
+ *pcode = code;
+ return;
+
+bad_type:
+ onyx_report_error(where->pos, "Bad type for atomic cmpxchg, '%s'. Only u8, u16, u32, i32, u64, and i64 are supported.", type_get_name(type));
+}
+
+EMIT_FUNC_NO_ARGS(initialize_data_segments_body) {
+ if (!context.options->use_multi_threading || !context.options->use_post_mvp_features) return;
+
+ bh_arr(WasmInstruction) code = *pcode;
+
+ i32 index = 0;
+ bh_arr_each(WasmDatum, datum, mod->data) {
+ WID(WI_PTR_CONST, datum->offset);
+ WID(WI_PTR_CONST, 0);
+ WID(WI_I32_CONST, datum->length);
+ WID(WI_MEMORY_INIT, ((WasmInstructionData) { index, 0 }));
+
+ index += 1;
+ }
+
+ *pcode = code;
+}
+
+EMIT_FUNC_NO_ARGS(run_init_procedures) {
+ bh_arr(WasmInstruction) code = *pcode;
+
+ bh_arr_each(AstFunction *, func, init_procedures) {
+ i32 func_idx = (i32) bh_imap_get(&mod->index_map, (u64) *func);
+ bh_arr_push(code, ((WasmInstruction){ WI_CALL, func_idx }));
+ }
+
+ *pcode = code;
+}
+++ /dev/null
-// This file is included in src/onyxwasm.c.
-// It is separated because of its fundamentally different goals.
-
-//-------------------------------------------------
-// BINARY OUTPUT
-//-------------------------------------------------
-
-#define WASM_SECTION_ID_TYPE 1
-#define WASM_SECTION_ID_IMPORT 2
-#define WASM_SECTION_ID_FUNCTION 3
-#define WASM_SECTION_ID_TABLE 4
-#define WASM_SECTION_ID_MEMORY 5
-#define WASM_SECTION_ID_GLOBAL 6
-#define WASM_SECTION_ID_EXPORT 7
-#define WASM_SECTION_ID_START 8
-#define WASM_SECTION_ID_ELEMENT 9
-#define WASM_SECTION_ID_DATACOUNT 12
-#define WASM_SECTION_ID_CODE 10
-#define WASM_SECTION_ID_DATA 11
-
-typedef i32 vector_func(void*, bh_buffer*);
-
-static const u8 WASM_MAGIC_STRING[] = { 0x00, 0x61, 0x73, 0x6D };
-static const u8 WASM_VERSION[] = { 0x01, 0x00, 0x00, 0x00 };
-
-static void output_instruction(WasmFunc* func, WasmInstruction* instr, bh_buffer* buff);
-
-static i32 output_vector(void** arr, i32 stride, i32 arrlen, vector_func elem, bh_buffer* vec_buff) {
- i32 len;
- u8* leb = uint_to_uleb128((u64) arrlen, &len);
- bh_buffer_append(vec_buff, leb, len);
-
- i32 i = 0;
- while (i < arrlen) {
- elem(*arr, vec_buff);
- arr = bh_pointer_add(arr, stride);
- i++;
- }
-
- return vec_buff->length;
-}
-
-static i32 output_name(const char* start, i32 length, bh_buffer* buff) {
- i32 leb_len, prev_len = buff->length;
- u8* leb = uint_to_uleb128((u64) length, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- bh_buffer_append(buff, start, length);
- return buff->length - prev_len;
-}
-
-static i32 output_limits(i32 min, i32 max, b32 shared, bh_buffer* buff) {
- i32 leb_len, prev_len = buff->length;
- u8* leb;
-
- u8 mem_type = 0x00;
- if (max >= 0) mem_type |= 0x01;
- if (shared) mem_type |= 0x02;
-
- bh_buffer_write_byte(buff, mem_type);
-
- leb = uint_to_uleb128((u64) min, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- if (max >= 0) {
- leb = uint_to_uleb128((u64) max, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- }
-
- return buff->length - prev_len;
-}
-
-static i32 output_functype(WasmFuncType* type, bh_buffer* buff) {
- i32 prev_len = buff->length;
-
- bh_buffer_write_byte(buff, 0x60);
-
- i32 len;
- u8* leb_buff = uint_to_uleb128(type->param_count, &len);
- bh_buffer_append(buff, leb_buff, len);
- bh_buffer_append(buff, type->param_types, type->param_count);
-
- if (type->return_type != WASM_TYPE_VOID) {
- bh_buffer_write_byte(buff, 0x01);
- bh_buffer_write_byte(buff, type->return_type);
- } else {
- bh_buffer_write_byte(buff, 0x00);
- }
-
- return buff->length - prev_len;
-}
-
-static i32 output_typesection(OnyxWasmModule* module, bh_buffer* buff) {
- i32 prev_len = buff->length;
- bh_buffer_write_byte(buff, 0x01);
-
- bh_buffer vec_buff;
- bh_buffer_init(&vec_buff, buff->allocator, 128);
-
- i32 vec_len = output_vector(
- (void**) module->types,
- sizeof(WasmFuncType*),
- bh_arr_length(module->types),
- (vector_func *) output_functype,
- &vec_buff);
-
- i32 leb_len;
- u8* leb = uint_to_uleb128((u64) vec_len, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- bh_buffer_concat(buff, vec_buff);
- bh_buffer_free(&vec_buff);
-
- return buff->length - prev_len;
-}
-
-static i32 output_funcsection(OnyxWasmModule* module, bh_buffer* buff) {
- i32 prev_len = buff->length;
- bh_buffer_write_byte(buff, WASM_SECTION_ID_FUNCTION);
-
- bh_buffer vec_buff;
- bh_buffer_init(&vec_buff, buff->allocator, 128);
-
- i32 leb_len;
- u8* leb = uint_to_uleb128((u64) (bh_arr_length(module->funcs)), &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
-
- bh_arr_each(WasmFunc, func, module->funcs) {
- leb = uint_to_uleb128((u64) (func->type_idx), &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
- }
-
- leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- bh_buffer_concat(buff, vec_buff);
- bh_buffer_free(&vec_buff);
-
- return buff->length - prev_len;
-}
-
-static i32 output_tablesection(OnyxWasmModule* module, bh_buffer* buff) {
- if (bh_arr_length(module->elems) == 0) return 0;
-
- i32 prev_len = buff->length;
- bh_buffer_write_byte(buff, WASM_SECTION_ID_TABLE);
-
- bh_buffer vec_buff;
- bh_buffer_init(&vec_buff, buff->allocator, 128);
-
- i32 leb_len;
- u8* leb = uint_to_uleb128((u64) 1, &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
-
- // NOTE: funcrefs are the only valid table element type
- bh_buffer_write_byte(&vec_buff, 0x70);
- output_limits(bh_arr_length(module->elems), -1, 0, &vec_buff);
-
- leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- bh_buffer_concat(buff, vec_buff);
- bh_buffer_free(&vec_buff);
-
- return buff->length - prev_len;
-}
-
-static i32 output_memorysection(OnyxWasmModule* module, bh_buffer* buff) {
- if (context.options->use_multi_threading) return 0;
-
- i32 prev_len = buff->length;
- bh_buffer_write_byte(buff, WASM_SECTION_ID_MEMORY);
-
- bh_buffer vec_buff;
- bh_buffer_init(&vec_buff, buff->allocator, 128);
-
- i32 leb_len;
- u8* leb = uint_to_uleb128((u64) 1, &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
-
- // FIXME: This needs to be dynamically chosen depending on the size of
- // the data section and stack size pre-requeseted.
- // :WasmMemory
- output_limits(1024, -1, 0, &vec_buff);
-
- leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- bh_buffer_concat(buff, vec_buff);
- bh_buffer_free(&vec_buff);
-
- return buff->length - prev_len;
-}
-
-static i32 output_globalsection(OnyxWasmModule* module, bh_buffer* buff) {
- i32 prev_len = buff->length;
- bh_buffer_write_byte(buff, WASM_SECTION_ID_GLOBAL);
-
- bh_buffer vec_buff;
- bh_buffer_init(&vec_buff, buff->allocator, 128);
-
- i32 leb_len;
- u8* leb = uint_to_uleb128((u64) (bh_arr_length(module->globals)), &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
-
- bh_arr_each(WasmGlobal, global, module->globals) {
- bh_buffer_write_byte(&vec_buff, global->type);
- bh_buffer_write_byte(&vec_buff, 0x01);
-
- bh_arr_each(WasmInstruction, instr, global->initial_value)
- output_instruction(NULL, instr, &vec_buff);
-
- // NOTE: Initial value expression terminator
- bh_buffer_write_byte(&vec_buff, (u8) WI_BLOCK_END);
- }
-
- leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- bh_buffer_concat(buff, vec_buff);
- bh_buffer_free(&vec_buff);
-
- return buff->length - prev_len;
-}
-
-static i32 output_importsection(OnyxWasmModule* module, bh_buffer* buff) {
- i32 prev_len = buff->length;
- bh_buffer_write_byte(buff, WASM_SECTION_ID_IMPORT);
-
- bh_buffer vec_buff;
- bh_buffer_init(&vec_buff, buff->allocator, 128);
-
- i32 leb_len;
- u8* leb = uint_to_uleb128((u64) (bh_arr_length(module->imports)), &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
-
- bh_arr_each(WasmImport, import, module->imports) {
- output_name(import->mod, strlen(import->mod), &vec_buff);
- output_name(import->name, strlen(import->name), &vec_buff);
- bh_buffer_write_byte(&vec_buff, (u8) import->kind);
-
- switch (import->kind) {
- case WASM_FOREIGN_FUNCTION:
- leb = uint_to_uleb128((u64) import->idx, &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
- break;
-
- case WASM_FOREIGN_MEMORY:
- output_limits(import->min, import->max, import->shared, &vec_buff);
- break;
-
- case WASM_FOREIGN_TABLE: assert(0);
- }
- }
-
- leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- bh_buffer_concat(buff, vec_buff);
- bh_buffer_free(&vec_buff);
-
- return buff->length - prev_len;
-}
-
-static i32 output_exportsection(OnyxWasmModule* module, bh_buffer* buff) {
- i32 prev_len = buff->length;
- bh_buffer_write_byte(buff, WASM_SECTION_ID_EXPORT);
-
- bh_buffer vec_buff;
- bh_buffer_init(&vec_buff, buff->allocator, 128);
-
- i32 leb_len;
- u8* leb = uint_to_uleb128((u64) (module->export_count), &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
-
- i32 key_len = 0;
- bh_table_each_start(WasmExport, module->exports);
- key_len = strlen(key);
- output_name(key, key_len, &vec_buff);
-
- bh_buffer_write_byte(&vec_buff, (u8) (value.kind));
- leb = uint_to_uleb128((u64) value.idx, &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
- bh_table_each_end;
-
- leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- bh_buffer_concat(buff, vec_buff);
- bh_buffer_free(&vec_buff);
-
- return buff->length - prev_len;
-}
-
-static i32 output_startsection(OnyxWasmModule* module, bh_buffer* buff) {
- i32 prev_len = buff->length;
-
- i32 start_idx = -1;
- bh_table_each_start(WasmExport, module->exports) {
- if (value.kind == WASM_FOREIGN_FUNCTION) {
- if (strncmp("main", key, 5) == 0) {
- start_idx = value.idx;
- break;
- }
- }
- } bh_table_each_end;
-
- if (start_idx != -1) {
- bh_buffer_write_byte(buff, WASM_SECTION_ID_START);
-
- i32 start_leb_len, section_leb_len;
- uint_to_uleb128((u64) start_idx, &start_leb_len);
-        u8* section_leb = uint_to_uleb128((u64) start_leb_len, &section_leb_len);
- bh_buffer_append(buff, section_leb, section_leb_len);
-
- u8* start_leb = uint_to_uleb128((u64) start_idx, &start_leb_len);
- bh_buffer_append(buff, start_leb, start_leb_len);
- }
-
- return buff->length - prev_len;
-}
-
-static i32 output_elemsection(OnyxWasmModule* module, bh_buffer* buff) {
- if (bh_arr_length(module->elems) == 0) return 0;
-
- i32 prev_len = buff->length;
-
- bh_buffer_write_byte(buff, WASM_SECTION_ID_ELEMENT);
-
- bh_buffer vec_buff;
- bh_buffer_init(&vec_buff, buff->allocator, 128);
-
- i32 leb_len;
- u8* leb;
-
- // NOTE: 0x01 count of elems
- bh_buffer_write_byte(&vec_buff, 0x01);
-
- // NOTE: 0x00 table index
- bh_buffer_write_byte(&vec_buff, 0x00);
-
- bh_buffer_write_byte(&vec_buff, WI_I32_CONST);
- bh_buffer_write_byte(&vec_buff, 0x00);
- bh_buffer_write_byte(&vec_buff, WI_BLOCK_END);
-
- leb = uint_to_uleb128((u64) bh_arr_length(module->elems), &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
-
- bh_arr_each(i32, elem, module->elems) {
- leb = uint_to_uleb128((u64) *elem, &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
- }
-
- leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- bh_buffer_concat(buff, vec_buff);
- bh_buffer_free(&vec_buff);
-
- return buff->length - prev_len;
-}
-
-static i32 output_locals(WasmFunc* func, bh_buffer* buff) {
- i32 prev_len = buff->length;
-
- // NOTE: Output vector length
- i32 total_locals =
- (i32) (func->locals.allocated[0] != 0) +
- (i32) (func->locals.allocated[1] != 0) +
- (i32) (func->locals.allocated[2] != 0) +
- (i32) (func->locals.allocated[3] != 0) +
- (i32) (func->locals.allocated[4] != 0);
-
- i32 leb_len;
- u8* leb = uint_to_uleb128((u64) total_locals, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- if (func->locals.allocated[0] != 0) {
- leb = uint_to_uleb128((u64) func->locals.allocated[0], &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- bh_buffer_write_byte(buff, WASM_TYPE_INT32);
- }
- if (func->locals.allocated[1] != 0) {
- leb = uint_to_uleb128((u64) func->locals.allocated[1], &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- bh_buffer_write_byte(buff, WASM_TYPE_INT64);
- }
- if (func->locals.allocated[2] != 0) {
- leb = uint_to_uleb128((u64) func->locals.allocated[2], &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- bh_buffer_write_byte(buff, WASM_TYPE_FLOAT32);
- }
- if (func->locals.allocated[3] != 0) {
- leb = uint_to_uleb128((u64) func->locals.allocated[3], &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- bh_buffer_write_byte(buff, WASM_TYPE_FLOAT64);
- }
- if (func->locals.allocated[4] != 0) {
- leb = uint_to_uleb128((u64) func->locals.allocated[4], &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- bh_buffer_write_byte(buff, WASM_TYPE_VAR128);
- }
-
- return buff->length - prev_len;
-}
-
-static void output_instruction(WasmFunc* func, WasmInstruction* instr, bh_buffer* buff) {
- i32 leb_len;
- u8* leb;
-
- if (instr->type == WI_NOP) return;
- if (instr->type == WI_UNREACHABLE) assert(("EMITTING UNREACHABLE!!", 0));
-
- if (instr->type & SIMD_INSTR_MASK) {
- bh_buffer_write_byte(buff, 0xFD);
- leb = uint_to_uleb128((u64) (instr->type &~ SIMD_INSTR_MASK), &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- } else if (instr->type & EXT_INSTR_MASK) {
- bh_buffer_write_byte(buff, 0xFC);
- leb = uint_to_uleb128((u64) (instr->type &~ EXT_INSTR_MASK), &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- } else if (instr->type & ATOMIC_INSTR_MASK) {
- bh_buffer_write_byte(buff, 0xFE);
- leb = uint_to_uleb128((u64) (instr->type &~ ATOMIC_INSTR_MASK), &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- if (instr->type == WI_ATOMIC_FENCE) {
- bh_buffer_write_byte(buff, 0x00);
-
- } else {
- leb = uint_to_uleb128((u64) instr->data.i1, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- leb = uint_to_uleb128((u64) instr->data.i2, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- }
-
- } else {
- bh_buffer_write_byte(buff, (u8) instr->type);
- }
-
- switch (instr->type) {
- case WI_LOCAL_GET:
- case WI_LOCAL_SET:
- case WI_LOCAL_TEE: {
- u64 actual_idx = local_lookup_idx(&func->locals, instr->data.l);
- leb = uint_to_uleb128(actual_idx, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- break;
- }
-
- case WI_GLOBAL_GET:
- case WI_GLOBAL_SET:
- case WI_CALL:
- case WI_BLOCK_START:
- case WI_LOOP_START:
- case WI_JUMP:
- case WI_COND_JUMP:
- case WI_IF_START:
- case WI_MEMORY_SIZE:
- case WI_MEMORY_GROW:
- case WI_MEMORY_FILL:
- leb = uint_to_uleb128((u64) instr->data.i1, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- break;
-
- case WI_MEMORY_INIT:
- case WI_MEMORY_COPY:
- leb = uint_to_uleb128((u64) instr->data.i1, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- leb = uint_to_uleb128((u64) instr->data.i2, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- break;
-
- case WI_JUMP_TABLE: {
- BranchTable* bt = (BranchTable *) instr->data.p;
-
- leb = uint_to_uleb128((u64) bt->count, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- fori (i, 0, bt->count) {
- leb = uint_to_uleb128((u64) bt->cases[i], &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- }
-
- leb = uint_to_uleb128((u64) bt->default_case, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- break;
- }
-
-
- case WI_CALL_INDIRECT:
- case WI_I32_STORE: case WI_I32_STORE_8: case WI_I32_STORE_16:
- case WI_I64_STORE: case WI_I64_STORE_8: case WI_I64_STORE_16: case WI_I64_STORE_32:
- case WI_F32_STORE: case WI_F64_STORE:
- case WI_V128_STORE:
- case WI_I32_LOAD:
- case WI_I32_LOAD_8_S: case WI_I32_LOAD_8_U:
- case WI_I32_LOAD_16_S: case WI_I32_LOAD_16_U:
- case WI_I64_LOAD:
- case WI_I64_LOAD_8_S: case WI_I64_LOAD_8_U:
- case WI_I64_LOAD_16_S: case WI_I64_LOAD_16_U:
- case WI_I64_LOAD_32_S: case WI_I64_LOAD_32_U:
- case WI_F32_LOAD: case WI_F64_LOAD:
- case WI_V128_LOAD:
- leb = uint_to_uleb128((u64) instr->data.i1, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- leb = uint_to_uleb128((u64) instr->data.i2, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- break;
-
- case WI_I32_CONST:
- leb = int_to_leb128((i64) instr->data.i1, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- break;
- case WI_I64_CONST:
- leb = int_to_leb128((i64) instr->data.l, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
- break;
- case WI_F32_CONST:
- leb = float_to_ieee754(instr->data.f, 0);
- bh_buffer_append(buff, leb, 4);
- break;
- case WI_F64_CONST:
- leb = double_to_ieee754(instr->data.d, 0);
- bh_buffer_append(buff, leb, 8);
- break;
-
- case WI_V128_CONST:
- case WI_I8X16_SHUFFLE:
- fori (i, 0, 16) bh_buffer_write_byte(buff, ((u8*) instr->data.p)[i]);
- break;
-
- case WI_I8X16_EXTRACT_LANE_S: case WI_I8X16_EXTRACT_LANE_U: case WI_I8X16_REPLACE_LANE:
- case WI_I16X8_EXTRACT_LANE_S: case WI_I16X8_EXTRACT_LANE_U: case WI_I16X8_REPLACE_LANE:
- case WI_I32X4_EXTRACT_LANE: case WI_I32X4_REPLACE_LANE:
- case WI_I64X2_EXTRACT_LANE: case WI_I64X2_REPLACE_LANE:
- case WI_F32X4_EXTRACT_LANE: case WI_F32X4_REPLACE_LANE:
- case WI_F64X2_EXTRACT_LANE: case WI_F64X2_REPLACE_LANE:
- bh_buffer_write_byte(buff, (u8) instr->data.i1);
- break;
-
- default: break;
- }
-}
-
-static i32 output_code(WasmFunc* func, bh_buffer* buff) {
-
- bh_buffer code_buff;
- bh_buffer_init(&code_buff, buff->allocator, 128);
-
- // Output locals
- output_locals(func, &code_buff);
-
- assert(func->code);
-
- // Output code
- bh_arr_each(WasmInstruction, instr, func->code) output_instruction(func, instr, &code_buff);
-
- i32 leb_len;
- u8* leb = uint_to_uleb128((u64) code_buff.length, &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- bh_buffer_concat(buff, code_buff);
- bh_buffer_free(&code_buff);
-
- return 0;
-}
-
-static i32 output_codesection(OnyxWasmModule* module, bh_buffer* buff) {
- i32 prev_len = buff->length;
-
- bh_buffer_write_byte(buff, WASM_SECTION_ID_CODE);
-
- bh_buffer vec_buff;
- bh_buffer_init(&vec_buff, buff->allocator, 128);
-
- i32 leb_len;
- u8* leb = uint_to_uleb128((u64) bh_arr_length(module->funcs), &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
-
- // DEBUG_HERE;
-
- bh_arr_each(WasmFunc, func, module->funcs) output_code(func, &vec_buff);
-
- leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- bh_buffer_concat(buff, vec_buff);
- bh_buffer_free(&vec_buff);
-
- return buff->length - prev_len;
-}
-
-static i32 output_datacountsection(OnyxWasmModule* module, bh_buffer* buff) {
- if (!context.options->use_post_mvp_features) return 0;
-
- i32 prev_len = buff->length;
-
- bh_buffer_write_byte(buff, WASM_SECTION_ID_DATACOUNT);
-
- bh_buffer vec_buff;
- bh_buffer_init(&vec_buff, buff->allocator, 128);
-
- i32 leb_len;
- u8* leb = uint_to_uleb128((u64) bh_arr_length(module->data), &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
-
- leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- bh_buffer_concat(buff, vec_buff);
- bh_buffer_free(&vec_buff);
-
- return buff->length - prev_len;
-}
-
-static i32 output_datasection(OnyxWasmModule* module, bh_buffer* buff) {
- i32 prev_len = buff->length;
-
- bh_buffer_write_byte(buff, WASM_SECTION_ID_DATA);
-
- bh_buffer vec_buff;
- bh_buffer_init(&vec_buff, buff->allocator, 128);
-
- i32 leb_len;
- u8* leb = uint_to_uleb128((u64) bh_arr_length(module->data), &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
-
- bh_arr_each(WasmDatum, datum, module->data) {
- assert(datum->data != NULL);
-
- i32 memory_flags = 0x00;
- if (context.options->use_multi_threading) memory_flags |= 0x01;
-
- bh_buffer_write_byte(&vec_buff, memory_flags);
-
- if (!context.options->use_multi_threading) {
- bh_buffer_write_byte(&vec_buff, WI_I32_CONST);
- leb = int_to_leb128((i64) datum->offset, &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
- bh_buffer_write_byte(&vec_buff, WI_BLOCK_END);
- }
-
- leb = uint_to_uleb128((u64) datum->length, &leb_len);
- bh_buffer_append(&vec_buff, leb, leb_len);
- fori (i, 0, datum->length) bh_buffer_write_byte(&vec_buff, ((u8 *) datum->data)[i]);
- }
-
- leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
- bh_buffer_append(buff, leb, leb_len);
-
- bh_buffer_concat(buff, vec_buff);
- bh_buffer_free(&vec_buff);
-
- return buff->length - prev_len;
-}
-
-void onyx_wasm_module_write_to_buffer(OnyxWasmModule* module, bh_buffer* buffer) {
- bh_buffer_init(buffer, global_heap_allocator, 128);
- bh_buffer_append(buffer, WASM_MAGIC_STRING, 4);
- bh_buffer_append(buffer, WASM_VERSION, 4);
-
- output_typesection(module, buffer);
- output_importsection(module, buffer);
- output_funcsection(module, buffer);
- output_tablesection(module, buffer);
- output_memorysection(module, buffer);
- output_globalsection(module, buffer);
- output_exportsection(module, buffer);
- output_startsection(module, buffer);
- output_elemsection(module, buffer);
- output_datacountsection(module, buffer);
- output_codesection(module, buffer);
- output_datasection(module, buffer);
-}
-
-void onyx_wasm_module_write_to_file(OnyxWasmModule* module, bh_file file) {
- bh_buffer master_buffer;
- onyx_wasm_module_write_to_buffer(module, &master_buffer);
-
- bh_file_write(&file, master_buffer.data, master_buffer.length);
-}
--- /dev/null
+// This file is included in src/onyxwasm.c.
+// It is separated because of its fundamentally different goals.
+
+//-------------------------------------------------
+// BINARY OUTPUT
+//-------------------------------------------------
+
+#define WASM_SECTION_ID_TYPE 1
+#define WASM_SECTION_ID_IMPORT 2
+#define WASM_SECTION_ID_FUNCTION 3
+#define WASM_SECTION_ID_TABLE 4
+#define WASM_SECTION_ID_MEMORY 5
+#define WASM_SECTION_ID_GLOBAL 6
+#define WASM_SECTION_ID_EXPORT 7
+#define WASM_SECTION_ID_START 8
+#define WASM_SECTION_ID_ELEMENT 9
+#define WASM_SECTION_ID_DATACOUNT 12
+#define WASM_SECTION_ID_CODE 10
+#define WASM_SECTION_ID_DATA 11
+
+typedef i32 vector_func(void*, bh_buffer*);
+
+static const u8 WASM_MAGIC_STRING[] = { 0x00, 0x61, 0x73, 0x6D };
+static const u8 WASM_VERSION[] = { 0x01, 0x00, 0x00, 0x00 };
+
+static void output_instruction(WasmFunc* func, WasmInstruction* instr, bh_buffer* buff);
+
+static i32 output_vector(void** arr, i32 stride, i32 arrlen, vector_func elem, bh_buffer* vec_buff) {
+ i32 len;
+ u8* leb = uint_to_uleb128((u64) arrlen, &len);
+ bh_buffer_append(vec_buff, leb, len);
+
+ i32 i = 0;
+ while (i < arrlen) {
+ elem(*arr, vec_buff);
+ arr = bh_pointer_add(arr, stride);
+ i++;
+ }
+
+ return vec_buff->length;
+}
+
+static i32 output_name(const char* start, i32 length, bh_buffer* buff) {
+ i32 leb_len, prev_len = buff->length;
+ u8* leb = uint_to_uleb128((u64) length, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ bh_buffer_append(buff, start, length);
+ return buff->length - prev_len;
+}
+
+static i32 output_limits(i32 min, i32 max, b32 shared, bh_buffer* buff) {
+ i32 leb_len, prev_len = buff->length;
+ u8* leb;
+
+ u8 mem_type = 0x00;
+ if (max >= 0) mem_type |= 0x01;
+ if (shared) mem_type |= 0x02;
+
+ bh_buffer_write_byte(buff, mem_type);
+
+ leb = uint_to_uleb128((u64) min, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ if (max >= 0) {
+ leb = uint_to_uleb128((u64) max, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ }
+
+ return buff->length - prev_len;
+}
+
+static i32 output_functype(WasmFuncType* type, bh_buffer* buff) {
+ i32 prev_len = buff->length;
+
+ bh_buffer_write_byte(buff, 0x60);
+
+ i32 len;
+ u8* leb_buff = uint_to_uleb128(type->param_count, &len);
+ bh_buffer_append(buff, leb_buff, len);
+ bh_buffer_append(buff, type->param_types, type->param_count);
+
+ if (type->return_type != WASM_TYPE_VOID) {
+ bh_buffer_write_byte(buff, 0x01);
+ bh_buffer_write_byte(buff, type->return_type);
+ } else {
+ bh_buffer_write_byte(buff, 0x00);
+ }
+
+ return buff->length - prev_len;
+}
+
+static i32 output_typesection(OnyxWasmModule* module, bh_buffer* buff) {
+ i32 prev_len = buff->length;
+ bh_buffer_write_byte(buff, 0x01);
+
+ bh_buffer vec_buff;
+ bh_buffer_init(&vec_buff, buff->allocator, 128);
+
+ i32 vec_len = output_vector(
+ (void**) module->types,
+ sizeof(WasmFuncType*),
+ bh_arr_length(module->types),
+ (vector_func *) output_functype,
+ &vec_buff);
+
+ i32 leb_len;
+ u8* leb = uint_to_uleb128((u64) vec_len, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ bh_buffer_concat(buff, vec_buff);
+ bh_buffer_free(&vec_buff);
+
+ return buff->length - prev_len;
+}
+
+static i32 output_funcsection(OnyxWasmModule* module, bh_buffer* buff) {
+ i32 prev_len = buff->length;
+ bh_buffer_write_byte(buff, WASM_SECTION_ID_FUNCTION);
+
+ bh_buffer vec_buff;
+ bh_buffer_init(&vec_buff, buff->allocator, 128);
+
+ i32 leb_len;
+ u8* leb = uint_to_uleb128((u64) (bh_arr_length(module->funcs)), &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+
+ bh_arr_each(WasmFunc, func, module->funcs) {
+ leb = uint_to_uleb128((u64) (func->type_idx), &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+ }
+
+ leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ bh_buffer_concat(buff, vec_buff);
+ bh_buffer_free(&vec_buff);
+
+ return buff->length - prev_len;
+}
+
+static i32 output_tablesection(OnyxWasmModule* module, bh_buffer* buff) {
+ if (bh_arr_length(module->elems) == 0) return 0;
+
+ i32 prev_len = buff->length;
+ bh_buffer_write_byte(buff, WASM_SECTION_ID_TABLE);
+
+ bh_buffer vec_buff;
+ bh_buffer_init(&vec_buff, buff->allocator, 128);
+
+ i32 leb_len;
+ u8* leb = uint_to_uleb128((u64) 1, &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+
+ // NOTE: funcrefs are the only valid table element type
+ bh_buffer_write_byte(&vec_buff, 0x70);
+ output_limits(bh_arr_length(module->elems), -1, 0, &vec_buff);
+
+ leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ bh_buffer_concat(buff, vec_buff);
+ bh_buffer_free(&vec_buff);
+
+ return buff->length - prev_len;
+}
+
+static i32 output_memorysection(OnyxWasmModule* module, bh_buffer* buff) {
+ if (context.options->use_multi_threading) return 0;
+
+ i32 prev_len = buff->length;
+ bh_buffer_write_byte(buff, WASM_SECTION_ID_MEMORY);
+
+ bh_buffer vec_buff;
+ bh_buffer_init(&vec_buff, buff->allocator, 128);
+
+ i32 leb_len;
+ u8* leb = uint_to_uleb128((u64) 1, &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+
+ // FIXME: This needs to be dynamically chosen depending on the size of
+ // the data section and stack size pre-requeseted.
+ // :WasmMemory
+ output_limits(1024, -1, 0, &vec_buff);
+
+ leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ bh_buffer_concat(buff, vec_buff);
+ bh_buffer_free(&vec_buff);
+
+ return buff->length - prev_len;
+}
+
+static i32 output_globalsection(OnyxWasmModule* module, bh_buffer* buff) {
+ i32 prev_len = buff->length;
+ bh_buffer_write_byte(buff, WASM_SECTION_ID_GLOBAL);
+
+ bh_buffer vec_buff;
+ bh_buffer_init(&vec_buff, buff->allocator, 128);
+
+ i32 leb_len;
+ u8* leb = uint_to_uleb128((u64) (bh_arr_length(module->globals)), &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+
+ bh_arr_each(WasmGlobal, global, module->globals) {
+ bh_buffer_write_byte(&vec_buff, global->type);
+ bh_buffer_write_byte(&vec_buff, 0x01);
+
+ bh_arr_each(WasmInstruction, instr, global->initial_value)
+ output_instruction(NULL, instr, &vec_buff);
+
+ // NOTE: Initial value expression terminator
+ bh_buffer_write_byte(&vec_buff, (u8) WI_BLOCK_END);
+ }
+
+ leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ bh_buffer_concat(buff, vec_buff);
+ bh_buffer_free(&vec_buff);
+
+ return buff->length - prev_len;
+}
+
+static i32 output_importsection(OnyxWasmModule* module, bh_buffer* buff) {
+ i32 prev_len = buff->length;
+ bh_buffer_write_byte(buff, WASM_SECTION_ID_IMPORT);
+
+ bh_buffer vec_buff;
+ bh_buffer_init(&vec_buff, buff->allocator, 128);
+
+ i32 leb_len;
+ u8* leb = uint_to_uleb128((u64) (bh_arr_length(module->imports)), &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+
+ bh_arr_each(WasmImport, import, module->imports) {
+ output_name(import->mod, strlen(import->mod), &vec_buff);
+ output_name(import->name, strlen(import->name), &vec_buff);
+ bh_buffer_write_byte(&vec_buff, (u8) import->kind);
+
+ switch (import->kind) {
+ case WASM_FOREIGN_FUNCTION:
+ leb = uint_to_uleb128((u64) import->idx, &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+ break;
+
+ case WASM_FOREIGN_MEMORY:
+ output_limits(import->min, import->max, import->shared, &vec_buff);
+ break;
+
+ case WASM_FOREIGN_TABLE: assert(0);
+ }
+ }
+
+ leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ bh_buffer_concat(buff, vec_buff);
+ bh_buffer_free(&vec_buff);
+
+ return buff->length - prev_len;
+}
+
+static i32 output_exportsection(OnyxWasmModule* module, bh_buffer* buff) {
+ i32 prev_len = buff->length;
+ bh_buffer_write_byte(buff, WASM_SECTION_ID_EXPORT);
+
+ bh_buffer vec_buff;
+ bh_buffer_init(&vec_buff, buff->allocator, 128);
+
+ i32 leb_len;
+ u8* leb = uint_to_uleb128((u64) (module->export_count), &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+
+ i32 key_len = 0;
+ bh_table_each_start(WasmExport, module->exports);
+ key_len = strlen(key);
+ output_name(key, key_len, &vec_buff);
+
+ bh_buffer_write_byte(&vec_buff, (u8) (value.kind));
+ leb = uint_to_uleb128((u64) value.idx, &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+ bh_table_each_end;
+
+ leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ bh_buffer_concat(buff, vec_buff);
+ bh_buffer_free(&vec_buff);
+
+ return buff->length - prev_len;
+}
+
+static i32 output_startsection(OnyxWasmModule* module, bh_buffer* buff) {
+ i32 prev_len = buff->length;
+
+ i32 start_idx = -1;
+ bh_table_each_start(WasmExport, module->exports) {
+ if (value.kind == WASM_FOREIGN_FUNCTION) {
+ if (strncmp("main", key, 5) == 0) {
+ start_idx = value.idx;
+ break;
+ }
+ }
+ } bh_table_each_end;
+
+ if (start_idx != -1) {
+ bh_buffer_write_byte(buff, WASM_SECTION_ID_START);
+
+ i32 start_leb_len, section_leb_len;
+ uint_to_uleb128((u64) start_idx, &start_leb_len);
+        u8* section_leb = uint_to_uleb128((u64) start_leb_len, &section_leb_len);
+ bh_buffer_append(buff, section_leb, section_leb_len);
+
+ u8* start_leb = uint_to_uleb128((u64) start_idx, &start_leb_len);
+ bh_buffer_append(buff, start_leb, start_leb_len);
+ }
+
+ return buff->length - prev_len;
+}
+
+static i32 output_elemsection(OnyxWasmModule* module, bh_buffer* buff) {
+ if (bh_arr_length(module->elems) == 0) return 0;
+
+ i32 prev_len = buff->length;
+
+ bh_buffer_write_byte(buff, WASM_SECTION_ID_ELEMENT);
+
+ bh_buffer vec_buff;
+ bh_buffer_init(&vec_buff, buff->allocator, 128);
+
+ i32 leb_len;
+ u8* leb;
+
+ // NOTE: 0x01 count of elems
+ bh_buffer_write_byte(&vec_buff, 0x01);
+
+ // NOTE: 0x00 table index
+ bh_buffer_write_byte(&vec_buff, 0x00);
+
+ bh_buffer_write_byte(&vec_buff, WI_I32_CONST);
+ bh_buffer_write_byte(&vec_buff, 0x00);
+ bh_buffer_write_byte(&vec_buff, WI_BLOCK_END);
+
+ leb = uint_to_uleb128((u64) bh_arr_length(module->elems), &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+
+ bh_arr_each(i32, elem, module->elems) {
+ leb = uint_to_uleb128((u64) *elem, &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+ }
+
+ leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ bh_buffer_concat(buff, vec_buff);
+ bh_buffer_free(&vec_buff);
+
+ return buff->length - prev_len;
+}
+
+// Emits the "locals" declaration vector that prefixes a WASM function body:
+// a ULEB128 group count followed by one (count, valtype) pair per value type
+// that has at least one local. The five slots of func->locals.allocated hold
+// per-type local counts in the fixed order i32, i64, f32, f64, v128 (matching
+// the WASM_TYPE_* bytes written below). Returns the number of bytes written.
+static i32 output_locals(WasmFunc* func, bh_buffer* buff) {
+ i32 prev_len = buff->length;
+
+ // NOTE: Output vector length
+ // A type with zero locals contributes no group, so the group count is the
+ // number of non-empty slots.
+ i32 total_locals =
+ (i32) (func->locals.allocated[0] != 0) +
+ (i32) (func->locals.allocated[1] != 0) +
+ (i32) (func->locals.allocated[2] != 0) +
+ (i32) (func->locals.allocated[3] != 0) +
+ (i32) (func->locals.allocated[4] != 0);
+
+ i32 leb_len;
+ u8* leb = uint_to_uleb128((u64) total_locals, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ // One (count, valtype) group per non-empty slot, in slot order.
+ if (func->locals.allocated[0] != 0) {
+ leb = uint_to_uleb128((u64) func->locals.allocated[0], &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ bh_buffer_write_byte(buff, WASM_TYPE_INT32);
+ }
+ if (func->locals.allocated[1] != 0) {
+ leb = uint_to_uleb128((u64) func->locals.allocated[1], &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ bh_buffer_write_byte(buff, WASM_TYPE_INT64);
+ }
+ if (func->locals.allocated[2] != 0) {
+ leb = uint_to_uleb128((u64) func->locals.allocated[2], &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ bh_buffer_write_byte(buff, WASM_TYPE_FLOAT32);
+ }
+ if (func->locals.allocated[3] != 0) {
+ leb = uint_to_uleb128((u64) func->locals.allocated[3], &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ bh_buffer_write_byte(buff, WASM_TYPE_FLOAT64);
+ }
+ if (func->locals.allocated[4] != 0) {
+ leb = uint_to_uleb128((u64) func->locals.allocated[4], &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ bh_buffer_write_byte(buff, WASM_TYPE_VAR128);
+ }
+
+ return buff->length - prev_len;
+}
+
+// Encodes a single IR instruction into its WASM binary form: opcode (with a
+// prefix byte for the SIMD/extended/atomic opcode spaces), then any immediate
+// operands selected by the switch below. WI_NOP is elided entirely; emitting
+// WI_UNREACHABLE is treated as a compiler bug and asserts.
+static void output_instruction(WasmFunc* func, WasmInstruction* instr, bh_buffer* buff) {
+ i32 leb_len;
+ u8* leb;
+
+ if (instr->type == WI_NOP) return;
+ // The (msg, 0) comma expression makes the message show up in the assert text.
+ if (instr->type == WI_UNREACHABLE) assert(("EMITTING UNREACHABLE!!", 0));
+
+ // Multi-byte opcode spaces: the mask bit selects the prefix byte
+ // (0xFD = SIMD, 0xFC = extended/"misc", 0xFE = atomics) and the remaining
+ // bits are the sub-opcode, emitted as a ULEB128.
+ if (instr->type & SIMD_INSTR_MASK) {
+ bh_buffer_write_byte(buff, 0xFD);
+ leb = uint_to_uleb128((u64) (instr->type &~ SIMD_INSTR_MASK), &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ } else if (instr->type & EXT_INSTR_MASK) {
+ bh_buffer_write_byte(buff, 0xFC);
+ leb = uint_to_uleb128((u64) (instr->type &~ EXT_INSTR_MASK), &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ } else if (instr->type & ATOMIC_INSTR_MASK) {
+ bh_buffer_write_byte(buff, 0xFE);
+ leb = uint_to_uleb128((u64) (instr->type &~ ATOMIC_INSTR_MASK), &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ // Atomic ops carry their immediates here instead of in the switch below.
+ // atomic.fence takes a single reserved 0x00 byte; the rest take two
+ // ULEB128s from data.i1/i2 (presumably the memarg align/offset pair —
+ // TODO confirm against the threads proposal).
+ if (instr->type == WI_ATOMIC_FENCE) {
+ bh_buffer_write_byte(buff, 0x00);
+
+ } else {
+ leb = uint_to_uleb128((u64) instr->data.i1, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ leb = uint_to_uleb128((u64) instr->data.i2, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ }
+
+ } else {
+ // Single-byte core opcode.
+ bh_buffer_write_byte(buff, (u8) instr->type);
+ }
+
+ // Immediate operands, grouped by opcode class.
+ switch (instr->type) {
+ case WI_LOCAL_GET:
+ case WI_LOCAL_SET:
+ case WI_LOCAL_TEE: {
+ // IR local references are remapped to their final index here.
+ u64 actual_idx = local_lookup_idx(&func->locals, instr->data.l);
+ leb = uint_to_uleb128(actual_idx, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ break;
+ }
+
+ // Single unsigned-LEB immediate (index, label depth, or block type).
+ case WI_GLOBAL_GET:
+ case WI_GLOBAL_SET:
+ case WI_CALL:
+ case WI_BLOCK_START:
+ case WI_LOOP_START:
+ case WI_JUMP:
+ case WI_COND_JUMP:
+ case WI_IF_START:
+ case WI_MEMORY_SIZE:
+ case WI_MEMORY_GROW:
+ case WI_MEMORY_FILL:
+ leb = uint_to_uleb128((u64) instr->data.i1, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ break;
+
+ // Two unsigned-LEB immediates (e.g. segment index + memory index).
+ case WI_MEMORY_INIT:
+ case WI_MEMORY_COPY:
+ leb = uint_to_uleb128((u64) instr->data.i1, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ leb = uint_to_uleb128((u64) instr->data.i2, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ break;
+
+ case WI_JUMP_TABLE: {
+ // br_table: target count, each target label, then the default label.
+ BranchTable* bt = (BranchTable *) instr->data.p;
+
+ leb = uint_to_uleb128((u64) bt->count, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ fori (i, 0, bt->count) {
+ leb = uint_to_uleb128((u64) bt->cases[i], &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ }
+
+ leb = uint_to_uleb128((u64) bt->default_case, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ break;
+ }
+
+
+ // Two ULEB128 immediates from data.i1/i2 — presumably the memarg
+ // align/offset pair for loads/stores (call_indirect reuses the same
+ // two-immediate shape: type index + table index) — TODO confirm.
+ case WI_CALL_INDIRECT:
+ case WI_I32_STORE: case WI_I32_STORE_8: case WI_I32_STORE_16:
+ case WI_I64_STORE: case WI_I64_STORE_8: case WI_I64_STORE_16: case WI_I64_STORE_32:
+ case WI_F32_STORE: case WI_F64_STORE:
+ case WI_V128_STORE:
+ case WI_I32_LOAD:
+ case WI_I32_LOAD_8_S: case WI_I32_LOAD_8_U:
+ case WI_I32_LOAD_16_S: case WI_I32_LOAD_16_U:
+ case WI_I64_LOAD:
+ case WI_I64_LOAD_8_S: case WI_I64_LOAD_8_U:
+ case WI_I64_LOAD_16_S: case WI_I64_LOAD_16_U:
+ case WI_I64_LOAD_32_S: case WI_I64_LOAD_32_U:
+ case WI_F32_LOAD: case WI_F64_LOAD:
+ case WI_V128_LOAD:
+ leb = uint_to_uleb128((u64) instr->data.i1, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ leb = uint_to_uleb128((u64) instr->data.i2, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ break;
+
+ // Constants: integer immediates use *signed* LEB128; floats are raw
+ // 4-/8-byte IEEE-754 images.
+ case WI_I32_CONST:
+ leb = int_to_leb128((i64) instr->data.i1, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ break;
+ case WI_I64_CONST:
+ leb = int_to_leb128((i64) instr->data.l, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+ break;
+ case WI_F32_CONST:
+ leb = float_to_ieee754(instr->data.f, 0);
+ bh_buffer_append(buff, leb, 4);
+ break;
+ case WI_F64_CONST:
+ leb = double_to_ieee754(instr->data.d, 0);
+ bh_buffer_append(buff, leb, 8);
+ break;
+
+ // 16 raw immediate bytes (v128 literal / shuffle lane selector).
+ case WI_V128_CONST:
+ case WI_I8X16_SHUFFLE:
+ fori (i, 0, 16) bh_buffer_write_byte(buff, ((u8*) instr->data.p)[i]);
+ break;
+
+ // Lane ops take a single lane-index byte.
+ case WI_I8X16_EXTRACT_LANE_S: case WI_I8X16_EXTRACT_LANE_U: case WI_I8X16_REPLACE_LANE:
+ case WI_I16X8_EXTRACT_LANE_S: case WI_I16X8_EXTRACT_LANE_U: case WI_I16X8_REPLACE_LANE:
+ case WI_I32X4_EXTRACT_LANE: case WI_I32X4_REPLACE_LANE:
+ case WI_I64X2_EXTRACT_LANE: case WI_I64X2_REPLACE_LANE:
+ case WI_F32X4_EXTRACT_LANE: case WI_F32X4_REPLACE_LANE:
+ case WI_F64X2_EXTRACT_LANE: case WI_F64X2_REPLACE_LANE:
+ bh_buffer_write_byte(buff, (u8) instr->data.i1);
+ break;
+
+ // Everything else has no immediates.
+ default: break;
+ }
+}
+
+// Emits one code-section entry for `func`. The body (locals vector followed
+// by the instruction stream) is built in a temporary buffer first so its
+// total byte size can be written as the required ULEB128 size prefix.
+// NOTE(review): unlike the sibling output_* routines this always returns 0
+// rather than the number of bytes written; the only caller in view ignores
+// the result, but the inconsistency is worth tidying eventually.
+static i32 output_code(WasmFunc* func, bh_buffer* buff) {
+
+ bh_buffer code_buff;
+ bh_buffer_init(&code_buff, buff->allocator, 128);
+
+ // Output locals
+ output_locals(func, &code_buff);
+
+ assert(func->code);
+
+ // Output code
+ bh_arr_each(WasmInstruction, instr, func->code) output_instruction(func, instr, &code_buff);
+
+ // Size prefix, then the body itself.
+ i32 leb_len;
+ u8* leb = uint_to_uleb128((u64) code_buff.length, &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ bh_buffer_concat(buff, code_buff);
+ bh_buffer_free(&code_buff);
+
+ return 0;
+}
+
+// Emits the code section: section id, ULEB128 content size, then one
+// size-prefixed body per function. The content is staged in `vec_buff` so
+// the section size can be computed before it is appended.
+// Returns the number of bytes written to `buff`.
+static i32 output_codesection(OnyxWasmModule* module, bh_buffer* buff) {
+ i32 prev_len = buff->length;
+
+ bh_buffer_write_byte(buff, WASM_SECTION_ID_CODE);
+
+ bh_buffer vec_buff;
+ bh_buffer_init(&vec_buff, buff->allocator, 128);
+
+ // Vector count: one entry per defined (non-imported) function.
+ i32 leb_len;
+ u8* leb = uint_to_uleb128((u64) bh_arr_length(module->funcs), &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+
+ // DEBUG_HERE;
+
+ bh_arr_each(WasmFunc, func, module->funcs) output_code(func, &vec_buff);
+
+ // Section size prefix, then the staged contents.
+ leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ bh_buffer_concat(buff, vec_buff);
+ bh_buffer_free(&vec_buff);
+
+ return buff->length - prev_len;
+}
+
+// Emits the data-count section (the number of data segments). Skipped
+// entirely (returns 0 bytes written) unless post-MVP features are enabled —
+// presumably because this section only exists for the bulk-memory proposal
+// and older engines reject unknown section ids; confirm against the spec.
+static i32 output_datacountsection(OnyxWasmModule* module, bh_buffer* buff) {
+ if (!context.options->use_post_mvp_features) return 0;
+
+ i32 prev_len = buff->length;
+
+ bh_buffer_write_byte(buff, WASM_SECTION_ID_DATACOUNT);
+
+ bh_buffer vec_buff;
+ bh_buffer_init(&vec_buff, buff->allocator, 128);
+
+ i32 leb_len;
+ u8* leb = uint_to_uleb128((u64) bh_arr_length(module->data), &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+
+ // Section size prefix, then the single count value.
+ leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ bh_buffer_concat(buff, vec_buff);
+ bh_buffer_free(&vec_buff);
+
+ return buff->length - prev_len;
+}
+
+// Emits the data section: one segment per WasmDatum. In single-threaded
+// builds each segment is active (flags 0x00) with an `i32.const offset`
+// initializer expression; with multi-threading the 0x01 flag is set and the
+// initializer is omitted — presumably making the segment passive so it can
+// be placed at runtime instead (TODO confirm against the bulk-memory /
+// threads proposals). Returns the number of bytes written to `buff`.
+static i32 output_datasection(OnyxWasmModule* module, bh_buffer* buff) {
+ i32 prev_len = buff->length;
+
+ bh_buffer_write_byte(buff, WASM_SECTION_ID_DATA);
+
+ bh_buffer vec_buff;
+ bh_buffer_init(&vec_buff, buff->allocator, 128);
+
+ // Segment count.
+ i32 leb_len;
+ u8* leb = uint_to_uleb128((u64) bh_arr_length(module->data), &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+
+ bh_arr_each(WasmDatum, datum, module->data) {
+ assert(datum->data != NULL);
+
+ i32 memory_flags = 0x00;
+ if (context.options->use_multi_threading) memory_flags |= 0x01;
+
+ bh_buffer_write_byte(&vec_buff, memory_flags);
+
+ // Active segments carry a constant-expression offset: i32.const <offset> end.
+ if (!context.options->use_multi_threading) {
+ bh_buffer_write_byte(&vec_buff, WI_I32_CONST);
+ leb = int_to_leb128((i64) datum->offset, &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+ bh_buffer_write_byte(&vec_buff, WI_BLOCK_END);
+ }
+
+ // Payload: byte length then the raw bytes.
+ leb = uint_to_uleb128((u64) datum->length, &leb_len);
+ bh_buffer_append(&vec_buff, leb, leb_len);
+ fori (i, 0, datum->length) bh_buffer_write_byte(&vec_buff, ((u8 *) datum->data)[i]);
+ }
+
+ // Section size prefix, then the staged contents.
+ leb = uint_to_uleb128((u64) (vec_buff.length), &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ bh_buffer_concat(buff, vec_buff);
+ bh_buffer_free(&vec_buff);
+
+ return buff->length - prev_len;
+}
+
+// Serializes the whole module into `buffer` (initialized here on the global
+// heap allocator; the caller owns and must free it). Writes the 4-byte magic
+// and version, then each section in the order the binary format requires —
+// note data-count is emitted before code so bulk-memory instructions can be
+// validated, and data comes last.
+void onyx_wasm_module_write_to_buffer(OnyxWasmModule* module, bh_buffer* buffer) {
+ bh_buffer_init(buffer, global_heap_allocator, 128);
+ bh_buffer_append(buffer, WASM_MAGIC_STRING, 4);
+ bh_buffer_append(buffer, WASM_VERSION, 4);
+
+ output_typesection(module, buffer);
+ output_importsection(module, buffer);
+ output_funcsection(module, buffer);
+ output_tablesection(module, buffer);
+ output_memorysection(module, buffer);
+ output_globalsection(module, buffer);
+ output_exportsection(module, buffer);
+ output_startsection(module, buffer);
+ output_elemsection(module, buffer);
+ output_datacountsection(module, buffer);
+ output_codesection(module, buffer);
+ output_datasection(module, buffer);
+}
+
+// Serializes the module and writes the resulting bytes to `file`.
+// NOTE(review): master_buffer is never freed (acceptable only if the process
+// exits right after), and the bh_file_write result is not checked, so a
+// short/failed write would go unnoticed — worth hardening.
+void onyx_wasm_module_write_to_file(OnyxWasmModule* module, bh_file file) {
+ bh_buffer master_buffer;
+ onyx_wasm_module_write_to_buffer(module, &master_buffer);
+
+ bh_file_write(&file, master_buffer.data, master_buffer.length);
+}
+++ /dev/null
-// This file is directly included in src/onxywasm.c
-// It is here purely to decrease the amount of clutter in the main file.
-
-
-// Builds the binary blob backing the `type_table` slice that user code sees
-// through type_info.onyx. For every registered type in `type_map`, a record
-// (kind, size, alignment, kind-specific fields) is written into table_buffer,
-// and table_info[type_idx] remembers the record's buffer-relative offset.
-// Buffer-relative u64 pointers embedded in records are logged via PATCH and
-// rebased to absolute data-segment addresses once the final placement is
-// known. Returns the address of a 16-byte (table pointer, type count) pair —
-// presumably the slice global the runtime reads; confirm in type_info.onyx.
-u64 build_type_table(OnyxWasmModule* module) {
-
- bh_arr(u32) base_patch_locations=NULL;
- bh_arr_new(global_heap_allocator, base_patch_locations, 256);
-
-// Records the current write position as holding a buffer-relative u64 that
-// must later be rebased by the data-segment offset.
-#define PATCH (bh_arr_push(base_patch_locations, table_buffer.length))
-
- // This is the data behind the "type_table" slice in type_info.onyx
- u32 type_count = bh_arr_length(type_map.entries) + 1;
- u64* table_info = bh_alloc_array(global_heap_allocator, u64, type_count); // HACK
- memset(table_info, 0, type_count * sizeof(u64));
-
- bh_buffer table_buffer;
- bh_buffer_init(&table_buffer, global_heap_allocator, 4096);
-
- // Write a "NULL" at the beginning so nothing will have to point to the first byte of the buffer.
- bh_buffer_write_u64(&table_buffer, 0);
-
- // Every record begins with the common (kind, size, alignment) triple;
- // variable-length payloads (names, member lists) are written *before*
- // the record header so the header can point back at them.
- bh_arr_each(bh__imap_entry, type_entry, type_map.entries) {
- u64 type_idx = type_entry->key;
- Type* type = (Type *) type_entry->value;
-
- switch (type->kind) {
- case Type_Kind_Basic: {
- table_info[type_idx] = table_buffer.length;
- bh_buffer_write_u32(&table_buffer, type->kind);
- bh_buffer_write_u32(&table_buffer, type_size_of(type));
- bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
- bh_buffer_write_u32(&table_buffer, type->Basic.kind);
- break;
- }
-
- case Type_Kind_Pointer: {
- table_info[type_idx] = table_buffer.length;
- bh_buffer_write_u32(&table_buffer, type->kind);
- bh_buffer_write_u32(&table_buffer, type_size_of(type));
- bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
- bh_buffer_write_u32(&table_buffer, type->Pointer.elem->id);
- break;
- }
-
- case Type_Kind_Array: {
- table_info[type_idx] = table_buffer.length;
- bh_buffer_write_u32(&table_buffer, type->kind);
- bh_buffer_write_u32(&table_buffer, type_size_of(type));
- bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
- bh_buffer_write_u32(&table_buffer, type->Array.elem->id);
- bh_buffer_write_u32(&table_buffer, type->Array.count);
- break;
- }
-
- case Type_Kind_Slice: {
- table_info[type_idx] = table_buffer.length;
- bh_buffer_write_u32(&table_buffer, type->kind);
- bh_buffer_write_u32(&table_buffer, type_size_of(type));
- bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
- bh_buffer_write_u32(&table_buffer, type->Slice.elem->id);
- break;
- }
-
- case Type_Kind_DynArray: {
- table_info[type_idx] = table_buffer.length;
- bh_buffer_write_u32(&table_buffer, type->kind);
- bh_buffer_write_u32(&table_buffer, type_size_of(type));
- bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
- bh_buffer_write_u32(&table_buffer, type->DynArray.elem->id);
- break;
- }
-
- case Type_Kind_VarArgs: {
- table_info[type_idx] = table_buffer.length;
- bh_buffer_write_u32(&table_buffer, type->kind);
- bh_buffer_write_u32(&table_buffer, type_size_of(type));
- bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
- bh_buffer_write_u32(&table_buffer, type->VarArgs.elem->id);
- break;
- }
-
- case Type_Kind_Compound: {
- // Component type-id list first, then the header pointing at it.
- u32 components_base = table_buffer.length;
-
- u32 components_count = type->Compound.count;
- fori (i, 0, components_count) {
- u32 type_idx = type->Compound.types[i]->id;
- bh_buffer_write_u32(&table_buffer, type_idx);
- }
-
- bh_buffer_align(&table_buffer, 8);
- table_info[type_idx] = table_buffer.length;
- bh_buffer_write_u32(&table_buffer, type->kind);
- bh_buffer_write_u32(&table_buffer, type_size_of(type));
- bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
- bh_buffer_align(&table_buffer, 8);
- PATCH;
- bh_buffer_write_u64(&table_buffer, components_base);
- bh_buffer_write_u64(&table_buffer, components_count);
- break;
- }
-
- case Type_Kind_Function: {
- // Parameter type-id list first, then the header pointing at it.
- u32 parameters_base = table_buffer.length;
-
- u32 parameters_count = type->Function.param_count;
- fori (i, 0, parameters_count) {
- u32 type_idx = type->Function.params[i]->id;
- bh_buffer_write_u32(&table_buffer, type_idx);
- }
-
- table_info[type_idx] = table_buffer.length;
- bh_buffer_write_u32(&table_buffer, type->kind);
- bh_buffer_write_u32(&table_buffer, type_size_of(type));
- bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
- bh_buffer_write_u32(&table_buffer, type->Function.return_type->id);
-
- PATCH;
- bh_buffer_write_u64(&table_buffer, parameters_base);
- bh_buffer_write_u64(&table_buffer, parameters_count);
-
- bh_buffer_write_u32(&table_buffer, type->Function.vararg_arg_pos > 0 ? 1 : 0);
- break;
- }
-
- case Type_Kind_Enum: {
- // Layout: member name strings, then (name, name_len, value) member
- // records, then the enum's own name, then the header.
- AstEnumType* ast_enum = (AstEnumType *) type->ast_type;
- u32 member_count = bh_arr_length(ast_enum->values);
- u32* name_locations = bh_alloc_array(global_scratch_allocator, u32, member_count);
-
- u32 i = 0;
- bh_arr_each(AstEnumValue *, value, ast_enum->values) {
- name_locations[i++] = table_buffer.length;
-
- bh_buffer_append(&table_buffer, (*value)->token->text, (*value)->token->length);
- }
- bh_buffer_align(&table_buffer, 8);
-
- u32 member_base = table_buffer.length;
- i = 0;
- bh_arr_each(AstEnumValue *, value, ast_enum->values) {
- u32 name_loc = name_locations[i++];
-
- bh_buffer_align(&table_buffer, 8);
- PATCH;
- bh_buffer_write_u64(&table_buffer, name_loc);
- bh_buffer_write_u64(&table_buffer, (*value)->token->length);
-
- assert((*value)->value->kind == Ast_Kind_NumLit);
- AstNumLit *num = (AstNumLit *) (*value)->value;
- bh_buffer_write_u64(&table_buffer, num->value.l);
- }
-
- u32 name_base = table_buffer.length;
- u32 name_length = strlen(type->Enum.name);
- bh_buffer_append(&table_buffer, type->Enum.name, name_length);
- bh_buffer_align(&table_buffer, 8);
-
- table_info[type_idx] = table_buffer.length;
- bh_buffer_write_u32(&table_buffer, type->kind);
- bh_buffer_write_u32(&table_buffer, type_size_of(type));
- bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
- bh_buffer_write_u32(&table_buffer, type->Enum.backing->id);
- PATCH;
- bh_buffer_write_u64(&table_buffer, name_base);
- bh_buffer_write_u64(&table_buffer, name_length);
- PATCH;
- bh_buffer_write_u64(&table_buffer, member_base);
- bh_buffer_write_u64(&table_buffer, member_count);
- bh_buffer_write_u32(&table_buffer, type->Enum.is_flags ? 1 : 0);
- break;
- }
-
- case Type_Kind_Struct: {
- // The most involved case. Payloads are emitted in this order:
- // member names, poly-solution values, member default values,
- // per-member meta-tags, member records, poly-param records,
- // struct-level tags, struct name — and finally the header that
- // points (via PATCH'd u64s) at each of those regions.
- TypeStruct* s = &type->Struct;
- u32* name_locations = bh_alloc_array(global_scratch_allocator, u32, s->mem_count);
- u32* param_locations = bh_alloc_array(global_scratch_allocator, u32, bh_arr_length(s->poly_sln));
- u32* value_locations = bh_alloc_array(global_scratch_allocator, u32, s->mem_count);
- u32* meta_locations = bh_alloc_array(global_scratch_allocator, u32, s->mem_count);
- u32* struct_tag_locations = bh_alloc_array(global_scratch_allocator, u32, bh_arr_length(s->meta_tags));
- memset(value_locations, 0, s->mem_count * sizeof(u32));
- memset(meta_locations, 0, s->mem_count * sizeof(u32));
- memset(struct_tag_locations, 0, bh_arr_length(s->meta_tags) * sizeof(u32));
-
- u32 i = 0;
- bh_arr_each(StructMember*, pmem, s->memarr) {
- StructMember* mem = *pmem;
-
- name_locations[i++] = table_buffer.length;
- bh_buffer_append(&table_buffer, mem->name, strlen(mem->name));
- }
-
- bh_buffer_align(&table_buffer, 8);
-
- i = 0;
- bh_arr_each(AstPolySolution, sln, s->poly_sln) {
- bh_buffer_align(&table_buffer, 8);
- param_locations[i++] = table_buffer.length;
-
- switch (sln->kind) {
- case PSK_Type: {
- // NOTE: This assumes a little endian compiler (which is assumed in other part of the code too)
- bh_buffer_append(&table_buffer, &sln->type->id, 4);
- break;
- }
-
- case PSK_Value: {
- assert(sln->value->type);
- u32 size = type_size_of(sln->value->type);
-
- bh_buffer_grow(&table_buffer, table_buffer.length + size);
- u8* buffer = table_buffer.data + table_buffer.length;
- emit_raw_data(module, buffer, sln->value);
- table_buffer.length += size;
- break;
- }
-
- default: {
- // Set to null if this is not known how to encode
- param_locations[i-1] = 0;
- break;
- }
- }
- }
-
- bh_buffer_align(&table_buffer, 8);
-
- // Default values: only compile-time-known initializers are encoded;
- // everything else leaves value_locations[i] at 0 ("no default").
- i = 0;
- bh_arr_each(StructMember*, pmem, s->memarr) {
- StructMember* mem = *pmem;
-
- if (mem->initial_value == NULL || *mem->initial_value == NULL) {
- i++;
- continue;
- }
-
- AstTyped* value = *mem->initial_value;
- assert(value->type);
-
- if ((value->flags & Ast_Flag_Comptime) == 0) {
- // onyx_report_warning(value->token->pos, "Warning: skipping generating default value for '%s' in '%s' because it is not compile-time known.\n", mem->name, s->name);
- i++;
- continue;
- }
-
- u32 size = type_size_of(value->type);
- bh_buffer_align(&table_buffer, type_alignment_of(value->type));
-
- bh_buffer_grow(&table_buffer, table_buffer.length + size);
- u8* buffer = table_buffer.data + table_buffer.length;
-
- if (!emit_raw_data_(module, buffer, value)) {
- // Failed to generate raw data
- // onyx_report_warning(value->token->pos, "Warning: failed to generate default value for '%s' in '%s'.\n", mem->name, s->name);
- value_locations[i++] = 0;
-
- } else {
- // Success
- value_locations[i++] = table_buffer.length;
- table_buffer.length += size;
- }
- }
-
- // Per-member meta-tags: tag payloads first, then a (location, type_id)
- // pair array whose start is remembered in meta_locations[i].
- i = 0;
- bh_arr_each(StructMember*, pmem, s->memarr) {
- StructMember* mem = *pmem;
-
- if (mem->meta_tags == NULL) {
- i += 1;
- continue;
- }
-
- bh_arr(AstTyped *) meta_tags = mem->meta_tags;
- assert(meta_tags);
-
- bh_arr(u64) meta_tag_locations=NULL;
- bh_arr_new(global_heap_allocator, meta_tag_locations, bh_arr_length(meta_tags));
-
- int j = 0;
- bh_arr_each(AstTyped *, meta, meta_tags) {
- AstTyped* value = *meta;
- assert(value->flags & Ast_Flag_Comptime);
- assert(value->type);
-
- u32 size = type_size_of(value->type);
- bh_buffer_align(&table_buffer, type_alignment_of(value->type));
- meta_tag_locations[j] = table_buffer.length;
-
- bh_buffer_grow(&table_buffer, table_buffer.length + size);
- u8* buffer = table_buffer.data + table_buffer.length;
-
- assert(emit_raw_data_(module, buffer, value));
- table_buffer.length += size;
-
- j += 1;
- }
-
- bh_buffer_align(&table_buffer, 8);
- meta_locations[i] = table_buffer.length;
-
- fori (k, 0, bh_arr_length(meta_tags)) {
- PATCH;
- bh_buffer_write_u64(&table_buffer, meta_tag_locations[k]);
- bh_buffer_write_u64(&table_buffer, meta_tags[k]->type->id);
- }
-
- bh_arr_free(meta_tag_locations);
- i += 1;
- }
-
- // Member record array.
- bh_buffer_align(&table_buffer, 8);
- u32 members_base = table_buffer.length;
-
- i = 0;
- bh_arr_each(StructMember*, pmem, s->memarr) {
- StructMember* mem = *pmem;
-
- u32 name_loc = name_locations[i];
- u32 value_loc = value_locations[i];
- u32 meta_loc = meta_locations[i++];
-
- bh_buffer_align(&table_buffer, 8);
- PATCH;
- bh_buffer_write_u64(&table_buffer, name_loc);
- bh_buffer_write_u64(&table_buffer, strlen(mem->name));
- bh_buffer_write_u32(&table_buffer, mem->offset);
- bh_buffer_write_u32(&table_buffer, mem->type->id);
- bh_buffer_write_byte(&table_buffer, mem->used ? 1 : 0);
-
- bh_buffer_align(&table_buffer, 8);
- PATCH;
- bh_buffer_write_u64(&table_buffer, value_loc);
-
- PATCH;
- bh_buffer_write_u64(&table_buffer, meta_loc);
- bh_buffer_write_u64(&table_buffer, bh_arr_length(mem->meta_tags));
- }
-
- // Polymorphic-parameter record array.
- bh_buffer_align(&table_buffer, 8);
- u32 params_base = table_buffer.length;
-
- i = 0;
- bh_arr_each(AstPolySolution, sln, s->poly_sln) {
- bh_buffer_align(&table_buffer, 8);
- PATCH;
- bh_buffer_write_u64(&table_buffer, param_locations[i++]);
-
- if (sln->kind == PSK_Type) bh_buffer_write_u32(&table_buffer, basic_types[Basic_Kind_Type_Index].id);
- else bh_buffer_write_u32(&table_buffer, sln->value->type->id);
- }
-
- // Struct-level tag payloads, then their (location, type_id) pairs.
- i = 0;
- bh_arr_each(AstTyped *, tag, s->meta_tags) {
- AstTyped* value = *tag;
- assert(value->flags & Ast_Flag_Comptime);
- assert(value->type);
-
- u32 size = type_size_of(value->type);
- bh_buffer_align(&table_buffer, type_alignment_of(value->type));
- struct_tag_locations[i] = table_buffer.length;
-
- bh_buffer_grow(&table_buffer, table_buffer.length + size);
- u8* buffer = table_buffer.data + table_buffer.length;
-
- assert(emit_raw_data_(module, buffer, value));
- table_buffer.length += size;
-
- i += 1;
- }
-
- bh_buffer_align(&table_buffer, 8);
- u32 struct_tag_base = table_buffer.length;
-
- fori (i, 0, bh_arr_length(s->meta_tags)) {
- PATCH;
- bh_buffer_write_u64(&table_buffer, struct_tag_locations[i]);
- bh_buffer_write_u64(&table_buffer, s->meta_tags[i]->type->id);
- }
-
- // Anonymous structs have no name; (0, 0) encodes that.
- u32 name_base = 0;
- u32 name_length = 0;
- if (s->name) {
- name_length = strlen(s->name);
- name_base = table_buffer.length;
- bh_buffer_append(&table_buffer, s->name, name_length);
- }
-
- // Finally the struct header itself.
- bh_buffer_align(&table_buffer, 8);
- table_info[type_idx] = table_buffer.length;
- bh_buffer_write_u32(&table_buffer, type->kind);
- bh_buffer_write_u32(&table_buffer, type_size_of(type));
- bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
-
- if (type->Struct.constructed_from != NULL) {
- bh_buffer_write_u32(&table_buffer, type->Struct.constructed_from->type_id);
- } else {
- bh_buffer_write_u32(&table_buffer, 0);
- }
-
- PATCH;
- bh_buffer_write_u64(&table_buffer, name_base);
- bh_buffer_write_u64(&table_buffer, name_length);
- PATCH;
- bh_buffer_write_u64(&table_buffer, members_base);
- bh_buffer_write_u64(&table_buffer, s->mem_count);
- PATCH;
- bh_buffer_write_u64(&table_buffer, params_base);
- bh_buffer_write_u64(&table_buffer, bh_arr_length(s->poly_sln));
- PATCH;
- bh_buffer_write_u64(&table_buffer, struct_tag_base);
- bh_buffer_write_u64(&table_buffer, bh_arr_length(s->meta_tags));
-
- break;
- }
-
- case Type_Kind_PolyStruct: {
- // Uninstantiated polymorphic structs record only their name and
- // tags; size/alignment/base fields are written as zero below.
- u32* tag_locations = bh_alloc_array(global_scratch_allocator, u32, bh_arr_length(type->PolyStruct.meta_tags));
- memset(tag_locations, 0, sizeof(u32) * bh_arr_length(type->PolyStruct.meta_tags));
-
- u32 name_base = table_buffer.length;
- u32 name_length = strlen(type->PolyStruct.name);
- bh_buffer_append(&table_buffer, type->PolyStruct.name, name_length);
-
- i32 i = 0;
- bh_arr_each(AstTyped *, tag, type->PolyStruct.meta_tags) {
- AstTyped* value = *tag;
- assert(value->flags & Ast_Flag_Comptime);
- assert(value->type);
-
- u32 size = type_size_of(value->type);
- bh_buffer_align(&table_buffer, type_alignment_of(value->type));
- tag_locations[i] = table_buffer.length;
-
- bh_buffer_grow(&table_buffer, table_buffer.length + size);
- u8* buffer = table_buffer.data + table_buffer.length;
-
- assert(emit_raw_data_(module, buffer, value));
- table_buffer.length += size;
-
- i += 1;
- }
-
- bh_buffer_align(&table_buffer, 8);
- u32 tags_base = table_buffer.length;
- u32 tags_count = bh_arr_length(type->PolyStruct.meta_tags);
-
- fori (i, 0, tags_count) {
- PATCH;
- bh_buffer_write_u64(&table_buffer, tag_locations[i]);
- bh_buffer_write_u64(&table_buffer, type->PolyStruct.meta_tags[i]->type->id);
- }
-
- bh_buffer_align(&table_buffer, 8);
- table_info[type_idx] = table_buffer.length;
- bh_buffer_write_u32(&table_buffer, type->kind);
- bh_buffer_write_u32(&table_buffer, 0);
- bh_buffer_write_u32(&table_buffer, 0);
- bh_buffer_write_u32(&table_buffer, 0);
- PATCH;
- bh_buffer_write_u64(&table_buffer, name_base);
- bh_buffer_write_u64(&table_buffer, name_length);
- PATCH;
- bh_buffer_write_u64(&table_buffer, tags_base);
- bh_buffer_write_u64(&table_buffer, tags_count);
-
- break;
- }
-
- case Type_Kind_Distinct: {
- u32 name_base = table_buffer.length;
- u32 name_length = strlen(type->Distinct.name);
- bh_buffer_append(&table_buffer, type->Distinct.name, name_length);
- bh_buffer_align(&table_buffer, 8);
-
- table_info[type_idx] = table_buffer.length;
- bh_buffer_write_u32(&table_buffer, type->kind);
- bh_buffer_write_u32(&table_buffer, type_size_of(type));
- bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
- bh_buffer_write_u32(&table_buffer, type->Distinct.base_type->id);
- PATCH;
- bh_buffer_write_u64(&table_buffer, name_base);
- bh_buffer_write_u64(&table_buffer, name_length);
- break;
- }
- }
- }
-
- if (context.options->verbose_output == 1) {
- bh_printf("Type table size: %d bytes.\n", table_buffer.length);
- }
-
- // Placement: the table_info pointer array goes first, then the record
- // blob, then the 16-byte (ptr, count) descriptor. All buffer-relative
- // offsets are rebased to absolute addresses here.
- u32 offset = module->next_datum_offset;
- bh_align(offset, 8);
-
- u64 type_table_location = offset;
-
- WasmDatum type_table_data = {
- .offset = offset,
- .length = type_count * 8,
- .data = table_info,
- };
- bh_arr_push(module->data, type_table_data);
-
- offset += type_table_data.length;
-
- fori (i, 0, type_count) {
- table_info[i] += offset;
- }
-
- // Zero means "no pointer" and must stay zero rather than being rebased.
- bh_arr_each(u32, patch_loc, base_patch_locations) {
- u64* loc = bh_pointer_add(table_buffer.data, *patch_loc);
- if (*loc == 0) continue;
-
- *loc += offset;
- }
-
- WasmDatum type_info_data = {
- .offset = offset,
- .length = table_buffer.length,
- .data = table_buffer.data,
- };
- bh_arr_push(module->data, type_info_data);
- offset += type_info_data.length;
-
- u64 global_data_ptr = offset;
-
- // The (pointer, count) pair returned to the caller.
- u64* tmp_data = bh_alloc(global_heap_allocator, 16);
- tmp_data[0] = type_table_location;
- tmp_data[1] = type_count;
- WasmDatum type_table_global_data = {
- .offset = offset,
- .length = 16,
- .data = tmp_data,
- };
- bh_arr_push(module->data, type_table_global_data);
- offset += type_table_global_data.length;
-
- module->next_datum_offset = offset;
-
- return global_data_ptr;
-
-#undef PATCH
-}
--- /dev/null
+// This file is directly included in src/onxywasm.c
+// It is here purely to decrease the amount of clutter in the main file.
+
+
+u64 build_type_table(OnyxWasmModule* module) {
+
+ bh_arr(u32) base_patch_locations=NULL;
+ bh_arr_new(global_heap_allocator, base_patch_locations, 256);
+
+#define PATCH (bh_arr_push(base_patch_locations, table_buffer.length))
+
+ // This is the data behind the "type_table" slice in type_info.onyx
+ u32 type_count = bh_arr_length(type_map.entries) + 1;
+ u64* table_info = bh_alloc_array(global_heap_allocator, u64, type_count); // HACK
+ memset(table_info, 0, type_count * sizeof(u64));
+
+ bh_buffer table_buffer;
+ bh_buffer_init(&table_buffer, global_heap_allocator, 4096);
+
+ // Write a "NULL" at the beginning so nothing will have to point to the first byte of the buffer.
+ bh_buffer_write_u64(&table_buffer, 0);
+
+ bh_arr_each(bh__imap_entry, type_entry, type_map.entries) {
+ u64 type_idx = type_entry->key;
+ Type* type = (Type *) type_entry->value;
+
+ switch (type->kind) {
+ case Type_Kind_Basic: {
+ table_info[type_idx] = table_buffer.length;
+ bh_buffer_write_u32(&table_buffer, type->kind);
+ bh_buffer_write_u32(&table_buffer, type_size_of(type));
+ bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
+ bh_buffer_write_u32(&table_buffer, type->Basic.kind);
+ break;
+ }
+
+ case Type_Kind_Pointer: {
+ table_info[type_idx] = table_buffer.length;
+ bh_buffer_write_u32(&table_buffer, type->kind);
+ bh_buffer_write_u32(&table_buffer, type_size_of(type));
+ bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
+ bh_buffer_write_u32(&table_buffer, type->Pointer.elem->id);
+ break;
+ }
+
+ case Type_Kind_Array: {
+ table_info[type_idx] = table_buffer.length;
+ bh_buffer_write_u32(&table_buffer, type->kind);
+ bh_buffer_write_u32(&table_buffer, type_size_of(type));
+ bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
+ bh_buffer_write_u32(&table_buffer, type->Array.elem->id);
+ bh_buffer_write_u32(&table_buffer, type->Array.count);
+ break;
+ }
+
+ case Type_Kind_Slice: {
+ table_info[type_idx] = table_buffer.length;
+ bh_buffer_write_u32(&table_buffer, type->kind);
+ bh_buffer_write_u32(&table_buffer, type_size_of(type));
+ bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
+ bh_buffer_write_u32(&table_buffer, type->Slice.elem->id);
+ break;
+ }
+
+ case Type_Kind_DynArray: {
+ table_info[type_idx] = table_buffer.length;
+ bh_buffer_write_u32(&table_buffer, type->kind);
+ bh_buffer_write_u32(&table_buffer, type_size_of(type));
+ bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
+ bh_buffer_write_u32(&table_buffer, type->DynArray.elem->id);
+ break;
+ }
+
+ case Type_Kind_VarArgs: {
+ table_info[type_idx] = table_buffer.length;
+ bh_buffer_write_u32(&table_buffer, type->kind);
+ bh_buffer_write_u32(&table_buffer, type_size_of(type));
+ bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
+ bh_buffer_write_u32(&table_buffer, type->VarArgs.elem->id);
+ break;
+ }
+
+ case Type_Kind_Compound: {
+ u32 components_base = table_buffer.length;
+
+ u32 components_count = type->Compound.count;
+ fori (i, 0, components_count) {
+ u32 type_idx = type->Compound.types[i]->id;
+ bh_buffer_write_u32(&table_buffer, type_idx);
+ }
+
+ bh_buffer_align(&table_buffer, 8);
+ table_info[type_idx] = table_buffer.length;
+ bh_buffer_write_u32(&table_buffer, type->kind);
+ bh_buffer_write_u32(&table_buffer, type_size_of(type));
+ bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
+ bh_buffer_align(&table_buffer, 8);
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, components_base);
+ bh_buffer_write_u64(&table_buffer, components_count);
+ break;
+ }
+
+ case Type_Kind_Function: {
+ u32 parameters_base = table_buffer.length;
+
+ u32 parameters_count = type->Function.param_count;
+ fori (i, 0, parameters_count) {
+ u32 type_idx = type->Function.params[i]->id;
+ bh_buffer_write_u32(&table_buffer, type_idx);
+ }
+
+ table_info[type_idx] = table_buffer.length;
+ bh_buffer_write_u32(&table_buffer, type->kind);
+ bh_buffer_write_u32(&table_buffer, type_size_of(type));
+ bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
+ bh_buffer_write_u32(&table_buffer, type->Function.return_type->id);
+
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, parameters_base);
+ bh_buffer_write_u64(&table_buffer, parameters_count);
+
+ bh_buffer_write_u32(&table_buffer, type->Function.vararg_arg_pos > 0 ? 1 : 0);
+ break;
+ }
+
+ case Type_Kind_Enum: {
+ AstEnumType* ast_enum = (AstEnumType *) type->ast_type;
+ u32 member_count = bh_arr_length(ast_enum->values);
+ u32* name_locations = bh_alloc_array(global_scratch_allocator, u32, member_count);
+
+ u32 i = 0;
+ bh_arr_each(AstEnumValue *, value, ast_enum->values) {
+ name_locations[i++] = table_buffer.length;
+
+ bh_buffer_append(&table_buffer, (*value)->token->text, (*value)->token->length);
+ }
+ bh_buffer_align(&table_buffer, 8);
+
+ u32 member_base = table_buffer.length;
+ i = 0;
+ bh_arr_each(AstEnumValue *, value, ast_enum->values) {
+ u32 name_loc = name_locations[i++];
+
+ bh_buffer_align(&table_buffer, 8);
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, name_loc);
+ bh_buffer_write_u64(&table_buffer, (*value)->token->length);
+
+ assert((*value)->value->kind == Ast_Kind_NumLit);
+ AstNumLit *num = (AstNumLit *) (*value)->value;
+ bh_buffer_write_u64(&table_buffer, num->value.l);
+ }
+
+ u32 name_base = table_buffer.length;
+ u32 name_length = strlen(type->Enum.name);
+ bh_buffer_append(&table_buffer, type->Enum.name, name_length);
+ bh_buffer_align(&table_buffer, 8);
+
+ table_info[type_idx] = table_buffer.length;
+ bh_buffer_write_u32(&table_buffer, type->kind);
+ bh_buffer_write_u32(&table_buffer, type_size_of(type));
+ bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
+ bh_buffer_write_u32(&table_buffer, type->Enum.backing->id);
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, name_base);
+ bh_buffer_write_u64(&table_buffer, name_length);
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, member_base);
+ bh_buffer_write_u64(&table_buffer, member_count);
+ bh_buffer_write_u32(&table_buffer, type->Enum.is_flags ? 1 : 0);
+ break;
+ }
+
+ case Type_Kind_Struct: {
+ TypeStruct* s = &type->Struct;
+ u32* name_locations = bh_alloc_array(global_scratch_allocator, u32, s->mem_count);
+ u32* param_locations = bh_alloc_array(global_scratch_allocator, u32, bh_arr_length(s->poly_sln));
+ u32* value_locations = bh_alloc_array(global_scratch_allocator, u32, s->mem_count);
+ u32* meta_locations = bh_alloc_array(global_scratch_allocator, u32, s->mem_count);
+ u32* struct_tag_locations = bh_alloc_array(global_scratch_allocator, u32, bh_arr_length(s->meta_tags));
+ memset(value_locations, 0, s->mem_count * sizeof(u32));
+ memset(meta_locations, 0, s->mem_count * sizeof(u32));
+ memset(struct_tag_locations, 0, bh_arr_length(s->meta_tags) * sizeof(u32));
+
+ u32 i = 0;
+ bh_arr_each(StructMember*, pmem, s->memarr) {
+ StructMember* mem = *pmem;
+
+ name_locations[i++] = table_buffer.length;
+ bh_buffer_append(&table_buffer, mem->name, strlen(mem->name));
+ }
+
+ bh_buffer_align(&table_buffer, 8);
+
+ i = 0;
+ bh_arr_each(AstPolySolution, sln, s->poly_sln) {
+ bh_buffer_align(&table_buffer, 8);
+ param_locations[i++] = table_buffer.length;
+
+ switch (sln->kind) {
+ case PSK_Type: {
+                        // NOTE: This assumes a little-endian compiler (which is assumed in other parts of the code too)
+ bh_buffer_append(&table_buffer, &sln->type->id, 4);
+ break;
+ }
+
+ case PSK_Value: {
+ assert(sln->value->type);
+ u32 size = type_size_of(sln->value->type);
+
+ bh_buffer_grow(&table_buffer, table_buffer.length + size);
+ u8* buffer = table_buffer.data + table_buffer.length;
+ emit_raw_data(module, buffer, sln->value);
+ table_buffer.length += size;
+ break;
+ }
+
+ default: {
+                        // Set to null if we don't know how to encode this solution kind
+ param_locations[i-1] = 0;
+ break;
+ }
+ }
+ }
+
+ bh_buffer_align(&table_buffer, 8);
+
+ i = 0;
+ bh_arr_each(StructMember*, pmem, s->memarr) {
+ StructMember* mem = *pmem;
+
+ if (mem->initial_value == NULL || *mem->initial_value == NULL) {
+ i++;
+ continue;
+ }
+
+ AstTyped* value = *mem->initial_value;
+ assert(value->type);
+
+ if ((value->flags & Ast_Flag_Comptime) == 0) {
+ // onyx_report_warning(value->token->pos, "Warning: skipping generating default value for '%s' in '%s' because it is not compile-time known.\n", mem->name, s->name);
+ i++;
+ continue;
+ }
+
+ u32 size = type_size_of(value->type);
+ bh_buffer_align(&table_buffer, type_alignment_of(value->type));
+
+ bh_buffer_grow(&table_buffer, table_buffer.length + size);
+ u8* buffer = table_buffer.data + table_buffer.length;
+
+ if (!emit_raw_data_(module, buffer, value)) {
+ // Failed to generate raw data
+ // onyx_report_warning(value->token->pos, "Warning: failed to generate default value for '%s' in '%s'.\n", mem->name, s->name);
+ value_locations[i++] = 0;
+
+ } else {
+ // Success
+ value_locations[i++] = table_buffer.length;
+ table_buffer.length += size;
+ }
+ }
+
+ i = 0;
+ bh_arr_each(StructMember*, pmem, s->memarr) {
+ StructMember* mem = *pmem;
+
+ if (mem->meta_tags == NULL) {
+ i += 1;
+ continue;
+ }
+
+ bh_arr(AstTyped *) meta_tags = mem->meta_tags;
+ assert(meta_tags);
+
+ bh_arr(u64) meta_tag_locations=NULL;
+ bh_arr_new(global_heap_allocator, meta_tag_locations, bh_arr_length(meta_tags));
+
+ int j = 0;
+ bh_arr_each(AstTyped *, meta, meta_tags) {
+ AstTyped* value = *meta;
+ assert(value->flags & Ast_Flag_Comptime);
+ assert(value->type);
+
+ u32 size = type_size_of(value->type);
+ bh_buffer_align(&table_buffer, type_alignment_of(value->type));
+ meta_tag_locations[j] = table_buffer.length;
+
+ bh_buffer_grow(&table_buffer, table_buffer.length + size);
+ u8* buffer = table_buffer.data + table_buffer.length;
+
+ assert(emit_raw_data_(module, buffer, value));
+ table_buffer.length += size;
+
+ j += 1;
+ }
+
+ bh_buffer_align(&table_buffer, 8);
+ meta_locations[i] = table_buffer.length;
+
+ fori (k, 0, bh_arr_length(meta_tags)) {
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, meta_tag_locations[k]);
+ bh_buffer_write_u64(&table_buffer, meta_tags[k]->type->id);
+ }
+
+ bh_arr_free(meta_tag_locations);
+ i += 1;
+ }
+
+ bh_buffer_align(&table_buffer, 8);
+ u32 members_base = table_buffer.length;
+
+ i = 0;
+ bh_arr_each(StructMember*, pmem, s->memarr) {
+ StructMember* mem = *pmem;
+
+ u32 name_loc = name_locations[i];
+ u32 value_loc = value_locations[i];
+ u32 meta_loc = meta_locations[i++];
+
+ bh_buffer_align(&table_buffer, 8);
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, name_loc);
+ bh_buffer_write_u64(&table_buffer, strlen(mem->name));
+ bh_buffer_write_u32(&table_buffer, mem->offset);
+ bh_buffer_write_u32(&table_buffer, mem->type->id);
+ bh_buffer_write_byte(&table_buffer, mem->used ? 1 : 0);
+
+ bh_buffer_align(&table_buffer, 8);
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, value_loc);
+
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, meta_loc);
+ bh_buffer_write_u64(&table_buffer, bh_arr_length(mem->meta_tags));
+ }
+
+ bh_buffer_align(&table_buffer, 8);
+ u32 params_base = table_buffer.length;
+
+ i = 0;
+ bh_arr_each(AstPolySolution, sln, s->poly_sln) {
+ bh_buffer_align(&table_buffer, 8);
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, param_locations[i++]);
+
+ if (sln->kind == PSK_Type) bh_buffer_write_u32(&table_buffer, basic_types[Basic_Kind_Type_Index].id);
+ else bh_buffer_write_u32(&table_buffer, sln->value->type->id);
+ }
+
+ i = 0;
+ bh_arr_each(AstTyped *, tag, s->meta_tags) {
+ AstTyped* value = *tag;
+ assert(value->flags & Ast_Flag_Comptime);
+ assert(value->type);
+
+ u32 size = type_size_of(value->type);
+ bh_buffer_align(&table_buffer, type_alignment_of(value->type));
+ struct_tag_locations[i] = table_buffer.length;
+
+ bh_buffer_grow(&table_buffer, table_buffer.length + size);
+ u8* buffer = table_buffer.data + table_buffer.length;
+
+ assert(emit_raw_data_(module, buffer, value));
+ table_buffer.length += size;
+
+ i += 1;
+ }
+
+ bh_buffer_align(&table_buffer, 8);
+ u32 struct_tag_base = table_buffer.length;
+
+ fori (i, 0, bh_arr_length(s->meta_tags)) {
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, struct_tag_locations[i]);
+ bh_buffer_write_u64(&table_buffer, s->meta_tags[i]->type->id);
+ }
+
+ u32 name_base = 0;
+ u32 name_length = 0;
+ if (s->name) {
+ name_length = strlen(s->name);
+ name_base = table_buffer.length;
+ bh_buffer_append(&table_buffer, s->name, name_length);
+ }
+
+ bh_buffer_align(&table_buffer, 8);
+ table_info[type_idx] = table_buffer.length;
+ bh_buffer_write_u32(&table_buffer, type->kind);
+ bh_buffer_write_u32(&table_buffer, type_size_of(type));
+ bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
+
+ if (type->Struct.constructed_from != NULL) {
+ bh_buffer_write_u32(&table_buffer, type->Struct.constructed_from->type_id);
+ } else {
+ bh_buffer_write_u32(&table_buffer, 0);
+ }
+
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, name_base);
+ bh_buffer_write_u64(&table_buffer, name_length);
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, members_base);
+ bh_buffer_write_u64(&table_buffer, s->mem_count);
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, params_base);
+ bh_buffer_write_u64(&table_buffer, bh_arr_length(s->poly_sln));
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, struct_tag_base);
+ bh_buffer_write_u64(&table_buffer, bh_arr_length(s->meta_tags));
+
+ break;
+ }
+
+ case Type_Kind_PolyStruct: {
+ u32* tag_locations = bh_alloc_array(global_scratch_allocator, u32, bh_arr_length(type->PolyStruct.meta_tags));
+ memset(tag_locations, 0, sizeof(u32) * bh_arr_length(type->PolyStruct.meta_tags));
+
+ u32 name_base = table_buffer.length;
+ u32 name_length = strlen(type->PolyStruct.name);
+ bh_buffer_append(&table_buffer, type->PolyStruct.name, name_length);
+
+ i32 i = 0;
+ bh_arr_each(AstTyped *, tag, type->PolyStruct.meta_tags) {
+ AstTyped* value = *tag;
+ assert(value->flags & Ast_Flag_Comptime);
+ assert(value->type);
+
+ u32 size = type_size_of(value->type);
+ bh_buffer_align(&table_buffer, type_alignment_of(value->type));
+ tag_locations[i] = table_buffer.length;
+
+ bh_buffer_grow(&table_buffer, table_buffer.length + size);
+ u8* buffer = table_buffer.data + table_buffer.length;
+
+ assert(emit_raw_data_(module, buffer, value));
+ table_buffer.length += size;
+
+ i += 1;
+ }
+
+ bh_buffer_align(&table_buffer, 8);
+ u32 tags_base = table_buffer.length;
+ u32 tags_count = bh_arr_length(type->PolyStruct.meta_tags);
+
+ fori (i, 0, tags_count) {
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, tag_locations[i]);
+ bh_buffer_write_u64(&table_buffer, type->PolyStruct.meta_tags[i]->type->id);
+ }
+
+ bh_buffer_align(&table_buffer, 8);
+ table_info[type_idx] = table_buffer.length;
+ bh_buffer_write_u32(&table_buffer, type->kind);
+ bh_buffer_write_u32(&table_buffer, 0);
+ bh_buffer_write_u32(&table_buffer, 0);
+ bh_buffer_write_u32(&table_buffer, 0);
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, name_base);
+ bh_buffer_write_u64(&table_buffer, name_length);
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, tags_base);
+ bh_buffer_write_u64(&table_buffer, tags_count);
+
+ break;
+ }
+
+ case Type_Kind_Distinct: {
+ u32 name_base = table_buffer.length;
+ u32 name_length = strlen(type->Distinct.name);
+ bh_buffer_append(&table_buffer, type->Distinct.name, name_length);
+ bh_buffer_align(&table_buffer, 8);
+
+ table_info[type_idx] = table_buffer.length;
+ bh_buffer_write_u32(&table_buffer, type->kind);
+ bh_buffer_write_u32(&table_buffer, type_size_of(type));
+ bh_buffer_write_u32(&table_buffer, type_alignment_of(type));
+ bh_buffer_write_u32(&table_buffer, type->Distinct.base_type->id);
+ PATCH;
+ bh_buffer_write_u64(&table_buffer, name_base);
+ bh_buffer_write_u64(&table_buffer, name_length);
+ break;
+ }
+ }
+ }
+
+ if (context.options->verbose_output == 1) {
+ bh_printf("Type table size: %d bytes.\n", table_buffer.length);
+ }
+
+ u32 offset = module->next_datum_offset;
+ bh_align(offset, 8);
+
+ u64 type_table_location = offset;
+
+ WasmDatum type_table_data = {
+ .offset = offset,
+ .length = type_count * 8,
+ .data = table_info,
+ };
+ bh_arr_push(module->data, type_table_data);
+
+ offset += type_table_data.length;
+
+ fori (i, 0, type_count) {
+ table_info[i] += offset;
+ }
+
+ bh_arr_each(u32, patch_loc, base_patch_locations) {
+ u64* loc = bh_pointer_add(table_buffer.data, *patch_loc);
+ if (*loc == 0) continue;
+
+ *loc += offset;
+ }
+
+ WasmDatum type_info_data = {
+ .offset = offset,
+ .length = table_buffer.length,
+ .data = table_buffer.data,
+ };
+ bh_arr_push(module->data, type_info_data);
+ offset += type_info_data.length;
+
+ u64 global_data_ptr = offset;
+
+ u64* tmp_data = bh_alloc(global_heap_allocator, 16);
+ tmp_data[0] = type_table_location;
+ tmp_data[1] = type_count;
+ WasmDatum type_table_global_data = {
+ .offset = offset,
+ .length = 16,
+ .data = tmp_data,
+ };
+ bh_arr_push(module->data, type_table_global_data);
+ offset += type_table_global_data.length;
+
+ module->next_datum_offset = offset;
+
+ return global_data_ptr;
+
+#undef PATCH
+}