// function.
bh_imap all_overloads;
+ AstType *expected_return_node;
+ Type *expected_return_type;
+
b32 locked : 1;
b32 only_local_functions : 1;
};
u32 foreign_block_number;
};
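+// An arbitrary unit of work driven by the entity heap: 'func' is invoked with
+// 'job_data' on each pass until it reports success or failure instead of yielding.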
+typedef struct EntityJobData {
+ enum TypeMatch (*func)(void *job_data);
+ void *job_data;
+} EntityJobData;
+
typedef enum EntityState {
Entity_State_Error,
Entity_Type_Struct_Member_Default,
Entity_Type_Memory_Reservation,
Entity_Type_Expression,
+ Entity_Type_Job, // Represents an arbitrary job (function pointer).
Entity_Type_Global,
Entity_Type_Overloaded_Function,
Entity_Type_Function,
AstInterface *interface;
AstConstraint *constraint;
AstDirectiveLibrary *library;
+ EntityJobData *job_data;
};
} Entity;
void entity_heap_change_top(EntityHeap* entities, Entity* new_top);
void entity_heap_remove_top(EntityHeap* entities);
void entity_change_type(EntityHeap* entities, Entity *ent, EntityType new_type);
+void entity_heap_add_job(EntityHeap *entities, enum TypeMatch (*func)(void *), void *job_data);
// If target_arr is null, the entities will be placed directly in the heap.
void add_entities_for_node(bh_arr(Entity *)* target_arr, AstNode* node, Scope* scope, Package* package);
TYPE_MATCH_YIELD,
TYPE_MATCH_SPECIAL, // Only used for nest polymorph function lookups
} TypeMatch;
+
#define unify_node_and_type(node, type) (unify_node_and_type_((node), (type), 1))
TypeMatch unify_node_and_type_(AstTyped** pnode, Type* type, b32 permanent);
b32 potentially_convert_function_to_polyproc(AstFunction *func);
AstPolyCallType* convert_call_to_polycall(AstCall* call);
+
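+// Payload for the deferred job that checks an overload option's return type
+// against the expected return type declared on its overload group.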
+typedef struct OverloadReturnTypeCheck {
+ Type *expected_type;
+ AstTyped *node;
+ OnyxToken *group;
+} OverloadReturnTypeCheck;
+
void add_overload_option(bh_arr(OverloadOption)* poverloads, u64 precedence, AstTyped* overload);
AstTyped* find_matching_overload_by_arguments(bh_arr(OverloadOption) overloads, Arguments* args);
AstTyped* find_matching_overload_by_type(bh_arr(OverloadOption) overloads, Type* type);
void report_unable_to_match_overload(AstCall* call, bh_arr(OverloadOption) overloads);
+void report_incorrect_overload_expected_type(Type *given, Type *expected, OnyxToken *overload, OnyxToken *group);
+void ensure_overload_returns_correct_type(AstTyped *overload, AstOverloadedFunction *group);
void expand_macro(AstCall** pcall, AstFunction* template);
AstFunction* macro_resolve_header(AstMacro* macro, Arguments* args, OnyxToken* callsite, b32 error_if_failed);
void onyx_errors_init(bh_arr(bh_file_contents)* files);
void onyx_errors_enable();
void onyx_errors_disable();
+b32 onyx_errors_are_enabled();
void onyx_submit_error(OnyxError error);
void onyx_report_error(OnyxFilePos pos, OnyxErrorRank rank, char * format, ...);
void onyx_submit_warning(OnyxError error);
"Struct Member Default",
"Memory Reservation",
"Expression",
+ "Job",
"Global",
"Overloaded_Function",
"Function",
if (func->kind == Ast_Kind_Function)
func->flags |= Ast_Flag_Function_Used;
- *pnode = func;
- node = *pnode;
+ if (permanent) {
+ ensure_overload_returns_correct_type(func, (AstOverloadedFunction *) node);
+ *pnode = func;
+ }
+
+ node = func;
}
if (node->kind == Ast_Kind_Polymorphic_Proc) {
if (call->kind == Ast_Kind_Intrinsic_Call) return Check_Success;
AstTyped* callee = (AstTyped *) strip_aliases((AstNode *) call->callee);
+ AstTyped* original_callee = callee;
+
b32 calling_a_macro = 0;
+ b32 need_to_check_overload_return_type = 0;
if (callee->kind == Ast_Kind_Overloaded_Function) {
AstTyped* new_callee = find_matching_overload_by_arguments(
YIELD(call->token->pos, "Waiting for overloaded function option to pass type-checking.");
}
+ need_to_check_overload_return_type = 1;
+
callee = new_callee;
}
callee->token->text, callee->token->length);
}
+ if (need_to_check_overload_return_type) {
+ ensure_overload_returns_correct_type(callee, (AstOverloadedFunction *) original_callee);
+ }
+
*effective_callee = callee;
return Check_Success;
}
return Check_Success;
}
-CheckStatus check_overloaded_function(AstOverloadedFunction* func) {
+CheckStatus check_overloaded_function(AstOverloadedFunction* ofunc) {
b32 done = 1;
bh_imap all_overloads;
bh_imap_init(&all_overloads, global_heap_allocator, 4);
- build_all_overload_options(func->overloads, &all_overloads);
+ build_all_overload_options(ofunc->overloads, &all_overloads);
bh_arr_each(bh__imap_entry, entry, all_overloads.entries) {
AstTyped* node = (AstTyped *) entry->key;
}
}
- bh_imap_free(&all_overloads);
+ if (!done) {
+ bh_imap_free(&all_overloads);
+ YIELD(ofunc->token->pos, "Waiting for all options to pass type-checking.");
+ }
+
+ if (ofunc->expected_return_node) {
+ ofunc->expected_return_type = type_build_from_ast(context.ast_alloc, ofunc->expected_return_node);
+ if (!ofunc->expected_return_type) {
+ bh_imap_free(&all_overloads);
+ YIELD(ofunc->token->pos, "Waiting to construct expected return type.");
+ }
- if (done) return Check_Success;
- else YIELD(func->token->pos, "Waiting for all options to pass type-checking.");
+ bh_arr_each(bh__imap_entry, entry, all_overloads.entries) {
+ AstTyped* node = (AstTyped *) entry->key;
+
+ if (node->kind == Ast_Kind_Function) {
+ AstFunction *func = (AstFunction *) node;
+
+ if (!func->type) continue;
+ if (!func->type->Function.return_type) continue;
+
+ Type *return_type = func->type->Function.return_type;
+ if (return_type == &type_auto_return) continue;
+
+ if (!types_are_compatible(return_type, ofunc->expected_return_type)) {
+ report_incorrect_overload_expected_type(return_type, ofunc->expected_return_type, func->token, ofunc->token);
+ bh_imap_free(&all_overloads);
+ return Check_Error;
+ }
+ }
+ }
+ }
+
+
+ bh_imap_free(&all_overloads);
+ return Check_Success;
}
CheckStatus check_struct(AstStructType* s_node) {
return Check_Complete;
}
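+// Runs a deferred job entity and translates its TypeMatch result into a CheckStatus.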
+CheckStatus check_arbitrary_job(EntityJobData *job) {
+ TypeMatch result = job->func(job->job_data);
+
+ switch (result) {
+ case TYPE_MATCH_SUCCESS: return Check_Complete;
+ case TYPE_MATCH_FAILED: return Check_Error;
+ case TYPE_MATCH_YIELD: return Check_Yield_Macro;
+ }
+
+ return Check_Error;
+}
+
void check_entity(Entity* ent) {
CheckStatus cs = Check_Success;
}
break;
+ case Entity_Type_Job: cs = check_arbitrary_job(ent->job_data); break;
+
default: break;
}
ent->type = new_type;
}
+void entity_heap_add_job(EntityHeap *entities, TypeMatch (*func)(void *), void *job_data) {
+ EntityJobData *job = bh_alloc(global_heap_allocator, sizeof(*job));
+ job->func = func;
+ job->job_data = job_data;
+
+ Entity ent;
+ ent.type = Entity_Type_Job;
+ ent.state = Entity_State_Check_Types;
+ ent.job_data = job;
+
+ entity_heap_insert(entities, ent);
+}
+
// NOTE(Brendan Hansen): Uses the entity heap in the context structure
void add_entities_for_node(bh_arr(Entity *) *target_arr, AstNode* node, Scope* scope, Package* package) {
#define ENTITY_INSERT(_ent) \
errors_enabled = 0;
}
+b32 onyx_errors_are_enabled() {
+ return errors_enabled;
+}
+
b32 onyx_has_errors() {
bh_arr_each(OnyxError, err, errors.errors) {
if (err->rank >= Error_Waiting_On) return 1;
else if (parse_possible_directive(parser, "unquote")) {
AstDirectiveInsert* insert = make_node(AstDirectiveInsert, Ast_Kind_Directive_Insert);
insert->token = parser->curr - 1;
+
+ // Parsing calls is disabled here for the potential future feature
+ // of using a call-like syntax for passing "parameters" to inserted
+ // code blocks. Something like `#unquote foo(x, y)`. This would require
+ // different parsing than the normal call so it would just be detected
+ // here manually. Also, it does not hurt to have this here, because there
+ // is currently no way to dynamically get a code block to insert from
+ // a call; it is impossible to "return" a code block.
+ parser->parse_calls = 0;
insert->code_expr = parse_expression(parser, 0);
+ parser->parse_calls = 1;
retval = (AstTyped *) insert;
break;
// This could be checked elsewhere?
if (locked && local) {
- onyx_report_error(token->pos, Error_Critical, "Only one of '#locked' and '#local' can because use at a time.");
+ onyx_report_error(token->pos, Error_Critical, "Only one of '#locked' and '#local' can be used at a time.");
}
- expect_token(parser, '{');
-
AstOverloadedFunction* ofunc = make_node(AstOverloadedFunction, Ast_Kind_Overloaded_Function);
ofunc->token = token;
ofunc->flags |= Ast_Flag_Comptime;
bh_arr_new(global_heap_allocator, ofunc->overloads, 4);
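+ // An optional '-> T' after '#match' declares an expected return type that
+ // every overload in the group must have.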
+ if (peek_token(0)->type == Token_Type_Right_Arrow) {
+ expect_token(parser, Token_Type_Right_Arrow);
+
+ ofunc->expected_return_node = parse_type(parser);
+ }
+
+ expect_token(parser, '{');
+
u64 precedence = 0;
while (!consume_token_if_next(parser, '}')) {
if (parser->hit_unexpected_token) return ofunc;
bh_arr_each(OverloadOption, overload, ofunc->overloads) {
SYMRES(expression, &overload->option);
}
+
+ if (ofunc->expected_return_node) {
+ SYMRES(type, &ofunc->expected_return_node);
+ }
+
return Symres_Success;
}
bh_imap_free(&all_overloads);
}
+void report_incorrect_overload_expected_type(Type *given, Type *expected, OnyxToken *overload, OnyxToken *group) {
+ onyx_report_error(overload->pos, Error_Critical,
+ "Expected this overload option to return '%s', but instead it returns '%s'.",
+ type_get_name(expected), type_get_name(given));
+
+ onyx_report_error(group->pos, Error_Critical, "Here is where the overloaded function was defined.");
+}
+
+static TypeMatch ensure_overload_returns_correct_type_job(void *raw_data) {
+ OverloadReturnTypeCheck *data = raw_data;
+ Type *expected_type = data->expected_type;
+ AstTyped *node = data->node;
+
+ assert(expected_type && node);
+
+ // If the entity for this node has already been fully processed,
+ // skip the check; the function is likely not used.
+ if (node->entity && node->entity->state >= Entity_State_Finalized) {
+ return TYPE_MATCH_SUCCESS;
+ }
+
+ // HACK: This case should go away, but there were issues with some overloads
+ // never completing their auto return type resolution, likely because they
+ // were not actually used. This creates a problem here because this code
+ // will still wait for them. As a cheap solution, if there is a cycle detected,
+ // return success, even if the types may not match.
+ if (context.cycle_almost_detected > 0) {
+ return TYPE_MATCH_SUCCESS;
+ }
+
+ AstFunction *func = (AstFunction *) node;
+ if (func->kind == Ast_Kind_Macro) {
+ func = (AstFunction *) ((AstMacro *) func)->body;
+ }
+
+ if (!func->type) return TYPE_MATCH_YIELD;
+ if (!func->type->Function.return_type) return TYPE_MATCH_YIELD;
+
+ Type *return_type = func->type->Function.return_type;
+ if (return_type == &type_auto_return) return TYPE_MATCH_YIELD;
+
+ if (!types_are_compatible(return_type, expected_type)) {
+ report_incorrect_overload_expected_type(return_type, expected_type, func->token, data->group);
+ return TYPE_MATCH_FAILED;
+ }
+
+ return TYPE_MATCH_SUCCESS;
+}
+
+void ensure_overload_returns_correct_type(AstTyped *overload, AstOverloadedFunction *group) {
+ // This might not be entirely right as the type might not have been constructed yet, I think?
+ //
+ // Also, as a HACK, this does not check for the correct return type when errors are disabled.
+ // Errors are only disabled when doing something non-permanent, like checking an interface
+ // constraint, so this is a cheap way to tell if that is where we are coming from.
+ //
+ if (group->expected_return_type && onyx_errors_are_enabled()) {
+ OverloadReturnTypeCheck *data = bh_alloc_item(context.ast_alloc, OverloadReturnTypeCheck);
+ data->expected_type = group->expected_return_type;
+ data->node = overload;
+ data->group = group->token;
+
+ entity_heap_add_job(&context.entities, ensure_overload_returns_correct_type_job, data);
+ }
+}
+
+
//
// Macros
if (context.options->generate_tag_file) {
bh_arr_push(context.tag_locations, node);
}
-}
\ No newline at end of file
+}
// assert(hb_ptr.size & Allocated_Flag == Allocated_Flag, "Corrupted heap on free. This could be due to a double free, or using memory past were you allocated it.");
if cast(uintptr) hb_ptr < cast(uintptr) __heap_start {
- log("FREEING STATIC DATA");
+ log(.Error, "Core", "FREEING STATIC DATA");
return;
}
if hb_ptr.size & Allocated_Flag != Allocated_Flag {
- log("INVALID DOUBLE FREE");
+ log(.Error, "Core", "INVALID DOUBLE FREE");
return;
}
if hb_ptr.magic_number != Alloc_Block_Magic_Number {
- log("FREEING INVALID BLOCK");
+ log(.Error, "Core", "FREEING INVALID BLOCK");
return;
}
}
package core.hash
-to_u32 :: #match {
+to_u32 :: #match -> u32 {
// Does this need to have a higher precedence value?
// Because if I wanted to have a custom type as the key
// of a map that only looks at some of the members of the
use core
#doc "Generic procedure for turning something into a string."
-as_str :: #match {}
+as_str :: #match -> str {}
#local HasAsStrMethod :: interface (t: $T) {
{ T.as_str(t) } -> str;
- The stack
- The data section
- The stack size
- - The memory constraints (in pages)
\ No newline at end of file
+ - The memory constraints (in pages)
+
+Add mandatory return type for a matched group.
+ [x] Parse the syntax
+ - `#match -> u32`
+ - `#match #local -> u32 {}`
+ [x] When the return type of an overload is definitely known,
+ check it immediately.
+ [x] Otherwise, when the return type becomes known through polymorph
+ substitution or auto-return evaluation, check that the return
+ type is correct (usage sketch below).
+ [ ] This should have the same rules as the interface
+ return type matching, meaning a polymorphic structure can match an
+ instance of that structure. For example,
+ `#match -> Iterator { x => iter.comp(x, #(it)) }`
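+
+ A rough usage sketch of the feature (the 'to_key' name is hypothetical):
+
+     to_key :: #match -> u32 {
+         (x: i32) -> u32 { return cast(u32) x; },   // accepted: returns u32
+         (x: str) -> str { return x; },             // rejected: reported against the group's expected type
+     }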
+
+Parameterized Code Blocks:
+ As code blocks are being used more and more as an alternative to closures
+ or lambdas, I'm seeing the possibility for more confusion and less readability
+ in code that heavily uses them. In order to make things a little clearer,
+ code blocks are now going to be able to take "parameters" that allow you
+ to explicitly give a name to the important symbol(s) in a code block.
+
+ [ ] Add parsing for this feature
+ - #(...) Code block with no parameters
+ - #|x|(...) Code block with one parameter
+ - #|x, y|(...) Code block with two parameters
+
+ - #quote |x| {...}
+ - #quote |x, y| {...}
+
+ - #unquote code
+ - #unquote code(x)
+ - #unquote code(x, y)
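+
+ A speculative usage sketch combining the forms above (nothing here is
+ implemented yet; 'for_each', 'values', and the exact semantics are assumptions):
+
+     for_each :: macro (arr: [] $T, body: Code) {
+         for v: arr {
+             #unquote body(v);    // 'v' is bound to the code block's parameter
+         }
+     }
+
+     for_each(values, #|x|(println(x)));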
+
+
+