#include <fcntl.h>
#include <unistd.h>
#include <dirent.h>
+ #include <pthread.h>
#endif
#include <stdlib.h>
+// ATOMIC ARENA ALLOCATOR
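+// A thread-safe variant of the arena allocator: every allocator action is
+// serialized through a pthread mutex, so multiple threads may allocate from
+// the same arena concurrently. Individual frees are no-ops; the whole chain
+// of blocks is released at once by bh_atomic_arena_free.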
+typedef struct bh_atomic_arena {
+ bh_allocator backing;
+ ptr first_arena, current_arena;
+ isize size, arena_size; // in bytes
+
+ pthread_mutex_t mutex;
+} bh_atomic_arena;
+
+typedef struct bh__atomic_arena_internal {
+ ptr next_arena;
+ void* data; // Never dereferenced; marks the offset where allocation data begins
+} bh__atomic_arena_internal;
+
+BH_DEF void bh_atomic_arena_init(bh_atomic_arena* alloc, bh_allocator backing, isize arena_size);
+BH_DEF void bh_atomic_arena_free(bh_atomic_arena* alloc);
+BH_DEF bh_allocator bh_atomic_arena_allocator(bh_atomic_arena* alloc);
+BH_DEF BH_ALLOCATOR_PROC(bh_atomic_arena_allocator_proc);
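+
+// Illustrative usage (a sketch; any bh_allocator works as the backing):
+//
+//     bh_atomic_arena arena;
+//     bh_atomic_arena_init(&arena, bh_heap_allocator(), 16 * 1024);
+//     bh_allocator a = bh_atomic_arena_allocator(&arena);
+//     void *p = bh_alloc(a, 128); // safe to call from multiple threads
+//     bh_atomic_arena_free(&arena);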
+
+
// SCRATCH ALLOCATOR
}
+// ATOMIC ARENA ALLOCATOR IMPLEMENTATION
+BH_DEF void bh_atomic_arena_init(bh_atomic_arena* alloc, bh_allocator backing, isize arena_size) {
+ arena_size = bh_max(arena_size, size_of(ptr));
+ ptr data = bh_alloc(backing, arena_size);
+
+ alloc->backing = backing;
+ alloc->arena_size = arena_size;
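+ // The first size_of(ptr) bytes of each block hold the next-block link, so
+ // the bump offset starts just past it.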
+ alloc->size = size_of(ptr);
+ alloc->first_arena = data;
+ alloc->current_arena = data;
+ pthread_mutex_init(&alloc->mutex, NULL);
+
+ ((bh__atomic_arena_internal *)(alloc->first_arena))->next_arena = NULL;
+}
+
+BH_DEF void bh_atomic_arena_free(bh_atomic_arena* alloc) {
+ bh__atomic_arena_internal *walker = (bh__atomic_arena_internal *) alloc->first_arena;
+ bh__atomic_arena_internal *trailer = walker;
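+ // Advance 'walker' past each block before freeing it, so the next link is
+ // read before the block's memory is released.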
+ while (walker != NULL) {
+ walker = walker->next_arena;
+ bh_free(alloc->backing, trailer);
+ trailer = walker;
+ }
+
+ alloc->first_arena = NULL;
+ alloc->current_arena = NULL;
+ alloc->arena_size = 0;
+ alloc->size = 0;
+ pthread_mutex_destroy(&alloc->mutex);
+}
+
+BH_DEF bh_allocator bh_atomic_arena_allocator(bh_atomic_arena* alloc) {
+ return (bh_allocator) {
+ .proc = bh_atomic_arena_allocator_proc,
+ .data = alloc,
+ };
+}
+
+BH_DEF BH_ALLOCATOR_PROC(bh_atomic_arena_allocator_proc) {
+ bh_atomic_arena* alloc_arena = (bh_atomic_arena*) data;
+ pthread_mutex_lock(&alloc_arena->mutex);
+
+ ptr retval = NULL;
+
+ switch (action) {
+ case bh_allocator_action_alloc: {
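+ // bh_align rounds its argument up to a multiple of 'alignment' in place,
+ // keeping both the request size and the current bump offset aligned.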
+ bh_align(size, alignment);
+ bh_align(alloc_arena->size, alignment);
+
+ if (size > alloc_arena->arena_size - size_of(ptr)) {
+ // Request is larger than a block can ever hold; fail the allocation
+ break;
+ }
+
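+ // The current block cannot satisfy the request; chain on a fresh block.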
+ if (alloc_arena->size + size >= alloc_arena->arena_size) {
+ bh__atomic_arena_internal* new_arena = (bh__atomic_arena_internal *) bh_alloc(alloc_arena->backing, alloc_arena->arena_size);
+
+ if (new_arena == NULL) {
+ bh_printf_err("Arena Allocator: couldn't allocate new arena");
+ break;
+ }
+
+ new_arena->next_arena = NULL;
+ ((bh__atomic_arena_internal *)(alloc_arena->current_arena))->next_arena = new_arena;
+ alloc_arena->current_arena = new_arena;
+ alloc_arena->size = size_of(ptr);
+ }
+
+ retval = bh_pointer_add(alloc_arena->current_arena, alloc_arena->size);
+ alloc_arena->size += size;
+ } break;
+
+ case bh_allocator_action_resize: {
+ // Resizing in place is not supported; arena allocations never move or grow
+ } break;
+
+ case bh_allocator_action_free: {
+ // Individual frees are no-ops; memory is reclaimed all at once by
+ // bh_atomic_arena_free
+ } break;
+ }
+
+ pthread_mutex_unlock(&alloc_arena->mutex);
+ return retval;
+}
+
+
+
+
+
// SCRATCH ALLOCATOR IMPLEMENTATION
}
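+ // Sign-extend the decoded value: when the final byte's sign bit (0x40) is
+ // set, fill every bit above 'shift'. The (~(u64) 0x0) form keeps the shift
+ // unsigned, avoiding undefined behavior from shifting a signed ~0.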
if ((shift < size) && (byte & 0x40) != 0) {
- return res | ((~0) << shift);
+ return res | ((~(u64) 0x0) << shift);
}
return res;
store->heap_allocator = bh_heap_allocator();
- bh_arena_init(&store->arena, store->heap_allocator, 1 << 20);
- store->arena_allocator = bh_arena_allocator(&store->arena);
+ bh_atomic_arena_init(&store->arena, store->heap_allocator, 1 << 20);
+ store->arena_allocator = bh_atomic_arena_allocator(&store->arena);
return store;
}
void ovm_store_delete(ovm_store_t *store) {
- bh_arena_free(&store->arena);
+ bh_atomic_arena_free(&store->arena);
free(store);
}
// Should there be another mechanism for this? Or is this the most concise way?
ovm_state_t *ovm_state_new(ovm_engine_t *engine, ovm_program_t *program) {
ovm_store_t *store = engine->store;
- ovm_state_t *state = bh_alloc_item(store->heap_allocator, ovm_state_t);
+ ovm_state_t *state = bh_alloc_item(store->arena_allocator, ovm_state_t);
state->store = store;
state->pc = 0;
bh_arr_free(state->stack_frames);
bh_arr_free(state->registers);
bh_arr_free(state->external_funcs);
-
- bh_free(store->heap_allocator, state);
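+ // The state itself now lives in the store's arena, so there is nothing left
+ // to free here; the arena reclaims it in bulk.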
}
void ovm_state_register_external_func(ovm_state_t *state, i32 idx, void (*func)(void *, ovm_value_t *, ovm_value_t *), void *data) {
ovm_stack_frame_t frame = bh_arr_pop(state->stack_frames);
bh_arr_fastdeleten(state->numbered_values, frame.value_number_count);
- state->value_number_offset = bh_arr_last(state->stack_frames).value_number_base;
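+ // With the last frame popped there is no caller frame to read a base from;
+ // reset the offset rather than indexing an empty array.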
+ if (bh_arr_length(state->stack_frames) == 0) {
+ state->value_number_offset = 0;
+ } else {
+ state->value_number_offset = bh_arr_last(state->stack_frames).value_number_base;
+ }
+
return frame;
}
switch (func.kind) {
case OVM_FUNC_INTERNAL: {
- bh_arr_insert_end(state->numbered_values, 1);
- state->value_number_offset += 1;
-
ovm__func_setup_stack_frame(engine, state, program, func_idx, 0);
fori (i, 0, param_count) {
}
state->pc = func.start_instr;
- ovm_run_code(engine, state, program);
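+ // Run the function to completion; ovm_run_code hands back its return value.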
+ ovm_value_t result = ovm_run_code(engine, state, program);
- state->value_number_offset -= 1;
- return bh_arr_pop(state->numbered_values);
+ return result;
}
case OVM_FUNC_EXTERNAL: {
ovm_value_t result = {0};
ovm_external_func_t external_func = state->external_funcs[func.external_func_idx];
- external_func.native_func(external_func.userdata, state->params, &result);
- bh_arr_fastdeleten(state->params, func.param_count);
+ external_func.native_func(external_func.userdata, params, &result);
ovm__func_teardown_stack_frame(engine, state, program);
return result;
}
-void ovm_run_code(ovm_engine_t *engine, ovm_state_t *state, ovm_program_t *program) {
+ovm_value_t ovm_run_code(ovm_engine_t *engine, ovm_state_t *state, ovm_program_t *program) {
assert(engine);
assert(state);
assert(program);
OVM_OP(OVMI_REM_S, OVM_TYPE_I32, %, i32)
OVM_OP(OVMI_REM_S, OVM_TYPE_I64, %, i64)
- OVM_OP(OVMI_AND, OVM_TYPE_I8 , &, i8)
- OVM_OP(OVMI_AND, OVM_TYPE_I16, &, i16)
- OVM_OP(OVMI_AND, OVM_TYPE_I32, &, i32)
- OVM_OP(OVMI_AND, OVM_TYPE_I64, &, i64)
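+ // Bitwise ops and shifts operate on the unsigned representations; shifting
+ // signed values risks undefined or implementation-defined behavior.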
+ OVM_OP(OVMI_AND, OVM_TYPE_I8 , &, u8)
+ OVM_OP(OVMI_AND, OVM_TYPE_I16, &, u16)
+ OVM_OP(OVMI_AND, OVM_TYPE_I32, &, u32)
+ OVM_OP(OVMI_AND, OVM_TYPE_I64, &, u64)
- OVM_OP(OVMI_OR, OVM_TYPE_I8 , |, i8)
- OVM_OP(OVMI_OR, OVM_TYPE_I16, |, i16)
- OVM_OP(OVMI_OR, OVM_TYPE_I32, |, i32)
- OVM_OP(OVMI_OR, OVM_TYPE_I64, |, i64)
+ OVM_OP(OVMI_OR, OVM_TYPE_I8 , |, u8)
+ OVM_OP(OVMI_OR, OVM_TYPE_I16, |, u16)
+ OVM_OP(OVMI_OR, OVM_TYPE_I32, |, u32)
+ OVM_OP(OVMI_OR, OVM_TYPE_I64, |, u64)
- OVM_OP(OVMI_XOR, OVM_TYPE_I8 , ^, i8)
- OVM_OP(OVMI_XOR, OVM_TYPE_I16, ^, i16)
- OVM_OP(OVMI_XOR, OVM_TYPE_I32, ^, i32)
- OVM_OP(OVMI_XOR, OVM_TYPE_I64, ^, i64)
+ OVM_OP(OVMI_XOR, OVM_TYPE_I8 , ^, u8)
+ OVM_OP(OVMI_XOR, OVM_TYPE_I16, ^, u16)
+ OVM_OP(OVMI_XOR, OVM_TYPE_I32, ^, u32)
+ OVM_OP(OVMI_XOR, OVM_TYPE_I64, ^, u64)
- OVM_OP(OVMI_SHL, OVM_TYPE_I8 , <<, i8)
- OVM_OP(OVMI_SHL, OVM_TYPE_I16, <<, i16)
- OVM_OP(OVMI_SHL, OVM_TYPE_I32, <<, i32)
- OVM_OP(OVMI_SHL, OVM_TYPE_I64, <<, i64)
+ OVM_OP(OVMI_SHL, OVM_TYPE_I8 , <<, u8)
+ OVM_OP(OVMI_SHL, OVM_TYPE_I16, <<, u16)
+ OVM_OP(OVMI_SHL, OVM_TYPE_I32, <<, u32)
+ OVM_OP(OVMI_SHL, OVM_TYPE_I64, <<, u64)
OVM_OP(OVMI_SHR, OVM_TYPE_I8 , >>, u8)
OVM_OP(OVMI_SHR, OVM_TYPE_I16, >>, u16)
}
case OVMI_REG_GET: {
- VAL(instr.r) = ovm_state_register_get(state, instr.a);
+ VAL(instr.r) = state->registers[instr.a];
break;
}
case OVMI_REG_SET: {
- ovm_state_register_set(state, instr.r, VAL(instr.a));
+ state->registers[instr.r] = VAL(instr.a);
break;
}
ovm_value_t val = VAL(instr.a);
ovm_stack_frame_t frame = ovm__func_teardown_stack_frame(engine, state, program);
- if (frame.return_number_value >= 0) {
- VAL(frame.return_number_value) = val;
- }
-
if (bh_arr_length(state->stack_frames) == 0) {
- return;
+ return val;
}
ovm_func_t *new_func = bh_arr_last(state->stack_frames).func;
if (new_func->kind == OVM_FUNC_EXTERNAL) {
- return;
+ return val;
+ }
+
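+ // Only write the return value into the caller's frame when control actually
+ // returns to an internal caller.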
+ if (frame.return_number_value >= 0) {
+ VAL(frame.return_number_value) = val;
}
// printf("Returning from %s to %s: ", frame.func->name, bh_arr_last(state->stack_frames).func->name);
#define OVM_CALL_CODE(func_idx) \
i32 fidx = func_idx; \
ovm_func_t *func = &program->funcs[fidx]; \
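+ /* state->params may also hold arguments for enclosing calls; the callee's \
+    arguments are the last func->param_count entries. */ \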
+ i32 extra_params = bh_arr_length(state->params) - func->param_count; \
if (func->kind == OVM_FUNC_INTERNAL) { \
ovm__func_setup_stack_frame(engine, state, program, fidx, instr.r); \
\
- i32 extra_params = bh_arr_length(state->params) - func->param_count; \
fori (i, 0, func->param_count) { \
VAL(i) = state->params[i + extra_params]; \
} \
\
state->pc = func->start_instr; \
} else { \
- ovm__func_setup_stack_frame(engine, state, program, fidx, 0); \
- \
- i32 extra_params = bh_arr_length(state->params) - func->param_count; \
+ ovm__func_setup_stack_frame(engine, state, program, fidx, instr.r); \
\
ovm_value_t result = {0}; \
ovm_external_func_t external_func = state->external_funcs[func->external_func_idx]; \
- external_func.native_func(external_func.userdata, state->params + extra_params, &result); \
+ external_func.native_func(external_func.userdata, &state->params[extra_params], &result); \
bh_arr_fastdeleten(state->params, func->param_count); \
\
ovm__func_teardown_stack_frame(engine, state, program); \
release_mutex_at_end = false;
}
}
+
+ return ((ovm_value_t) {0});
}
(w).kind = WASM_F64; \
(w).of.f64 = (o).f64; \
break; \
+ \
+ default: \
+ printf("INVALID: %d\n", (o).type); \
+ assert(("invalid ovm value type for conversion", 0)); \
} }
static wasm_trap_t *wasm_to_ovm_func_call_binding(void *vbinding, const wasm_val_vec_t *args, wasm_val_vec_t *res) {
OVM_TO_WASM(params[i], wasm_params.data[i]);
}
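+ // NOTE: this assumes binding->result_count is at most 1; a single stack
+ // slot backs the result vector instead of an alloca'd buffer.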
+ wasm_val_t return_value;
wasm_val_vec_t wasm_results;
- wasm_results.data = alloca(sizeof(wasm_val_t) * binding->result_count);
+ wasm_results.data = &return_value;
wasm_results.size = binding->result_count;
wasm_trap_t *trap = wasm_func_call(binding->func, &wasm_params, &wasm_results);
assert(!trap);
- if (binding->result_count > 0) WASM_TO_OVM(wasm_results.data[0], *res);
+ if (binding->result_count > 0) {
+ assert(wasm_results.data[0].kind == binding->func->inner.type->func.results.data[0]->kind);
+ WASM_TO_OVM(return_value, *res);
+ }
}
static void wasm_memory_init(void *env, ovm_value_t* params, ovm_value_t *res) {
//
// Place imports in their corresponding "bucket"
fori (i, 0, (int) imports->size) {
+ assert(instance->module->imports.data[i]->type->kind == imports->data[i]->type->kind);
+
switch (wasm_extern_kind(imports->data[i])) {
case WASM_EXTERN_FUNC: {
- wasm_func_t *func = wasm_extern_as_func(imports->data[i]);
- bh_arr_push(instance->funcs, func);
-
wasm_importtype_t *importtype = instance->module->imports.data[i];
struct wasm_functype_inner_t *functype = &importtype->type->func;
+ if (!wasm_functype_equals(wasm_externtype_as_functype(importtype->type), wasm_externtype_as_functype(imports->data[i]->type))) {
+ assert(("MISMATCHED FUNCTION TYPE", 0));
+ }
+
+ wasm_func_t *func = wasm_extern_as_func(imports->data[i]);
+ bh_arr_push(instance->funcs, func);
+
ovm_wasm_binding *binding = bh_alloc(ovm_store->arena_allocator, sizeof(*binding));
binding->param_count = functype->params.size;
binding->result_count = functype->results.size;
wasm_instance_t *wasm_instance_new(wasm_store_t *store, const wasm_module_t *module,
const wasm_extern_vec_t *imports, wasm_trap_t **trap) {
- wasm_instance_t *instance = bh_alloc(store->engine->store->arena_allocator, sizeof(*instance));
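+ // The instance is freed individually in wasm_instance_delete, so it must
+ // come from the heap allocator rather than the bulk-freed arena.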
+ wasm_instance_t *instance = bh_alloc(store->engine->store->heap_allocator, sizeof(*instance));
instance->store = store;
instance->module = module;
wasm_extern_vec_delete(&instance->exports);
ovm_state_delete(instance->state);
+ bh_free(instance->store->engine->store->heap_allocator, instance);
}
void wasm_instance_exports(const wasm_instance_t *instance, wasm_extern_vec_t *out) {