--- /dev/null
+package simd
+
+// NOTE: These u8 values must be known at compile time, since they are encoded
+// as immediates of the v128.const instruction.
+v128_const :: proc (b1: u8,  b2: u8,  b3: u8,  b4: u8,
+                    b5: u8,  b6: u8,  b7: u8,  b8: u8,
+                    b9: u8,  b10: u8, b11: u8, b12: u8,
+                    b13: u8, b14: u8, b15: u8, b16: u8) -> v128 #intrinsic ---
+
+i8x16_const :: proc (b1: i8,  b2: i8,  b3: i8,  b4: i8,
+                     b5: i8,  b6: i8,  b7: i8,  b8: i8,
+                     b9: i8,  b10: i8, b11: i8, b12: i8,
+                     b13: i8, b14: i8, b15: i8, b16: i8) -> i8x16 #intrinsic ---
+i16x8_const :: proc (b1: i16, b2: i16, b3: i16, b4: i16,
+                     b5: i16, b6: i16, b7: i16, b8: i16) -> i16x8 #intrinsic ---
+i32x4_const :: proc (b1: i32, b2: i32, b3: i32, b4: i32) -> i32x4 #intrinsic ---
+i64x2_const :: proc (b1: i64, b2: i64) -> i64x2 #intrinsic ---
+f32x4_const :: proc (b1: f32, b2: f32, b3: f32, b4: f32) -> f32x4 #intrinsic ---
+f64x2_const :: proc (b1: f64, b2: f64) -> f64x2 #intrinsic ---
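+// Usage sketch (hypothetical values); each *_const call presumably lowers to a
+// single v128.const instruction, which is why the arguments must be constants:
+//     ones := i32x4_const(1, 1, 1, 1);
+//     half := f64x2_const(0.5, 0.5);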
+
+// NOTE: These u8 values must be known at compile time, since the sixteen lane
+// indices are encoded as immediates of the shuffle instruction.
+i8x16_shuffle :: proc (v: v128,
+                       b1: u8,  b2: u8,  b3: u8,  b4: u8,
+                       b5: u8,  b6: u8,  b7: u8,  b8: u8,
+                       b9: u8,  b10: u8, b11: u8, b12: u8,
+                       b13: u8, b14: u8, b15: u8, b16: u8) -> v128 #intrinsic ---
+
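+// NOTE: The lane index 'l' is likewise an instruction immediate: it must be a
+// compile-time constant, in range for the vector shape.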
+i8x16_extract_lane_s :: proc (v: i8x16, l: u8) -> i8 #intrinsic ---
+i8x16_extract_lane_u :: proc (v: i8x16, l: u8) -> u8 #intrinsic ---
+i8x16_replace_lane :: proc (v: i8x16, l: u8, val: i8) -> i8x16 #intrinsic ---
+i16x8_extract_lane_s :: proc (v: i16x8, l: u8) -> i16 #intrinsic ---
+i16x8_extract_lane_u :: proc (v: i16x8, l: u8) -> u16 #intrinsic ---
+i16x8_replace_lane :: proc (v: i16x8, l: u8, val: i16) -> i16x8 #intrinsic ---
+i32x4_extract_lane :: proc (v: i32x4, l: u8) -> i32 #intrinsic ---
+i32x4_replace_lane :: proc (v: i32x4, l: u8, val: i32) -> i32x4 #intrinsic ---
+i64x2_extract_lane :: proc (v: i64x2, l: u8) -> i64 #intrinsic ---
+i64x2_replace_lane :: proc (v: i64x2, l: u8, val: i64) -> i64x2 #intrinsic ---
+f32x4_extract_lane :: proc (v: f32x4, l: u8) -> f32 #intrinsic ---
+f32x4_replace_lane :: proc (v: f32x4, l: u8, val: f32) -> f32x4 #intrinsic ---
+f64x2_extract_lane :: proc (v: f64x2, l: u8) -> f64 #intrinsic ---
+f64x2_replace_lane :: proc (v: f64x2, l: u8, val: f64) -> f64x2 #intrinsic ---
+
+i8x16_swizzle :: proc (v: v128) -> v128 #intrinsic ---
+i8x16_splat :: proc (val: i8) -> i8x16 #intrinsic ---
+i16x8_splat :: proc (val: i16) -> i16x8 #intrinsic ---
+i32x4_splat :: proc (val: i32) -> i32x4 #intrinsic ---
+i64x2_splat :: proc (val: i64) -> i64x2 #intrinsic ---
+f32x4_splat :: proc (val: f32) -> f32x4 #intrinsic ---
+f64x2_splat :: proc (val: f64) -> f64x2 #intrinsic ---
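+// Usage sketch: 'v := f32x4_splat(1.5);' broadcasts 1.5 into all four lanes.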
+
+i8x16_eq :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_neq :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_lt_s :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_lt_u :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_gt_s :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_gt_u :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_le_s :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_le_u :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_ge_s :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_ge_u :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+
+i16x8_eq :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_neq :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_lt_s :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_lt_u :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_gt_s :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_gt_u :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_le_s :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_le_u :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_ge_s :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_ge_u :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+
+i32x4_eq :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+i32x4_neq :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+i32x4_lt_s :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+i32x4_lt_u :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+i32x4_gt_s :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+i32x4_gt_u :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+i32x4_le_s :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+i32x4_le_u :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+i32x4_ge_s :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+i32x4_ge_u :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+
+f32x4_eq :: proc (a: f32x4, b: f32x4) -> i32x4 #intrinsic ---
+f32x4_neq :: proc (a: f32x4, b: f32x4) -> i32x4 #intrinsic ---
+f32x4_lt :: proc (a: f32x4, b: f32x4) -> i32x4 #intrinsic ---
+f32x4_gt :: proc (a: f32x4, b: f32x4) -> i32x4 #intrinsic ---
+f32x4_le :: proc (a: f32x4, b: f32x4) -> i32x4 #intrinsic ---
+f32x4_ge :: proc (a: f32x4, b: f32x4) -> i32x4 #intrinsic ---
+
+// Comparisons produce an integer lane mask, matching the f32x4 forms above.
+f64x2_eq :: proc (a: f64x2, b: f64x2) -> i64x2 #intrinsic ---
+f64x2_neq :: proc (a: f64x2, b: f64x2) -> i64x2 #intrinsic ---
+f64x2_lt :: proc (a: f64x2, b: f64x2) -> i64x2 #intrinsic ---
+f64x2_gt :: proc (a: f64x2, b: f64x2) -> i64x2 #intrinsic ---
+f64x2_le :: proc (a: f64x2, b: f64x2) -> i64x2 #intrinsic ---
+f64x2_ge :: proc (a: f64x2, b: f64x2) -> i64x2 #intrinsic ---
+
+v128_not :: proc (v: v128) -> v128 #intrinsic ---
+v128_and :: proc (a: v128, b: v128) -> v128 #intrinsic ---
+v128_andnot :: proc (a: v128, b: v128) -> v128 #intrinsic ---
+v128_or :: proc (a: v128, b: v128) -> v128 #intrinsic ---
+v128_xor :: proc (a: v128, b: v128) -> v128 #intrinsic ---
+v128_bitselect :: proc (a: v128, b: v128, c: v128) -> v128 #intrinsic ---
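+// v128_bitselect(a, b, c) computes (a & c) | (b & ~c): each result bit comes
+// from 'a' where 'c' is 1 and from 'b' where 'c' is 0.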
+
+i8x16_abs :: proc (a: i8x16) -> i8x16 #intrinsic ---
+i8x16_neg :: proc (a: i8x16) -> i8x16 #intrinsic ---
+i8x16_any_true :: proc (a: i8x16) -> bool #intrinsic ---
+i8x16_all_true :: proc (a: i8x16) -> bool #intrinsic ---
+i8x16_bitmask :: proc (a: i8x16) -> i32 #intrinsic ---
+i8x16_narrow_i16x8_s :: proc (a: i16x8) -> i8x16 #intrinsic ---
+i8x16_narrow_i16x8_u :: proc (a: i16x8) -> i8x16 #intrinsic ---
+i8x16_shl :: proc (a: i8x16, s: i32) -> i8x16 #intrinsic ---
+i8x16_shr_s :: proc (a: i8x16, s: i32) -> i8x16 #intrinsic ---
+i8x16_shr_u :: proc (a: i8x16, s: i32) -> i8x16 #intrinsic ---
+i8x16_add :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_add_sat_s :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_add_sat_u :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_sub :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_sub_sat_s :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_sub_sat_u :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_min_s :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_min_u :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_max_s :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_max_u :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+i8x16_avgr_u :: proc (a: i8x16, b: i8x16) -> i8x16 #intrinsic ---
+
+i16x8_abs :: proc (a: i16x8) -> i16x8 #intrinsic ---
+i16x8_neg :: proc (a: i16x8) -> i16x8 #intrinsic ---
+i16x8_any_true :: proc (a: i16x8) -> bool #intrinsic ---
+i16x8_all_true :: proc (a: i16x8) -> bool #intrinsic ---
+i16x8_bitmask :: proc (a: i16x8) -> i32 #intrinsic ---
+i16x8_narrow_i32x4_s :: proc (a: i32x4) -> i16x8 #intrinsic ---
+i16x8_narrow_i32x4_u :: proc (a: i32x4) -> i16x8 #intrinsic ---
+i16x8_widen_low_i8x16_s :: proc (a: i8x16) -> i16x8 #intrinsic ---
+i16x8_widen_high_i8x16_s :: proc (a: i8x16) -> i16x8 #intrinsic ---
+i16x8_widen_low_i8x16_u :: proc (a: i8x16) -> i16x8 #intrinsic ---
+i16x8_widen_high_i8x16_u :: proc (a: i8x16) -> i16x8 #intrinsic ---
+i16x8_shl :: proc (a: i16x8, s: i32) -> i16x8 #intrinsic ---
+i16x8_shr_s :: proc (a: i16x8, s: i32) -> i16x8 #intrinsic ---
+i16x8_shr_u :: proc (a: i16x8, s: i32) -> i16x8 #intrinsic ---
+i16x8_add :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_add_sat_s :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_add_sat_u :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_sub :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_sub_sat_s :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_sub_sat_u :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_mul :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_min_s :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_min_u :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_max_s :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_max_u :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+i16x8_avgr_u :: proc (a: i16x8, b: i16x8) -> i16x8 #intrinsic ---
+
+i32x4_abs :: proc (a: i32x4) -> i32x4 #intrinsic ---
+i32x4_neg :: proc (a: i32x4) -> i32x4 #intrinsic ---
+i32x4_any_true :: proc (a: i32x4) -> bool #intrinsic ---
+i32x4_all_true :: proc (a: i32x4) -> bool #intrinsic ---
+i32x4_bitmask :: proc (a: i32x4) -> i32 #intrinsic ---
+i32x4_widen_low_i16x8_s :: proc (a: i16x8) -> i32x4 #intrinsic ---
+i32x4_widen_high_i16x8_s :: proc (a: i16x8) -> i32x4 #intrinsic ---
+i32x4_widen_low_i16x8_u :: proc (a: i16x8) -> i32x4 #intrinsic ---
+i32x4_widen_high_i16x8_u :: proc (a: i16x8) -> i32x4 #intrinsic ---
+i32x4_shl :: proc (a: i32x4, s: i32) -> i32x4 #intrinsic ---
+i32x4_shr_s :: proc (a: i32x4, s: i32) -> i32x4 #intrinsic ---
+i32x4_shr_u :: proc (a: i32x4, s: i32) -> i32x4 #intrinsic ---
+i32x4_add :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+i32x4_sub :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+i32x4_mul :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+i32x4_min_s :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+i32x4_min_u :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+i32x4_max_s :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+i32x4_max_u :: proc (a: i32x4, b: i32x4) -> i32x4 #intrinsic ---
+
+i64x2_neg :: proc (a: i64x2) -> i64x2 #intrinsic ---
+i64x2_shl :: proc (a: i64x2, s: i32) -> i64x2 #intrinsic ---
+i64x2_shr_s :: proc (a: i64x2, s: i32) -> i64x2 #intrinsic ---
+i64x2_shr_u :: proc (a: i64x2, s: i32) -> i64x2 #intrinsic ---
+i64x2_add :: proc (a: i64x2, b: i64x2) -> i64x2 #intrinsic ---
+i64x2_sub :: proc (a: i64x2, b: i64x2) -> i64x2 #intrinsic ---
+i64x2_mul :: proc (a: i64x2, b: i64x2) -> i64x2 #intrinsic ---
+
+f32x4_abs :: proc (a: f32x4) -> f32x4 #intrinsic ---
+f32x4_neg :: proc (a: f32x4) -> f32x4 #intrinsic ---
+f32x4_sqrt :: proc (a: f32x4) -> f32x4 #intrinsic ---
+f32x4_add :: proc (a: f32x4, b: f32x4) -> f32x4 #intrinsic ---
+f32x4_sub :: proc (a: f32x4, b: f32x4) -> f32x4 #intrinsic ---
+f32x4_mul :: proc (a: f32x4, b: f32x4) -> f32x4 #intrinsic ---
+f32x4_div :: proc (a: f32x4, b: f32x4) -> f32x4 #intrinsic ---
+f32x4_min :: proc (a: f32x4, b: f32x4) -> f32x4 #intrinsic ---
+f32x4_max :: proc (a: f32x4, b: f32x4) -> f32x4 #intrinsic ---
+
+f64x2_abs :: proc (a: f64x2) -> f64x2 #intrinsic ---
+f64x2_neg :: proc (a: f64x2) -> f64x2 #intrinsic ---
+f64x2_sqrt :: proc (a: f64x2) -> f64x2 #intrinsic ---
+f64x2_add :: proc (a: f64x2, b: f64x2) -> f64x2 #intrinsic ---
+f64x2_sub :: proc (a: f64x2, b: f64x2) -> f64x2 #intrinsic ---
+f64x2_mul :: proc (a: f64x2, b: f64x2) -> f64x2 #intrinsic ---
+f64x2_div :: proc (a: f64x2, b: f64x2) -> f64x2 #intrinsic ---
+f64x2_min :: proc (a: f64x2, b: f64x2) -> f64x2 #intrinsic ---
+f64x2_max :: proc (a: f64x2, b: f64x2) -> f64x2 #intrinsic ---
+
+// NOTE: The ordering of these conversions may be backwards; verify the _s/_u
+// variants against the WASM SIMD opcode table before relying on them.
+i32x4_trunc_sat_f32x4_s :: proc (v: f32x4) -> i32x4 #intrinsic ---
+i32x4_trunc_sat_f32x4_u :: proc (v: f32x4) -> i32x4 #intrinsic ---
+f32x4_convert_i32x4_s :: proc (v: i32x4) -> f32x4 #intrinsic ---
+f32x4_convert_i32x4_u :: proc (v: i32x4) -> f32x4 #intrinsic ---
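+// trunc_sat converts each float lane to an integer, saturating at the integer
+// type's bounds and mapping NaN to 0; convert produces float lanes from
+// integer lanes (the _u variants treat the lanes as unsigned).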
ONYX_INTRINSIC_F64_TRUNC, ONYX_INTRINSIC_F64_NEAREST,
ONYX_INTRINSIC_F64_MIN, ONYX_INTRINSIC_F64_MAX,
ONYX_INTRINSIC_F64_COPYSIGN,
+
+
+
+ ONYX_INTRINSIC_V128_CONST,
+ ONYX_INTRINSIC_I8X16_CONST, ONYX_INTRINSIC_I16X8_CONST,
+ ONYX_INTRINSIC_I32X4_CONST, ONYX_INTRINSIC_I64X2_CONST,
+ ONYX_INTRINSIC_F32X4_CONST, ONYX_INTRINSIC_F64X2_CONST,
+ ONYX_INTRINSIC_I8X16_SHUFFLE,
+
+ ONYX_INTRINSIC_I8X16_EXTRACT_LANE_S, ONYX_INTRINSIC_I8X16_EXTRACT_LANE_U, ONYX_INTRINSIC_I8X16_REPLACE_LANE,
+ ONYX_INTRINSIC_I16X8_EXTRACT_LANE_S, ONYX_INTRINSIC_I16X8_EXTRACT_LANE_U, ONYX_INTRINSIC_I16X8_REPLACE_LANE,
+ ONYX_INTRINSIC_I32X4_EXTRACT_LANE, ONYX_INTRINSIC_I32X4_REPLACE_LANE,
+ ONYX_INTRINSIC_I64X2_EXTRACT_LANE, ONYX_INTRINSIC_I64X2_REPLACE_LANE,
+ ONYX_INTRINSIC_F32X4_EXTRACT_LANE, ONYX_INTRINSIC_F32X4_REPLACE_LANE,
+ ONYX_INTRINSIC_F64X2_EXTRACT_LANE, ONYX_INTRINSIC_F64X2_REPLACE_LANE,
+
+ ONYX_INTRINSIC_I8X16_SWIZZLE,
+ ONYX_INTRINSIC_I8X16_SPLAT, ONYX_INTRINSIC_I16X8_SPLAT,
+ ONYX_INTRINSIC_I32X4_SPLAT, ONYX_INTRINSIC_I64X2_SPLAT,
+ ONYX_INTRINSIC_F32X4_SPLAT, ONYX_INTRINSIC_F64X2_SPLAT,
+
+ ONYX_INTRINSIC_I8X16_EQ, ONYX_INTRINSIC_I8X16_NEQ,
+ ONYX_INTRINSIC_I8X16_LT_S, ONYX_INTRINSIC_I8X16_LT_U,
+ ONYX_INTRINSIC_I8X16_GT_S, ONYX_INTRINSIC_I8X16_GT_U,
+ ONYX_INTRINSIC_I8X16_LE_S, ONYX_INTRINSIC_I8X16_LE_U,
+ ONYX_INTRINSIC_I8X16_GE_S, ONYX_INTRINSIC_I8X16_GE_U,
+
+ ONYX_INTRINSIC_I16X8_EQ, ONYX_INTRINSIC_I16X8_NEQ,
+ ONYX_INTRINSIC_I16X8_LT_S, ONYX_INTRINSIC_I16X8_LT_U,
+ ONYX_INTRINSIC_I16X8_GT_S, ONYX_INTRINSIC_I16X8_GT_U,
+ ONYX_INTRINSIC_I16X8_LE_S, ONYX_INTRINSIC_I16X8_LE_U,
+ ONYX_INTRINSIC_I16X8_GE_S, ONYX_INTRINSIC_I16X8_GE_U,
+
+ ONYX_INTRINSIC_I32X4_EQ, ONYX_INTRINSIC_I32X4_NEQ,
+ ONYX_INTRINSIC_I32X4_LT_S, ONYX_INTRINSIC_I32X4_LT_U,
+ ONYX_INTRINSIC_I32X4_GT_S, ONYX_INTRINSIC_I32X4_GT_U,
+ ONYX_INTRINSIC_I32X4_LE_S, ONYX_INTRINSIC_I32X4_LE_U,
+ ONYX_INTRINSIC_I32X4_GE_S, ONYX_INTRINSIC_I32X4_GE_U,
+
+ ONYX_INTRINSIC_F32X4_EQ, ONYX_INTRINSIC_F32X4_NEQ,
+ ONYX_INTRINSIC_F32X4_LT, ONYX_INTRINSIC_F32X4_GT,
+ ONYX_INTRINSIC_F32X4_LE, ONYX_INTRINSIC_F32X4_GE,
+
+ ONYX_INTRINSIC_F64X2_EQ, ONYX_INTRINSIC_F64X2_NEQ,
+ ONYX_INTRINSIC_F64X2_LT, ONYX_INTRINSIC_F64X2_GT,
+ ONYX_INTRINSIC_F64X2_LE, ONYX_INTRINSIC_F64X2_GE,
+
+ ONYX_INTRINSIC_V128_NOT, ONYX_INTRINSIC_V128_AND, ONYX_INTRINSIC_V128_ANDNOT,
+ ONYX_INTRINSIC_V128_OR, ONYX_INTRINSIC_V128_XOR, ONYX_INTRINSIC_V128_BITSELECT,
+
+ ONYX_INTRINSIC_I8X16_ABS, ONYX_INTRINSIC_I8X16_NEG,
+ ONYX_INTRINSIC_I8X16_ANY_TRUE, ONYX_INTRINSIC_I8X16_ALL_TRUE,
+ ONYX_INTRINSIC_I8X16_BITMASK,
+ ONYX_INTRINSIC_I8X16_NARROW_I16X8_S, ONYX_INTRINSIC_I8X16_NARROW_I16X8_U,
+ ONYX_INTRINSIC_I8X16_SHL, ONYX_INTRINSIC_I8X16_SHR_S, ONYX_INTRINSIC_I8X16_SHR_U,
+ ONYX_INTRINSIC_I8X16_ADD, ONYX_INTRINSIC_I8X16_ADD_SAT_S, ONYX_INTRINSIC_I8X16_ADD_SAT_U,
+ ONYX_INTRINSIC_I8X16_SUB, ONYX_INTRINSIC_I8X16_SUB_SAT_S, ONYX_INTRINSIC_I8X16_SUB_SAT_U,
+ ONYX_INTRINSIC_I8X16_MIN_S, ONYX_INTRINSIC_I8X16_MIN_U,
+ ONYX_INTRINSIC_I8X16_MAX_S, ONYX_INTRINSIC_I8X16_MAX_U,
+ ONYX_INTRINSIC_I8X16_AVGR_U,
+
+ ONYX_INTRINSIC_I16X8_ABS, ONYX_INTRINSIC_I16X8_NEG,
+ ONYX_INTRINSIC_I16X8_ANY_TRUE, ONYX_INTRINSIC_I16X8_ALL_TRUE,
+ ONYX_INTRINSIC_I16X8_BITMASK,
+ ONYX_INTRINSIC_I16X8_NARROW_I32X4_S, ONYX_INTRINSIC_I16X8_NARROW_I32X4_U,
+ ONYX_INTRINSIC_I16X8_WIDEN_LOW_I8X16_S, ONYX_INTRINSIC_I16X8_WIDEN_HIGH_I8X16_S,
+ ONYX_INTRINSIC_I16X8_WIDEN_LOW_I8X16_U, ONYX_INTRINSIC_I16X8_WIDEN_HIGH_I8X16_U,
+ ONYX_INTRINSIC_I16X8_SHL, ONYX_INTRINSIC_I16X8_SHR_S, ONYX_INTRINSIC_I16X8_SHR_U,
+ ONYX_INTRINSIC_I16X8_ADD, ONYX_INTRINSIC_I16X8_ADD_SAT_S, ONYX_INTRINSIC_I16X8_ADD_SAT_U,
+ ONYX_INTRINSIC_I16X8_SUB, ONYX_INTRINSIC_I16X8_SUB_SAT_S, ONYX_INTRINSIC_I16X8_SUB_SAT_U,
+ ONYX_INTRINSIC_I16X8_MUL,
+ ONYX_INTRINSIC_I16X8_MIN_S, ONYX_INTRINSIC_I16X8_MIN_U,
+ ONYX_INTRINSIC_I16X8_MAX_S, ONYX_INTRINSIC_I16X8_MAX_U,
+ ONYX_INTRINSIC_I16X8_AVGR_U,
+
+ ONYX_INTRINSIC_I32X4_ABS, ONYX_INTRINSIC_I32X4_NEG,
+ ONYX_INTRINSIC_I32X4_ANY_TRUE, ONYX_INTRINSIC_I32X4_ALL_TRUE,
+ ONYX_INTRINSIC_I32X4_BITMASK,
+ ONYX_INTRINSIC_I32X4_WIDEN_LOW_I16X8_S, ONYX_INTRINSIC_I32X4_WIDEN_HIGH_I16X8_S,
+ ONYX_INTRINSIC_I32X4_WIDEN_LOW_I16X8_U, ONYX_INTRINSIC_I32X4_WIDEN_HIGH_I16X8_U,
+ ONYX_INTRINSIC_I32X4_SHL, ONYX_INTRINSIC_I32X4_SHR_S, ONYX_INTRINSIC_I32X4_SHR_U,
+ ONYX_INTRINSIC_I32X4_ADD, ONYX_INTRINSIC_I32X4_SUB, ONYX_INTRINSIC_I32X4_MUL,
+ ONYX_INTRINSIC_I32X4_MIN_S, ONYX_INTRINSIC_I32X4_MIN_U,
+ ONYX_INTRINSIC_I32X4_MAX_S, ONYX_INTRINSIC_I32X4_MAX_U,
+
+ ONYX_INTRINSIC_I64X2_NEG, ONYX_INTRINSIC_I64X2_SHL,
+ ONYX_INTRINSIC_I64X2_SHR_S, ONYX_INTRINSIC_I64X2_SHR_U,
+ ONYX_INTRINSIC_I64X2_ADD, ONYX_INTRINSIC_I64X2_SUB, ONYX_INTRINSIC_I64X2_MUL,
+
+ ONYX_INTRINSIC_F32X4_ABS, ONYX_INTRINSIC_F32X4_NEG, ONYX_INTRINSIC_F32X4_SQRT,
+ ONYX_INTRINSIC_F32X4_ADD, ONYX_INTRINSIC_F32X4_SUB,
+ ONYX_INTRINSIC_F32X4_MUL, ONYX_INTRINSIC_F32X4_DIV,
+ ONYX_INTRINSIC_F32X4_MIN, ONYX_INTRINSIC_F32X4_MAX,
+
+ ONYX_INTRINSIC_F64X2_ABS, ONYX_INTRINSIC_F64X2_NEG, ONYX_INTRINSIC_F64X2_SQRT,
+ ONYX_INTRINSIC_F64X2_ADD, ONYX_INTRINSIC_F64X2_SUB,
+ ONYX_INTRINSIC_F64X2_MUL, ONYX_INTRINSIC_F64X2_DIV,
+ ONYX_INTRINSIC_F64X2_MIN, ONYX_INTRINSIC_F64X2_MAX,
+
+ ONYX_INTRINSIC_I32X4_TRUNC_SAT_F32X4_S,
+ ONYX_INTRINSIC_I32X4_TRUNC_SAT_F32X4_U,
+ ONYX_INTRINSIC_F32X4_CONVERT_I32X4_S,
+ ONYX_INTRINSIC_F32X4_CONVERT_I32X4_U,
} OnyxIntrinsic;
typedef enum CallingConvention {
extern const BuiltinSymbol builtin_symbols[];
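+// Maps an intrinsic's source-level name to its OnyxIntrinsic enum value.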
+typedef struct IntrinsicMap {
+ char* name;
+ OnyxIntrinsic intrinsic;
+} IntrinsicMap;
+
+extern bh_table(OnyxIntrinsic) intrinsic_table;
+
void initialize_builtins(bh_allocator a, ProgramInfo* prog);
Basic_Kind_I64X2,
Basic_Kind_F32X4,
Basic_Kind_F64X2,
+ Basic_Kind_V128,
};
enum BasicFlag {
b32 type_is_integer(Type* type);
b32 type_is_numeric(Type* type);
b32 type_is_compound(Type* type);
+b32 type_is_simd(Type* type);
b32 type_results_in_void(Type* type);
b32 type_is_array_accessible(Type* type);
b32 type_is_structlike(Type* type);
WasmType param_types[];
} WasmFuncType;
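+// WASM SIMD instructions occupy the 0xFD-prefixed opcode space. Tagging their
+// enum values with this bit lets the binary emitter recognize them and write
+// the 0xFD prefix followed by the ULEB128-encoded sub-opcode.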
+#define SIMD_INSTR_MASK 0x10000
typedef enum WasmInstructionType {
WI_UNREACHABLE = 0x00,
WI_I64_EXTEND_8_S = 0xC2,
WI_I64_EXTEND_16_S = 0xC3,
WI_I64_EXTEND_32_S = 0xC4,
+
+
+
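+ // Sub-opcode values follow the WASM SIMD proposal's opcode table; gaps are
+ // instructions not (yet) exposed as intrinsics.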
+ WI_V128_LOAD = SIMD_INSTR_MASK | 0,
+ WI_V128_STORE = SIMD_INSTR_MASK | 11,
+
+ WI_V128_CONST = SIMD_INSTR_MASK | 12,
+
+ WI_I8X16_SHUFFLE = SIMD_INSTR_MASK | 13,
+
+ WI_I8X16_EXTRACT_LANE_S = SIMD_INSTR_MASK | 21,
+ WI_I8X16_EXTRACT_LANE_U = SIMD_INSTR_MASK | 22,
+ WI_I8X16_REPLACE_LANE = SIMD_INSTR_MASK | 23,
+ WI_I16X8_EXTRACT_LANE_S = SIMD_INSTR_MASK | 24,
+ WI_I16X8_EXTRACT_LANE_U = SIMD_INSTR_MASK | 25,
+ WI_I16X8_REPLACE_LANE = SIMD_INSTR_MASK | 26,
+ WI_I32X4_EXTRACT_LANE = SIMD_INSTR_MASK | 27,
+ WI_I32X4_REPLACE_LANE = SIMD_INSTR_MASK | 28,
+ WI_I64X2_EXTRACT_LANE = SIMD_INSTR_MASK | 29,
+ WI_I64X2_REPLACE_LANE = SIMD_INSTR_MASK | 30,
+ WI_F32X4_EXTRACT_LANE = SIMD_INSTR_MASK | 31,
+ WI_F32X4_REPLACE_LANE = SIMD_INSTR_MASK | 32,
+ WI_F64X2_EXTRACT_LANE = SIMD_INSTR_MASK | 33,
+ WI_F64X2_REPLACE_LANE = SIMD_INSTR_MASK | 34,
+
+ WI_I8X16_SWIZZLE = SIMD_INSTR_MASK | 14,
+ WI_I8X16_SPLAT = SIMD_INSTR_MASK | 15,
+ WI_I16X8_SPLAT = SIMD_INSTR_MASK | 16,
+ WI_I32X4_SPLAT = SIMD_INSTR_MASK | 17,
+ WI_I64X2_SPLAT = SIMD_INSTR_MASK | 18,
+ WI_F32X4_SPLAT = SIMD_INSTR_MASK | 19,
+ WI_F64X2_SPLAT = SIMD_INSTR_MASK | 20,
+
+ WI_I8X16_EQ = SIMD_INSTR_MASK | 35,
+ WI_I8X16_NEQ = SIMD_INSTR_MASK | 36,
+ WI_I8X16_LT_S = SIMD_INSTR_MASK | 37,
+ WI_I8X16_LT_U = SIMD_INSTR_MASK | 38,
+ WI_I8X16_GT_S = SIMD_INSTR_MASK | 39,
+ WI_I8X16_GT_U = SIMD_INSTR_MASK | 40,
+ WI_I8X16_LE_S = SIMD_INSTR_MASK | 41,
+ WI_I8X16_LE_U = SIMD_INSTR_MASK | 42,
+ WI_I8X16_GE_S = SIMD_INSTR_MASK | 43,
+ WI_I8X16_GE_U = SIMD_INSTR_MASK | 44,
+
+ WI_I16X8_EQ = SIMD_INSTR_MASK | 45,
+ WI_I16X8_NEQ = SIMD_INSTR_MASK | 46,
+ WI_I16X8_LT_S = SIMD_INSTR_MASK | 47,
+ WI_I16X8_LT_U = SIMD_INSTR_MASK | 48,
+ WI_I16X8_GT_S = SIMD_INSTR_MASK | 49,
+ WI_I16X8_GT_U = SIMD_INSTR_MASK | 50,
+ WI_I16X8_LE_S = SIMD_INSTR_MASK | 51,
+ WI_I16X8_LE_U = SIMD_INSTR_MASK | 52,
+ WI_I16X8_GE_S = SIMD_INSTR_MASK | 53,
+ WI_I16X8_GE_U = SIMD_INSTR_MASK | 54,
+
+ WI_I32X4_EQ = SIMD_INSTR_MASK | 55,
+ WI_I32X4_NEQ = SIMD_INSTR_MASK | 56,
+ WI_I32X4_LT_S = SIMD_INSTR_MASK | 57,
+ WI_I32X4_LT_U = SIMD_INSTR_MASK | 58,
+ WI_I32X4_GT_S = SIMD_INSTR_MASK | 59,
+ WI_I32X4_GT_U = SIMD_INSTR_MASK | 60,
+ WI_I32X4_LE_S = SIMD_INSTR_MASK | 61,
+ WI_I32X4_LE_U = SIMD_INSTR_MASK | 62,
+ WI_I32X4_GE_S = SIMD_INSTR_MASK | 63,
+ WI_I32X4_GE_U = SIMD_INSTR_MASK | 64,
+
+ WI_F32X4_EQ = SIMD_INSTR_MASK | 65,
+ WI_F32X4_NEQ = SIMD_INSTR_MASK | 66,
+ WI_F32X4_LT = SIMD_INSTR_MASK | 67,
+ WI_F32X4_GT = SIMD_INSTR_MASK | 68,
+ WI_F32X4_LE = SIMD_INSTR_MASK | 69,
+ WI_F32X4_GE = SIMD_INSTR_MASK | 70,
+
+ WI_F64X2_EQ = SIMD_INSTR_MASK | 71,
+ WI_F64X2_NEQ = SIMD_INSTR_MASK | 72,
+ WI_F64X2_LT = SIMD_INSTR_MASK | 73,
+ WI_F64X2_GT = SIMD_INSTR_MASK | 74,
+ WI_F64X2_LE = SIMD_INSTR_MASK | 75,
+ WI_F64X2_GE = SIMD_INSTR_MASK | 76,
+
+ WI_V128_NOT = SIMD_INSTR_MASK | 77,
+ WI_V128_AND = SIMD_INSTR_MASK | 78,
+ WI_V128_ANDNOT = SIMD_INSTR_MASK | 79,
+ WI_V128_OR = SIMD_INSTR_MASK | 80,
+ WI_V128_XOR = SIMD_INSTR_MASK | 81,
+ WI_V128_BITSELECT = SIMD_INSTR_MASK | 82,
+
+ WI_I8X16_ABS = SIMD_INSTR_MASK | 96,
+ WI_I8X16_NEG = SIMD_INSTR_MASK | 97,
+ WI_I8X16_ANY_TRUE = SIMD_INSTR_MASK | 98,
+ WI_I8X16_ALL_TRUE = SIMD_INSTR_MASK | 99,
+ WI_I8X16_BITMASK = SIMD_INSTR_MASK | 100,
+ WI_I8X16_NARROW_I16X8_S = SIMD_INSTR_MASK | 101,
+ WI_I8X16_NARROW_I16X8_U = SIMD_INSTR_MASK | 102,
+ WI_I8X16_SHL = SIMD_INSTR_MASK | 107,
+ WI_I8X16_SHR_S = SIMD_INSTR_MASK | 108,
+ WI_I8X16_SHR_U = SIMD_INSTR_MASK | 109,
+ WI_I8X16_ADD = SIMD_INSTR_MASK | 110,
+ WI_I8X16_ADD_SAT_S = SIMD_INSTR_MASK | 111,
+ WI_I8X16_ADD_SAT_U = SIMD_INSTR_MASK | 112,
+ WI_I8X16_SUB = SIMD_INSTR_MASK | 113,
+ WI_I8X16_SUB_SAT_S = SIMD_INSTR_MASK | 114,
+ WI_I8X16_SUB_SAT_U = SIMD_INSTR_MASK | 115,
+ WI_I8X16_MIN_S = SIMD_INSTR_MASK | 118,
+ WI_I8X16_MIN_U = SIMD_INSTR_MASK | 119,
+ WI_I8X16_MAX_S = SIMD_INSTR_MASK | 120,
+ WI_I8X16_MAX_U = SIMD_INSTR_MASK | 121,
+ WI_I8X16_AVGR_U = SIMD_INSTR_MASK | 123,
+
+ WI_I16X8_ABS = SIMD_INSTR_MASK | 128,
+ WI_I16X8_NEG = SIMD_INSTR_MASK | 129,
+ WI_I16X8_ANY_TRUE = SIMD_INSTR_MASK | 130,
+ WI_I16X8_ALL_TRUE = SIMD_INSTR_MASK | 131,
+ WI_I16X8_BITMASK = SIMD_INSTR_MASK | 132,
+ WI_I16X8_NARROW_I32X4_S = SIMD_INSTR_MASK | 133,
+ WI_I16X8_NARROW_I32X4_U = SIMD_INSTR_MASK | 134,
+ WI_I16X8_WIDEN_LOW_I8X16_S = SIMD_INSTR_MASK | 135,
+ WI_I16X8_WIDEN_HIGH_I8X16_S = SIMD_INSTR_MASK | 136,
+ WI_I16X8_WIDEN_LOW_I8X16_U = SIMD_INSTR_MASK | 137,
+ WI_I16X8_WIDEN_HIGH_I8X16_U = SIMD_INSTR_MASK | 138,
+ WI_I16X8_SHL = SIMD_INSTR_MASK | 139,
+ WI_I16X8_SHR_S = SIMD_INSTR_MASK | 140,
+ WI_I16X8_SHR_U = SIMD_INSTR_MASK | 141,
+ WI_I16X8_ADD = SIMD_INSTR_MASK | 142,
+ WI_I16X8_ADD_SAT_S = SIMD_INSTR_MASK | 143,
+ WI_I16X8_ADD_SAT_U = SIMD_INSTR_MASK | 144,
+ WI_I16X8_SUB = SIMD_INSTR_MASK | 145,
+ WI_I16X8_SUB_SAT_S = SIMD_INSTR_MASK | 146,
+ WI_I16X8_SUB_SAT_U = SIMD_INSTR_MASK | 147,
+ WI_I16X8_MUL = SIMD_INSTR_MASK | 149,
+ WI_I16X8_MIN_S = SIMD_INSTR_MASK | 150,
+ WI_I16X8_MIN_U = SIMD_INSTR_MASK | 151,
+ WI_I16X8_MAX_S = SIMD_INSTR_MASK | 152,
+ WI_I16X8_MAX_U = SIMD_INSTR_MASK | 153,
+ WI_I16X8_AVGR_U = SIMD_INSTR_MASK | 155,
+
+ WI_I32X4_ABS = SIMD_INSTR_MASK | 160,
+ WI_I32X4_NEG = SIMD_INSTR_MASK | 161,
+ WI_I32X4_ANY_TRUE = SIMD_INSTR_MASK | 162,
+ WI_I32X4_ALL_TRUE = SIMD_INSTR_MASK | 163,
+ WI_I32X4_BITMASK = SIMD_INSTR_MASK | 164,
+ WI_I32X4_WIDEN_LOW_I16X8_S = SIMD_INSTR_MASK | 167,
+ WI_I32X4_WIDEN_HIGH_I16X8_S = SIMD_INSTR_MASK | 168,
+ WI_I32X4_WIDEN_LOW_I16X8_U = SIMD_INSTR_MASK | 169,
+ WI_I32X4_WIDEN_HIGH_I16X8_U = SIMD_INSTR_MASK | 170,
+ WI_I32X4_SHL = SIMD_INSTR_MASK | 171,
+ WI_I32X4_SHR_S = SIMD_INSTR_MASK | 172,
+ WI_I32X4_SHR_U = SIMD_INSTR_MASK | 173,
+ WI_I32X4_ADD = SIMD_INSTR_MASK | 174,
+ WI_I32X4_SUB = SIMD_INSTR_MASK | 177,
+ WI_I32X4_MUL = SIMD_INSTR_MASK | 181,
+ WI_I32X4_MIN_S = SIMD_INSTR_MASK | 182,
+ WI_I32X4_MIN_U = SIMD_INSTR_MASK | 183,
+ WI_I32X4_MAX_S = SIMD_INSTR_MASK | 184,
+ WI_I32X4_MAX_U = SIMD_INSTR_MASK | 185,
+
+ WI_I64X2_NEG = SIMD_INSTR_MASK | 193,
+ WI_I64X2_SHL = SIMD_INSTR_MASK | 203,
+ WI_I64X2_SHR_S = SIMD_INSTR_MASK | 204,
+ WI_I64X2_SHR_U = SIMD_INSTR_MASK | 205,
+ WI_I64X2_ADD = SIMD_INSTR_MASK | 206,
+ WI_I64X2_SUB = SIMD_INSTR_MASK | 209,
+ WI_I64X2_MUL = SIMD_INSTR_MASK | 213,
+
+ WI_F32X4_ABS = SIMD_INSTR_MASK | 224,
+ WI_F32X4_NEG = SIMD_INSTR_MASK | 225,
+ WI_F32X4_SQRT = SIMD_INSTR_MASK | 227,
+ WI_F32X4_ADD = SIMD_INSTR_MASK | 228,
+ WI_F32X4_SUB = SIMD_INSTR_MASK | 229,
+ WI_F32X4_MUL = SIMD_INSTR_MASK | 230,
+ WI_F32X4_DIV = SIMD_INSTR_MASK | 231,
+ WI_F32X4_MIN = SIMD_INSTR_MASK | 232,
+ WI_F32X4_MAX = SIMD_INSTR_MASK | 233,
+
+ WI_F64X2_ABS = SIMD_INSTR_MASK | 236,
+ WI_F64X2_NEG = SIMD_INSTR_MASK | 237,
+ WI_F64X2_SQRT = SIMD_INSTR_MASK | 239,
+ WI_F64X2_ADD = SIMD_INSTR_MASK | 240,
+ WI_F64X2_SUB = SIMD_INSTR_MASK | 241,
+ WI_F64X2_MUL = SIMD_INSTR_MASK | 242,
+ WI_F64X2_DIV = SIMD_INSTR_MASK | 243,
+ WI_F64X2_MIN = SIMD_INSTR_MASK | 244,
+ WI_F64X2_MAX = SIMD_INSTR_MASK | 245,
+
+ WI_I32X4_TRUNC_SAT_F32X4_S = SIMD_INSTR_MASK | 248,
+ WI_I32X4_TRUNC_SAT_F32X4_U = SIMD_INSTR_MASK | 249,
+ WI_F32X4_CONVERT_I32X4_S = SIMD_INSTR_MASK | 250,
+ WI_F32X4_CONVERT_I32X4_U = SIMD_INSTR_MASK | 251,
} WasmInstructionType;
typedef union {
- match: '\b(bool|void|i8|u8|i16|u16|i32|u32|i64|u64|f32|f64|rawptr)\b'
scope: keyword.control.onyx
- - match: '\b(i8x16|i16x8|i32x4|i64x2|f32x4|f64x2)\b'
+ - match: '\b(i8x16|i16x8|i32x4|i64x2|f32x4|f64x2|v128)\b'
scope: keyword.control.onyx
- match: '\b(true|false|null|context)\b'
syn keyword onyxType f32
syn keyword onyxType f64
syn keyword onyxType rawptr
-syn keyword onyxType i8x16
-syn keyword onyxType i16x8
-syn keyword onyxType i32x4
-syn keyword onyxType i64x2
-syn keyword onyxType f32x4
-syn keyword onyxType f64x2
+syn keyword onyxType i8x16 i16x8 i32x4 i64x2 f32x4 f64x2 v128
syn keyword onyxConstant true false null
package main
#include_file "core/std/wasi"
+#include_file "core/simd_intrinsics"
use package core
use package simd
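+
+// dummy exercises v128 load/store code generation through a pointer ('calloc'
+// here is assumed to come from the core package).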
+dummy :: proc (p: ^i32x4) {
+ v := cast(^i32x4) calloc(sizeof i32x4);
+ y := *p;
+
+ *v = *p;
+}
+
main :: proc (args: [] cstring) {
- x : i32x4;
+ x := i32x4_splat(10);
+ d : i8x16;
+ dummy(^x);
}
AstBasicType basic_type_f64 = { Ast_Kind_Basic_Type, 0, NULL, "f64" , &basic_types[Basic_Kind_F64] };
AstBasicType basic_type_rawptr = { Ast_Kind_Basic_Type, 0, NULL, "rawptr", &basic_types[Basic_Kind_Rawptr] };
-static OnyxToken simd_token = { Token_Type_Symbol, 14, "simd intrinsic", { 0 } };
+static OnyxToken simd_token = { Token_Type_Symbol, 0, "", { 0 } };
AstBasicType basic_type_i8x16 = { Ast_Kind_Basic_Type, 0, &simd_token, "i8x16", &basic_types[Basic_Kind_I8X16] };
AstBasicType basic_type_i16x8 = { Ast_Kind_Basic_Type, 0, &simd_token, "i16x8", &basic_types[Basic_Kind_I16X8] };
AstBasicType basic_type_i32x4 = { Ast_Kind_Basic_Type, 0, &simd_token, "i32x4", &basic_types[Basic_Kind_I32X4] };
AstBasicType basic_type_i64x2 = { Ast_Kind_Basic_Type, 0, &simd_token, "i64x2", &basic_types[Basic_Kind_I64X2] };
AstBasicType basic_type_f32x4 = { Ast_Kind_Basic_Type, 0, &simd_token, "f32x4", &basic_types[Basic_Kind_F32X4] };
AstBasicType basic_type_f64x2 = { Ast_Kind_Basic_Type, 0, &simd_token, "f64x2", &basic_types[Basic_Kind_F64X2] };
+AstBasicType basic_type_v128 = { Ast_Kind_Basic_Type, 0, &simd_token, "v128", &basic_types[Basic_Kind_V128] };
static OnyxToken builtin_package_token = { Token_Type_Symbol, 7, "builtin ", { 0 } };
AstNode builtin_package_node = { Ast_Kind_Symbol, Ast_Flag_No_Clone, &builtin_package_token, NULL };
{ "simd", "i64x2", (AstNode *) &basic_type_i64x2 },
{ "simd", "f32x4", (AstNode *) &basic_type_f32x4 },
{ "simd", "f64x2", (AstNode *) &basic_type_f64x2 },
+ { "simd", "v128", (AstNode *) &basic_type_v128 },
{ "builtin", "__heap_start", (AstNode *) &builtin_heap_start },
{ "builtin", "__stack_top", (AstNode *) &builtin_stack_top },
{ NULL, NULL, NULL },
};
+bh_table(OnyxIntrinsic) intrinsic_table;
+
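+// Static name/enum pairs consumed by initialize_builtins below; the array is
+// terminated by a NULL-name sentinel entry.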
+static IntrinsicMap builtin_intrinsics[] = {
+ { "memory_size", ONYX_INTRINSIC_MEMORY_SIZE },
+ { "memory_grow", ONYX_INTRINSIC_MEMORY_GROW },
+
+ { "clz_i32", ONYX_INTRINSIC_I32_CLZ },
+ { "ctz_i32", ONYX_INTRINSIC_I32_CTZ },
+ { "popcnt_i32", ONYX_INTRINSIC_I32_POPCNT },
+ { "and_i32", ONYX_INTRINSIC_I32_AND },
+ { "or_i32", ONYX_INTRINSIC_I32_OR },
+ { "xor_i32", ONYX_INTRINSIC_I32_XOR },
+ { "shl_i32", ONYX_INTRINSIC_I32_SHL },
+ { "slr_i32", ONYX_INTRINSIC_I32_SLR },
+ { "sar_i32", ONYX_INTRINSIC_I32_SAR },
+ { "rotl_i32", ONYX_INTRINSIC_I32_ROTL },
+ { "rotr_i32", ONYX_INTRINSIC_I32_ROTR },
+
+ { "clz_i64", ONYX_INTRINSIC_I64_CLZ },
+ { "ctz_i64", ONYX_INTRINSIC_I64_CTZ },
+ { "popcnt_i64", ONYX_INTRINSIC_I64_POPCNT },
+ { "and_i64", ONYX_INTRINSIC_I64_AND },
+ { "or_i64", ONYX_INTRINSIC_I64_OR },
+ { "xor_i64", ONYX_INTRINSIC_I64_XOR },
+ { "shl_i64", ONYX_INTRINSIC_I64_SHL },
+ { "slr_i64", ONYX_INTRINSIC_I64_SLR },
+ { "sar_i64", ONYX_INTRINSIC_I64_SAR },
+ { "rotl_i64", ONYX_INTRINSIC_I64_ROTL },
+ { "rotr_i64", ONYX_INTRINSIC_I64_ROTR },
+
+ { "abs_f32", ONYX_INTRINSIC_F32_ABS },
+ { "ceil_f32", ONYX_INTRINSIC_F32_CEIL },
+ { "floor_f32", ONYX_INTRINSIC_F32_FLOOR },
+ { "trunc_f32", ONYX_INTRINSIC_F32_TRUNC },
+ { "nearest_f32", ONYX_INTRINSIC_F32_NEAREST },
+ { "sqrt_f32", ONYX_INTRINSIC_F32_SQRT },
+ { "min_f32", ONYX_INTRINSIC_F32_MIN },
+ { "max_f32", ONYX_INTRINSIC_F32_MAX },
+ { "copysign_f32", ONYX_INTRINSIC_F32_COPYSIGN },
+
+ { "abs_f64", ONYX_INTRINSIC_F64_ABS },
+ { "ceil_f64", ONYX_INTRINSIC_F64_CEIL },
+ { "floor_f64", ONYX_INTRINSIC_F64_FLOOR },
+ { "trunc_f64", ONYX_INTRINSIC_F64_TRUNC },
+ { "nearest_f64", ONYX_INTRINSIC_F64_NEAREST },
+ { "sqrt_f64", ONYX_INTRINSIC_F64_SQRT },
+ { "min_f64", ONYX_INTRINSIC_F64_MIN },
+ { "max_f64", ONYX_INTRINSIC_F64_MAX },
+ { "copysign_f64", ONYX_INTRINSIC_F64_COPYSIGN },
+
+
+ // SIMD Intrinsics
+ { "v128_const", ONYX_INTRINSIC_V128_CONST },
+ { "i8x16_const", ONYX_INTRINSIC_I8X16_CONST },
+ { "i16x8_const", ONYX_INTRINSIC_I16X8_CONST },
+ { "i32x4_const", ONYX_INTRINSIC_I32X4_CONST },
+ { "i64x2_const", ONYX_INTRINSIC_I64X2_CONST },
+ { "f32x4_const", ONYX_INTRINSIC_F32X4_CONST },
+ { "f64x2_const", ONYX_INTRINSIC_F64X2_CONST },
+ { "i8x16_shuffle", ONYX_INTRINSIC_I8X16_SHUFFLE },
+
+ { "i8x16_extract_lane_s", ONYX_INTRINSIC_I8X16_EXTRACT_LANE_S },
+ { "i8x16_extract_lane_u", ONYX_INTRINSIC_I8X16_EXTRACT_LANE_U },
+ { "i8x16_replace_lane", ONYX_INTRINSIC_I8X16_REPLACE_LANE },
+ { "i16x8_extract_lane_s", ONYX_INTRINSIC_I16X8_EXTRACT_LANE_S },
+ { "i16x8_extract_lane_u", ONYX_INTRINSIC_I16X8_EXTRACT_LANE_U },
+ { "i16x8_replace_lane", ONYX_INTRINSIC_I16X8_REPLACE_LANE },
+ { "i32x4_extract_lane", ONYX_INTRINSIC_I32X4_EXTRACT_LANE },
+ { "i32x4_replace_lane", ONYX_INTRINSIC_I32X4_REPLACE_LANE },
+ { "i64x2_extract_lane", ONYX_INTRINSIC_I64X2_EXTRACT_LANE },
+ { "i64x2_replace_lane", ONYX_INTRINSIC_I64X2_REPLACE_LANE },
+ { "f32x4_extract_lane", ONYX_INTRINSIC_F32X4_EXTRACT_LANE },
+ { "f32x4_replace_lane", ONYX_INTRINSIC_F32X4_REPLACE_LANE },
+ { "f64x2_extract_lane", ONYX_INTRINSIC_F64X2_EXTRACT_LANE },
+ { "f64x2_replace_lane", ONYX_INTRINSIC_F64X2_REPLACE_LANE },
+
+ { "i8x16_swizzle", ONYX_INTRINSIC_I8X16_SWIZZLE },
+ { "i8x16_splat", ONYX_INTRINSIC_I8X16_SPLAT },
+ { "i16x8_splat", ONYX_INTRINSIC_I16X8_SPLAT },
+ { "i32x4_splat", ONYX_INTRINSIC_I32X4_SPLAT },
+ { "i64x2_splat", ONYX_INTRINSIC_I64X2_SPLAT },
+ { "f32x4_splat", ONYX_INTRINSIC_F32X4_SPLAT },
+ { "f64x2_splat", ONYX_INTRINSIC_F64X2_SPLAT },
+
+ { "i8x16_eq", ONYX_INTRINSIC_I8X16_EQ },
+ { "i8x16_neq", ONYX_INTRINSIC_I8X16_NEQ },
+ { "i8x16_lt_s", ONYX_INTRINSIC_I8X16_LT_S },
+ { "i8x16_lt_u", ONYX_INTRINSIC_I8X16_LT_U },
+ { "i8x16_gt_s", ONYX_INTRINSIC_I8X16_GT_S },
+ { "i8x16_gt_u", ONYX_INTRINSIC_I8X16_GT_U },
+ { "i8x16_le_s", ONYX_INTRINSIC_I8X16_LE_S },
+ { "i8x16_le_u", ONYX_INTRINSIC_I8X16_LE_U },
+ { "i8x16_ge_s", ONYX_INTRINSIC_I8X16_GE_S },
+ { "i8x16_ge_u", ONYX_INTRINSIC_I8X16_GE_U },
+
+ { "i16x8_eq", ONYX_INTRINSIC_I16X8_EQ },
+ { "i16x8_neq", ONYX_INTRINSIC_I16X8_NEQ },
+ { "i16x8_lt_s", ONYX_INTRINSIC_I16X8_LT_S },
+ { "i16x8_lt_u", ONYX_INTRINSIC_I16X8_LT_U },
+ { "i16x8_gt_s", ONYX_INTRINSIC_I16X8_GT_S },
+ { "i16x8_gt_u", ONYX_INTRINSIC_I16X8_GT_U },
+ { "i16x8_le_s", ONYX_INTRINSIC_I16X8_LE_S },
+ { "i16x8_le_u", ONYX_INTRINSIC_I16X8_LE_U },
+ { "i16x8_ge_s", ONYX_INTRINSIC_I16X8_GE_S },
+ { "i16x8_ge_u", ONYX_INTRINSIC_I16X8_GE_U },
+
+ { "i32x4_eq", ONYX_INTRINSIC_I32X4_EQ },
+ { "i32x4_neq", ONYX_INTRINSIC_I32X4_NEQ },
+ { "i32x4_lt_s", ONYX_INTRINSIC_I32X4_LT_S },
+ { "i32x4_lt_u", ONYX_INTRINSIC_I32X4_LT_U },
+ { "i32x4_gt_s", ONYX_INTRINSIC_I32X4_GT_S },
+ { "i32x4_gt_u", ONYX_INTRINSIC_I32X4_GT_U },
+ { "i32x4_le_s", ONYX_INTRINSIC_I32X4_LE_S },
+ { "i32x4_le_u", ONYX_INTRINSIC_I32X4_LE_U },
+ { "i32x4_ge_s", ONYX_INTRINSIC_I32X4_GE_S },
+ { "i32x4_ge_u", ONYX_INTRINSIC_I32X4_GE_U },
+
+ { "f32x4_eq", ONYX_INTRINSIC_F32X4_EQ },
+ { "f32x4_neq", ONYX_INTRINSIC_F32X4_NEQ },
+ { "f32x4_lt", ONYX_INTRINSIC_F32X4_LT },
+ { "f32x4_gt", ONYX_INTRINSIC_F32X4_GT },
+ { "f32x4_le", ONYX_INTRINSIC_F32X4_LE },
+ { "f32x4_ge", ONYX_INTRINSIC_F32X4_GE },
+
+ { "f64x2_eq", ONYX_INTRINSIC_F64X2_EQ },
+ { "f64x2_neq", ONYX_INTRINSIC_F64X2_NEQ },
+ { "f64x2_lt", ONYX_INTRINSIC_F64X2_LT },
+ { "f64x2_gt", ONYX_INTRINSIC_F64X2_GT },
+ { "f64x2_le", ONYX_INTRINSIC_F64X2_LE },
+ { "f64x2_ge", ONYX_INTRINSIC_F64X2_GE },
+
+ { "v128_not", ONYX_INTRINSIC_V128_NOT },
+ { "v128_and", ONYX_INTRINSIC_V128_AND },
+ { "v128_andnot", ONYX_INTRINSIC_V128_ANDNOT },
+ { "v128_or", ONYX_INTRINSIC_V128_OR },
+ { "v128_xor", ONYX_INTRINSIC_V128_XOR },
+ { "v128_bitselect", ONYX_INTRINSIC_V128_BITSELECT },
+
+ { "i8x16_abs", ONYX_INTRINSIC_I8X16_ABS },
+ { "i8x16_neg", ONYX_INTRINSIC_I8X16_NEG },
+ { "i8x16_any_true", ONYX_INTRINSIC_I8X16_ANY_TRUE },
+ { "i8x16_all_true", ONYX_INTRINSIC_I8X16_ALL_TRUE },
+ { "i8x16_bitmask", ONYX_INTRINSIC_I8X16_BITMASK },
+ { "i8x16_narrow_i16x8_s", ONYX_INTRINSIC_I8X16_NARROW_I16X8_S },
+ { "i8x16_narrow_i16x8_u", ONYX_INTRINSIC_I8X16_NARROW_I16X8_U },
+ { "i8x16_shl", ONYX_INTRINSIC_I8X16_SHL },
+ { "i8x16_shr_s", ONYX_INTRINSIC_I8X16_SHR_S },
+ { "i8x16_shr_u", ONYX_INTRINSIC_I8X16_SHR_U },
+ { "i8x16_add", ONYX_INTRINSIC_I8X16_ADD },
+ { "i8x16_add_sat_s", ONYX_INTRINSIC_I8X16_ADD_SAT_S },
+ { "i8x16_add_sat_u", ONYX_INTRINSIC_I8X16_ADD_SAT_U },
+ { "i8x16_sub", ONYX_INTRINSIC_I8X16_SUB },
+ { "i8x16_sub_sat_s", ONYX_INTRINSIC_I8X16_SUB_SAT_S },
+ { "i8x16_sub_sat_u", ONYX_INTRINSIC_I8X16_SUB_SAT_U },
+ { "i8x16_min_s", ONYX_INTRINSIC_I8X16_MIN_S },
+ { "i8x16_min_u", ONYX_INTRINSIC_I8X16_MIN_U },
+ { "i8x16_max_s", ONYX_INTRINSIC_I8X16_MAX_S },
+ { "i8x16_max_u", ONYX_INTRINSIC_I8X16_MAX_U },
+ { "i8x16_avgr_u", ONYX_INTRINSIC_I8X16_AVGR_U },
+
+ { "i16x8_abs", ONYX_INTRINSIC_I16X8_ABS },
+ { "i16x8_neg", ONYX_INTRINSIC_I16X8_NEG },
+ { "i16x8_any_true", ONYX_INTRINSIC_I16X8_ANY_TRUE },
+ { "i16x8_all_true", ONYX_INTRINSIC_I16X8_ALL_TRUE },
+ { "i16x8_bitmask", ONYX_INTRINSIC_I16X8_BITMASK },
+ { "i16x8_narrow_i32x4_s", ONYX_INTRINSIC_I16X8_NARROW_I32X4_S },
+ { "i16x8_narrow_i32x4_u", ONYX_INTRINSIC_I16X8_NARROW_I32X4_U },
+ { "i16x8_widen_low_i8x16_s", ONYX_INTRINSIC_I16X8_WIDEN_LOW_I8X16_S },
+ { "i16x8_widen_high_i8x16_s", ONYX_INTRINSIC_I16X8_WIDEN_HIGH_I8X16_S },
+ { "i16x8_widen_low_i8x16_u", ONYX_INTRINSIC_I16X8_WIDEN_LOW_I8X16_U },
+ { "i16x8_widen_high_i8x16_u", ONYX_INTRINSIC_I16X8_WIDEN_HIGH_I8X16_U },
+ { "i16x8_shl", ONYX_INTRINSIC_I16X8_SHL },
+ { "i16x8_shr_s", ONYX_INTRINSIC_I16X8_SHR_S },
+ { "i16x8_shr_u", ONYX_INTRINSIC_I16X8_SHR_U },
+ { "i16x8_add", ONYX_INTRINSIC_I16X8_ADD },
+ { "i16x8_add_sat_s", ONYX_INTRINSIC_I16X8_ADD_SAT_S },
+ { "i16x8_add_sat_u", ONYX_INTRINSIC_I16X8_ADD_SAT_U },
+ { "i16x8_sub", ONYX_INTRINSIC_I16X8_SUB },
+ { "i16x8_sub_sat_s", ONYX_INTRINSIC_I16X8_SUB_SAT_S },
+ { "i16x8_sub_sat_u", ONYX_INTRINSIC_I16X8_SUB_SAT_U },
+ { "i16x8_mul", ONYX_INTRINSIC_I16X8_MUL },
+ { "i16x8_min_s", ONYX_INTRINSIC_I16X8_MIN_S },
+ { "i16x8_min_u", ONYX_INTRINSIC_I16X8_MIN_U },
+ { "i16x8_max_s", ONYX_INTRINSIC_I16X8_MAX_S },
+ { "i16x8_max_u", ONYX_INTRINSIC_I16X8_MAX_U },
+ { "i16x8_avgr_u", ONYX_INTRINSIC_I16X8_AVGR_U },
+
+ { "i32x4_abs", ONYX_INTRINSIC_I32X4_ABS },
+ { "i32x4_neg", ONYX_INTRINSIC_I32X4_NEG },
+ { "i32x4_any_true", ONYX_INTRINSIC_I32X4_ANY_TRUE },
+ { "i32x4_all_true", ONYX_INTRINSIC_I32X4_ALL_TRUE },
+ { "i32x4_bitmask", ONYX_INTRINSIC_I32X4_BITMASK },
+ { "i32x4_widen_low_i16x8_s", ONYX_INTRINSIC_I32X4_WIDEN_LOW_I16X8_S },
+ { "i32x4_widen_high_i16x8_s", ONYX_INTRINSIC_I32X4_WIDEN_HIGH_I16X8_S },
+ { "i32x4_widen_low_i16x8_u", ONYX_INTRINSIC_I32X4_WIDEN_LOW_I16X8_U },
+ { "i32x4_widen_high_i16x8_u", ONYX_INTRINSIC_I32X4_WIDEN_HIGH_I16X8_U },
+ { "i32x4_shl", ONYX_INTRINSIC_I32X4_SHL },
+ { "i32x4_shr_s", ONYX_INTRINSIC_I32X4_SHR_S },
+ { "i32x4_shl_u", ONYX_INTRINSIC_I32X4_SHR_U },
+ { "i32x4_add", ONYX_INTRINSIC_I32X4_ADD },
+ { "i32x4_sub", ONYX_INTRINSIC_I32X4_SUB },
+ { "i32x4_mul", ONYX_INTRINSIC_I32X4_MUL },
+ { "i32x4_min_s", ONYX_INTRINSIC_I32X4_MIN_S },
+ { "i32x4_min_u", ONYX_INTRINSIC_I32X4_MIN_U },
+ { "i32x4_max_s", ONYX_INTRINSIC_I32X4_MAX_S },
+ { "i32x4_max_u", ONYX_INTRINSIC_I32X4_MAX_U },
+
+ { "i64x2_neg", ONYX_INTRINSIC_I64X2_NEG },
+ { "i64x2_shl", ONYX_INTRINSIC_I64X2_SHL },
+ { "i64x2_shr_s", ONYX_INTRINSIC_I64X2_SHR_S },
+ { "i64x2_shr_u", ONYX_INTRINSIC_I64X2_SHR_U },
+ { "i64x2_add", ONYX_INTRINSIC_I64X2_ADD },
+ { "i64x2_sub", ONYX_INTRINSIC_I64X2_SUB },
+ { "i64x2_mul", ONYX_INTRINSIC_I64X2_MUL },
+
+ { "f32x4_abs", ONYX_INTRINSIC_F32X4_ABS },
+ { "f32x4_neg", ONYX_INTRINSIC_F32X4_NEG },
+ { "f32x4_sqrt", ONYX_INTRINSIC_F32X4_SQRT },
+ { "f32x4_add", ONYX_INTRINSIC_F32X4_ADD },
+ { "f32x4_sub", ONYX_INTRINSIC_F32X4_SUB },
+ { "f32x4_mul", ONYX_INTRINSIC_F32X4_MUL },
+ { "f32x4_div", ONYX_INTRINSIC_F32X4_DIV },
+ { "f32x4_min", ONYX_INTRINSIC_F32X4_MIN },
+ { "f32x4_max", ONYX_INTRINSIC_F32X4_MAX },
+
+ { "f64x2_abs", ONYX_INTRINSIC_F64X2_ABS },
+ { "f64x2_neg", ONYX_INTRINSIC_F64X2_NEG },
+ { "f64x2_sqrt", ONYX_INTRINSIC_F64X2_SQRT },
+ { "f64x2_add", ONYX_INTRINSIC_F64X2_ADD },
+ { "f64x2_sub", ONYX_INTRINSIC_F64X2_SUB },
+ { "f64x2_mul", ONYX_INTRINSIC_F64X2_MUL },
+ { "f64x2_div", ONYX_INTRINSIC_F64X2_DIV },
+ { "f64x2_min", ONYX_INTRINSIC_F64X2_MIN },
+ { "f64x2_max", ONYX_INTRINSIC_F64X2_MAX },
+
+ { "i32x4_trunc_sat_f32x4_s", ONYX_INTRINSIC_I32X4_TRUNC_SAT_F32X4_S },
+ { "i32x4_trunc_sat_f32x4_u", ONYX_INTRINSIC_I32X4_TRUNC_SAT_F32X4_U },
+ { "f32x4_convert_i32x4_s", ONYX_INTRINSIC_F32X4_CONVERT_I32X4_S },
+ { "f32x4_convert_i32x4_u", ONYX_INTRINSIC_F32X4_CONVERT_I32X4_U },
+
+ { NULL, ONYX_INTRINSIC_UNDEFINED },
+};
+
void initialize_builtins(bh_allocator a, ProgramInfo* prog) {
// HACK
builtin_package_token.text = bh_strdup(global_heap_allocator, builtin_package_token.text);
onyx_report_error((OnyxFilePos) { 0 }, "'range' struct not found in builtin package.");
return;
}
+
+
+
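+ // Populate the name -> intrinsic hash table from the static map above; the
+ // loop stops at the NULL-name sentinel.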
+ bh_table_init(global_heap_allocator, intrinsic_table, 128);
+ IntrinsicMap* intrinsic = &builtin_intrinsics[0];
+ while (intrinsic->name != NULL) {
+ bh_table_put(OnyxIntrinsic, intrinsic_table, intrinsic->name, intrinsic->intrinsic);
+ intrinsic++;
+ }
}
token_toggle_end(callee->intrinsic_name);
char* intr_name = callee->intrinsic_name->text;
- OnyxIntrinsic intrinsic = ONYX_INTRINSIC_UNDEFINED;
-
- if (!strcmp("memory_size", intr_name)) intrinsic = ONYX_INTRINSIC_MEMORY_SIZE;
- else if (!strcmp("memory_grow", intr_name)) intrinsic = ONYX_INTRINSIC_MEMORY_GROW;
-
- else if (!strcmp("clz_i32", intr_name)) intrinsic = ONYX_INTRINSIC_I32_CLZ;
- else if (!strcmp("ctz_i32", intr_name)) intrinsic = ONYX_INTRINSIC_I32_CTZ;
- else if (!strcmp("popcnt_i32", intr_name)) intrinsic = ONYX_INTRINSIC_I32_POPCNT;
- else if (!strcmp("and_i32", intr_name)) intrinsic = ONYX_INTRINSIC_I32_AND;
- else if (!strcmp("or_i32", intr_name)) intrinsic = ONYX_INTRINSIC_I32_OR;
- else if (!strcmp("xor_i32", intr_name)) intrinsic = ONYX_INTRINSIC_I32_XOR;
- else if (!strcmp("shl_i32", intr_name)) intrinsic = ONYX_INTRINSIC_I32_SHL;
- else if (!strcmp("slr_i32", intr_name)) intrinsic = ONYX_INTRINSIC_I32_SLR;
- else if (!strcmp("sar_i32", intr_name)) intrinsic = ONYX_INTRINSIC_I32_SAR;
- else if (!strcmp("rotl_i32", intr_name)) intrinsic = ONYX_INTRINSIC_I32_ROTL;
- else if (!strcmp("rotr_i32", intr_name)) intrinsic = ONYX_INTRINSIC_I32_ROTR;
-
- else if (!strcmp("clz_i64", intr_name)) intrinsic = ONYX_INTRINSIC_I64_CLZ;
- else if (!strcmp("ctz_i64", intr_name)) intrinsic = ONYX_INTRINSIC_I64_CTZ;
- else if (!strcmp("popcnt_i64", intr_name)) intrinsic = ONYX_INTRINSIC_I64_POPCNT;
- else if (!strcmp("and_i64", intr_name)) intrinsic = ONYX_INTRINSIC_I64_AND;
- else if (!strcmp("or_i64", intr_name)) intrinsic = ONYX_INTRINSIC_I64_OR;
- else if (!strcmp("xor_i64", intr_name)) intrinsic = ONYX_INTRINSIC_I64_XOR;
- else if (!strcmp("shl_i64", intr_name)) intrinsic = ONYX_INTRINSIC_I64_SHL;
- else if (!strcmp("slr_i64", intr_name)) intrinsic = ONYX_INTRINSIC_I64_SLR;
- else if (!strcmp("sar_i64", intr_name)) intrinsic = ONYX_INTRINSIC_I64_SAR;
- else if (!strcmp("rotl_i64", intr_name)) intrinsic = ONYX_INTRINSIC_I64_ROTL;
- else if (!strcmp("rotr_i64", intr_name)) intrinsic = ONYX_INTRINSIC_I64_ROTR;
-
- else if (!strcmp("abs_f32", intr_name)) intrinsic = ONYX_INTRINSIC_F32_ABS;
- else if (!strcmp("ceil_f32", intr_name)) intrinsic = ONYX_INTRINSIC_F32_CEIL;
- else if (!strcmp("floor_f32", intr_name)) intrinsic = ONYX_INTRINSIC_F32_FLOOR;
- else if (!strcmp("trunc_f32", intr_name)) intrinsic = ONYX_INTRINSIC_F32_TRUNC;
- else if (!strcmp("nearest_f32", intr_name)) intrinsic = ONYX_INTRINSIC_F32_NEAREST;
- else if (!strcmp("sqrt_f32", intr_name)) intrinsic = ONYX_INTRINSIC_F32_SQRT;
- else if (!strcmp("min_f32", intr_name)) intrinsic = ONYX_INTRINSIC_F32_MIN;
- else if (!strcmp("max_f32", intr_name)) intrinsic = ONYX_INTRINSIC_F32_MAX;
- else if (!strcmp("copysign_f32", intr_name)) intrinsic = ONYX_INTRINSIC_F32_COPYSIGN;
-
- else if (!strcmp("abs_f64", intr_name)) intrinsic = ONYX_INTRINSIC_F64_ABS;
- else if (!strcmp("ceil_f64", intr_name)) intrinsic = ONYX_INTRINSIC_F64_CEIL;
- else if (!strcmp("floor_f64", intr_name)) intrinsic = ONYX_INTRINSIC_F64_FLOOR;
- else if (!strcmp("trunc_f64", intr_name)) intrinsic = ONYX_INTRINSIC_F64_TRUNC;
- else if (!strcmp("nearest_f64", intr_name)) intrinsic = ONYX_INTRINSIC_F64_NEAREST;
- else if (!strcmp("sqrt_f64", intr_name)) intrinsic = ONYX_INTRINSIC_F64_SQRT;
- else if (!strcmp("min_f64", intr_name)) intrinsic = ONYX_INTRINSIC_F64_MIN;
- else if (!strcmp("max_f64", intr_name)) intrinsic = ONYX_INTRINSIC_F64_MAX;
- else if (!strcmp("copysign_f64", intr_name)) intrinsic = ONYX_INTRINSIC_F64_COPYSIGN;
-
- ((AstIntrinsicCall *)call)->intrinsic = intrinsic;
+
+ if (bh_table_has(OnyxIntrinsic, intrinsic_table, intr_name)) {
+ ((AstIntrinsicCall *)call)->intrinsic = bh_table_get(OnyxIntrinsic, intrinsic_table, intr_name);
+
+ } else {
+ onyx_report_error(callee->token->pos, "Intrinsic not supported, '%s'.", intr_name);
+ token_toggle_end(callee->intrinsic_name);
+ return 1;
+ }
token_toggle_end(callee->intrinsic_name);
}
{ Type_Kind_Basic, 0, { Basic_Kind_I64X2, Basic_Flag_SIMD, 16, 16, "i64x2" } },
{ Type_Kind_Basic, 0, { Basic_Kind_F32X4, Basic_Flag_SIMD, 16, 16, "f32x4" } },
{ Type_Kind_Basic, 0, { Basic_Kind_F64X2, Basic_Flag_SIMD, 16, 16, "f64x2" } },
+ { Type_Kind_Basic, 0, { Basic_Kind_V128, Basic_Flag_SIMD, 16, 16, "v128" } },
};
b32 types_are_surface_compatible(Type* t1, Type* t2) {
else if (store_size == 2) return 1;
else if (store_size == 4) return 2;
else if (store_size == 8) return 3;
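+ // v128 values are 16 bytes; log2(16) = 4.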
+ else if (store_size == 16) return 4;
return 2;
}
|| type->kind == Type_Kind_Struct;
}
+b32 type_is_simd(Type* type) {
+ if (type->kind != Type_Kind_Basic) return 0;
+ return type->Basic.flags & Basic_Flag_SIMD;
+}
+
b32 type_results_in_void(Type* type) {
return (type == NULL)
|| (type->kind == Type_Kind_Basic && type->Basic.kind == Basic_Kind_Void)
i32 is_pointer = is_basic && (type->Basic.flags & Basic_Flag_Pointer);
i32 is_integer = is_basic && ((type->Basic.flags & Basic_Flag_Integer) || (type->Basic.flags & Basic_Flag_Boolean));
i32 is_float = is_basic && (type->Basic.flags & Basic_Flag_Float);
+ i32 is_simd = is_basic && (type->Basic.flags & Basic_Flag_SIMD);
if (is_pointer) {
WID(WI_I32_STORE, ((WasmInstructionData) { alignment, offset }));
} else if (is_float) {
if (store_size == 4) WID(WI_F32_STORE, ((WasmInstructionData) { alignment, offset }));
else if (store_size == 8) WID(WI_F64_STORE, ((WasmInstructionData) { alignment, offset }));
+ } else if (is_simd) {
+ WID(WI_V128_STORE, ((WasmInstructionData) { alignment, offset }));
} else {
onyx_report_error((OnyxFilePos) { 0 },
"Failed to generate store instruction for type '%s'.",
i32 is_integer = is_basic && ((type->Basic.flags & Basic_Flag_Integer) || (type->Basic.flags & Basic_Flag_Boolean));
i32 is_float = is_basic && (type->Basic.flags & Basic_Flag_Float);
i32 is_unsigned = is_basic && (type->Basic.flags & Basic_Flag_Unsigned);
+ i32 is_simd = is_basic && (type->Basic.flags & Basic_Flag_SIMD);
WasmInstructionType instr = WI_NOP;
i32 alignment = type_get_alignment_log2(type);
if (load_size == 4) instr = WI_F32_LOAD;
else if (load_size == 8) instr = WI_F64_LOAD;
}
+ else if (is_simd) {
+ instr = WI_V128_LOAD;
+ }
WID(instr, ((WasmInstructionData) { alignment, offset }));
return;
}
+ if (type_is_simd(to) != type_is_simd(from)) {
+ onyx_report_error(cast->token->pos, "Can only perform a SIMD cast between SIMD types.");
+ return;
+ }
+
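+ // All SIMD types share the same 128-bit representation, so a SIMD-to-SIMD
+ // cast emits no instructions.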
+ if (type_is_simd(from) && type_is_simd(to)) {
+ *pcode = code;
+ return;
+ }
+
if (from->kind == Type_Kind_Enum) from = from->Enum.backing;
if (to->kind == Type_Kind_Enum) to = to->Enum.backing;
i32 leb_len;
u8* leb;
- bh_buffer_write_byte(buff, (u8) instr->type);
+ if (instr->type & SIMD_INSTR_MASK) {
+ bh_buffer_write_byte(buff, 0xFD);
+ leb = uint_to_uleb128((u64) (instr->type & ~SIMD_INSTR_MASK), &leb_len);
+ bh_buffer_append(buff, leb, leb_len);
+
+ } else {
+ bh_buffer_write_byte(buff, (u8) instr->type);
+ }
switch (instr->type) {
case WI_LOCAL_GET:
case WI_CALL_INDIRECT:
- case WI_I32_STORE:
- case WI_I32_STORE_8:
- case WI_I32_STORE_16:
- case WI_I64_STORE:
- case WI_I64_STORE_8:
- case WI_I64_STORE_16:
- case WI_I64_STORE_32:
- case WI_F32_STORE:
- case WI_F64_STORE:
+ case WI_I32_STORE: case WI_I32_STORE_8: case WI_I32_STORE_16:
+ case WI_I64_STORE: case WI_I64_STORE_8: case WI_I64_STORE_16: case WI_I64_STORE_32:
+ case WI_F32_STORE: case WI_F64_STORE:
+ case WI_V128_STORE:
case WI_I32_LOAD:
- case WI_I32_LOAD_8_S:
- case WI_I32_LOAD_8_U:
- case WI_I32_LOAD_16_S:
- case WI_I32_LOAD_16_U:
+ case WI_I32_LOAD_8_S: case WI_I32_LOAD_8_U:
+ case WI_I32_LOAD_16_S: case WI_I32_LOAD_16_U:
case WI_I64_LOAD:
- case WI_I64_LOAD_8_S:
- case WI_I64_LOAD_8_U:
- case WI_I64_LOAD_16_S:
- case WI_I64_LOAD_16_U:
- case WI_I64_LOAD_32_S:
- case WI_I64_LOAD_32_U:
- case WI_F32_LOAD:
- case WI_F64_LOAD:
+ case WI_I64_LOAD_8_S: case WI_I64_LOAD_8_U:
+ case WI_I64_LOAD_16_S: case WI_I64_LOAD_16_U:
+ case WI_I64_LOAD_32_S: case WI_I64_LOAD_32_U:
+ case WI_F32_LOAD: case WI_F64_LOAD:
+ case WI_V128_LOAD:
leb = uint_to_uleb128((u64) instr->data.i1, &leb_len);
bh_buffer_append(buff, leb, leb_len);
leb = uint_to_uleb128((u64) instr->data.i2, &leb_len);