        for i: input.count do input[i] = (cast(float) cast(u32) example[i]) / 255;
        neural_net_forward(nn, ~~ input);
+       neural_net_backward(nn, ~~ expected);
+
        if ex % 100 == 0 {
-           print_array(expected);
+           // Prints the array with its largest entry highlighted in bright blue
+           // ("\x1b[94m" sets the color, "\x1b[0m" resets it).
+           print_colored_array :: (arr: [] $T) {
+               greatest_idx := 0;
+               for i: arr.count do if arr[i] > arr[greatest_idx] do greatest_idx = i;
+
+               for i: arr.count {
+                   if i == greatest_idx {
+                       printf("\x1b[94m%f\x1b[0m ", arr[i]);
+                   } else {
+                       printf("%f ", arr[i]);
+                   }
+               }
+               print("\n");
+           }
+
+           print_colored_array(cast([] f32) expected);
            output := neural_net_get_output(nn);
-           print_array(output);
+           print_colored_array(output);
            loss := neural_net_loss(nn, ~~ expected);
            printf("MSE loss: %f\n", cast(f32) loss);
        }
-
-       neural_net_backward(nn, ~~ expected);
    }
}
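Neither side of this hunk shows how `expected` is built; presumably it is the one-hot target vector for the current training example. A minimal sketch of that setup, where `label` is a hypothetical variable holding the true digit:

    // Hypothetical: clear the target vector, then mark the true digit.
    for ^e: expected do *e = 0.0f;
    expected[label] = 1.0f;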
    deltas = memory.make_slice(float, layer_size, allocator);
-   activation = tanh_activation;
+   activation = sigmoid_activation;
    if prev_layer_size > 0 {
        weights = memory.make_slice(#type [] float, layer_size, allocator);
randomize_weights_and_biases :: (use layer: ^Layer) {
    for ^weight: weights {
        for ^w: *weight {
-           *w = cast(float) random.float(-1.0f, 1.0f);
+           *w = cast(float) random.float(-0.5f, 0.5f);
        }
    }
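Only the weight loops appear in this hunk, but the procedure's name implies the biases get the same treatment, and the tighter ±0.5 range pairs naturally with the switch to sigmoid: it keeps the initial pre-activations near zero, where sigmoid's gradient is largest. A hypothetical sketch of the matching bias loop, assuming the `Layer` also carries a `biases` slice:

    // Hypothetical: biases drawn from the same tightened range.
    for ^bias: biases do *bias = cast(float) random.float(-0.5f, 0.5f);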
ActivationFunction :: struct {
-   forward  : proc (x: float) -> float;
-   backward : proc (fx: float, x: float) -> float;
+   forward  : (x: float) -> float;
+   backward : (fx: float, x: float) -> float;
}
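For reference, the `sigmoid_activation` value swapped in above could be built from a pair matching these signatures. A sketch, assuming `math.exp` is available from `core.math` (the names here are illustrative, not from the diff):

    sigmoid_forward :: (x: float) -> float {
        return 1.0f / (1.0f + math.exp(-x));
    }

    // σ'(x) = σ(x) * (1 - σ(x)), so only the cached output fx is needed;
    // the raw input x goes unused for sigmoid.
    sigmoid_backward :: (fx: float, x: float) -> float {
        return fx * (1.0f - fx);
    }

Passing the cached forward output `fx` into `backward` is what lets activations like sigmoid and tanh skip recomputing the expensive `exp` during the backward pass.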