// Load the data
+load_example :: (fs: ^io.FileStream, example: u32, out: [784] u8) {
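+ // Skip the 16-byte IDX header; each image is 28 * 28 = 784 raw bytes.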
+ location := 16 + example * 784;
+ _, bytes_read := io.stream_read_at(fs, location, ~~ out);
+
+ assert(bytes_read == 784, "Incorrect number of bytes read.");
+}
+
main :: (args: [] cstr) {
// Enables a logging allocator to print every allocation
// main_allocator := context.allocator;
// context.allocator = alloc.log.logging_allocator(^main_allocator);
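+ // Seed the RNG so the random weight initialization is reproducible between runs.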
+ random.set_seed(1234);
+
+ err, training_example := io.open("data/train-images-idx3-ubyte");
+ if err != io.Error.None {
+ println("There was an error loading the file.");
+ return;
+ }
+ defer io.stream_close(^training_example);
+
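+ // Read the first training image into a fixed-size buffer.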
+ example : [784] u8;
+ load_example(^training_example, 0, example);
+
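+ // 784 input pixels, one hidden layer of 1000 neurons, 10 output classes.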
nn := make_neural_net(28 * 28, 1000, 10);
+ defer neural_net_free(^nn);
- neural_net_forward(^nn, ~~ input);
+ input := memory.make_slice(f32, 784);
+ defer cfree(input.data);
+
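+ // Scale each pixel from a 0..255 byte to a float in [0, 1].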
+ // CLEANUP: The double cast that is necessary here is gross.
+ for i: input.count do input[i] = (cast(f32) cast(u32) example[i]) / 255;
+ neural_net_forward(^nn, ~~ input);
output := neural_net_get_output(^nn);
for o: output do println(o);
+
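+ // Hard-coded one-hot target for this example's digit label.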
+ expected := f32.[ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0 ];
+ loss := neural_net_loss(^nn, ~~ expected);
+ printf("MSE loss: %f\n", loss);
}
\ No newline at end of file

// Feed forward neural net
make_neural_net :: (layer_sizes: ..i32) -> NeuralNet {
net : NeuralNet;
- net.layer_arena = alloc.arena.make(alloc.heap_allocator, 64 * 1024 * 1024); // 64 MiB
+ net.layer_arena = alloc.arena.make(context.allocator, 64 * 1024 * 1024); // 64 MiB
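+ // Every layer allocation below comes out of this arena; neural_net_free releases it in one call.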
layer_allocator := alloc.arena.make_allocator(^net.layer_arena);
- net.layers = memory.make_slice(Layer, layer_sizes.count);
+ net.layers = memory.make_slice(Layer, layer_sizes.count, allocator = layer_allocator);
init_layer(^net.layers[0], layer_sizes[0], 0, allocator = layer_allocator);
for i: 1 .. net.layers.count {
init_layer(^net.layers[i], layer_sizes[i], layer_sizes[i - 1], allocator = layer_allocator);
}

return net;
}
+neural_net_free :: (use nn: ^NeuralNet) {
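+ // All layer memory came from the arena, so freeing it frees the entire network.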
+ alloc.arena.free(^layer_arena);
+}
+
neural_net_forward :: (use nn: ^NeuralNet, input: [] float) {
assert(input.count == layers[0].neurons.count, "Input does not have the same size as the first layer.");

for i: input.count do layers[0].neurons[i] = input[i];

// Each neuron takes a weighted sum over the previous layer, then a sigmoid
// squashes it into (0, 1). The sigmoid is an assumed activation here.
for l: 1 .. layers.count do for n: layers[l].neurons.count {
sum: float = 0;
for p: layers[l - 1].neurons.count do sum += layers[l].weights[n][p] * layers[l - 1].neurons[p];
layers[l].neurons[n] = 1.0f / (1.0f + math.exp(-sum));
}
}
+neural_net_backward :: (use nn: ^NeuralNet, expected_output: [] float) {
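+ // Stub for backpropagation; intentionally empty for now.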
+
+}
+
neural_net_get_output :: (use nn: ^NeuralNet) -> [] float {
return layers[layers.count - 1].neurons;
}
+neural_net_loss :: (use nn: ^NeuralNet, expected_output: [] float) -> float {
+ // MSE loss
+ assert(layers[layers.count - 1].neurons.count == expected_output.count,
+ "Expected output does not have the same size as the last layer.");
+
+ output := layers[layers.count - 1].neurons;
+
+ squared_sum: float = 0;
+ for i: expected_output.count {
+ diff := output[i] - expected_output[i];
+ squared_sum += diff * diff;
+ }
+
+ loss := squared_sum / cast(float) expected_output.count;
+ return loss;
+}
+
Layer :: struct {
neurons : [] float;
- weights : [][] float;
+ weights : [][] float; // CLEANUP: Make this a rank 1 slice
}
init_layer :: (use layer: ^Layer, layer_size: u32, prev_layer_size: u32, allocator := context.allocator) {
neurons = memory.make_slice(float, layer_size, allocator = allocator);

// The input layer has no incoming weights; prev_layer_size is 0 there.
// Assumed allocation scheme: one weight row per neuron of this layer.
if prev_layer_size > 0 {
weights = memory.make_slice(#type [] float, layer_size, allocator = allocator);
for ^weight: weights do *weight = memory.make_slice(float, prev_layer_size, allocator = allocator);

randomize_weights(layer);
}
}

randomize_weights :: (use layer: ^Layer) {
for ^weight: weights {
for ^w: *weight {
- *w = random.float(-2.0f, 2.0f);
+ *w = random.float(-1.0f, 1.0f);
}
}
}