From: Brendan Hansen Date: Wed, 27 Jan 2021 16:16:15 +0000 (-0600) Subject: CIFAR-10 is working; needs convolutions to be better X-Git-Url: https://git.brendanfh.com/?a=commitdiff_plain;h=f7ae2999e487faa7675f9ba28634aad39f15eee1;p=onyx-mnist.git CIFAR-10 is working; needs convolutions to be better --- diff --git a/src/cifar10.onyx b/src/cifar10.onyx index edfbc32..9b657ea 100644 --- a/src/cifar10.onyx +++ b/src/cifar10.onyx @@ -5,6 +5,8 @@ use package core +// NOTE(Brendan Hansen): Currently, this supports only loading one of the dataset files, +// even though there are 6 of them. CIFAR10_DataLoader :: struct { use data : DataLoader; @@ -42,7 +44,14 @@ cifar10_dataloader_functions := DataLoader_Functions.{ _, bytes_read := io.stream_read_at(^data_file, location, ~~ sample); label := ~~sample[0]; - // TODO(Brendan Hansen): NOT DONE + for ^o: output do *o = 0; + output[cast(u32) label] = 1; + + for i: 3072 { + input[i] = (cast(f32) cast(u32) sample[i + 1]) / 255; + } + + return true; } }; @@ -96,22 +105,33 @@ stocastic_gradient_descent :: (nn: ^NeuralNet, dataloader: ^DataLoader, criterio printf("Loss: %f Correct: %i / 100\n", cast(f32) loss, past_100_correct); past_100_correct = 0; - - /* - if ex % 10000 == 0 { + + if ex % 1000 == 0 { println("Saving neural network..."); - neural_net_save(nn, "data/test_4.nn"); + neural_net_save(nn, output_file); } - */ } } } } +// :Cleanup +output_file := "data/tmp.nn" main :: (args: [] cstr) { - println("Hello World!"); + if args.count > 1 { + output_file = string.make(args[0]); + } + + printf("Network save location: %s\n", output_file); + + random.set_seed(5432); cifar10_dataloader := cifar10_create(); defer cifar10_close(^cifar10_dataloader); + + nn := make_neural_net(3072, 1024, 256, 10); + defer neural_net_free(^nn); + + stocastic_gradient_descent(^nn, ^cifar10_dataloader); } \ No newline at end of file diff --git a/src/neuralnet.onyx b/src/neuralnet.onyx index e91535a..093b0f8 100644 --- a/src/neuralnet.onyx +++
b/src/neuralnet.onyx @@ -141,8 +141,6 @@ layer_init :: (use layer: ^Layer, layer_size: u32, prev_layer_size: u32, allocat neurons = memory.make_slice(f32, layer_size, allocator); pre_activation_neurons = memory.make_slice(f32, layer_size, allocator); - weights = memory.make_slice(#type [] f32, layer_size, allocator); - use_bias = true; deltas = memory.make_slice(f32, layer_size, allocator); activation = sigmoid_activation; @@ -153,7 +151,8 @@ layer_init :: (use layer: ^Layer, layer_size: u32, prev_layer_size: u32, allocat if use_bias { biases = memory.make_slice(f32, layer_size, allocator); } - + + weights = memory.make_slice(#type [] f32, layer_size, allocator); for ^weight: weights { *weight = memory.make_slice(f32, prev_layer_size, allocator); }