//! A single artificial neuron with ReLU activation: a weight tensor, a
//! scalar bias, a forward pass, and a simple gradient-descent training step.
const std = @import("std");
const Tensor = @import("tensor.zig").Tensor;
pub const Neuron = struct {
    /// Learnable weight vector, one entry per input feature. Owned by this
    /// Neuron and released by `deinit`.
    weights: Tensor,

    /// Learnable scalar bias added to the weighted input sum.
    bias: f32,

    /// Allocates a neuron for `input_size` inputs. Weights are initialized
    /// uniformly in [-1.0, 1.0) from an OS-seeded PRNG; the bias starts at 0.
    /// Caller owns the returned Neuron and must call `deinit` on it.
    ///
    /// Errors: propagates allocation failure from `Tensor.init` and any
    /// failure from `std.posix.getrandom`.
    pub fn init(allocator: std.mem.Allocator, input_size: usize) !Neuron {
        // Seed the PRNG *before* allocating the tensor: `getrandom` is the
        // only other fallible call in this function, so performing it first
        // removes the window in which an already-allocated Tensor could leak
        // on the error path (the original allocated first and had no
        // errdefer).
        var seed: u64 = undefined;
        try std.posix.getrandom(std.mem.asBytes(&seed));

        var prng = std.Random.DefaultPrng.init(seed);
        const rand = prng.random();

        const weights = try Tensor.init(allocator, &[_]usize{input_size});
        for (weights.data) |*w| {
            // rand.float(f32) yields [0, 1); scale and shift into [-1, 1).
            w.* = rand.float(f32) * 2.0 - 1.0;
        }

        return Neuron{
            .weights = weights,
            .bias = 0.0,
        };
    }

    /// Releases the weight tensor. The Neuron must not be used afterwards.
    pub fn deinit(self: *Neuron) void {
        self.weights.deinit();
    }

    /// Computes the forward pass: ReLU(dot(weights, inputs) + bias).
    /// `inputs` must have exactly as many elements as there are weights
    /// (asserted; undefined behavior in ReleaseFast if violated).
    pub fn forward(self: *Neuron, inputs: Tensor) f32 {
        std.debug.assert(inputs.data.len == self.weights.data.len);

        var sum: f32 = 0.0;
        for (inputs.data, self.weights.data) |x, w| {
            sum += x * w;
        }

        const z = sum + self.bias;

        // ReLU activation: pass positive pre-activations through, clamp the
        // rest to zero.
        return if (z > 0) z else 0;
    }

    /// Performs one gradient-descent step toward `target` and returns the
    /// squared error of the pre-update prediction.
    ///
    /// The update uses err * relu'(z) * x per weight (i.e. it treats the
    /// loss as err^2 / 2). relu'(z) is taken as 1 when the post-ReLU
    /// prediction is positive and 0 otherwise — equivalent to testing
    /// z > 0, since ReLU(z) > 0 iff z > 0.
    pub fn train(self: *Neuron, inputs: Tensor, target: f32, learning_rate: f32) f32 {
        const prediction = self.forward(inputs);

        const err = prediction - target;

        // ReLU derivative: 1 in the active region, 0 otherwise. When the
        // neuron is inactive the whole update below is a no-op ("dead ReLU"
        // behavior, intentional here).
        const d_relu: f32 = if (prediction > 0) 1.0 else 0.0;

        for (self.weights.data, inputs.data) |*w, x| {
            const gradient = err * d_relu * x;
            w.* -= learning_rate * gradient;
        }

        self.bias -= learning_rate * err * d_relu;

        return err * err;
    }
};