Test XOR logic gate with a network of neurons

Riccardo Forese 2026-02-03 10:14:52 +01:00
parent 33941367e4
commit 98f557370f
2 changed files with 100 additions and 47 deletions
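XOR is not linearly separable, so the single Neuron used for the AND-gate demo cannot represent it; this commit replaces that setup with a minimal 2-2-1 network (two sigmoid hidden neurons, one sigmoid output) trained by plain backpropagation.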

src/main.zig

@@ -1,69 +1,39 @@
 const std = @import("std");
-const Neuron = @import("neuron.zig").Neuron;
-const Tensor = @import("tensor.zig").Tensor;
+const SimpleNetwork = @import("network.zig").SimpleNetwork;
 
 pub fn main() !void {
-    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
-    const allocator = gpa.allocator();
-    defer _ = gpa.deinit();
-
-    // 1. Initialize the neuron (2 inputs, because the AND gate has 2 inputs)
-    var my_neuron = try Neuron.init(allocator, 2);
-    defer my_neuron.deinit();
-
-    // 2. Prepare the dataset (AND gate)
-    // Create a reusable tensor for the inputs
-    var input_tensor = try Tensor.init(allocator, &[_]usize{2});
-    defer input_tensor.deinit();
-
-    // The 4 possible cases (training data)
-    const training_data = [_][2]f32{
+    var net = SimpleNetwork.init(1234);
+
+    const inputs = [_][2]f32{
         .{ 0.0, 0.0 },
         .{ 0.0, 1.0 },
         .{ 1.0, 0.0 },
         .{ 1.0, 1.0 },
     };
-    // The 4 correct answers (labels)
-    const targets = [_]f32{ 0.0, 0.0, 0.0, 1.0 };
-    const lr: f32 = 0.1; // A slightly more aggressive learning rate
-    std.debug.print("--- TRAINING START (AND GATE) ---\n", .{});
+    const targets = [_]f32{ 0.0, 1.0, 1.0, 0.0 };
+    const lr: f32 = 0.5;
+    std.debug.print("--- TRAINING XOR (2 LAYERS) ---\n", .{});
 
-    // 3. Training loop
     var epoch: usize = 0;
-    while (epoch < 2000) : (epoch += 1) { // 2000 epochs
-        var total_error: f32 = 0.0;
-        // In every epoch, go through ALL the examples
-        for (training_data, 0..) |data, index| {
-            // Load the data into the tensor
-            input_tensor.data[0] = data[0];
-            input_tensor.data[1] = data[1];
-            // Train on this specific example
-            const loss = my_neuron.train(input_tensor, targets[index], lr);
-            total_error += loss;
+    while (epoch < 10000) : (epoch += 1) {
+        var total_loss: f32 = 0.0;
+        for (inputs, 0..) |inp, i| {
+            total_loss += net.train(inp, targets[i], lr);
         }
-        // Print every 200 epochs
-        if (epoch % 200 == 0) {
-            std.debug.print("Epoch {d}: Mean Error = {d:.6}\n", .{ epoch, total_error / 4.0 });
+        if (epoch % 1000 == 0) {
+            std.debug.print("Epoch {d}: Loss = {d:.6}\n", .{ epoch, total_loss / 4.0 });
         }
     }
 
-    std.debug.print("\n--- FINAL TEST ---\n", .{});
-
-    // Check what the network has learned
-    for (training_data) |data| {
-        input_tensor.data[0] = data[0];
-        input_tensor.data[1] = data[1];
-        const prediction = my_neuron.forward(input_tensor);
-        // Round visually to tell whether it is 0 or 1
-        const result_bool: u8 = if (prediction > 0.5) 1 else 0;
-        std.debug.print("Input: {d:.1}, {d:.1} -> Prediction: {d:.4} (Interpreted: {d})\n", .{ data[0], data[1], prediction, result_bool });
+    std.debug.print("\n--- TEST XOR ---\n", .{});
+    for (inputs) |inp| {
+        const out = net.forward(inp);
+        const bit: u8 = if (out > 0.5) 1 else 0;
+        std.debug.print("In: {d:.0},{d:.0} -> Out: {d:.4} -> {d}\n", .{ inp[0], inp[1], out, bit });
     }
 }
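Assuming both files sit in src/ (an assumption based on the relative import path), a plain `zig run src/main.zig` should train the network and print the loss every 1000 epochs followed by the four predictions. Note that no allocator is needed anymore: SimpleNetwork lives entirely on the stack.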

src/network.zig (new file, 83 additions)

@@ -0,0 +1,83 @@
const std = @import("std");

pub const SimpleNetwork = struct {
    // A minimal 2-2-1 multilayer perceptron: 2 inputs, 2 hidden neurons, 1 output.
    w_hidden: [2][2]f32, // w_hidden[i] = input weights of hidden neuron i
    b_hidden: [2]f32, // hidden biases
    w_output: [2]f32, // hidden-to-output weights
    b_output: f32, // output bias
    hidden_outputs: [2]f32, // activations cached by forward() for use in train()
    pub fn init(seed: u64) SimpleNetwork {
        var prng = std.Random.DefaultPrng.init(seed);
        const rand = prng.random();
        var net = SimpleNetwork{
            .w_hidden = undefined,
            .b_hidden = undefined,
            .w_output = undefined,
            .b_output = 0.0,
            .hidden_outputs = undefined,
        };
        // Weights start uniform in [-1, 1); biases start at zero.
        for (&net.w_hidden) |*row| {
            row[0] = rand.float(f32) * 2.0 - 1.0;
            row[1] = rand.float(f32) * 2.0 - 1.0;
        }
        for (&net.b_hidden) |*b| b.* = 0.0;
        for (&net.w_output) |*w| w.* = rand.float(f32) * 2.0 - 1.0;
        return net;
    }
    fn sigmoid(x: f32) f32 {
        return 1.0 / (1.0 + std.math.exp(-x));
    }

    // Note: takes the already-activated value a = sigmoid(s);
    // the derivative of the sigmoid then simplifies to a * (1 - a).
    fn sigmoid_derivative(x: f32) f32 {
        return x * (1.0 - x);
    }
    pub fn forward(self: *SimpleNetwork, input: [2]f32) f32 {
        // Hidden layer; activations are cached for the backward pass in train().
        for (0..2) |i| {
            const sum = (input[0] * self.w_hidden[i][0]) +
                (input[1] * self.w_hidden[i][1]) +
                self.b_hidden[i];
            self.hidden_outputs[i] = sigmoid(sum);
        }
        // Output layer.
        const sum_out = (self.hidden_outputs[0] * self.w_output[0]) +
            (self.hidden_outputs[1] * self.w_output[1]) +
            self.b_output;
        return sigmoid(sum_out);
    }
    pub fn train(self: *SimpleNetwork, input: [2]f32, target: f32, lr: f32) f32 {
        const prediction = self.forward(input);

        // Backpropagation for the squared loss (target - prediction)^2;
        // the constant factor of 2 is absorbed into the learning rate.
        const output_error = target - prediction;
        const output_delta = output_error * sigmoid_derivative(prediction);

        // Each hidden neuron receives its share of the output delta
        // through the output weight that connects it.
        var hidden_deltas: [2]f32 = undefined;
        for (0..2) |i| {
            const error_contrib = output_delta * self.w_output[i];
            hidden_deltas[i] = error_contrib * sigmoid_derivative(self.hidden_outputs[i]);
        }

        // Gradient step on the output layer...
        for (0..2) |i| {
            self.w_output[i] += lr * output_delta * self.hidden_outputs[i];
        }
        self.b_output += lr * output_delta;

        // ...then on the hidden layer.
        for (0..2) |i| {
            self.w_hidden[i][0] += lr * hidden_deltas[i] * input[0];
            self.w_hidden[i][1] += lr * hidden_deltas[i] * input[1];
            self.b_hidden[i] += lr * hidden_deltas[i];
        }
        // Report the squared error for this sample.
        return output_error * output_error;
    }
};
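
A quick sanity check, as a sketch rather than part of the commit: the `zig test` block below retrains the network and asserts that the rounded outputs reproduce the XOR truth table. It assumes it lives in a file next to network.zig in src/, and that seed 1234 converges within 10,000 epochs at lr = 0.5 (the same schedule main.zig uses; other seeds are not guaranteed to converge).

const std = @import("std");
const SimpleNetwork = @import("network.zig").SimpleNetwork;

test "SimpleNetwork learns XOR" {
    var net = SimpleNetwork.init(1234);
    const inputs = [_][2]f32{
        .{ 0.0, 0.0 },
        .{ 0.0, 1.0 },
        .{ 1.0, 0.0 },
        .{ 1.0, 1.0 },
    };
    const targets = [_]f32{ 0.0, 1.0, 1.0, 0.0 };

    // Same training schedule as main.zig.
    var epoch: usize = 0;
    while (epoch < 10000) : (epoch += 1) {
        for (inputs, 0..) |inp, i| {
            _ = net.train(inp, targets[i], 0.5);
        }
    }

    // The rounded prediction should match the XOR truth table.
    for (inputs, 0..) |inp, i| {
        const bit: f32 = if (net.forward(inp) > 0.5) 1.0 else 0.0;
        try std.testing.expectEqual(targets[i], bit);
    }
}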