solidc
Robust collection of general-purpose cross-platform C libraries and data structures designed for rapid and safe development in C
Loading...
Searching...
No Matches
ml.c
1#include "../include/ml.h"
2#include <math.h>
3#include <stdio.h>
4#include <stdlib.h>
5
6/* --- Helper Functions --- */
7
8static float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); }
9
/* Derivative of the logistic function, expressed in terms of its output:
 * if s = sigmoid(x), then d(sigmoid)/dx = s * (1 - s). */
static float sigmoid_derivative(float sig) {
    return (1.0f - sig) * sig;
}
11
12static Vec4 vec4_sigmoid(Vec4 v) { return (Vec4){sigmoid(v.x), sigmoid(v.y), sigmoid(v.z), sigmoid(v.w)}; }
13
14static Vec4 vec4_sigmoid_derivative(Vec4 v) {
15 return (Vec4){sigmoid_derivative(v.x), sigmoid_derivative(v.y), sigmoid_derivative(v.z), sigmoid_derivative(v.w)};
16}
17
/* Uniform random float in [-1, 1], driven by rand(); seed with srand().
 * Note: '(void)' replaces the obsolescent empty parameter list '()'. */
static float random_float(void) {
    return ((float)rand() / (float)RAND_MAX) * 2.0f - 1.0f; // -1 to 1
}
21
22static Mat4 random_mat4() {
23 Mat4 m;
24 for (int i = 0; i < 4; i++) {
25 for (int j = 0; j < 4; j++) {
26 m.m[i][j] = random_float();
27 }
28 }
29 return m;
30}
31
32static Vec4 random_vec4() { return (Vec4){random_float(), random_float(), random_float(), random_float()}; }
33
34/* --- ML Implementation --- */
35
36void ml_init(ML_Network* net, int num_layers, float learning_rate) {
37 if (num_layers > ML_MAX_LAYERS) num_layers = ML_MAX_LAYERS;
38 net->layer_count = num_layers;
39 net->learning_rate = learning_rate;
40
41 for (int i = 0; i < num_layers; i++) {
42 net->layers[i].weights = random_mat4();
43 net->layers[i].bias = random_vec4();
44 }
45}
46
48 Vec4 current_input = input;
49
50 for (int i = 0; i < net->layer_count; i++) {
51 ML_Layer* layer = &net->layers[i];
52
53 layer->input = current_input;
54
55 // Linear: z = W * x + b
56 Vec4 wx = mat4_mul_vec4(layer->weights, current_input);
57
58 // Add bias (component-wise)
59 // Need to load to SIMD for addition
60 SimdVec4 s_wx = vec4_load(wx);
61 SimdVec4 s_bias = vec4_load(layer->bias);
62 SimdVec4 s_z = vec4_add(s_wx, s_bias);
63
64 layer->z = vec4_store(s_z);
65
66 // Activation
67 layer->output = vec4_sigmoid(layer->z);
68 current_input = layer->output;
69 }
70
71 return current_input;
72}
73
74float ml_train_step(ML_Network* net, Vec4 input, Vec4 target) {
75 // 1. Forward Pass
76 Vec4 prediction = ml_forward(net, input);
77
78 // 2. Compute Loss (MSE) and Output Gradient
79 // Loss = 0.5 * (target - output)^2
80 // dLoss/dOutput = (output - target)
81 SimdVec4 s_target = vec4_load(target);
82 SimdVec4 s_output = vec4_load(prediction);
83 SimdVec4 s_error = vec4_sub(s_output, s_target); // Gradient of Loss w.r.t Output
84
85 float mse = vec4_length_sq(s_error) * 0.5f;
86
87 // 3. Backpropagation
88 // We propagate 'delta' backwards.
89 // Delta = dLoss/dZ = dLoss/dOutput * dOutput/dZ
90 // dOutput/dZ = sigmoid'(output)
91
92 Vec4 current_delta_vec = vec4_store(s_error); // dL/dY
93
94 for (int i = net->layer_count - 1; i >= 0; i--) {
95 ML_Layer* layer = &net->layers[i];
96
97 // Calculate Activation Derivative
98 Vec4 d_act = vec4_sigmoid_derivative(layer->output);
99 SimdVec4 s_d_act = vec4_load(d_act);
100 SimdVec4 s_curr_delta = vec4_load(current_delta_vec);
101
102 // Element-wise multiply: delta = error * sigmoid'
103 SimdVec4 s_delta = vec4_scale(s_curr_delta, s_d_act);
104 Vec4 delta = vec4_store(s_delta);
105
106 // Gradients
107 // dW = delta * input^T
108 // db = delta
109
110 // Update Weights: W = W - lr * dW
111 // dW[r][c] = delta[r] * input[c]
112 // This is an outer product: delta (col) * input (row)
113
114 // We need to construct dW matrix.
115 Mat4 dW;
116 // Col 0 = input.x * delta
117 // Col 1 = input.y * delta
118 // ...
119 // Wait. dW_ij = delta_i * input_j.
120 // In column-major M[col][row], this is M[j][i] = delta[i] * input[j].
121 // So Col j contains (delta_0 * input_j, delta_1 * input_j, ...) = delta * input_j.
122
123 SimdVec4 s_delta_simd = vec4_load(delta);
124
125 dW.cols[0] = vec4_mul(s_delta_simd, layer->input.x).v;
126 dW.cols[1] = vec4_mul(s_delta_simd, layer->input.y).v;
127 dW.cols[2] = vec4_mul(s_delta_simd, layer->input.z).v;
128 dW.cols[3] = vec4_mul(s_delta_simd, layer->input.w).v;
129
130 // Store Gradient (optional if we want batching, but we do SGD here)
131 layer->d_weights = dW;
132 layer->d_bias = delta;
133
134 // Propagate error to previous layer
135 // PrevError = W^T * Delta
136 if (i > 0) {
137 Mat4 W_T = mat4_transpose(layer->weights);
138 // mat4_mul_vec4 computes M * v.
139 // We need W^T * delta.
140 current_delta_vec = mat4_mul_vec4(W_T, delta);
141 }
142
143 // Apply Updates
144 // W = W - lr * dW
145 Mat4 step_W = mat4_scalar_mul(dW, net->learning_rate);
146 layer->weights = mat4_sub(layer->weights, step_W);
147
148 // b = b - lr * db
149 SimdVec4 s_db = vec4_load(delta);
150 SimdVec4 s_step_b = vec4_mul(s_db, net->learning_rate);
151 SimdVec4 s_new_b = vec4_sub(vec4_load(layer->bias), s_step_b);
152 layer->bias = vec4_store(s_new_b);
153 }
154
155 return mse;
156}
Vec4 ml_forward(ML_Network *net, Vec4 input)
Perform forward propagation.
Definition ml.c:47
float ml_train_step(ML_Network *net, Vec4 input, Vec4 target)
Train the network on a single sample.
Definition ml.c:74
void ml_init(ML_Network *net, int num_layers, float learning_rate)
Initialize a new network.
Definition ml.c:36
Represents a single fully connected layer 4 inputs -> 4 outputs.
Definition ml.h:30
A neural network composed of multiple layers.
Definition ml.h:48
A 4x4 matrix for 3D transformations.
4D vector storage type (16 bytes, naturally aligned).
Definition vec.h:75
float y
Y component.
Definition vec.h:77
float w
W component (also used for homogeneous coordinates)
Definition vec.h:79
float x
X component.
Definition vec.h:76
float z
Z component.
Definition vec.h:78