Skip to content

Commit c94e2ea

Browse files
committed
Ref: New implementation with updated APIs
1 parent 9d99314 commit c94e2ea

File tree

3 files changed

+68
-177
lines changed

3 files changed

+68
-177
lines changed

README.md

+18-51
Original file line numberDiff line numberDiff line change
@@ -101,73 +101,40 @@ The `main.c` file demonstrates how to use the library to create a simple neural
101101
```c
102102
#include <stdio.h>
103103
#include <stdlib.h>
104-
#include <time.h>
105-
#include "include/Core/training.h"
104+
#include "../include/Core/training.h"
105+
#include "../include/Core/dataset.h"
106106

107107
int main()
108108
{
109-
srand(time(NULL));
110109
NeuralNetwork *network = create_neural_network(2);
111-
112-
build_network(network, OPTIMIZER_ADAM, 0.01f, LOSS_MSE, 0.0f, 0.0f);
110+
build_network(network, OPTIMIZER_ADAM, 0.1f, LOSS_MSE, 0.0f, 0.0f);
113111
model_add(network, LAYER_DENSE, ACTIVATION_RELU, 2, 4, 0.0f, 0, 0);
114112
model_add(network, LAYER_DENSE, ACTIVATION_TANH, 4, 4, 0.0f, 0, 0);
115113
model_add(network, LAYER_DENSE, ACTIVATION_SIGMOID, 4, 1, 0.0f, 0, 0);
116114

117-
int num_samples = 4;
118-
float **X_train = (float **)cm_safe_malloc(num_samples * sizeof(float *), __FILE__, __LINE__);
119-
float **y_train = (float **)cm_safe_malloc(num_samples * sizeof(float *), __FILE__, __LINE__);
120-
121-
for (int i = 0; i < num_samples; i++)
122-
{
123-
X_train[i] = (float *)cm_safe_malloc(2 * sizeof(float), __FILE__, __LINE__);
124-
y_train[i] = (float *)cm_safe_malloc(1 * sizeof(float), __FILE__, __LINE__);
125-
}
115+
float X_data[4][2] = {
116+
{0.0f, 0.0f},
117+
{0.0f, 1.0f},
118+
{1.0f, 0.0f},
119+
{1.0f, 1.0f}};
126120

127-
X_train[0][0] = 0.0f;
128-
X_train[0][1] = 0.0f;
129-
y_train[0][0] = 0.0f;
130-
X_train[1][0] = 0.0f;
131-
X_train[1][1] = 1.0f;
132-
y_train[1][0] = 1.0f;
121+
float y_data[4][1] = {
122+
{0.0f},
123+
{1.0f},
124+
{1.0f},
125+
{1.0f}};
133126

134-
X_train[2][0] = 1.0f;
135-
X_train[2][1] = 0.0f;
136-
y_train[2][0] = 1.0f;
137-
138-
X_train[3][0] = 1.0f;
139-
X_train[3][1] = 1.0f;
140-
y_train[3][0] = 1.0f;
127+
Dataset *dataset = dataset_create();
128+
dataset_load_arrays(dataset, (float *)X_data, (float *)y_data, 4, 2, 1);
141129

142130
summary(network);
143-
train_network(network, X_train, y_train, num_samples, 2, 1, 1, 300);
144-
145-
MetricType metrics[] = {METRIC_R2_SCORE};
146131

147-
int num_metrics = sizeof(metrics) / sizeof(metrics[0]);
148-
float results[num_metrics];
149-
150-
test_network(network, X_train, y_train, num_samples, 2, 1, (int *)metrics, num_metrics, results);
151-
printf("R2 Score: %.2f\n", results[0]);
152-
153-
for (int i = 0; i < num_samples; i++)
154-
{
155-
float prediction = 0.0f;
156-
forward_pass(network, X_train[i], &prediction, 2, 1, 0);
157-
printf("Input: [%.0f, %.0f], Expected: %.0f, Predicted: %.4f\n",
158-
X_train[i][0], X_train[i][1], y_train[i][0], prediction);
159-
}
132+
train_network(network, dataset, 30);
133+
test_network(network, dataset->X, dataset->y, dataset->num_samples, NULL);
160134

135+
dataset_free(dataset);
161136
free_neural_network(network);
162137

163-
for (int i = 0; i < num_samples; i++)
164-
{
165-
cm_safe_free((void **)&X_train[i]);
166-
cm_safe_free((void **)&y_train[i]);
167-
}
168-
cm_safe_free((void **)&X_train);
169-
cm_safe_free((void **)&y_train);
170-
171138
return 0;
172139
}
173140
```

docs/usage.md

+24-60
Original file line numberDiff line numberDiff line change
@@ -4,86 +4,50 @@ This page provides an example of how to use the C-ML library to create and train
44

55
## Neural Network Training Example
66

7-
The following code demonstrates how to:
8-
9-
- Create a neural network.
10-
- Add dense layers with ReLU, Tanh, and Sigmoid activations.
11-
- Train the network using the Adam optimizer and Mean Squared Error loss.
12-
- Evaluate the network using the R2 score metric.
13-
- Make predictions with the trained network.
14-
157
```c
168
#include <stdio.h>
179
#include <stdlib.h>
18-
#include <time.h>
19-
#include "include/Core/training.h"
10+
#include "../include/Core/training.h"
11+
#include "../include/Core/dataset.h"
2012

2113
int main()
2214
{
23-
srand(time(NULL));
2415
NeuralNetwork *network = create_neural_network(2);
25-
26-
build_network(network, OPTIMIZER_ADAM, 0.01f, LOSS_MSE, 0.0f, 0.0f);
16+
build_network(network, OPTIMIZER_ADAM, 0.1f, LOSS_MSE, 0.0f, 0.0f);
2717
model_add(network, LAYER_DENSE, ACTIVATION_RELU, 2, 4, 0.0f, 0, 0);
2818
model_add(network, LAYER_DENSE, ACTIVATION_TANH, 4, 4, 0.0f, 0, 0);
2919
model_add(network, LAYER_DENSE, ACTIVATION_SIGMOID, 4, 1, 0.0f, 0, 0);
3020

31-
int num_samples = 4;
32-
float **X_train = (float **)cm_safe_malloc(num_samples * sizeof(float *), __FILE__, __LINE__);
33-
float **y_train = (float **)cm_safe_malloc(num_samples * sizeof(float *), __FILE__, __LINE__);
34-
35-
for (int i = 0; i < num_samples; i++)
36-
{
37-
X_train[i] = (float *)cm_safe_malloc(2 * sizeof(float), __FILE__, __LINE__);
38-
y_train[i] = (float *)cm_safe_malloc(1 * sizeof(float), __FILE__, __LINE__);
39-
}
40-
41-
X_train[0][0] = 0.0f;
42-
X_train[0][1] = 0.0f;
43-
y_train[0][0] = 0.0f;
44-
X_train[1][0] = 0.0f;
45-
X_train[1][1] = 1.0f;
46-
y_train[1][0] = 1.0f;
21+
float X_data[4][2] = {
22+
{0.0f, 0.0f},
23+
{0.0f, 1.0f},
24+
{1.0f, 0.0f},
25+
{1.0f, 1.0f}};
4726

48-
X_train[2][0] = 1.0f;
49-
X_train[2][1] = 0.0f;
50-
y_train[2][0] = 1.0f;
27+
float y_data[4][1] = {
28+
{0.0f},
29+
{1.0f},
30+
{1.0f},
31+
{1.0f}};
5132

52-
X_train[3][0] = 1.0f;
53-
X_train[3][1] = 1.0f;
54-
y_train[3][0] = 1.0f;
33+
Dataset *dataset = dataset_create();
34+
dataset_load_arrays(dataset, (float *)X_data, (float *)y_data, 4, 2, 1);
5535

5636
summary(network);
57-
train_network(network, X_train, y_train, num_samples, 2, 1, 1, 300);
5837

59-
MetricType metrics[] = {METRIC_R2_SCORE};
60-
61-
int num_metrics = sizeof(metrics) / sizeof(metrics[0]);
62-
float results[num_metrics];
63-
64-
test_network(network, X_train, y_train, num_samples, 2, 1, (int *)metrics, num_metrics, results);
65-
printf("R2 Score: %.2f\n", results[0]);
66-
67-
for (int i = 0; i < num_samples; i++)
68-
{
69-
float prediction = 0.0f;
70-
forward_pass(network, X_train[i], &prediction, 2, 1, 0);
71-
printf("Input: [%.0f, %.0f], Expected: %.0f, Predicted: %.4f\n",
72-
X_train[i][0], X_train[i][1], y_train[i][0], prediction);
73-
}
38+
train_network(network, dataset, 30);
39+
test_network(network, dataset->X, dataset->y, dataset->num_samples, NULL);
7440

41+
dataset_free(dataset);
7542
free_neural_network(network);
7643

77-
for (int i = 0; i < num_samples; i++)
78-
{
79-
cm_safe_free((void **)&X_train[i]);
80-
cm_safe_free((void **)&y_train[i]);
81-
}
82-
cm_safe_free((void **)&X_train);
83-
cm_safe_free((void **)&y_train);
84-
8544
return 0;
8645
}
8746
```
8847

89-
This example provides a basic framework for building and training neural networks using the C-ML library. You can modify and extend this example to create more complex models and solve a variety of machine learning problems.
48+
This example demonstrates how to:
49+
- Create a neural network with three dense layers
50+
- Use different activation functions (ReLU, Tanh, Sigmoid)
51+
- Create and load a dataset
52+
- Train the network using the Adam optimizer
53+
- Test the network's performance

main.c

+26-66
Original file line numberDiff line numberDiff line change
@@ -1,78 +1,38 @@
1+
#include <stdio.h>
12
#include <stdlib.h>
2-
#include <math.h>
3-
#include "include/Layers/dense.h"
4-
#include "include/Layers/flatten.h"
5-
#include "include/Activations/relu.h"
6-
#include "include/Loss_Functions/mean_squared_error.h"
7-
#include "include/Core/error_codes.h"
3+
#include "include/Core/training.h"
4+
#include "include/Core/dataset.h"
85

96
int main()
107
{
11-
float input[] = {1.0, 2.0, 3.0};
12-
int input_size = 3;
8+
NeuralNetwork *network = create_neural_network(2);
9+
build_network(network, OPTIMIZER_ADAM, 0.1f, LOSS_MSE, 0.0f, 0.0f);
10+
model_add(network, LAYER_DENSE, ACTIVATION_RELU, 2, 4, 0.0f, 0, 0);
11+
model_add(network, LAYER_DENSE, ACTIVATION_TANH, 4, 4, 0.0f, 0, 0);
12+
model_add(network, LAYER_DENSE, ACTIVATION_SIGMOID, 4, 1, 0.0f, 0, 0);
1313

14-
float target[] = {0.0, 1.0};
15-
int output_size = 2;
14+
float X_data[4][2] = {
15+
{0.0f, 0.0f},
16+
{0.0f, 1.0f},
17+
{1.0f, 0.0f},
18+
{1.0f, 1.0f}};
1619

17-
FlattenLayer flatten_layer = {0, 0};
18-
if (initialize_flatten(&flatten_layer, input_size) != CM_SUCCESS)
19-
{
20-
fprintf(stderr, "Failed to initialize Flatten Layer\n");
21-
return CM_LAYER_NOT_INITIALIZED_ERROR;
22-
}
20+
float y_data[4][1] = {
21+
{0.0f},
22+
{1.0f},
23+
{1.0f},
24+
{1.0f}};
2325

24-
float flattened_output[3];
25-
if (forward_flatten(&flatten_layer, input, flattened_output) != CM_SUCCESS)
26-
{
27-
fprintf(stderr, "Failed to perform forward pass for Flatten Layer\n");
28-
return CM_INVALID_LAYER_DIMENSIONS_ERROR;
29-
}
26+
Dataset *dataset = dataset_create();
27+
dataset_load_arrays(dataset, (float *)X_data, (float *)y_data, 4, 2, 1);
3028

31-
DenseLayer dense_layer = {NULL, NULL, 0, 0};
32-
if (initialize_dense(&dense_layer, input_size, output_size) != CM_SUCCESS)
33-
{
34-
fprintf(stderr, "Failed to initialize Dense Layer\n");
35-
return CM_LAYER_NOT_INITIALIZED_ERROR;
36-
}
29+
summary(network);
3730

38-
float dense_output[2];
39-
if (forward_dense(&dense_layer, flattened_output, dense_output) != CM_SUCCESS)
40-
{
41-
fprintf(stderr, "Failed to perform forward pass for Dense Layer\n");
42-
return CM_INVALID_LAYER_DIMENSIONS_ERROR;
43-
}
31+
train_network(network, dataset, 30);
32+
test_network(network, dataset->X, dataset->y, dataset->num_samples, NULL);
4433

45-
for (int i = 0; i < output_size; i++)
46-
{
47-
dense_output[i] = relu(dense_output[i]);
48-
}
34+
dataset_free(dataset);
35+
free_neural_network(network);
4936

50-
float loss = mean_squared_error(target, dense_output, output_size);
51-
if (loss == CM_INVALID_INPUT_ERROR)
52-
{
53-
fprintf(stderr, "Failed to compute Mean Squared Error\n");
54-
return CM_INVALID_INPUT_ERROR;
55-
}
56-
57-
float d_output[2] = {dense_output[0] - target[0], dense_output[1] - target[1]};
58-
float d_input[3] = {0};
59-
float d_weights[6] = {0};
60-
float d_biases[2] = {0};
61-
if (backward_dense(&dense_layer, flattened_output, dense_output, d_output, d_input, d_weights, d_biases) != CM_SUCCESS)
62-
{
63-
fprintf(stderr, "Failed to perform backward pass for Dense Layer\n");
64-
return CM_INVALID_LAYER_DIMENSIONS_ERROR;
65-
}
66-
67-
float learning_rate = 0.01;
68-
if (update_dense(&dense_layer, d_weights, d_biases, learning_rate) != CM_SUCCESS)
69-
{
70-
fprintf(stderr, "Failed to update Dense Layer\n");
71-
return CM_INVALID_LAYER_DIMENSIONS_ERROR;
72-
}
73-
74-
free_dense(&dense_layer);
75-
free_flatten(&flatten_layer);
76-
printf("Program completed successfully.\n");
77-
return CM_SUCCESS;
37+
return 0;
7838
}

0 commit comments

Comments (0)