// bindings.cpp
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>

#include <stdexcept>
#include <string>
#include <vector>

#include "include/modules.h"
#include "include/activations.h"
#include "include/functionals.h"
#include "include/cross_entropy.h"
#include "include/dataloader.h"

namespace py = pybind11;

PYBIND11_MODULE(libmnist, m) {
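    // The first macro argument ("libmnist") must match the filename of the
    // built extension module that Python imports.
    //
    // Conversion note: forward/backward are wrapped in lambdas so that their
    // std::vector<float> results can be handed back to Python as NumPy arrays.
    // py::array_t<float>(size, data) copies the buffer, so each returned array
    // owns its data independently of the temporary C++ vector.
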
    // Bind the ReLu class
    py::class_<ReLu>(m, "ReLu")
        .def(py::init<>())
        .def("forward", [](ReLu& self, const std::vector<float>& input) {
            auto output = self.forward(input);
            // Return as NumPy array
            return py::array_t<float>(output.size(), output.data());
        }, "Apply ReLu activation")
        .def("backward", [](ReLu& self, const std::vector<float>& grad_output) {
            auto grad_input = self.backward(grad_output);
            return py::array_t<float>(grad_input.size(), grad_input.data());
        }, "Compute the backward pass of ReLu")
        .def("update", &ReLu::update, py::arg("lr"), "Update the parameters of ReLu");
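
    // Note: ReLu has no trainable parameters; its update(lr) is presumably a
    // no-op exposed only so every layer shares the same interface.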

    // Bind the LinearLayer class
    py::class_<LinearLayer>(m, "LinearLayer")
        .def(py::init<int, int>())
        .def("forward", [](LinearLayer& self, const std::vector<float>& input) {
            auto output = self.forward(input);
            // Return as NumPy array
            return py::array_t<float>(output.size(), output.data());
        }, "Perform forward pass with Linear Layer")
        .def("backward", [](LinearLayer& self, const std::vector<float>& grad_output) {
            auto grad_input = self.backward(grad_output);
            return py::array_t<float>(grad_input.size(), grad_input.data());
        }, "Compute the backward pass of Linear Layer")
        .def("update", &LinearLayer::update, py::arg("lr"), "Update the parameters of LinearLayer")
        .def_readwrite("weights", &LinearLayer::weights)
        .def_readwrite("bias", &LinearLayer::bias)
        .def_readwrite("grad_weights", &LinearLayer::grad_weights)
        .def_readwrite("grad_bias", &LinearLayer::grad_bias);
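
    // The def_readwrite bindings above expose the parameter and gradient
    // vectors by value: with pybind11/stl.h, reading layer.weights from Python
    // returns a copy as a list, and assigning replaces the whole vector; it is
    // not a writable view into C++ memory.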
    // Bind the SoftmaxndCrossEntropy class
    py::class_<SoftmaxndCrossEntropy>(m, "SoftmaxndCrossEntropy")
        .def(py::init<int>())
        .def("forward", [](SoftmaxndCrossEntropy& self, const std::vector<float>& input, int class_label) {
            return self.forward(input, class_label);
        }, "Apply softmax to the input and return the cross-entropy loss for class_label")
        .def("backward", [](SoftmaxndCrossEntropy& self) {
            auto grad = self.backward();
            return py::array_t<float>(grad.size(), grad.data());
        }, "Compute the backward pass of Softmax and Cross Entropy");

    // Bind the DataLoader class
    py::class_<DataLoader>(m, "DataLoader")
        .def_static("load_images", [](const std::string& filepath) {
            auto images = DataLoader::load_images(filepath);
            py::ssize_t num_images = static_cast<py::ssize_t>(images.size());
            if (num_images == 0) {
                throw std::runtime_error("No images loaded");
            }
            py::ssize_t image_size = static_cast<py::ssize_t>(images[0].size());

            // Create a NumPy array of shape (num_images, image_size)
            py::array_t<float> result({num_images, image_size});

            auto buf = result.mutable_unchecked<2>();

            for (py::ssize_t i = 0; i < num_images; ++i) {
                if (static_cast<py::ssize_t>(images[i].size()) != image_size) {
                    throw std::runtime_error("Inconsistent image sizes");
                }
                for (py::ssize_t j = 0; j < image_size; ++j) {
                    buf(i, j) = images[i][j];
                }
            }
            return result;
        }, "Load images from file")
        .def_static("load_labels", [](const std::string& filepath) {
            auto labels = DataLoader::load_labels(filepath);
            py::ssize_t num_labels = static_cast<py::ssize_t>(labels.size());

            py::array_t<int> result({num_labels});
            auto buf = result.mutable_unchecked<1>();
            for (py::ssize_t i = 0; i < num_labels; ++i) {
                buf(i) = labels[i];
            }
            return result;
        }, "Load labels from file");
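
    // For the standard MNIST IDX files, load_images is expected to yield a
    // float array of shape (num_images, 784) (28x28 pixels flattened) and
    // load_labels an int array of shape (num_labels,); the exact shapes come
    // from DataLoader's parsing in dataloader.h.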

    // Bind the functionals submodule
    py::module_ functionals = m.def_submodule("functionals", "Submodule for functional operations");
    functionals.def("softmax", [](const std::vector<float>& input) {
        auto output = functionals::softmax(input);
        return py::array_t<float>(output.size(), output.data());
    }, "Compute the softmax of a 1D vector");
    functionals.def("flatten2d", [](const std::vector<std::vector<float>>& input) {
        auto output = functionals::flatten2d(input);
        return py::array_t<float>(output.size(), output.data());
    }, "Flatten a 2D vector into a 1D vector");
}
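
/*
Example usage from Python: a minimal sketch, assuming the module has been built
as `libmnist` together with the project's other sources, and that LinearLayer's
constructor takes (in_features, out_features); the headers define the actual
order. File paths are illustrative.

    import libmnist

    images = libmnist.DataLoader.load_images("data/train-images-idx3-ubyte")
    labels = libmnist.DataLoader.load_labels("data/train-labels-idx1-ubyte")

    fc = libmnist.LinearLayer(784, 10)
    loss_fn = libmnist.SoftmaxndCrossEntropy(10)

    loss = loss_fn.forward(fc.forward(images[0]), int(labels[0]))
    fc.backward(loss_fn.backward())   # compute parameter gradients
    fc.update(lr=0.01)                # SGD step

Possible build command (assumes pybind11 is installed; <other .cpp files>
stands for the remaining translation units):

    c++ -O3 -shared -std=c++17 -fPIC $(python3 -m pybind11 --includes) \
        bindings.cpp <other .cpp files> \
        -o libmnist$(python3-config --extension-suffix)
*/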