diff --git a/examples/mnist-learn/CMakeLists.txt b/examples/mnist-learn/CMakeLists.txt index ff5df638e..14112758d 100644 --- a/examples/mnist-learn/CMakeLists.txt +++ b/examples/mnist-learn/CMakeLists.txt @@ -27,5 +27,5 @@ if (NOT TARGET KNP::BaseFramework::Core) find_package(knp-base-framework REQUIRED) endif() -add_executable(mnist_learn_example main.cpp data_read.cpp construct_network.cpp evaluation.cpp train.cpp inference.cpp wta.cpp time_string.cpp) +add_executable(mnist_learn_example main.cpp construct_network.cpp train.cpp inference.cpp time_string.cpp) target_link_libraries(mnist_learn_example PRIVATE KNP::BaseFramework::Core ${Boost_LIBRARIES}) diff --git a/examples/mnist-learn/construct_network.cpp b/examples/mnist-learn/construct_network.cpp index 5d98c3331..457c75e47 100644 --- a/examples/mnist-learn/construct_network.cpp +++ b/examples/mnist-learn/construct_network.cpp @@ -21,51 +21,21 @@ #include "construct_network.h" -#include -#include -#include -#include -#include +#include "shared_network.h" -#include "generators.h" // A list of short type names to make reading easier. +using DeltaSynapseData = knp::synapse_traits::synapse_parameters; +using DeltaProjection = knp::core::Projection; +using ResourceSynapse = knp::synapse_traits::SynapticResourceSTDPDeltaSynapse; +using ResourceDeltaProjection = knp::core::Projection; +using ResourceSynapseData = ResourceDeltaProjection::Synapse; +using ResourceSynapseParams = knp::synapse_traits::synapse_parameters; using BlifatPopulation = knp::core::Population; using ResourceBlifatPopulation = knp::core::Population; using ResourceNeuron = knp::neuron_traits::SynapticResourceSTDPBLIFATNeuron; using ResourceNeuronData = knp::neuron_traits::neuron_parameters; -// Network hyperparameters. You may want to fine-tune these. 
-constexpr float default_threshold = 8.571F; -constexpr float min_synaptic_weight = -0.7; -constexpr float max_synaptic_weight = 0.864249F; -constexpr float base_weight_value = 0.000F; -constexpr int neuron_dopamine_period = 10; -constexpr int synapse_dopamine_period = 10; -constexpr float l_neuron_potential_decay = 1.0 - 1.0 / 3.0; -constexpr float dopamine_parameter = 0.042F; -constexpr float dopamine_value = dopamine_parameter; -constexpr float threshold_weight_coeff = 0.023817F; - -// -// Network geometry. -// - -// Number of neurons reserved per a single digit. -constexpr int neurons_per_column = 15; - -// Ten possible digits, one column per each. -constexpr int num_possible_labels = classes_in_mnist; - -// All columns are a part of the same population. -constexpr int num_input_neurons = neurons_per_column * num_possible_labels; - -// Number of pixels for a single MNIST image. -constexpr int input_size = 28 * 28; - -// Dense input projection from 28 * 28 image to population of 150 neurons. -constexpr int input_projection_size = input_size * num_input_neurons; - // Intermediate population neurons. template @@ -148,7 +118,6 @@ auto add_subnetwork_populations(AnnotatedNetwork &result) return std::make_pair(population_uids, pop_data); } - // Create network for MNIST. AnnotatedNetwork create_example_network(int num_compound_networks) { @@ -169,89 +138,79 @@ AnnotatedNetwork create_example_network(int num_compound_networks) afferent_synapse.rule_.w_max_ = max_synaptic_weight; // 1. Trainable input projection. 
- ResourceDeltaProjection input_projection{ - knp::core::UID{false}, population_uids[INPUT], make_dense_generator(input_size, afferent_synapse), - input_projection_size}; + ResourceDeltaProjection input_projection = knp::framework::projection::creators::all_to_all( + knp::core::UID{false}, population_uids[INPUT], input_size, num_input_neurons, + [&afferent_synapse](size_t, size_t) { return afferent_synapse; }); result.data_.projections_from_raster_.push_back(input_projection.get_uid()); input_projection.unlock_weights(); // Trainable result.network_.add_projection(input_projection); result.data_.inference_internal_projection_.insert(input_projection.get_uid()); - default_synapse.weight_ = 9; - // 2. Activating projection. It sends signals from labels to dopamine population. const DeltaSynapseData default_activating_synapse{1, 1, knp::synapse_traits::OutputType::BLOCKING}; - DeltaProjection projection_2{ - knp::core::UID{false}, population_uids[DOPAMINE], - make_aligned_generator(pop_data[INPUT].pd_.size_, pop_data[DOPAMINE].pd_.size_, default_activating_synapse), - pop_data[INPUT].pd_.size_}; + DeltaProjection projection_2 = knp::framework::projection::creators::aligned( + knp::core::UID{false}, population_uids[DOPAMINE], pop_data[INPUT].pd_.size_, pop_data[DOPAMINE].pd_.size_, + [&default_activating_synapse](size_t, size_t) { return default_activating_synapse; }); result.network_.add_projection(projection_2); result.data_.wta_data_[i].second.push_back(projection_2.get_uid()); // 3. Dopamine projection, it goes from dopamine population to input population. 
const DeltaSynapseData default_dopamine_synapse{dopamine_value, 1, knp::synapse_traits::OutputType::DOPAMINE}; - DeltaProjection projection_3{ - population_uids[DOPAMINE], population_uids[INPUT], - make_aligned_generator(pop_data[DOPAMINE].pd_.size_, pop_data[INPUT].pd_.size_, default_dopamine_synapse), - pop_data[INPUT].pd_.size_}; - + DeltaProjection projection_3 = knp::framework::projection::creators::aligned( + population_uids[DOPAMINE], population_uids[INPUT], pop_data[DOPAMINE].pd_.size_, pop_data[INPUT].pd_.size_, + [&default_dopamine_synapse](size_t, size_t) { return default_dopamine_synapse; }); result.network_.add_projection(projection_3); result.data_.inference_internal_projection_.insert(projection_3.get_uid()); // 4. Strong excitatory projection going to output neurons. - DeltaProjection projection_4{ - knp::core::UID{false}, population_uids[OUTPUT], - make_aligned_generator(pop_data[INPUT].pd_.size_, pop_data[OUTPUT].pd_.size_, default_synapse), - pop_data[INPUT].pd_.size_}; + default_synapse.weight_ = 9; + DeltaProjection projection_4 = knp::framework::projection::creators::aligned( + knp::core::UID{false}, population_uids[OUTPUT], pop_data[INPUT].pd_.size_, pop_data[OUTPUT].pd_.size_, + [&default_synapse](size_t, size_t) { return default_synapse; }); result.data_.wta_data_[i].second.push_back(projection_4.get_uid()); - result.network_.add_projection(projection_4); result.data_.inference_internal_projection_.insert(projection_4.get_uid()); // 5. Blocking projection. 
const DeltaSynapseData default_blocking_synapse{-20, 1, knp::synapse_traits::OutputType::BLOCKING}; - DeltaProjection projection_5{ - population_uids[OUTPUT], population_uids[GATE], - make_aligned_generator(pop_data[OUTPUT].pd_.size_, pop_data[GATE].pd_.size_, default_blocking_synapse), - num_possible_labels}; + DeltaProjection projection_5 = knp::framework::projection::creators::aligned( + population_uids[OUTPUT], population_uids[GATE], pop_data[OUTPUT].pd_.size_, pop_data[GATE].pd_.size_, + [&default_blocking_synapse](size_t, size_t) { return default_blocking_synapse; }); result.network_.add_projection(projection_5); result.data_.inference_internal_projection_.insert(projection_5.get_uid()); // 6. Strong excitatory projection going from ground truth classes. - DeltaProjection projection_6{ - knp::core::UID{false}, population_uids[DOPAMINE], - make_aligned_generator(num_possible_labels, pop_data[DOPAMINE].pd_.size_, default_synapse), - pop_data[DOPAMINE].pd_.size_}; + DeltaProjection projection_6 = knp::framework::projection::creators::aligned( + knp::core::UID{false}, population_uids[DOPAMINE], num_possible_labels, pop_data[DOPAMINE].pd_.size_, + [&default_synapse](size_t, size_t) { return default_synapse; }); result.network_.add_projection(projection_6); result.data_.projections_from_classes_.push_back(projection_6.get_uid()); // 7. Strong slow excitatory projection going from ground truth classes. 
auto slow_synapse = default_synapse; slow_synapse.delay_ = 10; - DeltaProjection projection_7{ - knp::core::UID{false}, population_uids[GATE], - make_aligned_generator(num_possible_labels, pop_data[GATE].pd_.size_, slow_synapse), - pop_data[GATE].pd_.size_}; + DeltaProjection projection_7 = knp::framework::projection::creators::aligned( + knp::core::UID{false}, population_uids[GATE], num_possible_labels, pop_data[GATE].pd_.size_, + [&slow_synapse](size_t, size_t) { return slow_synapse; }); result.network_.add_projection(projection_7); result.data_.projections_from_classes_.push_back(projection_7.get_uid()); // 8. Strong inhibitory projection from ground truth input. auto inhibitory_synapse = default_synapse; inhibitory_synapse.weight_ = -30; - DeltaProjection projection_8{ - knp::core::UID{false}, population_uids[GATE], - make_exclusive_generator(num_possible_labels, inhibitory_synapse), - num_possible_labels * (pop_data[GATE].pd_.size_ - 1)}; + DeltaProjection projection_8 = + knp::framework::projection::creators::exclusive( + knp::core::UID{false}, population_uids[GATE], num_possible_labels, + [&inhibitory_synapse](size_t, size_t) { return inhibitory_synapse; }); result.data_.projections_from_classes_.push_back(projection_8.get_uid()); result.network_.add_projection(projection_8); // 9. Weak excitatory projection. 
auto weak_excitatory_synapse = default_synapse; weak_excitatory_synapse.weight_ = 3; - DeltaProjection projection_9{ - population_uids[GATE], population_uids[INPUT], - make_aligned_generator(pop_data[GATE].pd_.size_, pop_data[INPUT].pd_.size_, weak_excitatory_synapse), - pop_data[INPUT].pd_.size_}; + DeltaProjection projection_9 = knp::framework::projection::creators::aligned( + population_uids[GATE], population_uids[INPUT], pop_data[GATE].pd_.size_, pop_data[INPUT].pd_.size_, + [&weak_excitatory_synapse](size_t, size_t) { return weak_excitatory_synapse; }); result.network_.add_projection(projection_9); result.data_.inference_internal_projection_.insert(projection_9.get_uid()); } diff --git a/examples/mnist-learn/construct_network.h b/examples/mnist-learn/construct_network.h index 1dfb7c01a..911f941ca 100644 --- a/examples/mnist-learn/construct_network.h +++ b/examples/mnist-learn/construct_network.h @@ -31,27 +31,30 @@ #include -/// How many steps to use for learning. 20 steps are used for a single image. -constexpr int learning_period = 200000; -/// Classes in MNIST. -constexpr int classes_in_mnist = 10; - - struct AnnotatedNetwork { knp::framework::Network network_; struct Annotation { + // cppcheck-suppress unusedStructMember std::vector output_uids_; + // cppcheck-suppress unusedStructMember std::vector projections_from_raster_; + // cppcheck-suppress unusedStructMember std::vector projections_from_classes_; + // cppcheck-suppress unusedStructMember std::set inference_population_uids_; + // cppcheck-suppress unusedStructMember std::set inference_internal_projection_; // For each compound network: a vector of senders and a vector of receivers. 
+ // cppcheck-suppress unusedStructMember std::vector, std::vector>> wta_data_; + // cppcheck-suppress unusedStructMember std::map population_names_; - } data_; + } + // cppcheck-suppress unusedStructMember + data_; }; diff --git a/examples/mnist-learn/data_read.cpp b/examples/mnist-learn/data_read.cpp deleted file mode 100644 index cbec2a269..000000000 --- a/examples/mnist-learn/data_read.cpp +++ /dev/null @@ -1,141 +0,0 @@ -/** - * @file data_read.cpp - * @brief Reading from dataset. - * @kaspersky_support A. Vartenkov - * @date 06.12.2024 - * @license Apache 2.0 - * @copyright © 2024 AO Kaspersky Lab - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "data_read.h" - -#include -#include -#include -#include - - -/** - * @brief Read buffers from binary data file. - * @param path_to_data path to binary data file. 
- */ -std::vector> read_images_from_file(const std::filesystem::path &path_to_data) -{ - std::ifstream file_stream(path_to_data, std::ios::binary); - std::vector buffer; - std::vector> result; - - while (file_stream.good()) - { - buffer.resize(input_size); - file_stream.read(reinterpret_cast(buffer.data()), static_cast(input_size)); - result.push_back(std::move(buffer)); - buffer.clear(); - } - - return result; -} - - -std::vector> image_to_spikes( - const std::vector &image, std::vector &state, int num_intensity_levels) -{ - std::vector> ret; - ret.reserve(frames_per_image); - int i; - - for (i = 0; i < intensity_levels; ++i) - { - std::vector spikes(input_size, false); - for (int l = 0; l < input_size; ++l) - { - state[l] += state_increment_factor * image[l]; - if (state[l] >= 1.) - { - spikes[l] = true; - --state[l]; - } - } - ret.push_back(spikes); - } - - for (; i < frames_per_image; ++i) ret.push_back(std::vector(input_size, false)); - - return ret; -} - - -// Read image dataset from a binary file and trasnform it into a vector of boolean frames. 
-std::vector> read_spike_frames(const std::filesystem::path &path_to_data, int num_intensity_levels) -{ - auto images = read_images_from_file(path_to_data); - std::vector> result; - result.reserve(images.size() * frames_per_image); - std::vector state(input_size, 0.); - - for (size_t img_num = 0; img_num < images.size(); ++img_num) - { - std::vector> spikes_per_image = image_to_spikes(images[img_num], state, num_intensity_levels); - std::transform( - spikes_per_image.begin(), spikes_per_image.end(), std::back_inserter(result), - [](auto &v) { return std::move(v); }); - } - return result; -} - - -Labels read_labels(const std::filesystem::path &classes_file, int learning_period, int offset) -{ - std::ifstream file_stream(classes_file); - Labels labels; - int cla; - - while (file_stream.good()) - { - std::string str; - if (!std::getline(file_stream, str).good()) break; - if (offset > 0) - { - --offset; - continue; - } - std::stringstream ss(str); - ss >> cla; - std::vector buffer(input_size, false); - buffer[cla] = true; - if (labels.train_.size() >= learning_period) labels.test_.push_back(cla); - for (int i = 0; i < frames_per_image; ++i) labels.train_.push_back(buffer); - } - return labels; -} - - -std::function make_input_generator( - const std::vector> &spike_frames, int64_t offset) -{ - auto generator = [&spike_frames, offset](knp::core::Step step) - { - knp::core::messaging::SpikeData message; - if ((step + offset) >= spike_frames.size()) return message; - - for (size_t i = 0; i < spike_frames[step + offset].size(); ++i) - { - if (spike_frames[step + offset][i]) message.push_back(i); - } - return message; - }; - - return generator; -} diff --git a/examples/mnist-learn/data_read.h b/examples/mnist-learn/data_read.h deleted file mode 100644 index 90a5b0edf..000000000 --- a/examples/mnist-learn/data_read.h +++ /dev/null @@ -1,67 +0,0 @@ -/** - * @file data_read.h - * @brief Reading from dataset. - * @kaspersky_support A. 
Vartenkov - * @date 06.12.2024 - * @license Apache 2.0 - * @copyright © 2024 AO Kaspersky Lab - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include - -#include -#include -#include - -constexpr int intensity_levels = 10; -constexpr int frames_per_image = 20; -constexpr size_t input_size = 28 * 28; -constexpr double state_increment_factor = 1. / 255; - -/** - * @brief Labels for testing and training data. - */ -struct Labels -{ - std::vector test_; - std::vector> train_; -}; - - -/** - * @brief Read input images. - * @param path_to_data path to coded input images file. - * @return vector of spike-based frames. - */ -std::vector> read_spike_frames( - const std::filesystem::path &path_to_data, int num_intensity_levels = intensity_levels); - - -/** - * @brief Read labels for training and testing. - * @param classes_file file with labels. - * @param learning_period period for training. - * @param offset how many frames to skip. - * @return labels for training and testing. - */ -Labels read_labels(const std::filesystem::path &classes_file, int learning_period, int offset = 0); - - -// Create a spike message generator from an array of boolean frames. 
-std::function make_input_generator( - const std::vector> &spike_frames, int64_t offset); diff --git a/examples/mnist-learn/evaluation.cpp b/examples/mnist-learn/evaluation.cpp deleted file mode 100644 index bd32f17ca..000000000 --- a/examples/mnist-learn/evaluation.cpp +++ /dev/null @@ -1,195 +0,0 @@ -/** - * @file evaluation.cpp - * @brief Functions for network quality estimation. - * @kaspersky_support A. Vartenkov - * @date 12.03.2025 - * @license Apache 2.0 - * @copyright © 2025 AO Kaspersky Lab - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "evaluation.h" - -#include - -#include -#include -#include -#include -#include - -#include "construct_network.h" - - -constexpr int num_possible_labels = 10; - -/** - * @brief Prediction result structure. - */ -struct Result -{ - int real_ = 0; - int predicted_ = 0; - int correcty_predicted_ = 0; -}; - - -/** - * @brief A class used for accuracy evaluation. 
- */ -class Target -{ -public: - Target(int num_target_classes, const std::vector &classes) - : prediction_votes_(num_target_classes, 0), states_(classes), max_vote_(num_target_classes, 0) - { - } - - void obtain_output_spikes(const knp::core::messaging::SpikeData &firing_neuron_indices); - - [[nodiscard]] int get_num_targets() const { return static_cast(prediction_votes_.size()); } - - [[nodiscard]] int finalize(const std::filesystem::path &strPredictionFile = "") const; - -private: - void write_predictions_to_file( - const std::filesystem::path &out_file_path, const std::vector &prediction_results, - const std::vector &predictions) const; - - const std::vector &states_; - std::vector> predicted_states_; - size_t tact_ = 0; - const int state_duration_ = 20; - std::vector prediction_votes_; - std::vector max_vote_; - int index_offset_ = 0; -}; - - -void Target::obtain_output_spikes(const knp::core::messaging::SpikeData &firing_neuron_indices) -{ - for (auto i : firing_neuron_indices) ++prediction_votes_[i % get_num_targets()]; - if (!((tact_ + 1) % state_duration_)) - { - int starting_index = index_offset_; - int j = starting_index; - int n_max = 0; - int predicted_state = -1; - do - { - if (prediction_votes_[j] > n_max) - { - n_max = prediction_votes_[j]; - predicted_state = j; - } - if (++j == get_num_targets()) j = 0; - } while (j != starting_index); - if (++index_offset_ == get_num_targets()) index_offset_ = 0; - predicted_states_.push_back(std::make_pair(predicted_state, n_max)); - if (n_max) max_vote_[predicted_state] = std::max(max_vote_[predicted_state], n_max); - std::fill(prediction_votes_.begin(), prediction_votes_.end(), 0); - } - ++tact_; -} - - -double get_precision(const Result &prediction_result) -{ - if (prediction_result.predicted_ == 0) return 0.F; - return static_cast(prediction_result.correcty_predicted_) / prediction_result.predicted_; -} - - -double get_recall(const Result &prediction_result) -{ - if (prediction_result.real_ == 0) return 
0.0F; - return static_cast(prediction_result.correcty_predicted_) / prediction_result.real_; -} - - -double get_f_measure(double precision, double recall) -{ - if (precision * recall == 0) return 0.0F; - return 2.0F * precision * recall / (precision + recall); -} - - -void Target::write_predictions_to_file( - const std::filesystem::path &out_file_path, const std::vector &prediction_results, - const std::vector &predictions) const -{ - if (out_file_path.empty()) return; - std::ofstream out_stream(out_file_path); - out_stream << "TARGET,PRECISION,RECALL,F\n"; - for (int label = 0; label < get_num_targets(); ++label) - { - double precision = get_precision(prediction_results[label]); - double recall = get_recall(prediction_results[label]); - double f_measure = get_f_measure(precision, recall); - out_stream << label << ',' << precision << ',' << recall << ',' << f_measure << std::endl; - } - for (size_t i = 0; i < predicted_states_.size(); ++i) - out_stream << states_[i] << ',' << predicted_states_[i].first << ',' << predicted_states_[i].second << ',' - << predictions[i] << std::endl; -} - - -int Target::finalize(const std::filesystem::path &out_file_path) const -{ - if (none_of(max_vote_.begin(), max_vote_.end(), std::identity())) // No predictions at all... - return 0; - std::vector predictions; - for (size_t i = 0; i < predicted_states_.size(); ++i) - predictions.push_back( - predicted_states_[i].first == -1 || predicted_states_[i].second < 1 ? 
-1 : predicted_states_[i].first); - int num_errors = 0; - - std::vector prediction_results(get_num_targets()); - int num_true_negatives = 0; - for (size_t i = 0; i < predicted_states_.size(); ++i) - { - int predicted = predictions[i]; - if (states_[i] != -1) ++prediction_results[states_[i]].real_; - if (predicted != -1) ++prediction_results[predicted].predicted_; - if (predicted != states_[i]) - ++num_errors; - else if (predicted != -1) - ++prediction_results[predicted].correcty_predicted_; - else - ++num_true_negatives; - } - write_predictions_to_file(out_file_path, prediction_results, predictions); - return static_cast(std::lround(10000.0 * (1 - static_cast(num_errors) / predicted_states_.size()))); -} - - -void process_inference_results( - const std::vector &spikes, const std::vector &classes_for_testing, - int testing_period) -{ - auto j = spikes.begin(); - Target target(num_possible_labels, classes_for_testing); - for (int tact = 0; tact < testing_period; ++tact) - { - knp::core::messaging::SpikeData firing_neuron_indices; - while (j != spikes.end() && j->header_.send_time_ == tact) - { - firing_neuron_indices.insert( - firing_neuron_indices.end(), j->neuron_indexes_.begin(), j->neuron_indexes_.end()); - ++j; - } - target.obtain_output_spikes(firing_neuron_indices); - } - auto res = target.finalize("mnist.log"); - std::cout << "ACCURACY: " << res / 100.F << "%\n"; -} diff --git a/examples/mnist-learn/evaluation.h b/examples/mnist-learn/evaluation.h deleted file mode 100644 index 5a10cdf64..000000000 --- a/examples/mnist-learn/evaluation.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * @file evaluation.h - * @brief Functions for network quality estimation. - * @kaspersky_support A. Vartenkov - * @date 12.03.2025 - * @license Apache 2.0 - * @copyright © 2025 AO Kaspersky Lab - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -#include - - -void process_inference_results( - const std::vector &spikes, const std::vector &classes_for_testing, - int testing_period); diff --git a/examples/mnist-learn/generators.h b/examples/mnist-learn/generators.h deleted file mode 100644 index e5ad433ca..000000000 --- a/examples/mnist-learn/generators.h +++ /dev/null @@ -1,90 +0,0 @@ -/** - * @file generators.h - * @brief Generators creators. - * @kaspersky_support A. Vartenkov - * @date 28.03.2025 - * @license Apache 2.0 - * @copyright © 2025 AO Kaspersky Lab - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include - -using DeltaSynapseData = knp::synapse_traits::synapse_parameters; -using DeltaProjection = knp::core::Projection; -using ResourceSynapse = knp::synapse_traits::SynapticResourceSTDPDeltaSynapse; -using ResourceDeltaProjection = knp::core::Projection; -using ResourceSynapseData = ResourceDeltaProjection::Synapse; -using ResourceSynapseParams = knp::synapse_traits::synapse_parameters; -using ResourceSynapseGenerator = std::function; - - -// A dense projection generator from a default synapse. -ResourceSynapseGenerator make_dense_generator(size_t from_size, const ResourceSynapseParams &default_synapse) -{ - ResourceSynapseGenerator synapse_generator = [from_size, default_synapse](size_t index) - { - size_t from_index = index % from_size; - size_t to_index = index / from_size; - // If you need to have synapses with different parameters, change them here. - return ResourceSynapseData{default_synapse, from_index, to_index}; - }; - return synapse_generator; -} - - -DeltaProjection::SynapseGenerator make_aligned_generator( - size_t prepopulation_size, size_t postpopulation_size, const DeltaSynapseData &default_synapse) -{ - DeltaProjection::SynapseGenerator synapse_generator = - [prepopulation_size, postpopulation_size, default_synapse](size_t index) - { - size_t from_index; - size_t pack_size; - size_t to_index; - if (prepopulation_size >= postpopulation_size) - { - from_index = index; - pack_size = prepopulation_size / postpopulation_size; - to_index = index / pack_size; - } - else - { - to_index = index; - pack_size = postpopulation_size / prepopulation_size; - from_index = index / pack_size; - } - return DeltaProjection::Synapse{default_synapse, from_index, to_index}; - }; - return synapse_generator; -} - - -// This generator makes all-to-all projection without 1-to-1 element. 
-DeltaProjection::SynapseGenerator make_exclusive_generator( - size_t population_size, const DeltaSynapseData &default_synapse) -{ - DeltaProjection::SynapseGenerator synapse_generator = [population_size, default_synapse](size_t index) - { - size_t from_index; - size_t to_index; - from_index = index / (population_size - 1); - to_index = index % (population_size - 1); - if (to_index >= from_index) ++to_index; - return DeltaProjection::Synapse{default_synapse, from_index, to_index}; - }; - return synapse_generator; -} diff --git a/examples/mnist-learn/inference.cpp b/examples/mnist-learn/inference.cpp index 91d8d760a..13aa0996e 100644 --- a/examples/mnist-learn/inference.cpp +++ b/examples/mnist-learn/inference.cpp @@ -26,26 +26,27 @@ #include #include #include +#include #include -#include #include #include #include #include "construct_network.h" -#include "data_read.h" +#include "shared_network.h" #include "time_string.h" -#include "wta.h" constexpr size_t aggregated_spikes_logging_period = 4e3; +constexpr size_t wta_winners_amount = 1; + namespace fs = std::filesystem; std::vector run_mnist_inference( const fs::path &path_to_backend, AnnotatedNetwork &described_network, - const std::vector> &spike_frames, const fs::path &log_path) + knp::framework::data_processing::classification::images::Dataset const &dataset, const fs::path &log_path) { knp::framework::BackendLoader backend_loader; knp::framework::Model model(std::move(described_network.network_)); @@ -56,7 +57,7 @@ std::vector run_mnist_inference( // and the population IDs. 
knp::framework::ModelLoader::InputChannelMap channel_map; knp::core::UID input_image_channel_uid; - channel_map.insert({input_image_channel_uid, make_input_generator(spike_frames, learning_period)}); + channel_map.insert({input_image_channel_uid, dataset.make_inference_images_spikes_generator()}); for (auto i : described_network.data_.output_uids_) model.add_output_channel(o_channel_uid, i); for (auto image_proj_uid : described_network.data_.projections_from_raster_) @@ -77,7 +78,14 @@ std::vector run_mnist_inference( // cppcheck-suppress variableScope std::map spike_accumulator; - auto wta_uids = add_wta_handlers(described_network, model_executor); + std::vector wta_uids; + { + std::vector wta_borders; + for (size_t i = 0; i < num_possible_labels; ++i) wta_borders.push_back(neurons_per_column * i); + wta_uids = knp::framework::projection::add_wta_handlers( + model_executor, wta_winners_amount, wta_borders, described_network.data_.wta_data_); + } + auto all_senders_names = described_network.data_.population_names_; for (const auto &uid : wta_uids) { @@ -99,10 +107,10 @@ std::vector run_mnist_inference( // Start model. std::cout << get_time_string() << ": inference started\n"; model_executor.start( - [](size_t step) + [&dataset](size_t step) { if (step % 20 == 0) std::cout << "Inference step: " << step << std::endl; - return step != testing_period; + return step != dataset.get_steps_required_for_inference(); }); // Updates the output channel. auto spikes = out_channel.update(); diff --git a/examples/mnist-learn/inference.h b/examples/mnist-learn/inference.h index ce374f6d0..676d75d48 100644 --- a/examples/mnist-learn/inference.h +++ b/examples/mnist-learn/inference.h @@ -21,6 +21,8 @@ #pragma once +#include + #include #include #include @@ -29,10 +31,6 @@ #include "construct_network.h" -/// How many steps to use for testing. -constexpr int testing_period = 10000; - - /** * @brief Run inference on MNIST dataset. * @param path_to_backend path to backend. 
@@ -43,4 +41,5 @@ constexpr int testing_period = 10000; */ std::vector run_mnist_inference( const std::filesystem::path &path_to_backend, AnnotatedNetwork &described_network, - const std::vector> &spike_frames, const std::filesystem::path &log_path = ""); + const knp::framework::data_processing::classification::images::Dataset &dataset, + const std::filesystem::path &log_path); diff --git a/examples/mnist-learn/main.cpp b/examples/mnist-learn/main.cpp index 5934d97ae..2e1c75022 100644 --- a/examples/mnist-learn/main.cpp +++ b/examples/mnist-learn/main.cpp @@ -19,45 +19,71 @@ * limitations under the License. */ +#include + #include -#include +#include #include -#include "data_read.h" -#include "evaluation.h" #include "inference.h" +#include "shared_network.h" #include "time_string.h" #include "train.h" +constexpr size_t active_steps = 10; +constexpr size_t steps_per_image = 20; +constexpr float state_increment_factor = 1.f / 255; +constexpr size_t images_amount_to_train = 10000; +constexpr float dataset_split = 0.8; +constexpr size_t classes_amount = 10; + +namespace data_processing = knp::framework::data_processing::classification::images; +namespace inference_evaluation = knp::framework::inference_evaluation::classification; -int main(int argc, char **argv) +int main(int argc, char** argv) { - if (argc < 3) + if (argc < 3 || argc > 4) { - std::cerr << "Not enough parameters.\n First parameter: path to frames file.\n " - "Second parameter: path to labels file.\n Third parameter (optional) path to log output directory." + std::cerr << "You need to provide 2[3] arguments,\n1: path to images raw data\n2: path to images labels\n[3]: " + "path to folder for logs" << std::endl; return EXIT_FAILURE; } + + std::filesystem::path images_file_path = argv[1]; + std::filesystem::path labels_file_path = argv[2]; + std::filesystem::path log_path; - if (argc >= 4) log_path = argv[3]; + if (4 == argc) log_path = argv[3]; // Defines path to backend, on which to run a network. 
std::filesystem::path path_to_backend = std::filesystem::path(argv[0]).parent_path() / "knp-cpu-multi-threaded-backend"; - // Read data from corresponding files. - auto spike_frames = read_spike_frames(argv[1]); - auto labels = read_labels(argv[2], learning_period); + std::ifstream images_stream(images_file_path, std::ios::binary); + std::ifstream labels_stream(labels_file_path, std::ios::in); + + data_processing::Dataset dataset; + dataset.process_labels_and_images( + images_stream, labels_stream, images_amount_to_train, classes_amount, input_size, steps_per_image, + dataset.make_incrementing_image_to_spikes_converter(active_steps, state_increment_factor)); + dataset.split(dataset_split); + + std::cout << "Processed dataset, training will last " << dataset.get_steps_required_for_training() + << " steps, inference " << dataset.get_steps_required_for_inference() << " steps" << std::endl; // Construct network and run training. - AnnotatedNetwork trained_network = train_mnist_network(path_to_backend, spike_frames, labels.train_, log_path); + AnnotatedNetwork trained_network = train_mnist_network(path_to_backend, dataset, log_path); // Run inference for the same network. - auto spikes = run_mnist_inference(path_to_backend, trained_network, spike_frames, log_path); + auto spikes = run_mnist_inference(path_to_backend, trained_network, dataset, log_path); std::cout << get_time_string() << ": inference finished -- output spike count is " << spikes.size() << std::endl; // Evaluate results. 
-    process_inference_results(spikes, labels.test_, testing_period);
+    inference_evaluation::InferenceResultForClass::InferenceResultsProcessor inference_processor;
+    inference_processor.process_inference_results(spikes, dataset);
+
+    inference_processor.write_inference_results_to_stream_as_csv(std::cout);
+
     return EXIT_SUCCESS;
 }
diff --git a/examples/mnist-learn/shared_network.h b/examples/mnist-learn/shared_network.h
new file mode 100644
index 000000000..c37f21b46
--- /dev/null
+++ b/examples/mnist-learn/shared_network.h
@@ -0,0 +1,63 @@
+/**
+ * @file shared_network.h
+ * @brief Network hyperparameters and geometry constants shared between training and inference.
+ * @kaspersky_support D. Postnikov
+ * @date 28.07.2025
+ * @license Apache 2.0
+ * @copyright © 2024 AO Kaspersky Lab
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include
+#include
+#include
+#include
+
+// Network hyperparameters. You may want to fine-tune these.
+constexpr float default_threshold = 8.571F;
+constexpr float min_synaptic_weight = -0.7;
+constexpr float max_synaptic_weight = 0.864249F;
+constexpr float base_weight_value = 0.000F;
+constexpr int neuron_dopamine_period = 10;
+constexpr int synapse_dopamine_period = 10;
+constexpr float l_neuron_potential_decay = 1.0 - 1.0 / 3.0;
+constexpr float dopamine_parameter = 0.042F;
+constexpr float dopamine_value = dopamine_parameter;
+constexpr float threshold_weight_coeff = 0.023817F;
+
+//
+// Network geometry.
+// + +// Number of neurons reserved per a single digit. +constexpr size_t neurons_per_column = 15; + +// Ten possible digits, one column per each. +constexpr size_t num_possible_labels = 10; + +// All columns are a part of the same population. +constexpr size_t num_input_neurons = neurons_per_column * num_possible_labels; + +// Number of pixels in width for a single MNIST image. +constexpr size_t input_size_width = 28; + +// Number of pixels in height for a single MNIST image. +constexpr size_t input_size_height = 28; + +// Number of pixels for a single MNIST image. +constexpr size_t input_size = input_size_width * input_size_height; + +// Dense input projection from 28 * 28 image to population of 150 neurons. +constexpr size_t input_projection_size = input_size * num_input_neurons; diff --git a/examples/mnist-learn/time_string.cpp b/examples/mnist-learn/time_string.cpp index 8933e9b3e..8220f1ba4 100644 --- a/examples/mnist-learn/time_string.cpp +++ b/examples/mnist-learn/time_string.cpp @@ -1,6 +1,6 @@ /** * @file time_string.cpp - * @brief Functions for network training. + * @brief Function for converting time to string. * @kaspersky_support D. Postnikov * @date 28.03.2025 * @license Apache 2.0 diff --git a/examples/mnist-learn/time_string.h b/examples/mnist-learn/time_string.h index 48356550d..cf3b83ccd 100644 --- a/examples/mnist-learn/time_string.h +++ b/examples/mnist-learn/time_string.h @@ -1,6 +1,6 @@ /** * @file time_string.h - * @brief Functions for network training. + * @brief Function for converting time to string. * @kaspersky_support D. 
Postnikov * @date 28.03.2025 * @license Apache 2.0 diff --git a/examples/mnist-learn/train.cpp b/examples/mnist-learn/train.cpp index 24e0ed832..02ed94947 100644 --- a/examples/mnist-learn/train.cpp +++ b/examples/mnist-learn/train.cpp @@ -26,33 +26,35 @@ #include #include #include +#include #include -#include #include #include #include #include "construct_network.h" -#include "data_read.h" +#include "shared_network.h" #include "time_string.h" -#include "wta.h" constexpr size_t aggregated_spikes_logging_period = 4e3; constexpr size_t projection_weights_logging_period = 1e5; +constexpr size_t wta_winners_amount = 1; + namespace fs = std::filesystem; +namespace images_classification = knp::framework::data_processing::classification::images; + // Create channel map for training. auto build_channel_map_train( - const AnnotatedNetwork &network, knp::framework::Model &model, const std::vector> &spike_frames, - const std::vector> &spike_classes) + const AnnotatedNetwork &network, knp::framework::Model &model, const images_classification::Dataset &dataset) { // Create future channels uids randomly. knp::core::UID input_image_channel_raster; - knp::core::UID input_image_channel_classses; + knp::core::UID input_image_channel_classes; // Add input channel for each image input projection. for (auto image_proj_uid : network.data_.projections_from_raster_) @@ -60,12 +62,12 @@ auto build_channel_map_train( // Add input channel for data labels. for (auto target_proj_uid : network.data_.projections_from_classes_) - model.add_input_channel(input_image_channel_classses, target_proj_uid); + model.add_input_channel(input_image_channel_classes, target_proj_uid); // Create and fill a channel map. 
knp::framework::ModelLoader::InputChannelMap channel_map; - channel_map.insert({input_image_channel_raster, make_input_generator(spike_frames, 0)}); - channel_map.insert({input_image_channel_classses, make_input_generator(spike_classes, 0)}); + channel_map.insert({input_image_channel_raster, dataset.make_training_images_spikes_generator()}); + channel_map.insert({input_image_channel_classes, dataset.make_training_labels_generator()}); return channel_map; } @@ -97,16 +99,14 @@ knp::framework::Network get_network_for_inference( AnnotatedNetwork train_mnist_network( - const fs::path &path_to_backend, const std::vector> &spike_frames, - const std::vector> &spike_classes, const fs::path &log_path) + const fs::path &path_to_backend, const images_classification::Dataset &dataset, const fs::path &log_path) { AnnotatedNetwork example_network = create_example_network(num_subnetworks); std::filesystem::create_directory("mnist_network"); knp::framework::sonata::save_network(example_network.network_, "mnist_network"); knp::framework::Model model(std::move(example_network.network_)); - knp::framework::ModelLoader::InputChannelMap channel_map = - build_channel_map_train(example_network, model, spike_frames, spike_classes); + knp::framework::ModelLoader::InputChannelMap channel_map = build_channel_map_train(example_network, model, dataset); knp::framework::BackendLoader backend_loader; knp::framework::ModelExecutor model_executor(model, backend_loader.load(path_to_backend), std::move(channel_map)); @@ -119,7 +119,13 @@ AnnotatedNetwork train_mnist_network( // cppcheck-suppress variableScope std::map spike_accumulator; - add_wta_handlers(example_network, model_executor); + { + std::vector wta_borders; + for (size_t i = 0; i < num_possible_labels; ++i) wta_borders.push_back(neurons_per_column * i); + knp::framework::projection::add_wta_handlers( + model_executor, wta_winners_amount, wta_borders, example_network.data_.wta_data_); + } + // All loggers go here if (!log_path.empty()) { 
@@ -144,10 +150,10 @@ AnnotatedNetwork train_mnist_network( std::cout << get_time_string() << ": learning started\n"; model_executor.start( - [](size_t step) + [&dataset](size_t step) { if (step % 20 == 0) std::cout << "Step: " << step << std::endl; - return step != learning_period; + return step != dataset.get_steps_required_for_training(); }); std::cout << get_time_string() << ": learning finished\n"; diff --git a/examples/mnist-learn/train.h b/examples/mnist-learn/train.h index 92b371d10..4d2c560f4 100644 --- a/examples/mnist-learn/train.h +++ b/examples/mnist-learn/train.h @@ -21,6 +21,8 @@ #pragma once +#include + #include #include #include @@ -37,10 +39,11 @@ constexpr int num_subnetworks = 5; * @param path_to_backend path to backend. * @param spike_frames images file. * @param spike_classes labels file. - * @param log_path path to log folder. + * @param log_path path to log folder. If its empty, then no logging will be done. * @return trained network with added descriptions. * @note the returned network is configured for inference. 
*/ AnnotatedNetwork train_mnist_network( - const std::filesystem::path &path_to_backend, const std::vector> &spike_frames, - const std::vector> &spike_classes, const std::filesystem::path &log_path = ""); + const std::filesystem::path &path_to_backend, + const knp::framework::data_processing::classification::images::Dataset &dataset, + const std::filesystem::path &log_path); diff --git a/knp/base-framework/CMakeLists.txt b/knp/base-framework/CMakeLists.txt index 07ee12425..ddc80faed 100644 --- a/knp/base-framework/CMakeLists.txt +++ b/knp/base-framework/CMakeLists.txt @@ -64,6 +64,7 @@ knp_add_library("${PROJECT_NAME}-core" impl/output_channel.cpp impl/synchronization.cpp impl/monitoring/model.cpp + impl/projection/wta.cpp impl/sonata/save_network.cpp impl/sonata/load_network.cpp impl/sonata/csv_content.cpp @@ -73,6 +74,10 @@ knp_add_library("${PROJECT_NAME}-core" impl/sonata/types/altai_lif_neuron.cpp impl/sonata/types/resource_delta_synapse.cpp impl/sonata/types/additive_delta_synapse.cpp + impl/data_processing/classification/dataset.cpp + impl/data_processing/classification/image.cpp + impl/inference_evaluation/classification.cpp + impl/inference_evaluation/perfomance_metrics.cpp impl/observer.cpp ${${PROJECT_NAME}_headers} ALIAS KNP::BaseFramework::Core diff --git a/knp/base-framework/impl/data_processing/classification/dataset.cpp b/knp/base-framework/impl/data_processing/classification/dataset.cpp new file mode 100644 index 000000000..b8a78f578 --- /dev/null +++ b/knp/base-framework/impl/data_processing/classification/dataset.cpp @@ -0,0 +1,55 @@ +/** + * @file dataset.cpp + * @brief Definition of classification dataset. + * @kaspersky_support D. Postnikov + * @date 29.07.2025 + * @license Apache 2.0 + * @copyright © 2024 AO Kaspersky Lab + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+
+
+namespace knp::framework::data_processing::classification
+{
+
+void Dataset::split(float split_percent)
+{
+    size_t split_beginning = static_cast(data_for_training_.size()) * split_percent + 0.5F;
+    for (size_t i = split_beginning; i < data_for_training_.size(); ++i)
+        data_for_inference_.emplace_back(std::move(data_for_training_[i]));
+    data_for_training_.erase(data_for_training_.begin() + split_beginning, data_for_training_.end());
+
+    /*
+     * The idea is that, if the dataset is too big for the required training amount, then inference will be bigger than
+     * training, so to compensate we make inference smaller, according to split.
+ */ + if (required_training_amount_ < data_for_training_.size()) + { + data_for_training_.resize(required_training_amount_); + data_for_inference_.resize( + static_cast(static_cast(data_for_training_.size()) / split_percent) - + data_for_training_.size()); + steps_required_for_training_ = steps_per_frame_ * data_for_training_.size(); + steps_required_for_inference_ = steps_per_frame_ * data_for_inference_.size(); + } + else + { + steps_required_for_training_ = steps_per_frame_ * required_training_amount_; + steps_required_for_inference_ = steps_per_frame_ * data_for_inference_.size(); + } +} + +} // namespace knp::framework::data_processing::classification diff --git a/knp/base-framework/impl/data_processing/classification/image.cpp b/knp/base-framework/impl/data_processing/classification/image.cpp new file mode 100644 index 000000000..516ec92f0 --- /dev/null +++ b/knp/base-framework/impl/data_processing/classification/image.cpp @@ -0,0 +1,148 @@ +/** + * @file image.cpp + * @brief Processing of dataset of images. + * @kaspersky_support D. Postnikov + * @date 14.07.2025 + * @license Apache 2.0 + * @copyright © 2024 AO Kaspersky Lab + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + + +namespace knp::framework::data_processing::classification::images +{ + +void Dataset::process_labels_and_images( + std::istream &images_stream, std::istream &labels_stream, size_t training_amount, size_t classes_amount, + size_t image_size, size_t steps_per_image, + std::function const &)> const &image_to_spikes) +{ + image_size_ = image_size; + steps_per_frame_ = steps_per_image; + required_training_amount_ = training_amount; + classes_amount_ = classes_amount; + + std::vector image_reading_buffer(image_size, 0); + + while (images_stream.good() && labels_stream.good()) + { + images_stream.read(reinterpret_cast(image_reading_buffer.data()), image_size); + auto spikes_frame = image_to_spikes(image_reading_buffer); + + std::string str; + if (!std::getline(labels_stream, str).good()) break; + int label = std::stoi(str); + + // Push to training data set because we dont know dataset size yet for a split + data_for_training_.push_back({label, std::move(spikes_frame)}); + } +} + + +std::function Dataset::make_training_labels_generator() const +{ + return [this](knp::core::Step step) + { + knp::core::messaging::SpikeData message; + + const size_t frame_index = step / steps_per_frame_; + const size_t looped_frame_index = frame_index % data_for_training_.size(); + + message.push_back(data_for_training_[looped_frame_index].first); + return message; + }; +} + + +std::function Dataset::make_training_images_spikes_generator() const +{ + return [this](knp::core::Step step) + { + knp::core::messaging::SpikeData message; + + const size_t frame_index = step / steps_per_frame_; + const size_t looped_frame_index = frame_index % data_for_training_.size(); + + auto const &data = data_for_training_[looped_frame_index].second.spikes_; + + const size_t local_step = step % steps_per_frame_; + const size_t frame_start = local_step * image_size_; + + for (size_t i = frame_start; i < frame_start + image_size_; ++i) + { + if (data[i]) message.push_back(i - frame_start); + 
} + return message; + }; +} + + +std::function Dataset::make_inference_images_spikes_generator() const +{ + return [this](knp::core::Step step) + { + knp::core::messaging::SpikeData message; + + const size_t frame_index = step / steps_per_frame_; + const size_t looped_frame_index = frame_index % data_for_inference_.size(); + + auto const &data = data_for_inference_[looped_frame_index].second.spikes_; + + const size_t local_step = step % steps_per_frame_; + const size_t frame_start = local_step * image_size_; + + for (size_t i = frame_start; i < frame_start + image_size_; ++i) + { + if (data[i]) message.push_back(i - frame_start); + } + return message; + }; +} + + +std::function const &)> Dataset::make_incrementing_image_to_spikes_converter( + size_t active_steps, float state_increment_factor) const + +{ + std::vector states; + return [this, active_steps, state_increment_factor, states](std::vector const &image) mutable -> Frame + { + if (!states.size()) states.resize(image_size_, 0.F); + + std::vector ret; + ret.reserve(steps_per_frame_ * image_size_); + + for (size_t i = 0; i < active_steps; ++i) + { + ret.insert(ret.end(), image_size_, false); + for (size_t l = 0; l < image_size_; ++l) + { + states[l] += state_increment_factor * static_cast(image[l]); + if (states[l] >= 1.F) + { + ret[ret.size() - image_size_ + l] = true; + --states[l]; + } + } + } + + ret.insert(ret.end(), (steps_per_frame_ - active_steps) * image_size_, false); + + return {std::move(ret)}; + }; +} + +} // namespace knp::framework::data_processing::classification::images diff --git a/knp/base-framework/impl/inference_evaluation/classification.cpp b/knp/base-framework/impl/inference_evaluation/classification.cpp new file mode 100644 index 000000000..a73de8df0 --- /dev/null +++ b/knp/base-framework/impl/inference_evaluation/classification.cpp @@ -0,0 +1,165 @@ +/** + * @file classification.cpp + * @brief Evaluation of how good model performs by inference results. + * @kaspersky_support D. 
Postnikov + * @date 16.07.2025 + * @license Apache 2.0 + * @copyright © 2025 AO Kaspersky Lab + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include +#include + + +namespace knp::framework::inference_evaluation::classification +{ + +class InferenceResultForClass::InferenceResultsProcessor::EvaluationHelper +{ +public: + explicit EvaluationHelper(const knp::framework::data_processing::classification::Dataset &dataset); + + void process_spikes(const knp::core::messaging::SpikeData &firing_neuron_indices, size_t step); + + [[nodiscard]] std::vector process_inference_predictions() const; + +private: + struct Prediction + { + const size_t predicted_class_; + const size_t votes_; + }; + + // All predictions of model. + std::vector predictions_; + + // Votes for some class each steps_per_class_ steps. 
+ std::vector class_votes_; + + const knp::framework::data_processing::classification::Dataset &dataset_; +}; + + +InferenceResultForClass::InferenceResultsProcessor::EvaluationHelper::EvaluationHelper( + const knp::framework::data_processing::classification::Dataset &dataset) + : class_votes_(dataset.get_amount_of_classes(), 0), dataset_(dataset) +{ +} + + +void InferenceResultForClass::InferenceResultsProcessor::EvaluationHelper::process_spikes( + const knp::core::messaging::SpikeData &firing_neuron_indices, size_t step) +{ + for (auto i : firing_neuron_indices) ++class_votes_[i % dataset_.get_amount_of_classes()]; + if (!((step + 1) % dataset_.get_steps_per_frame())) + { + size_t n_max = 0; + size_t predicted_state = 0; + + for (size_t i = 0; i < dataset_.get_amount_of_classes(); ++i) + { + if (class_votes_[i] > n_max) + { + n_max = class_votes_[i]; + predicted_state = i; + } + } + predictions_.emplace_back(Prediction{predicted_state, n_max}); + std::fill(class_votes_.begin(), class_votes_.end(), 0); + } +} + + +std::vector +InferenceResultForClass::InferenceResultsProcessor::EvaluationHelper::process_inference_predictions() const +{ + std::vector prediction_results(dataset_.get_amount_of_classes()); + for (size_t i = 0; i < predictions_.size(); ++i) + { + auto const &prediction = predictions_[i]; + auto const &cur_data = dataset_.get_data_for_inference()[i]; + + if (!prediction.votes_) + ++prediction_results[cur_data.first].false_negatives_; + else if (prediction.predicted_class_ != cur_data.first) + ++prediction_results[cur_data.first].false_positives_; + else //votes have been cast and predicted class == correct class + ++prediction_results[cur_data.first].true_positives_; + } + + // Calculate true negatives. 
+ for (auto &res : prediction_results) + { + res.true_negatives_ = predictions_.size() - res.true_positives_ - res.false_negatives_ - res.false_positives_; + } + + return prediction_results; +} + + +void InferenceResultForClass::InferenceResultsProcessor::process_inference_results( + const std::vector &spikes, + knp::framework::data_processing::classification::Dataset const &dataset) +{ + EvaluationHelper helper(dataset); + knp::core::messaging::SpikeData firing_neuron_indices; + auto spikes_iter = spikes.begin(); + + for (size_t step = 0; step < dataset.get_steps_required_for_inference(); ++step) + { + while (spikes_iter != spikes.end() && spikes_iter->header_.send_time_ == step) + { + firing_neuron_indices.insert( + firing_neuron_indices.end(), spikes_iter->neuron_indexes_.begin(), spikes_iter->neuron_indexes_.end()); + ++spikes_iter; + } + helper.process_spikes(firing_neuron_indices, step); + firing_neuron_indices.clear(); + } + + inference_results_ = helper.process_inference_predictions(); +} + + +void InferenceResultForClass::InferenceResultsProcessor::write_inference_results_to_stream_as_csv( + std::ostream &results_stream) +{ + results_stream << "CLASS,TOTAL_VOTES,TRUE_POSITIVES,FALSE_NEGATIVES,FALSE_POSITIVES,TRUE_NEGATIVES,PRECISION," + "RECALL,PREVALENCE,ACCURACY,F_MEASURE\n"; + for (size_t label = 0; label < inference_results_.size(); ++label) + { + auto const &prediction = inference_results_[label]; + const float precision = get_precision(prediction.true_positives_, prediction.false_positives_); + const float recall = get_recall(prediction.true_positives_, prediction.false_positives_); + const float prevalence = get_prevalence( + prediction.true_positives_, prediction.false_negatives_, prediction.false_positives_, + prediction.true_negatives_); + const float accuracy = get_accuracy( + prediction.true_positives_, prediction.false_negatives_, prediction.false_positives_, + prediction.true_negatives_); + const float f_measure = get_f_measure(precision, 
recall); + + results_stream << label << ',' << prediction.get_total_votes() << ',' << prediction.true_positives_ << ',' + << prediction.false_negatives_ << ',' << prediction.false_positives_ << ',' + << prediction.true_negatives_ << ',' << precision << ',' << recall << ',' << prevalence << ',' + << accuracy << ',' << f_measure << std::endl; + } +} + +} // namespace knp::framework::inference_evaluation::classification diff --git a/knp/base-framework/impl/inference_evaluation/perfomance_metrics.cpp b/knp/base-framework/impl/inference_evaluation/perfomance_metrics.cpp new file mode 100644 index 000000000..8c2e19ab1 --- /dev/null +++ b/knp/base-framework/impl/inference_evaluation/perfomance_metrics.cpp @@ -0,0 +1,64 @@ +/** + * @file perfomance_metrics.cpp + * @brief Functions to calculate model statistics. + * @kaspersky_support D. Postnikov + * @date 24.07.2025 + * @license Apache 2.0 + * @copyright © 2025 AO Kaspersky Lab + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + + +namespace knp::framework::inference_evaluation +{ + +float get_precision(size_t true_positives, size_t false_positives) +{ + if (true_positives + false_positives == 0) return 0.F; + return static_cast(true_positives) / (true_positives + false_positives); +} + + +float get_recall(size_t true_positives, size_t false_negatives) +{ + if (true_positives + false_negatives == 0) return 0.F; + return static_cast(true_positives) / (true_positives + false_negatives); +} + + +float get_prevalence(size_t true_positives, size_t false_negatives, size_t false_positives, size_t true_negatives) +{ + const size_t total = true_positives + false_negatives + false_positives + true_negatives; + if (total == 0) return 0.F; + return static_cast(true_positives + false_negatives) / total; +} + + +float get_accuracy(size_t true_positives, size_t false_negatives, size_t false_positives, size_t true_negatives) +{ + const size_t total = true_positives + false_negatives + false_positives + true_negatives; + if (total == 0) return 0.F; + return static_cast(true_positives + true_negatives) / total; +} + + +float get_f_measure(float precision, float recall) +{ + if (precision * recall == 0) return 0.F; + return 2.F * precision * recall / (precision + recall); +} + +} // namespace knp::framework::inference_evaluation diff --git a/knp/base-framework/impl/monitoring/model.cpp b/knp/base-framework/impl/monitoring/model.cpp index 32438973a..7d9e255cb 100644 --- a/knp/base-framework/impl/monitoring/model.cpp +++ b/knp/base-framework/impl/monitoring/model.cpp @@ -1,6 +1,6 @@ /** * @file model.cpp - * @brief Functions for network construction. + * @brief Functions for network monitoring. * @kaspersky_support D. 
Postnikov * @date 24.03.2025 * @license Apache 2.0 diff --git a/examples/mnist-learn/wta.cpp b/knp/base-framework/impl/projection/wta.cpp similarity index 57% rename from examples/mnist-learn/wta.cpp rename to knp/base-framework/impl/projection/wta.cpp index 88910e60e..5dbbfce46 100644 --- a/examples/mnist-learn/wta.cpp +++ b/knp/base-framework/impl/projection/wta.cpp @@ -1,8 +1,8 @@ /** * @file wta.cpp * @brief Functions for Winner Takes All implementation. - * @kaspersky_support A. Vartenkov - * @date 28.03.2025 + * @kaspersky_support D. Postnikov + * @date 03.07.2025 * @license Apache 2.0 * @copyright © 2025 AO Kaspersky Lab * @@ -19,30 +19,34 @@ * limitations under the License. */ -#include "wta.h" - #include +#include #include #include -#include +namespace knp::framework::projection +{ + -std::vector add_wta_handlers(const AnnotatedNetwork &network, knp::framework::ModelExecutor &executor) +std::vector add_wta_handlers( + knp::framework::ModelExecutor& executor, size_t winners_amount, std::vector const& borders, + std::vector, std::vector>> const& wta_data) { - std::vector borders; std::vector result; - for (size_t i = 0; i < 10; ++i) borders.push_back(15 * i); - // std::random_device rnd_device; - int seed = 0; // rnd_device(); - std::cout << "Seed " << seed << std::endl; - for (const auto &senders_receivers : network.data_.wta_data_) + // Generating seed for WTA randomness + std::mt19937 rand_gen(std::random_device{}()); + std::uniform_int_distribution distr(-std::numeric_limits::max(), std::numeric_limits::max()); + + for (const auto& senders_receivers : wta_data) { knp::core::UID handler_uid; executor.add_spike_message_handler( - knp::framework::modifier::KWtaPerGroup{borders, 1, seed++}, senders_receivers.first, + knp::framework::modifier::KWtaPerGroup{borders, winners_amount, distr(rand_gen)}, senders_receivers.first, senders_receivers.second, handler_uid); result.push_back(handler_uid); } return result; } + +} // namespace knp::framework::projection 
diff --git a/knp/base-framework/include/knp/framework/data_processing/classification/dataset.h b/knp/base-framework/include/knp/framework/data_processing/classification/dataset.h new file mode 100644 index 000000000..1e7bb26b8 --- /dev/null +++ b/knp/base-framework/include/knp/framework/data_processing/classification/dataset.h @@ -0,0 +1,190 @@ +/** + * @file dataset.h + * @brief Definition of classification dataset. + * @kaspersky_support D. Postnikov + * @date 21.07.2025 + * @license Apache 2.0 + * @copyright © 2024 AO Kaspersky Lab + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include +#include + +#include +#include + + +namespace knp::framework::data_processing::classification +{ + +/** + * @brief A class that represents dataset. + * @details Dataset is supposed to abstract from actual dataset processing, and dataset characteristics, for example + * size of dataset. Size of dataset is not that important becase its all handled when dataset is split. Correct workflow + * would be to firstly process dataset, then split it, and then you can use it as you want. Splitting dataset is + * important because it also calculates amount of steps for training/inference. + */ +class KNP_DECLSPEC Dataset +{ +protected: + /** + * @brief Destructor. + */ + virtual ~Dataset() = default; + + /** + * @brief Default constructor. + */ + Dataset() = default; + + /** + * @brief Copy constructor. + * @param dataset Dataset. 
+ */ + Dataset(const Dataset& dataset) = default; + + /** + * @brief Copy assignment operator. + * @param dataset Dataset. + * @return Dataset. + */ + Dataset& operator=(const Dataset& dataset) = default; + + /** + * @brief Move constructor. + * @param dataset Dataset. + */ + Dataset(Dataset&& dataset) = default; + + /** + * @brief Move assignment operator. + * @param dataset Dataset. + * @return Dataset. + */ + Dataset& operator=(Dataset&& dataset) = default; + +public: + /** + * @brief Split dataset into train/inference. + * @param split_percent Percentage that shows how to split dataset. + * @pre Must be from 0 to 1. + * @details For example split_percent=0.8 dataset will be split so 80% dedicated for training and 20% for + * inference. This function not only splits dataset, it also calculates amount of training/inference steps. + * If dataset is too big, for example dataset has 1000 records, but we want to train only on 100 records, + * this function will consider that. If split_percent is 0.8, then inference will be not 1000*(1-0.8)=200, but + * it will be calculated according to training amount, so inference size will be 100/0.8-100=25. So actual size of + * dataset is not that important. + */ + virtual void split(float split_percent); + + /** + * @brief Get data for training. + * @return Data for training. + */ + [[nodiscard]] auto const& get_data_for_training() const { return data_for_training_; } + + /** + * @brief Get data for inference. + * @return Data for inference. + */ + [[nodiscard]] auto const& get_data_for_inference() const { return data_for_inference_; } + + /** + * @brief Get steps amount per frame. + * @return Steps amount per frame. + */ + [[nodiscard]] size_t get_steps_per_frame() const { return steps_per_frame_; } + + /** + * @brief Get amount of steps required for training. + * @return Amount of steps required for training. 
+ */ + [[nodiscard]] size_t get_steps_required_for_training() const { return steps_required_for_training_; } + + /** + * @brief Get steps amount required for inference. + * @return Steps amount required for inference. + */ + [[nodiscard]] size_t get_steps_required_for_inference() const { return steps_required_for_inference_; } + + /** + * @brief Get required training amount. + * @return Required training amount. + */ + [[nodiscard]] size_t get_required_training_amount() const { return required_training_amount_; } + + /** + * @brief Get amount of classes. + * @return Amount of classes. + */ + [[nodiscard]] size_t get_amount_of_classes() const { return classes_amount_; } + + /** + * @brief A struct that represents a class instance in form of spikes. + * @details In classification we want to send class instance, that is converted to spikes form, in several steps. + * For example an image can be sent over 20 steps. This struct represents class instance data on those several + * steps. So it stores a vector of bools that represents where to send spikes over specified amount of steps. So + * with image example, length of this vector would be equal to steps_per_frame * image_size. + */ + struct Frame + { + // cppcheck-suppress unusedStructMember + /** + * @brief All spikes in frame. + */ + std::vector spikes_; + }; + +protected: + /** + * @brief Vector of pairs of label and frame. + */ + std::vector> data_for_training_; + + /** + * @brief Vector of pairs of label and frame. + */ + std::vector> data_for_inference_; + + /** + * @brief Amount of steps frame is distributed to. + */ + size_t steps_per_frame_ = 0; + + /** + * @brief Amount of steps required for training. + */ + size_t steps_required_for_training_ = 0; + + /** + * @brief Amount of steps required for inference. + */ + size_t steps_required_for_inference_ = 0; + + /** + * @brief Training amount required by user. + */ + size_t required_training_amount_ = 0; + + /** + * @brief Amount of classes. 
+ */ + size_t classes_amount_ = 0; +}; + +} // namespace knp::framework::data_processing::classification diff --git a/knp/base-framework/include/knp/framework/data_processing/classification/image.h b/knp/base-framework/include/knp/framework/data_processing/classification/image.h new file mode 100644 index 000000000..5b7b874de --- /dev/null +++ b/knp/base-framework/include/knp/framework/data_processing/classification/image.h @@ -0,0 +1,105 @@ +/** + * @file image.h + * @brief Processing of dataset of images. + * @kaspersky_support D. Postnikov + * @date 14.07.2025 + * @license Apache 2.0 + * @copyright © 2024 AO Kaspersky Lab + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include +#include + +#include + +#include "dataset.h" + + +namespace knp::framework::data_processing::classification::images +{ + +/** + * @brief A class that represents processed dataset of images. + */ +class KNP_DECLSPEC Dataset final : public classification::Dataset +{ +public: + /** + * @brief Create data pairs from labels and images, that are converted to spikes form. + * @param images_stream Stream of raw images. + * @param labels_stream Stream of labels. + * @param training_amount Amount of images you want to train model on. + * @param classes_amount Amount of classes. + * @param image_size Size of an images. + * @param steps_per_image Amount of steps required to send image in spikes form to a model. 
+ * @param image_to_spikes Converter of raw image data to spikes form. + */ + void process_labels_and_images( + std::istream &images_stream, std::istream &labels_stream, size_t training_amount, size_t classes_amount, + size_t image_size, size_t steps_per_image, + std::function const &)> const &image_to_spikes); + + /** + * @brief Make generator of spikes, from training labels, for channel. + * @return A functor for generating spikes from dataset. + */ + [[nodiscard]] std::function make_training_labels_generator() + const; + + /** + * @brief Make generator of spikes, from training images in form of spikes, for channel. + * @return A functor for generating spikes from dataset. + */ + [[nodiscard]] std::function + make_training_images_spikes_generator() const; + + /** + * @brief Make generator of spikes, from inference images in form of spikes, for channel. + * @return A functor for generating spikes from dataset. + */ + [[nodiscard]] std::function + make_inference_images_spikes_generator() const; + + /** + * @brief Create an incrementing image to spikes converter. + * @details Spikes will be sent for active_steps steps, and spikes won't be sent for steps_per_image-active_steps + * steps. This converter is considered incrementing because it will add state_increment_factor * image_pixel to states, + * and when value is greater than one, this is considered a spike. + * @param active_steps Amount of active steps, active steps are steps when spikes are being sent, must be < + * steps_per_image. + * @param state_increment_factor How much to add to the spike accumulator. + * @return A functor that converts image raw data to spikes. + */ + [[nodiscard]] std::function const &)> make_incrementing_image_to_spikes_converter( + size_t active_steps, float state_increment_factor) const; + + /** + * @brief Get image size. + * @return Image size. + */ + [[nodiscard]] size_t get_image_size() const { return image_size_; } + +protected: + /** + * @brief Total image size. 
+ */ + size_t image_size_ = 0; +}; + + +} // namespace knp::framework::data_processing::classification::images diff --git a/knp/base-framework/include/knp/framework/inference_evaluation/classification.h b/knp/base-framework/include/knp/framework/inference_evaluation/classification.h new file mode 100644 index 000000000..f86442452 --- /dev/null +++ b/knp/base-framework/include/knp/framework/inference_evaluation/classification.h @@ -0,0 +1,116 @@ +/** + * @file classification.h + * @brief Evaluation of how good model performs by inference results. + * @kaspersky_support D. Postnikov + * @date 16.07.2025 + * @license Apache 2.0 + * @copyright © 2025 AO Kaspersky Lab + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include +#include + +#include + +#include "perfomance_metrics.h" + + +namespace knp::framework::inference_evaluation::classification +{ + +/** + * @brief Processed inference result for single class. + */ +class KNP_DECLSPEC InferenceResultForClass +{ +public: + /** + * @brief Get true positives. + * @return Amount of times model, that is supposed to predict dog, predicted dog when it is a dog. + */ + [[nodiscard]] size_t get_true_positives() const { return true_positives_; } + + /** + * @brief Get false negatives. + * @return Amount of times model, that is supposed to predict dog, predicted not a dog when it is a dog. 
+ */ + [[nodiscard]] size_t get_false_negatives() const { return false_negatives_; } + + /** + * @brief Get false positives. + * @return Amount of times model, that is supposed to predict dog, predicted dog when it is not a dog. + */ + [[nodiscard]] size_t get_false_positives() const { return false_positives_; } + + /** + * @brief Get true negatives. + * @return Amount of times model, that is supposed to predict dog, predicted not a dog when it is not a dog. + */ + [[nodiscard]] size_t get_true_negatives() const { return true_negatives_; } + + /** + * @brief Shortcut for getting total votes. + * @return Total votes. + */ + [[nodiscard]] size_t get_total_votes() const { return true_positives_ + false_negatives_ + false_positives_; } + + /** + * @brief A class to process inference results. + */ + class KNP_DECLSPEC InferenceResultsProcessor + { + public: + /** + * @brief Process inference results. Suited for classification models. + * @param spikes All spikes from inference. + * @param dataset Dataset. + * @details Processed inference results for each class are stored internally. + */ + void process_inference_results( + const std::vector &spikes, + const knp::framework::data_processing::classification::Dataset &dataset); + + /** + * @brief Put inference results for each class to a stream in form of CSV. + * @param results_stream stream for output. + */ + void write_inference_results_to_stream_as_csv(std::ostream &results_stream); + + /** + * @brief Get inference results. + * @return Inference results. + */ + [[nodiscard]] auto const &get_inference_results() const { return inference_results_; } + + private: + /** + * @brief Processed inference results. + */ + std::vector inference_results_; + + /** + * @brief An internal class to help with evaluation. 
+ */ + class EvaluationHelper; + }; + +private: + size_t true_positives_ = 0, false_negatives_ = 0, false_positives_ = 0, true_negatives_ = 0; +}; + +} //namespace knp::framework::inference_evaluation::classification diff --git a/knp/base-framework/include/knp/framework/inference_evaluation/perfomance_metrics.h b/knp/base-framework/include/knp/framework/inference_evaluation/perfomance_metrics.h new file mode 100644 index 000000000..f0e2fdbcd --- /dev/null +++ b/knp/base-framework/include/knp/framework/inference_evaluation/perfomance_metrics.h @@ -0,0 +1,77 @@ +/** + * @file perfomance_metrics.h + * @brief Functions to calculate model statistics. + * @kaspersky_support D. Postnikov + * @date 24.07.2025 + * @license Apache 2.0 + * @copyright © 2025 AO Kaspersky Lab + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +#include +#include + + +namespace knp::framework::inference_evaluation +{ + +/** + * @brief Calculate precision. + * @param true_positives Amount of times model, that is supposed to predict dog, predicted dog when it is a dog. + * @param false_positives Amount of times model, that is supposed to predict dog, predicted dog when it is not a dog. + */ +KNP_DECLSPEC float get_precision(size_t true_positives, size_t false_positives); + + +/** + * @brief Calculate recall. + * @param true_positives Amount of times model, that is supposed to predict dog, predicted dog when it is a dog. 
+ * @param false_negatives Amount of times model, that is supposed to predict dog, predicted not a dog when it is a dog. + */ +KNP_DECLSPEC float get_recall(size_t true_positives, size_t false_negatives); + + +/** + * @brief Calculate prevalence. + * @param true_positives Amount of times model, that is supposed to predict dog, predicted dog when it is a dog. + * @param false_negatives Amount of times model, that is supposed to predict dog, predicted not a dog when it is a dog. + * @param false_positives Amount of times model, that is supposed to predict dog, predicted dog when it is not a dog. + * @param true_negatives Amount of times model, that is supposed to predict dog, predicted not a dog when it is not a + * dog. + */ +KNP_DECLSPEC float get_prevalence( + size_t true_positives, size_t false_negatives, size_t false_positives, size_t true_negatives); + + +/** + * @brief Calculate accuracy. + * @param true_positives Amount of times model, that is supposed to predict dog, predicted dog when it is a dog. + * @param false_negatives Amount of times model, that is supposed to predict dog, predicted not a dog when it is a dog. + * @param false_positives Amount of times model, that is supposed to predict dog, predicted dog when it is not a dog. + * @param true_negatives Amount of times model, that is supposed to predict dog, predicted not a dog when it is not a + * dog. + */ +KNP_DECLSPEC float get_accuracy( + size_t true_positives, size_t false_negatives, size_t false_positives, size_t true_negatives); + + +/** + * @brief Calculate f measure. + * @param precision Precision. + * @param recall Recall. 
+ */ +KNP_DECLSPEC float get_f_measure(float precision, float recall); + +} //namespace knp::framework::inference_evaluation diff --git a/knp/base-framework/include/knp/framework/model_executor.h b/knp/base-framework/include/knp/framework/model_executor.h index f6e82d72f..8cb436a70 100644 --- a/knp/base-framework/include/knp/framework/model_executor.h +++ b/knp/base-framework/include/knp/framework/model_executor.h @@ -128,6 +128,12 @@ class KNP_DECLSPEC ModelExecutor */ std::shared_ptr get_backend() { return loader_.get_backend(); } + /** + * @brief Get message handlers vector. + * @return Message handlers vector. + */ + [[nodiscard]] auto const &get_message_handlers() const { return message_handlers_; } + /** * @brief Get model loader object. * @return reference to `ModelLoader` object. diff --git a/knp/base-framework/include/knp/framework/monitoring/model.h b/knp/base-framework/include/knp/framework/monitoring/model.h index 6a54daeb8..466f787c8 100644 --- a/knp/base-framework/include/knp/framework/monitoring/model.h +++ b/knp/base-framework/include/knp/framework/monitoring/model.h @@ -1,6 +1,6 @@ /** * @file model.h - * @brief Functions for network construction. + * @brief Functions for network monitoring. * @kaspersky_support D. Postnikov * @date 24.03.2025 * @license Apache 2.0 diff --git a/knp/base-framework/include/knp/framework/projection/creators.h b/knp/base-framework/include/knp/framework/projection/creators.h index fcd8917e6..e3201cfa7 100644 --- a/knp/base-framework/include/knp/framework/projection/creators.h +++ b/knp/base-framework/include/knp/framework/projection/creators.h @@ -4,24 +4,25 @@ * @kaspersky_support Artiom N. * @date 10.08.2024 * @license Apache 2.0 - * @copyright © 2024 AO Kaspersky Lab - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * @copyright © 2024 AO Kaspersky Lab + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include +#include #include #include #include @@ -31,25 +32,17 @@ #include "synapse_generators.h" #include "synapse_parameters_generators.h" -/** - * @brief Projection namespace. - */ -namespace knp::framework::projection -{ /** * @brief Namespace for framework projection creators. + * @detail Creators make generators */ -namespace creators +namespace knp::framework::projection::creators { /** - * @brief Make connections between each presynaptic population (source) neuron to each postsynaptic population - * (destination) neuron. - * @details Simple connector that generates connections from source neuron index to all destination indexes and - * otherwise. For populations of size `N x M` the connector generates connections such as: `0 -> 0`, `0 -> 1`, `0 -> 2`, - * ..., `0 -> M`, `1 -> 0`, `1 -> 1`, ..., `1 -> M`, ..., `N -> M`. - * @warning It doesn't get "real" populations and can't be used with populations that contain non-contiguous indexes. 
+ * @brief For populations of size `N x M` generates connections such as: `0 -> + * 0`, `0 -> 1`, `0 -> 2`, ..., `0 -> M`, `1 -> 0`, `1 -> 1`, ..., `1 -> M`, ..., `N -> M`. * @param presynaptic_uid presynaptic population UID. * @param postsynaptic_uid postsynaptic population UID. * @param presynaptic_pop_size presynaptic population neuron count. @@ -73,14 +66,61 @@ template /** - * @brief Make one-to-one connections between neurons of presynaptic and postsynaptic populations. - * @details Simple connector that generates connections from source neuron index to the same destination index. - * For the populations of size `N x N` the connector generates connections such as: `0 -> 0`, `1 -> 1`, `2 -> 2`, ..., - * `N -> N`. + * @brief For example if population0_size is 2 and population1_size is 4, then synapses amount + * must be 4, and generator will create synapses as follows: 0-0, 0-1, 1-2, 1-3. So generator will distribute + * connections evenly. + * @param presynaptic_uid presynaptic population UID. + * @param postsynaptic_uid postsynaptic population UID. + * @param presynaptic_pop_size size of first population + * @param postsynaptic_pop_size size of second population + * @param syn_gen generator of synapse parameters + * @return projection + * tparam SynapseType projection synapse type + */ +template +[[nodiscard]] knp::core::Projection aligned( + const knp::core::UID &presynaptic_uid, const knp::core::UID &postsynaptic_uid, size_t presynaptic_pop_size, + size_t postsynaptic_pop_size, + parameters_generators::SynGen2ParamsType syn_gen = + parameters_generators::default_synapse_gen) +{ + return knp::core::Projection( + presynaptic_uid, postsynaptic_uid, + synapse_generators::aligned(presynaptic_pop_size, postsynaptic_pop_size, syn_gen), + std::max(presynaptic_pop_size, postsynaptic_pop_size)); +} + + +/** + * @brief For example if populations size is 3, then synapses amount is 6, + * and generator will generate synapses as follows: 0-1, 0-2, 1-0, 1-2, 2-0, 2-1. 
So it excludes one synapse at a time. * @pre Population sizes must be equal. * @warning It doesn't get "real" populations and can't be used with populations that contain non-contiguous indexes. * @param presynaptic_uid presynaptic population UID. * @param postsynaptic_uid postsynaptic population UID. + * @param pops_size size of populations, they are supposed to be the same + * @param syn_gen generator of synapse parameters + * @return projection + * tparam SynapseType projection synapse type + */ +template +[[nodiscard]] knp::core::Projection exclusive( + const knp::core::UID &presynaptic_uid, const knp::core::UID &postsynaptic_uid, size_t pops_size, + parameters_generators::SynGen2ParamsType syn_gen = + parameters_generators::default_synapse_gen) +{ + return knp::core::Projection( + presynaptic_uid, postsynaptic_uid, synapse_generators::exclusive(pops_size, syn_gen), + pops_size * (pops_size - 1)); +} + + +/** + * @brief For the populations of size `N x N` generates connections such as: `0 -> 0`, + * `1 -> 1`, `2 -> 2`, ..., `N -> N`. + * @pre Population sizes must be equal. + * @param presynaptic_uid presynaptic population UID. + * @param postsynaptic_uid postsynaptic population UID. * @param population_size neuron count in populations. * @param syn_gen generator of synapse parameters. * @tparam SynapseType projection synapse type. @@ -281,6 +321,4 @@ template source_proj.size()); } -} // namespace creators - -} // namespace knp::framework::projection +} // namespace knp::framework::projection::creators diff --git a/knp/base-framework/include/knp/framework/projection/synapse_generators.h b/knp/base-framework/include/knp/framework/projection/synapse_generators.h index 8e66a1477..09ac590be 100644 --- a/knp/base-framework/include/knp/framework/projection/synapse_generators.h +++ b/knp/base-framework/include/knp/framework/projection/synapse_generators.h @@ -4,18 +4,18 @@ * @kaspersky_support Artiom N. 
* @date 10.08.2024 * @license Apache 2.0 - * @copyright © 2024 AO Kaspersky Lab - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * @copyright © 2024 AO Kaspersky Lab + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -33,16 +34,11 @@ /** - * @brief Projection namespace. + * @brief Synapse generators namespace. */ -namespace knp::framework::projection +namespace knp::framework::projection::synapse_generators { -/** - * @brief Namespace for framework projection generators. - */ -namespace synapse_generators -{ /** * @brief Make connections between each presynaptic population (source) neuron to each postsynaptic population * (destination) neuron. 
@@ -58,7 +54,7 @@ namespace synapse_generators template [[nodiscard]] typename knp::core::Projection::SynapseGenerator all_to_all( size_t presynaptic_pop_size, size_t postsynaptic_pop_size, - parameters_generators::SynGen2ParamsType syn_gen = + const parameters_generators::SynGen2ParamsType &syn_gen = parameters_generators::default_synapse_gen) { return [presynaptic_pop_size, postsynaptic_pop_size, @@ -72,6 +68,78 @@ template } +/** + * @brief Make connections between neurons of presynaptic and postsynaptic populations, so that neurons from a + * population with a less size have consequent connections with neurons from the other population, and the number of + * connections for each neuron of a population with less size is determined by that size. + * @details For example if presynaptic population's size is 2 and postsynaptic population's size is 4, then synapses + * amount must be 4, and generator will create synapses as follows: 0-0, 0-1, 1-2, 1-3. So generator will distribute + * connections evenly. + * @param presynaptic_pop_size size of first population. + * @param postsynaptic_pop_size size of second population. + * @param syn_gen generator of synapse parameters. + * @return synapse generator. + * tparam SynapseType projection synapse type. 
+ */ +template +[[nodiscard]] typename knp::core::Projection::SynapseGenerator aligned( + size_t presynaptic_pop_size, size_t postsynaptic_pop_size, + parameters_generators::SynGen2ParamsType const &syn_gen = + parameters_generators::default_synapse_gen) +{ + return [presynaptic_pop_size, postsynaptic_pop_size, + syn_gen](size_t index) -> std::optional::Synapse> + { + size_t from_index = 0; + size_t pack_size = 0; + size_t to_index = 0; + + if (presynaptic_pop_size >= postsynaptic_pop_size) + { + from_index = index; + pack_size = presynaptic_pop_size / postsynaptic_pop_size; + to_index = index / pack_size; + } + else + { + to_index = index; + pack_size = postsynaptic_pop_size / presynaptic_pop_size; + from_index = index / pack_size; + } + return std::make_tuple(syn_gen(from_index, to_index), from_index, to_index); + }; +} + + +/** + * @brief Make connections between each presynaptic population neuron to each postsynaptic population neuron with + * exception of neurons whose indexes are the same. + * @details For example if populations size is 3, then synapses amount is 6, + * and generator will generate synapses as follows: 0-1, 0-2, 1-0, 1-2, 2-0, 2-1. So it excludes one synapse at a time. + * @pre Population sizes must be equal. + * @param populations_size size of populations, they are supposed to be the same. + * @param syn_gen generator of synapse parameters. + * @return synapse generator. + * tparam SynapseType projection synapse type. 
+ */ +template +[[nodiscard]] typename knp::core::Projection::SynapseGenerator exclusive( + size_t populations_size, parameters_generators::SynGen2ParamsType const &syn_gen = + parameters_generators::default_synapse_gen) +{ + return + [populations_size, syn_gen](size_t index) -> std::optional::Synapse> + { + size_t from_index = 0; + size_t to_index = 0; + from_index = index / (populations_size - 1); + to_index = index % (populations_size - 1); + if (to_index >= from_index) ++to_index; + return std::make_tuple(syn_gen(from_index, to_index), from_index, to_index); + }; +} + + /** * @brief Make one-to-one connections between neurons of presynaptic and postsynaptic populations. * @details Simple generator that generates connections from source neuron index to the same destination index. @@ -153,7 +221,7 @@ class FromMap /** - * @brief The FixedProbability class is a definition of a generator that makes connections with some probability + * @brief The FixedProbability class is a definition of a generator that makes connections with some probability * between each presynaptic population (source) neuron to each postsynaptic population (destination) neuron. * @warning It doesn't get "real" populations and can't be used with populations that contain non-contiguous indexes. * @tparam SynapseType projection synapse type. @@ -236,8 +304,8 @@ template /** - * @brief The FixedNumberPost class is a definition of a generator that makes connections between each presynaptic neuron - * and a fixed number of random postsynaptic neurons. + * @brief The FixedNumberPost class is a definition of a generator that makes connections between each presynaptic + * neuron and a fixed number of random postsynaptic neurons. * @details This connector uses MT19937 generator with uniform integer distribution. * @warning It doesn't get "real" populations and can't be used with populations that contain non-contiguous indexes. * @tparam SynapseType projection synapse type. 
@@ -287,8 +355,8 @@ class FixedNumberPost /** - * @brief The FixedNumberPre class is a definition of a generator that makes connections between each postsynaptic neuron - * and a fixed number of random presynaptic neurons. + * @brief The FixedNumberPre class is a definition of a generator that makes connections between each postsynaptic + * neuron and a fixed number of random presynaptic neurons. * @details This uses MT19937 generator with uniform integer distribution. * @warning It doesn't get "real" populations and can't be used with populations that contain non-contiguous indexes. * @tparam SynapseType projection synapse type. @@ -364,6 +432,4 @@ template }; } -} // namespace synapse_generators - -} // namespace knp::framework::projection +} // namespace knp::framework::projection::synapse_generators diff --git a/examples/mnist-learn/wta.h b/knp/base-framework/include/knp/framework/projection/wta.h similarity index 51% rename from examples/mnist-learn/wta.h rename to knp/base-framework/include/knp/framework/projection/wta.h index 1f8f5afc3..3db19a430 100644 --- a/examples/mnist-learn/wta.h +++ b/knp/base-framework/include/knp/framework/projection/wta.h @@ -1,8 +1,8 @@ /** * @file wta.h * @brief Functions for Winner Takes All. - * @kaspersky_support A. Vartenkov - * @date 28.03.2025 + * @kaspersky_support D. Postnikov + * @date 03.07.2025 * @license Apache 2.0 * @copyright © 2025 AO Kaspersky Lab * @@ -24,9 +24,24 @@ #include #include +#include #include -#include "construct_network.h" +namespace knp::framework::projection +{ -std::vector add_wta_handlers(const AnnotatedNetwork &network, knp::framework::ModelExecutor &executor); +/** + * @brief add WTA handlers to network. + * @details WTA stands for winner takes all. + * @param executor model executor. + * @param winners_amount amount of winners. + * @param borders borders for wta. + * @param wta_data for each compound network: a vector of senders and a vector of receivers. 
+ * @return vector with uids of handlers. + */ +KNP_DECLSPEC std::vector add_wta_handlers( + knp::framework::ModelExecutor& executor, size_t winners_amount, const std::vector& borders, + const std::vector, std::vector>>& wta_data); + +} // namespace knp::framework::projection diff --git a/knp/core-library/include/knp/core/population.h b/knp/core-library/include/knp/core/population.h index 576dbdff4..a8973f44a 100644 --- a/knp/core-library/include/knp/core/population.h +++ b/knp/core-library/include/knp/core/population.h @@ -4,18 +4,18 @@ * @kaspersky_support Artiom N. * @date 18.01.2023 * @license Apache 2.0 - * @copyright © 2024 AO Kaspersky Lab - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and + * @copyright © 2024 AO Kaspersky Lab + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and * limitations under the License. */ @@ -118,18 +118,23 @@ class Population final /** * @brief Set parameters for the specific neuron in the population. * @param index index of the population neuron. 
- * @param parameters vector of neuron parameters defined in NeuronParameters for the population. - * @note Move method. + * @param parameters neuron parameters. */ - void set_neuron_parameters(size_t index, NeuronParameters &¶meters) { neurons_[index] = std::move(parameters); } + void set_neuron_parameters(size_t index, const NeuronParameters ¶meters) { neurons_[index] = parameters; } /** - * @brief Set parameters for the specific neuron in the population. - * @param index index of the population neuron. - * @param parameters vector of neuron parameters defined in NeuronParameters for the population. + * @brief Set parameters for all neurons in the population. + * @param parameters vector of neuron parameters. * @note Copy method. */ - void set_neurons_parameters(size_t index, const NeuronParameters ¶meters) { neurons_[index] = parameters; } + void set_neurons_parameters(const std::vector ¶meters) { neurons_ = parameters; } + + /** + * @brief Set parameters for all neurons in the population. + * @param parameters vector of neuron parameters + * @note Move method. + */ + void set_neurons_parameters(std::vector &¶meters) { neurons_ = std::move(parameters); } public: // NOLINT /** diff --git a/knp/core-library/include/knp/core/subscription.h b/knp/core-library/include/knp/core/subscription.h index d910d6bf9..41bac1334 100644 --- a/knp/core-library/include/knp/core/subscription.h +++ b/knp/core-library/include/knp/core/subscription.h @@ -23,8 +23,8 @@ #include #include -#include #include +#include #include #include @@ -115,11 +115,13 @@ class Subscription final /** * @brief Add a message to the subscription. * @param message message to add. + * @note move */ - void add_message(MessageType &&message) { messages_.push_back(message); } + void add_message(MessageType &&message) { messages_.push_back(std::move(message)); } /** * @brief Add a message to the subscription. * @param message constant message to add. 
+ * @note copy */ void add_message(const MessageType &message) { messages_.push_back(message); } diff --git a/knp/tests/framework/data_processing_test.cpp b/knp/tests/framework/data_processing_test.cpp new file mode 100644 index 000000000..822796555 --- /dev/null +++ b/knp/tests/framework/data_processing_test.cpp @@ -0,0 +1,83 @@ +/** + * @file data_processing_test.cpp + * @brief Data processing test. + * @kaspersky_support D. Postnikov + * @date 21.08.2025 + * @license Apache 2.0 + * @copyright © 2024 AO Kaspersky Lab + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include + + +TEST(DataProcessing, ImageClassification) +{ + constexpr size_t training_amount = 10, classes_amount = 3, image_size = 1, steps_per_image = 1; + constexpr float dataset_split = 2.F / 3.F; + std::stringstream images_stream("\x01\x02\x03"); + std::stringstream labels_stream("0\n1\n2\n"); + knp::framework::data_processing::classification::images::Dataset dataset; + dataset.process_labels_and_images( + images_stream, labels_stream, training_amount, classes_amount, image_size, steps_per_image, + [](std::vector const&) -> knp::framework::data_processing::classification::Dataset::Frame + { return {{true}}; }); + dataset.split(dataset_split); + + ASSERT_EQ(dataset.get_image_size(), image_size); + ASSERT_EQ(dataset.get_amount_of_classes(), classes_amount); + ASSERT_EQ(dataset.get_required_training_amount(), training_amount); + ASSERT_EQ(dataset.get_steps_per_frame(), steps_per_image); + ASSERT_EQ(dataset.get_steps_required_for_training(), training_amount); + ASSERT_EQ(dataset.get_steps_required_for_inference(), 1); + + ASSERT_EQ(dataset.get_data_for_training().size(), 2); + ASSERT_EQ(dataset.get_data_for_training()[0].first, 0); + ASSERT_EQ(dataset.get_data_for_training()[0].second.spikes_.size(), 1); + ASSERT_EQ(dataset.get_data_for_training()[0].second.spikes_[0], true); + ASSERT_EQ(dataset.get_data_for_training()[1].first, 1); + ASSERT_EQ(dataset.get_data_for_training()[1].second.spikes_.size(), 1); + ASSERT_EQ(dataset.get_data_for_training()[1].second.spikes_[0], true); + + ASSERT_EQ(dataset.get_data_for_inference().size(), 1); + ASSERT_EQ(dataset.get_data_for_inference()[0].first, 2); + ASSERT_EQ(dataset.get_data_for_inference()[0].second.spikes_.size(), 1); + ASSERT_EQ(dataset.get_data_for_inference()[0].second.spikes_[0], true); + + auto train_images_spikes_gen = dataset.make_training_images_spikes_generator(); + for (size_t i = 0; i < dataset.get_steps_required_for_training(); ++i) + { + const auto res = train_images_spikes_gen(i); 
+        ASSERT_EQ(res.size(), 1);
+        ASSERT_EQ(res[0], 0);
+    }
+
+    auto train_labels_gen = dataset.make_training_labels_generator();
+    for (size_t i = 0; i < dataset.get_steps_required_for_training(); ++i)
+    {
+        const auto res = train_labels_gen(i);
+        ASSERT_EQ(res.size(), 1);
+        ASSERT_EQ(res[0], i % dataset.get_data_for_training().size());
+    }
+
+    auto inf_images_spikes_gen = dataset.make_inference_images_spikes_generator();
+    for (size_t i = 0; i < dataset.get_steps_required_for_inference(); ++i)
+    {
+        const auto res = inf_images_spikes_gen(i);
+        ASSERT_EQ(res.size(), 1);
+        ASSERT_EQ(res[0], 0);
+    }
+}
diff --git a/knp/tests/framework/inference_evaluation_test.cpp b/knp/tests/framework/inference_evaluation_test.cpp
new file mode 100644
index 000000000..86d2cc4cb
--- /dev/null
+++ b/knp/tests/framework/inference_evaluation_test.cpp
@@ -0,0 +1,72 @@
+/**
+ * @file inference_evaluation_test.cpp
+ * @brief Inference evaluation test.
+ * @kaspersky_support D. Postnikov
+ * @date 21.08.2025
+ * @license Apache 2.0
+ * @copyright © 2024 AO Kaspersky Lab
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. 
+ */ + +#include + +#include + + +class ClassificationTestHelper : public knp::framework::data_processing::classification::Dataset +{ +public: + ClassificationTestHelper() + { + steps_required_for_inference_ = 4; + steps_per_frame_ = 1; + classes_amount_ = 2; + data_for_inference_ = {{1, {}}, {1, {}}, {0, {}}, {0, {}}}; + } +}; + + +TEST(InferenceEvaluation, Classification) +{ + ClassificationTestHelper dataset; + std::vector spikes; + spikes.push_back({{knp::core::UID(false), 0}, {1, 3}}); + spikes.push_back({{knp::core::UID(false), 1}, {5, 1}}); + spikes.push_back({{knp::core::UID(false), 2}, {2, 0}}); + spikes.push_back({{knp::core::UID(false), 3}, {3, 1}}); + + knp::framework::inference_evaluation::classification::InferenceResultForClass::InferenceResultsProcessor processor; + processor.process_inference_results(spikes, dataset); + + auto const& res = processor.get_inference_results(); + + ASSERT_EQ(res[0].get_total_votes(), 2); + ASSERT_EQ(res[0].get_true_positives(), 1); + ASSERT_EQ(res[0].get_false_negatives(), 0); + ASSERT_EQ(res[0].get_false_positives(), 1); + ASSERT_EQ(res[0].get_true_negatives(), 2); + ASSERT_EQ(res[1].get_total_votes(), 2); + ASSERT_EQ(res[1].get_true_positives(), 2); + ASSERT_EQ(res[1].get_false_negatives(), 0); + ASSERT_EQ(res[1].get_false_positives(), 0); + ASSERT_EQ(res[1].get_true_negatives(), 2); + + std::stringstream csv_res; + processor.write_inference_results_to_stream_as_csv(csv_res); + + ASSERT_EQ( + csv_res.str(), + "CLASS,TOTAL_VOTES,TRUE_POSITIVES,FALSE_NEGATIVES,FALSE_POSITIVES,TRUE_NEGATIVES,PRECISION,RECALL,PREVALENCE," + "ACCURACY,F_MEASURE\n0,2,1,0,1,2,0.5,0.5,0.25,0.75,0.5\n1,2,2,0,0,2,1,1,0.5,1,1\n"); +} diff --git a/knp/tests/framework/model_monitoring_test.cpp b/knp/tests/framework/model_monitoring_test.cpp index b9053d44b..3c4ba702f 100644 --- a/knp/tests/framework/model_monitoring_test.cpp +++ b/knp/tests/framework/model_monitoring_test.cpp @@ -1,8 +1,8 @@ /** * @file model_monitoring_test.cpp - * @brief 
Single-threaded backend test. + * @brief Model monitoring test. * @kaspersky_support D. Postnikov - * @date 07.04.2023 + * @date 10.08.2025 * @license Apache 2.0 * @copyright © 2024 AO Kaspersky Lab * @@ -31,14 +31,12 @@ TEST(ModelMonitoring, AggregatedSpikesLogger) knp::testing::BLIFATPopulation population{knp::testing::neuron_generator, 1}; { //stop spikes from happening - const auto& params = population.get_neurons_parameters(); - for (size_t i = 0; i < params.size(); i++) + auto params = population.get_neurons_parameters(); + for (auto& param : params) { - auto param = params[i]; param.activation_threshold_ = std::numeric_limits::max(); - //todo change this to set_neuron_parameters when #69 gets fixed - population.set_neurons_parameters(i, param); } + population.set_neurons_parameters(params); } knp::testing::DeltaProjection input_projection = knp::testing::DeltaProjection{ @@ -136,14 +134,12 @@ TEST(ModelMonitoring, SpikesLogger) knp::testing::BLIFATPopulation population{knp::testing::neuron_generator, 1}; { //stop spikes from happening - const auto& params = population.get_neurons_parameters(); - for (size_t i = 0; i < params.size(); i++) + auto params = population.get_neurons_parameters(); + for (auto& param : params) { - auto param = params[i]; param.activation_threshold_ = std::numeric_limits::max(); - //todo change this to set_neuron_parameters when #69 gets fixed - population.set_neurons_parameters(i, param); } + population.set_neurons_parameters(params); } knp::testing::DeltaProjection input_projection = knp::testing::DeltaProjection{ diff --git a/knp/tests/framework/projection_creators_test.cpp b/knp/tests/framework/projection_creators_test.cpp index c6d3e127a..652c0578b 100644 --- a/knp/tests/framework/projection_creators_test.cpp +++ b/knp/tests/framework/projection_creators_test.cpp @@ -29,28 +29,73 @@ TEST(ProjectionConnectors, AllToAll) { - constexpr size_t src_pop_size = 3; - constexpr size_t dest_pop_size = 3; + constexpr size_t src_pop_size = 
2; + constexpr size_t dest_pop_size = 4; auto proj = knp::framework::projection::creators::all_to_all( knp::core::UID(), knp::core::UID(), src_pop_size, dest_pop_size); ASSERT_EQ(proj.size(), src_pop_size * dest_pop_size); - std::map> conn_count; - + size_t index = 0; for (const auto& synapse : proj) { + const auto source_syn_index = std::get(synapse); const auto target_syn_index = std::get(synapse); + + SPDLOG_DEBUG("Synapse: {} -> {}.", source_syn_index, target_syn_index); + ASSERT_EQ(source_syn_index, index % src_pop_size); + ASSERT_EQ(target_syn_index, index / src_pop_size); + ++index; + } +} + + +TEST(ProjectionConnectors, Aligned) +{ + constexpr size_t src_pop_size = 3; + constexpr size_t dest_pop_size = 6; + + auto proj = knp::framework::projection::creators::aligned( + knp::core::UID(), knp::core::UID(), src_pop_size, dest_pop_size); + + ASSERT_EQ(proj.size(), std::max(src_pop_size, dest_pop_size)); + + size_t index = 0; + for (const auto& synapse : proj) + { const auto source_syn_index = std::get(synapse); + const auto target_syn_index = std::get(synapse); - SPDLOG_DEBUG("Synapse: {} -> {}.", target_syn_index, source_syn_index); - conn_count[target_syn_index].push_back(source_syn_index); + SPDLOG_DEBUG("Synapse: {} -> {}.", source_syn_index, target_syn_index); + ASSERT_EQ(target_syn_index, index); + ASSERT_EQ(source_syn_index, index / 2); + ++index; } +} + + +TEST(ProjectionConnectors, Exclusive) +{ + constexpr size_t pops_size = 3; + + auto proj = knp::framework::projection::creators::exclusive( + knp::core::UID(), knp::core::UID(), pops_size); - for (const auto& [key, value] : conn_count) + ASSERT_EQ(proj.size(), pops_size * (pops_size - 1)); + + constexpr std::array correct_target{1, 2, 0, 2, 0, 1}; + + size_t index = 0; + for (const auto& synapse : proj) { - ASSERT_EQ(value.size(), dest_pop_size); + const auto source_syn_index = std::get(synapse); + const auto target_syn_index = std::get(synapse); + + SPDLOG_DEBUG("Synapse: {} -> {}.", 
source_syn_index, target_syn_index); + ASSERT_EQ(source_syn_index, index / 2); + ASSERT_EQ(target_syn_index, correct_target[index]); + ++index; } } @@ -64,21 +109,13 @@ TEST(ProjectionConnectors, OneToOne) ASSERT_EQ(proj.size(), pop_size); - std::map> conn_count; - for (const auto& synapse : proj) { - const auto target_syn_index = std::get(synapse); const auto source_syn_index = std::get(synapse); + const auto target_syn_index = std::get(synapse); - SPDLOG_DEBUG("Synapse: {} -> {}.", target_syn_index, source_syn_index); - conn_count[target_syn_index].push_back(source_syn_index); - } - - for (const auto& [key, value] : conn_count) - { - ASSERT_EQ(value.size(), 1); - ASSERT_EQ(key, value[0]); + SPDLOG_DEBUG("Synapse: {} -> {}.", source_syn_index, target_syn_index); + ASSERT_EQ(source_syn_index, target_syn_index); } } diff --git a/knp/tests/framework/wta_test.cpp b/knp/tests/framework/wta_test.cpp new file mode 100644 index 000000000..919d0024f --- /dev/null +++ b/knp/tests/framework/wta_test.cpp @@ -0,0 +1,39 @@ +/** + * @file wta_test.cpp + * @brief Test for winner takes all. + * @kaspersky_support D. Postnikov + * @date 23.07.2025 + * @license Apache 2.0 + * @copyright © 2024 AO Kaspersky Lab + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include + + +TEST(WinnerTakesAllTest, HandlersPush) +{ + knp::framework::Model model({}); + knp::framework::BackendLoader backend_loader; + knp::framework::ModelExecutor model_executor(model, backend_loader.load(knp::testing::get_backend_path()), {}); + + knp::framework::projection::add_wta_handlers(model_executor, 1, {}, {{{}, {}}}); + + auto const& handlers = model_executor.get_message_handlers(); + + ASSERT_EQ(handlers.size(), 1); +}