-
Notifications
You must be signed in to change notification settings - Fork 18
Rudimentary fluid.polynomialregressor object
#246
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Open
lewardo
wants to merge
30
commits into
flucoma:main
Choose a base branch
from
lewardo:polynomial-regressor
base: main
Could not load branches
Branch not found: {{ refName }}
Loading
Could not load tags
Nothing to show
Loading
Are you sure you want to change the base?
Some commits from the old base branch may be removed from the timeline,
and old review comments may become outdated.
Open
Changes from 16 commits
Commits
Show all changes
30 commits
Select commit
Hold shift + click to select a range
cac7645
base PolynomialRegressor object
lewardo 430ac9c
method call structure
lewardo 3883e5c
design matrix construction
lewardo c633e49
linear algebra algorithm implementation
lewardo 96f715c
copy constructor and assignment operator optimisations
lewardo 1719d8f
getting object to show up in max (with lewardo/flucoma-max@02d3b22)
lewardo c3fa23a
temporary commit
lewardo 3997c5b
client update test
lewardo 971dce8
json functions now configured
lewardo b6a3701
init method for setting initial value on load
lewardo b591674
removed superfluous mIn/mOut members
lewardo d880081
predictpoint message now working and regressing
lewardo eb940de
predict message now working with datasets
lewardo 8792d20
multi-regressor interface
lewardo 53aff44
rename mDims for consistency
lewardo cd15ed2
actually initialise algorithm from read
lewardo be581ff
fix parameter read/load updating
lewardo 6cf06aa
automatic dimension setting from training mapping
lewardo ee63aae
bugfix saving in wrong dimensionality
lewardo 7b92c53
rename algo methods for consistency
lewardo 983560a
added tikhonov regularisation, currently only ridge normalisation
lewardo ab42687
slimmed json saving by removing redundant data
lewardo b615117
saving of tikhonov factor
lewardo fa0dbfb
added write regression state catch
lewardo 5b0bc48
remove parameter update bug on first `fit`
lewardo cd16032
fix get<>() definition location
lewardo c0023d2
increase assignment clarity
lewardo dea9281
run clang-format
lewardo 8702f6d
re-allocate memory on regression for first run
lewardo 59d4932
now using the fluid memory allocator
lewardo File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,149 @@ | ||
| /* | ||
| Part of the Fluid Corpus Manipulation Project (http://www.flucoma.org/) | ||
| Copyright University of Huddersfield. | ||
| Licensed under the BSD-3 License. | ||
| See license.md file in the project root for full license information. | ||
| This project has received funding from the European Research Council (ERC) | ||
| under the European Union’s Horizon 2020 research and innovation programme | ||
| (grant agreement No 725899). | ||
| */ | ||
|
|
||
| #pragma once | ||
|
|
||
| #include "../util/AlgorithmUtils.hpp" | ||
| #include "../util/FluidEigenMappings.hpp" | ||
| #include "../../data/FluidIndex.hpp" | ||
| #include "../../data/FluidMemory.hpp" | ||
| #include "../../data/TensorTypes.hpp" | ||
| #include <Eigen/Core> | ||
| #include <Eigen/Dense> | ||
| #include <cassert> | ||
| #include <cmath> | ||
|
|
||
| namespace fluid { | ||
| namespace algorithm { | ||
|
|
||
| class PolynomialRegressor | ||
| { | ||
| public: | ||
| explicit PolynomialRegressor() = default; | ||
| ~PolynomialRegressor() = default; | ||
|
|
||
| void init(index degree, index dims) | ||
| { | ||
| mInitialized = true; | ||
| setDegree(degree); | ||
| setDims(dims); | ||
| }; | ||
|
|
||
| index degree() const { return mInitialized ? asSigned(mDegree) : 0; }; | ||
| index dims() const { return mInitialized ? asSigned(mDims) : 0; }; | ||
| index size() const { return mInitialized ? asSigned(mDegree) : 0; }; | ||
|
|
||
| void clear() { mRegressed = false; } | ||
|
|
||
| bool regressed() const { return mRegressed; }; | ||
| bool initialized() const { return mInitialized; }; | ||
|
|
||
| void setDegree(index degree) { | ||
| if (mDegree == degree) return; | ||
|
|
||
| mDegree = degree; | ||
| mCoefficients.conservativeResize(mDegree + 1, mDims); | ||
| mRegressed = false; | ||
| } | ||
|
|
||
| void setDims(index dims) { | ||
| if (mDims == dims) return; | ||
|
|
||
| mDims = dims; | ||
| mCoefficients.conservativeResize(mDegree + 1, mDims); | ||
| mRegressed = false; | ||
| } | ||
|
|
||
| void calculateRegressionCoefficients(InputRealMatrixView in, | ||
| InputRealMatrixView out, | ||
| Allocator& alloc = FluidDefaultAllocator()) | ||
| { | ||
| using namespace _impl; | ||
|
|
||
| ScopedEigenMap<Eigen::MatrixXd> input(in.rows(), in.cols(), alloc), | ||
| output(out.rows(), out.cols(), alloc); | ||
| input = asEigen<Eigen::Array>(in); | ||
| output = asEigen<Eigen::Array>(out); | ||
|
|
||
| for(index i = 0; i < mDims; ++i) | ||
| { | ||
| generateDesignMatrix(input.col(i)); | ||
|
|
||
| Eigen::MatrixXd transposeProduct = mDesignMatrix.transpose() * mDesignMatrix; | ||
| mCoefficients.col(i) = transposeProduct.inverse() * mDesignMatrix.transpose() * output.col(i); | ||
| } | ||
|
|
||
|
|
||
| mRegressed = true; | ||
| }; | ||
|
|
||
| void getCoefficients(RealMatrixView coefficients) const | ||
| { | ||
| if (mInitialized) _impl::asEigen<Eigen::Array>(coefficients) = mCoefficients; | ||
| }; | ||
|
|
||
| void setCoefficients(InputRealMatrixView coefficients) | ||
| { | ||
| setDegree(coefficients.rows() - 1); | ||
| setDims(coefficients.cols()); | ||
|
|
||
| mCoefficients = _impl::asEigen<Eigen::Array>(coefficients); | ||
| mRegressed = true; | ||
| } | ||
|
|
||
| void getMappedSpace(InputRealMatrixView in, | ||
| RealMatrixView out, | ||
| Allocator& alloc = FluidDefaultAllocator()) const | ||
| { | ||
| using namespace _impl; | ||
|
|
||
| ScopedEigenMap<Eigen::MatrixXd> input(in.rows(), in.cols(), alloc), | ||
| output(out.rows(), out.cols(), alloc); | ||
| input = asEigen<Eigen::Array>(in); | ||
| output = asEigen<Eigen::Array>(out); | ||
|
|
||
| calculateMappings(input, output); | ||
|
|
||
| asEigen<Eigen::Array>(out) = output; | ||
| } | ||
|
|
||
| private: | ||
| void calculateMappings(Eigen::Ref<Eigen::MatrixXd> in, Eigen::Ref<Eigen::MatrixXd> out) const | ||
| { | ||
| for(index i = 0; i < mDims; ++i) | ||
| { | ||
| generateDesignMatrix(in.col(i)); | ||
| out.col(i) = mDesignMatrix * mCoefficients.col(i); | ||
| } | ||
| } | ||
|
|
||
| void generateDesignMatrix(Eigen::Ref<Eigen::VectorXd> in) const | ||
| { | ||
| Eigen::VectorXd designColumn = Eigen::VectorXd::Ones(in.size()); | ||
| Eigen::ArrayXd inArray = in.array(); | ||
|
|
||
| mDesignMatrix.conservativeResize(in.size(), mDegree + 1); | ||
|
|
||
| for(index i = 0; i < mDegree + 1; ++i, designColumn = designColumn.array() * inArray) | ||
| mDesignMatrix.col(i) = designColumn; | ||
| } | ||
|
|
||
| index mDegree {2}; | ||
| index mDims {1}; | ||
| bool mRegressed {false}; | ||
| bool mInitialized {false}; | ||
|
|
||
| mutable Eigen::MatrixXd mDesignMatrix; | ||
| Eigen::MatrixXd mCoefficients; | ||
|
|
||
| }; | ||
|
|
||
| } // namespace algorithm | ||
| } // namespace fluid | ||
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.