Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
36 commits
Select commit Hold shift + click to select a range
65d0357
Update MAINTAINERS.md
icfaust Oct 3, 2025
d9344ce
Update README.md
icfaust Oct 3, 2025
d2ee05f
Update makefile
icfaust Oct 3, 2025
5747ec4
Update threading.h
icfaust Oct 3, 2025
faeff75
Update service_topo.cpp
icfaust Oct 3, 2025
0ec4706
Update service_daal_load_win_dynamic_lib.cpp
icfaust Oct 3, 2025
516511f
Update error_handling.cpp
icfaust Oct 3, 2025
65c86c4
Update error_handling.cpp
icfaust Oct 3, 2025
8317221
Update dll.rc
icfaust Oct 3, 2025
11b1dc4
Update em_gmm_dense_default_batch_task.h
icfaust Oct 3, 2025
384b448
Update cosdistance_batch_container.h
icfaust Oct 3, 2025
1374b26
Update linear_model_train_qr_kernel.h
icfaust Oct 3, 2025
0726192
Update linear_model_train_normeq_kernel.h
icfaust Oct 3, 2025
2ce7b4e
Update assoc_rules_apriori_kernel.h
icfaust Oct 3, 2025
e4faaad
Update kmeans_plusplus_init_impl.i
icfaust Oct 3, 2025
82e2aea
Update kmeans_lloyd_batch_impl.i
icfaust Oct 3, 2025
30ddf99
Update gbt_regression_train_container.h
icfaust Oct 3, 2025
97d4b37
Update gbt_classification_predict_dense_default_batch_impl.i
icfaust Oct 3, 2025
aedfa02
Update treeshap.cpp
icfaust Oct 3, 2025
163e4f8
Update gbt_train_updater.i
icfaust Oct 3, 2025
82be882
Update gbt_train_tree_builder.i
icfaust Oct 3, 2025
85899e6
Update gbt_train_aux.i
icfaust Oct 3, 2025
8bbc185
Update df_classification_train_dense_default_impl.i
icfaust Oct 3, 2025
72d8637
Update dtrees_train_data_helper.i
icfaust Oct 3, 2025
7630779
Update tsne_gradient_descent_impl.i
icfaust Oct 3, 2025
0a4cdd7
Update logitboost_train_kernel.h
icfaust Oct 3, 2025
2636d5d
Update logistic_loss_dense_default_batch_impl.i
icfaust Oct 3, 2025
9a95b9b
Update qr_dense_default_pcl_impl.i
icfaust Oct 3, 2025
7da0cdc
Update cordistance_batch_container.h
icfaust Oct 3, 2025
64a3e30
Update implicit_als_predict_ratings_dense_default_impl.i
icfaust Oct 3, 2025
0f092b0
Update implicit_als_train_csr_default_distr_impl.i
icfaust Oct 3, 2025
5f76ca3
Update implicit_als_train_dense_default_batch_impl.i
icfaust Oct 3, 2025
e870c13
clang-formatting
icfaust Oct 7, 2025
cc6732f
Update cpp/daal/src/services/error_handling.cpp
icfaust Oct 7, 2025
5fb5092
Update cpp/daal/src/algorithms/dtrees/forest/classification/df_classi…
icfaust Oct 7, 2025
54163e5
Merge branch 'main' into dev/spelling_corrections_round1
icfaust Dec 9, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions MAINTAINERS.md
Original file line number Diff line number Diff line change
Expand Up @@ -98,14 +98,14 @@ Responsibilities:
* Follow and enforce the project [contributing guidelines](CONTRIBUTING.md)
* Co-own with other component Maintainers on the technical direction of a specific component.
* Co-own with other Maintainers on the project as a whole, including determining strategy and policy for the project.
* Suppport and guide Contributors and Code Owners.
* Support and guide Contributors and Code Owners.

Requirements:
* Experience as a Code Owner for at least 12 months.
* Track record of major project contributions to a specific project component.
* Demonstrated deep knowledge of a specific project component.
* Demonstrated broad knowledge of the project across multiple areas.
* Commits to using priviledges responsibly for the good of the project.
* Commits to using privileges responsibly for the good of the project.
* Is able to exercise judgment for the good of the project, independent of
their employer, friends, or team.

Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ You can contribute to this project and also contribute to the specification for

## Support

Ask questions and engage in discussions with oneDAL developers, contributers, and other users through the following channels:
Ask questions and engage in discussions with oneDAL developers, contributors, and other users through the following channels:

- [GitHub Discussions](https://github.com/uxlfoundation/oneDAL/discussions)
- [Community Forum](https://community.intel.com/t5/Intel-oneAPI-Data-Analytics/bd-p/oneapi-data-analytics-library)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ class AssociationRulesKernel<apriori, algorithmFPType, cpu> : public Kernel
hash_tree<cpu> * C_tree, services::Status & s);

/** Test that all {n-1}-item subsets of {n}-item set are "large" item sets */
bool pruneCandidate(size_t iset_size, const size_t * cadidate, size_t * subset, hash_tree<cpu> & C_tree);
bool pruneCandidate(size_t iset_size, const size_t * candidate, size_t * subset, hash_tree<cpu> & C_tree);

size_t binarySearch(size_t nUniqueItems, assocRulesUniqueItem<cpu> * uniqueItems, size_t itemID);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ namespace algorithms
namespace correlation_distance
{
/**
* \brief Initialize list of correlation distance, double precission
* \brief Initialize list of correlation distance, double precision
* kernels with implementations for supported architectures
*/
template <typename algorithmFPType, Method method, CpuType cpu>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ namespace algorithms
namespace cosine_distance
{
/**
* \brief Initialize list of correlation distance, double precission
* \brief Initialize list of correlation distance, double precision
* kernels with implementations for supported architectures
*/
template <typename algorithmFPType, Method method, CpuType cpu>
Expand Down
4 changes: 2 additions & 2 deletions cpp/daal/src/algorithms/dtrees/dtrees_train_data_helper.i
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ struct SResponse
//////////////////////////////////////////////////////////////////////////////////////////
// DataHelper. Base class for response-specific services classes.
// Keeps indices of the bootstrap samples and provides optimal access to columns in case
// of homogenious numeric table
// of homogeneous numeric table
//////////////////////////////////////////////////////////////////////////////////////////
template <typename algorithmFPType, CpuType cpu>
class DataHelperBase
Expand Down Expand Up @@ -194,7 +194,7 @@ protected:
//////////////////////////////////////////////////////////////////////////////////////////
// DataHelper. Base class for response-specific services classes.
// Keeps indices of the bootstrap samples and provides optimal access to columns in case
// of homogenious numeric table
// of homogeneous numeric table
//////////////////////////////////////////////////////////////////////////////////////////
template <typename algorithmFPType, typename TResponse, CpuType cpu>
class DataHelper : public DataHelperBase<algorithmFPType, cpu>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ protected: //enables specific functions for UnorderedRespHelperBest
imp.hist[iClass] -= moveWeights;
}

// Calculate impurity for left and right childs
// Calculate impurity for left and right children
static void updateImpurity(ImpurityData & left, ImpurityData & right, ClassIndexType iClass, intermSummFPType totalWeights,
intermSummFPType startWeights, intermSummFPType & moveWeights)
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -268,7 +268,7 @@ protected:
* \param hasUnorderedFeatures Data has unordered features yes/no
* \param hasAnyMissing Data has missing values yes/no
* \param isResValidPtr Result pointer is valid yes/no (write result to the pointer if yes, skip if no)
* \param reuseBuffer Re-use buffer yes/no (will fill buffer with zero if yes, shift buff pointer if no)
* \param reuseBuffer Reuse buffer yes/no (will fill buffer with zero if yes, shift buff pointer if no)
* \param vectorBlockSize Vector instruction block size
* \param nTrees Number of trees contributing to prediction
* \param nClasses Number of data classes
Expand All @@ -287,7 +287,7 @@ protected:
*
* \param hasAnyMissing Data has missing values yes/no
* \param isResValidPtr Result pointer is valid yes/no (write result to the pointer if yes, skip if no)
* \param reuseBuffer Re-use buffer yes/no (will fill buffer with zero if yes, shift buff pointer if no)
* \param reuseBuffer Reuse buffer yes/no (will fill buffer with zero if yes, shift buff pointer if no)
* \param vectorBlockSize Vector instruction block size
* \param nTrees Number of trees contributing to prediction
* \param nClasses Number of data classes
Expand All @@ -305,7 +305,7 @@ protected:
* \brief Traverse a number of trees to get prediction results
*
* \param isResValidPtr Result pointer is valid yes/no (write result to the pointer if yes, skip if no)
* \param reuseBuffer Re-use buffer yes/no (will fill buffer with zero if yes, shift buff pointer if no)
* \param reuseBuffer Reuse buffer yes/no (will fill buffer with zero if yes, shift buff pointer if no)
* \param vectorBlockSize Vector instruction block size
* \param nTrees Number of trees contributing to prediction
* \param nClasses Number of data classes
Expand All @@ -323,7 +323,7 @@ protected:
* \brief Traverse a number of trees to get prediction results
*
* \param isResValidPtr Result pointer is valid yes/no (write result to the pointer if yes, skip if no)
* \param reuseBuffer Re-use buffer yes/no (will fill buffer with zero if yes, shift buff pointer if no)
* \param reuseBuffer Reuse buffer yes/no (will fill buffer with zero if yes, shift buff pointer if no)
* \param vectorBlockSizeFactor Vector instruction block size - recursively decremented until it becomes equal to dim.vectorBlockSizeFactor or equal to DimType::minVectorBlockSizeFactor
* \param nTrees Number of trees contributing to prediction
* \param nClasses Number of data classes
Expand All @@ -341,7 +341,7 @@ protected:
* \brief Traverse a number of trees to get prediction results
*
* \param isResValidPtr Result pointer is valid yes/no (write result to the pointer if yes, skip if no)
* \param reuseBuffer Re-use buffer yes/no (will fill buffer with zero if yes, shift buff pointer if no)
* \param reuseBuffer Reuse buffer yes/no (will fill buffer with zero if yes, shift buff pointer if no)
* \param vectorBlockSizeFactor Vector instruction block size - recursively decremented until it becomes equal to dim.vectorBlockSizeFactor or equal to DimType::minVectorBlockSizeFactor
* \param nTrees Number of trees contributing to prediction
* \param nClasses Number of data classes
Expand All @@ -361,7 +361,7 @@ protected:
* \brief Traverse a number of trees to get prediction results
*
* \param isResValidPtr Result pointer is valid yes/no (write result to the pointer if yes, skip if no)
* \param reuseBuffer Re-use buffer yes/no (will fill buffer with zero if yes, shift buff pointer if no)
* \param reuseBuffer Reuse buffer yes/no (will fill buffer with zero if yes, shift buff pointer if no)
* \param nTrees Number of trees contributing to prediction
* \param nClasses Number of data classes
* \param nRows Number of rows in observation data for which prediction is run
Expand All @@ -378,7 +378,7 @@ protected:
/**
* \brief Traverse a number of trees to get prediction results
*
* \param reuseBuffer Re-use buffer yes/no (will fill buffer with zero if yes, shift buff pointer if no)
* \param reuseBuffer Reuse buffer yes/no (will fill buffer with zero if yes, shift buff pointer if no)
* \param nTrees Number of trees contributing to prediction
* \param nClasses Number of data classes
* \param nRows Number of rows in observation data for which prediction is run
Expand Down
8 changes: 4 additions & 4 deletions cpp/daal/src/algorithms/dtrees/gbt/gbt_train_aux.i
Original file line number Diff line number Diff line change
Expand Up @@ -549,7 +549,7 @@ public:
}
}

~BuffersStorage() { destoy(); }
~BuffersStorage() { destroy(); }

T * getBlockFromStorage()
{
Expand Down Expand Up @@ -595,7 +595,7 @@ public:
return alloc[alloc.size() - 1];
}

void destoy()
void destroy()
{
for (size_t i = 0; i < alloc.size(); ++i)
{
Expand Down Expand Up @@ -626,7 +626,7 @@ class GHSumsStorage
public:
GHSumsStorage(size_t nGH, size_t nInitElems) : _nGH(nGH), _capacity(nInitElems), _curIdx(0) { allocate(_capacity); }

~GHSumsStorage() { destoy(); }
~GHSumsStorage() { destroy(); }

T * getBlockFromStorage()
{
Expand Down Expand Up @@ -663,7 +663,7 @@ protected:
}
}

void destoy()
void destroy()
{
for (size_t i = 0; i < alloc.size(); ++i)
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -220,7 +220,7 @@ protected:

ImpurityType imp;
getInitialImpurity(imp);
typename NodeType::Base * res = buildLeaf(0, nSamples, 0, imp); // use node creater
typename NodeType::Base * res = buildLeaf(0, nSamples, 0, imp); // use node creator
if (res) return res;

SplitJobType job(0, nSamples, 0, imp, res);
Expand Down
12 changes: 6 additions & 6 deletions cpp/daal/src/algorithms/dtrees/gbt/gbt_train_updater.i
Original file line number Diff line number Diff line change
Expand Up @@ -116,8 +116,8 @@ public:

if (_iFeature >= 0) // best split has been found
{
PartitionTaskType partion(_iFeature, idxFeatureValueBestSplit, _data, _node, _bestSplit);
partion.execute();
PartitionTaskType partition(_iFeature, idxFeatureValueBestSplit, _data, _node, _bestSplit);
partition.execute();
}

return nullptr;
Expand Down Expand Up @@ -276,13 +276,13 @@ public:
LoopHelper<cpu>::run(true, 2, [&](size_t i) {
if (_iFeature1 >= 0 && i == 0)
{
PartitionTaskType partion(_iFeature1, idxFeatureValueBestSplit1, _data, _node1, _bestSplit1);
partion.execute();
PartitionTaskType partition(_iFeature1, idxFeatureValueBestSplit1, _data, _node1, _bestSplit1);
partition.execute();
}
if (_iFeature2 >= 0 && i == 1)
{
PartitionTaskType partion(_iFeature2, idxFeatureValueBestSplit2, _data, _node2, _bestSplit2);
partion.execute();
PartitionTaskType partition(_iFeature2, idxFeatureValueBestSplit2, _data, _node2, _bestSplit2);
partition.execute();
}
});

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ BatchContainer<algorithmFPType, method, cpu>::~BatchContainer()
* \brief Choose appropriate kernel to calculate gradient boosted trees model.
*
* \param env[in] Environment
* \param a[in] Array of numeric tables contating input data
* \param a[in] Array of numeric tables containing input data
* \param r[out] Resulting model
* \param par[in] Decision forest algorithm parameters
*/
Expand Down
2 changes: 1 addition & 1 deletion cpp/daal/src/algorithms/dtrees/gbt/treeshap.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -182,7 +182,7 @@ void unwindPath(PathElement * uniquePath, float * partialWeights, uint32_t uniqu
}
}

// determine what the total permuation weight would be if
// determine what the total permutation weight would be if
// we unwound a previous extension in the decision path (for feature satisfying the threshold)
float unwoundPathSum(const PathElement * uniquePath, const float * partialWeights, uint32_t uniqueDepth, uint32_t uniqueDepthPartialWeights,
uint32_t pathIndex)
Expand Down
10 changes: 5 additions & 5 deletions cpp/daal/src/algorithms/em/em_gmm_dense_default_batch_task.h
Original file line number Diff line number Diff line change
Expand Up @@ -146,15 +146,15 @@ class GmmModelFull : public GmmModel<algorithmFPType, cpu>

void finalize(size_t k, algorithmFPType denominator)
{
algorithmFPType multplier = 1.0 / denominator;
algorithmFPType multiplier = 1.0 / denominator;
for (size_t i = 0; i < nFeatures; i++)
{
for (size_t j = 0; j < i; j++)
{
sigma[k][i * nFeatures + j] *= multplier;
sigma[k][i * nFeatures + j] *= multiplier;
sigma[k][j * nFeatures + i] = sigma[k][i * nFeatures + j];
}
sigma[k][i * nFeatures + i] *= multplier;
sigma[k][i * nFeatures + i] *= multiplier;
}
}

Expand Down Expand Up @@ -270,10 +270,10 @@ class GmmModelDiag : public GmmModel<algorithmFPType, cpu>

void finalize(size_t k, algorithmFPType denominator)
{
algorithmFPType multplier = 1.0 / denominator;
algorithmFPType multiplier = 1.0 / denominator;
for (size_t i = 0; i < nFeatures; i++)
{
sigma[k][i] *= multplier;
sigma[k][i] *= multiplier;
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@

/*
//++
// Implementation of impicit ALS prediction algorithm
// Implementation of implicit ALS prediction algorithm
//--
*/

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@

/*
//++
// Implementation of impicit ALS training algorithm for distributed processing mode
// Implementation of implicit ALS training algorithm for distributed processing mode
//--
*/

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@

/*
//++
// Implementation of impicit ALS training algorithm for batch processing mode
// Implementation of implicit ALS training algorithm for batch processing mode
//--
*/

Expand Down
15 changes: 8 additions & 7 deletions cpp/daal/src/algorithms/kmeans/kmeans_lloyd_batch_impl.i
Original file line number Diff line number Diff line change
Expand Up @@ -80,17 +80,17 @@ Status KMeansBatchKernel<method, algorithmFPType, cpu>::compute(const NumericTab
clusters = tClusters.get();
}

NumericTable * assignmetsNT = nullptr;
NumericTable * assignmentsNT = nullptr;
NumericTablePtr assignmentsPtr;
if (r[1])
{
assignmetsNT = const_cast<NumericTable *>(r[1]);
assignmentsNT = const_cast<NumericTable *>(r[1]);
}
else if (par->resultsToEvaluate & computeExactObjectiveFunction)
{
assignmentsPtr = HomogenNumericTableCPU<int, cpu>::create(1, n, &s);
DAAL_CHECK_MALLOC(s);
assignmetsNT = assignmentsPtr.get();
assignmentsNT = assignmentsPtr.get();
}

DAAL_OVERFLOW_CHECK_BY_MULTIPLICATION(size_t, p, sizeof(double));
Expand Down Expand Up @@ -120,8 +120,9 @@ Status KMeansBatchKernel<method, algorithmFPType, cpu>::compute(const NumericTab
DAAL_CHECK(task.get(), services::ErrorMemoryAllocationFailed);
{
DAAL_PROFILER_TASK(addNTToTaskThreaded);
/* For the last iteration we do not need to recount of assignmets */
s = task->template addNTToTaskThreaded<method>(ntData, nullptr, blockSize, assignmetsNT && (kIter == nIter - 1) ? assignmetsNT : nullptr);
/* For the last iteration we do not need to recount the assignments */
s = task->template addNTToTaskThreaded<method>(ntData, nullptr, blockSize,
assignmentsNT && (kIter == nIter - 1) ? assignmentsNT : nullptr);
}

if (!s)
Expand Down Expand Up @@ -213,15 +214,15 @@ Status KMeansBatchKernel<method, algorithmFPType, cpu>::compute(const NumericTab

if (par->resultsToEvaluate & computeAssignments || par->assignFlag || par->resultsToEvaluate & computeExactObjectiveFunction)
{
PostProcessing<method, algorithmFPType, cpu>::computeAssignments(p, nClusters, clusters, ntData, nullptr, assignmetsNT, blockSize);
PostProcessing<method, algorithmFPType, cpu>::computeAssignments(p, nClusters, clusters, ntData, nullptr, assignmentsNT, blockSize);
}

if (par->resultsToEvaluate & computeExactObjectiveFunction)
{
WriteOnlyRows<algorithmFPType, cpu> mtTarget(*const_cast<NumericTable *>(r[2]), 0, 1);
DAAL_CHECK_BLOCK_STATUS(mtTarget);
algorithmFPType exactTargetFunc = algorithmFPType(0);
PostProcessing<method, algorithmFPType, cpu>::computeExactObjectiveFunction(p, nClusters, clusters, ntData, nullptr, assignmetsNT,
PostProcessing<method, algorithmFPType, cpu>::computeExactObjectiveFunction(p, nClusters, clusters, ntData, nullptr, assignmentsNT,
exactTargetFunc, blockSize);

*mtTarget.get() = exactTargetFunc;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -660,7 +660,7 @@ Status TaskPlusPlusBatchBase<algorithmFPType, cpu, DataHelper>::updateMinDist(co
template <typename algorithmFPType, CpuType cpu, typename DataHelper>
void TaskPlusPlusBatch<algorithmFPType, cpu, DataHelper>::calcCenter(size_t iCluster)
{
// nTrials new candidats
// nTrials new candidates
for (size_t iTrials = 0u; iTrials < this->_nTrials; iTrials++)
{
const algorithmFPType probability = this->_aProbability[iTrials * this->_nClusters + iCluster];
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ class ThreadingTask

/**
* Creates thread local storage of the requested size
* \param[in] nBetasIntercept Number of colums in the partial result
* \param[in] nBetasIntercept Number of columns in the partial result
* \param[in] nResponses Number of responses
* \return Pointer on the thread local storage object if the object was created successfully, NULL otherwise
*/
Expand All @@ -165,7 +165,7 @@ class ThreadingTask
protected:
/**
* Construct thread local storage of the requested size
* \param[in] nBetasIntercept Number of colums in the partial result
* \param[in] nBetasIntercept Number of columns in the partial result
* \param[in] nResponses Number of responses
* \param[out] st Status of the object construction
*/
Expand Down Expand Up @@ -200,7 +200,7 @@ class UpdateKernel
* \param[in] initializeResult Flag. True if results initialization is required, false otherwise
* \param[in] interceptFlag Flag.
* - True if it is required to compute an intercept term and P' = P + 1
* - False otherwis, P' = P
* - False otherwise, P' = P
* \return Status of the computations
*/
static Status compute(const NumericTable & x, const NumericTable & y, NumericTable & xtx, NumericTable & xty, bool initializeResult,
Expand All @@ -223,7 +223,7 @@ class MergeKernel

/**
* Merges an array of partial results into one partial result
* \param[in] n Number of partial resuts in the input array
* \param[in] n Number of partial results in the input array
* \param[in] partialxtx Array of n numeric tables of size P x P
* \param[in] partialxty Array of n numeric tables of size Ny x P
* \param[out] xtx Numeric table of size P x P
Expand Down
Loading
Loading