Skip to content
Merged
Show file tree
Hide file tree
Changes from 10 commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
aaa5564
Fix: Add Dynamic logic of Col2Im (#33472)
daehyun99 Jan 5, 2026
40e2a53
Merge branch 'master' into Bug-fix/33472-1
daehyun99 Jan 5, 2026
762c2fc
Feat: Add separation logic of Batched input and Non-batched input bas…
daehyun99 Jan 5, 2026
9a50173
Feat: Add separation logic of Batched input and Non-batched input bas…
daehyun99 Jan 5, 2026
493f498
Feat: Add separation logic of Batched input and Non-batched input bas…
daehyun99 Jan 5, 2026
16108a4
Test: Add Test case for testing `col2im`'s batched and non-batched in…
daehyun99 Jan 6, 2026
f5d73a4
human error
daehyun99 Jan 6, 2026
1c4e606
Test: Add Test case for testing `col2im`'s batched and non-batched in…
daehyun99 Jan 6, 2026
7e200e3
Merge branch 'master' into Bug-fix/33472-1
daehyun99 Jan 6, 2026
3498146
Merge branch 'master' into Bug-fix/33472-1
daehyun99 Jan 7, 2026
c2692f3
Test: Fix Test case to cover the dynamic logic
daehyun99 Jan 7, 2026
abe36b9
Apply clang-format
daehyun99 Jan 7, 2026
b0e3c25
Test: Fix Test case to cover the dynamic logic && Apply clang-format …
daehyun99 Jan 7, 2026
d39a3b6
Merge branch 'master' into Bug-fix/33472-1
daehyun99 Jan 7, 2026
f27346b
Fix: Modify input's `L` shape
daehyun99 Jan 7, 2026
7d36fcd
Fix: Modify input's `L` shape (#33472)
daehyun99 Jan 7, 2026
8d3f518
Merge branch 'master' into Bug-fix/33472-1
daehyun99 Jan 7, 2026
1bc8c84
Fix: Add `cstddef` && use `auto` when initializing with a cast to av…
daehyun99 Jan 8, 2026
31b1cad
Fix: Add `cstddef` && use `auto` when initializing with a cast to av…
daehyun99 Jan 8, 2026
6019bf8
Merge branch 'master' into Bug-fix/33472-1
daehyun99 Jan 8, 2026
7007315
Merge branch 'master' into Bug-fix/33472-1
daehyun99 Jan 9, 2026
b91153e
Merge branch 'master' into Bug-fix/33472-1
daehyun99 Jan 9, 2026
2a58a99
Merge branch 'master' into Bug-fix/33472-1
daehyun99 Jan 11, 2026
8a99b46
Merge branch 'master' into Bug-fix/33472-1
daehyun99 Jan 12, 2026
c7fe865
Merge branch 'master' into Bug-fix/33472-1
maxnick Jan 14, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 34 additions & 1 deletion src/plugins/intel_cpu/src/nodes/col2im.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,40 @@ bool Col2Im::needPrepareParams() const {
}

void Col2Im::executeDynamicImpl(const dnnl::stream& strm) {
    // Dynamic-shape path: infer the output shape from the runtime input
    // tensors, resize the output memory to match, then run the static kernel.
    // NOTE(review): must NOT call execute() before redefineOutputMemory() —
    // the output memory still has the stale shape at that point.

    // 1. Data tensor dims: [C * prod(kernel), L] for non-batched input,
    //    [N, C * prod(kernel), L] for batched input.
    const auto& data_shape = getSrcMemoryAtPort(0)->getStaticDims();
    const size_t data_rank = data_shape.size();

    // 2. output_size input (port 1): two int32 values {H, W}.
    const auto* output_size_ptr = getSrcMemoryAtPort(1)->getDataAs<const int32_t>();

    // 3. kernel_size input (port 2): two int32 values {kernel_h, kernel_w}.
    const auto* kernel_size_ptr = getSrcMemoryAtPort(2)->getDataAs<const int32_t>();

    // 4. Derive the output shape. Widen each factor to size_t BEFORE
    //    multiplying so the product cannot overflow int32_t.
    const auto kernel_prod = static_cast<size_t>(kernel_size_ptr[0]) * static_cast<size_t>(kernel_size_ptr[1]);
    const auto H = static_cast<size_t>(output_size_ptr[0]);
    const auto W = static_cast<size_t>(output_size_ptr[1]);

    ov::Shape output_shape;
    if (data_rank == 2) {  // Non-batched: [C * prod(kernel), L] -> [C, H, W]
        output_shape = {data_shape[0] / kernel_prod, H, W};
    } else if (data_rank == 3) {  // Batched: [N, C * prod(kernel), L] -> [N, C, H, W]
        output_shape = {data_shape[0], data_shape[1] / kernel_prod, H, W};
    } else {
        OPENVINO_THROW("Col2Im node supports only 2D(Non-Batched) or 3D(Batched) input tensors");
    }

    // Resize output memory once, then run the shared static implementation.
    redefineOutputMemory({output_shape});
    execute(strm);
}

template <class T, class T_idx>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -206,6 +206,78 @@ const std::vector<Col2ImSpecificParams> col2ImParamsVector = {
ov::Strides{2, 2},
ov::Shape{3, 3},
ov::Shape{3, 3}
},
Col2ImSpecificParams { // Batched default
InputShape{{}, {{1, 4, 4}}},
std::vector<int64_t>{3, 3},
std::vector<int64_t>{2, 2},
ov::Strides{1, 1},
ov::Strides{1, 1},
ov::Shape{0, 0},
ov::Shape{0, 0}
},
Col2ImSpecificParams { // Batched dilations
InputShape{{}, {{1, 4, 9}}},
std::vector<int64_t>{5, 5},
std::vector<int64_t>{2, 2},
ov::Strides{1, 1},
ov::Strides{2, 2},
ov::Shape{0, 0},
ov::Shape{0, 0}
},
Col2ImSpecificParams { // Batched pads
InputShape{{}, {{1, 4, 9}}},
std::vector<int64_t>{2, 2},
std::vector<int64_t>{2, 2},
ov::Strides{1, 1},
ov::Strides{1, 1},
ov::Shape{1, 1},
ov::Shape{1, 1}
},
Col2ImSpecificParams { // Batched strides
InputShape{{}, {{1, 4, 4}}},
std::vector<int64_t>{4, 4},
std::vector<int64_t>{2, 2},
ov::Strides{2, 2},
ov::Strides{1, 1},
ov::Shape{0, 0},
ov::Shape{0, 0}
},
Col2ImSpecificParams { // Non-batched default
InputShape{{}, {{4, 4}}},
std::vector<int64_t>{3, 3},
std::vector<int64_t>{2, 2},
ov::Strides{1, 1},
ov::Strides{1, 1},
ov::Shape{0, 0},
ov::Shape{0, 0}
},
Col2ImSpecificParams { // Non-batched dilations
InputShape{{}, {{4, 9}}},
std::vector<int64_t>{5, 5},
std::vector<int64_t>{2, 2},
ov::Strides{1, 1},
ov::Strides{2, 2},
ov::Shape{0, 0},
ov::Shape{0, 0}
},
Col2ImSpecificParams { // Non-batched pads
InputShape{{}, {{4, 9}}},
std::vector<int64_t>{2, 2},
std::vector<int64_t>{2, 2},
ov::Strides{1, 1},
ov::Strides{1, 1},
ov::Shape{1, 1},
ov::Shape{1, 1}
},
Col2ImSpecificParams { // Non-batched strides
InputShape{{}, {{4, 4}}},
std::vector<int64_t>{4, 4},
std::vector<int64_t>{2, 2},
ov::Strides{2, 2},
ov::Strides{1, 1},
ov::Shape{0, 0},
ov::Shape{0, 0}
}
};

Expand Down
Loading