Skip to content
Open
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 28 additions & 0 deletions src/frontends/pytorch/src/op/ravel.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
// Copyright (C) 2018-2026 Intel Corporation
Copy link

Copilot AI Mar 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[LOW] New files use a 2018-2025 copyright header, while most nearby PyTorch FE sources use 2018-2026 (e.g., op_table.cpp, range_length.cpp). Please align the year range in this new source file with the repository’s current convention.

Suggested change
// Copyright (C) 2018-2025 Intel Corporation
// Copyright (C) 2018-2026 Intel Corporation

Copilot uses AI. Check for mistakes.
// SPDX-License-Identifier: Apache-2.0
//

#include "openvino/frontend/pytorch/node_context.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/reshape.hpp"
#include "utils.hpp"

namespace ov {
namespace frontend {
namespace pytorch {
namespace op {

using namespace ov::op;

// Converts aten::ravel(Tensor self) -> Tensor.
// torch.ravel returns a contiguous 1-D flattening of the input, which maps
// directly onto a Reshape to the target shape {-1}.
OutputVector translate_ravel(const NodeContext& context) {
    // aten::ravel(Tensor self) -> Tensor
    num_inputs_check(context, 1, 1);
    auto input = context.get_input(0);
    // Single -1 dimension: Reshape infers the full flattened length.
    auto neg_1 = context.mark_node(v0::Constant::create(element::i64, Shape{1}, {-1}));
    // special_zero=false: a 0 in the target shape (not present here) would be
    // taken literally rather than copied from the input shape.
    return {context.mark_node(std::make_shared<v1::Reshape>(input, neg_1, false))};
}

} // namespace op
} // namespace pytorch
} // namespace frontend
} // namespace ov
2 changes: 2 additions & 0 deletions src/frontends/pytorch/src/op_table.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -222,6 +222,7 @@ OP_CONVERTER(translate_randn);
OP_CONVERTER(translate_randint);
OP_CONVERTER(translate_rand_like);
OP_CONVERTER(translate_randn_like);
OP_CONVERTER(translate_ravel);
OP_CONVERTER(translate_reciprocal);
OP_CONVERTER(translate_reflection_pad_nd);
OP_CONVERTER(translate_relu6);
Expand Down Expand Up @@ -676,6 +677,7 @@ const std::unordered_map<std::string, CreatorFunction> get_supported_ops_ts() {
{"aten::randint", op::translate_randint},
{"aten::randn", op::translate_randn},
{"aten::randn_like", op::translate_randn_like},
{"aten::ravel", op::translate_ravel},
{"aten::real", common_translators::translate_real},
{"aten::reciprocal", op::optional_out<op::translate_reciprocal, 1>},
{"aten::reciprocal_", op::inplace_op<op::translate_reciprocal>},
Expand Down
41 changes: 41 additions & 0 deletions tests/layer_tests/pytorch_tests/test_ravel.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# Copyright (C) 2018-2026 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import numpy as np
import pytest

from pytorch_layer_test_class import PytorchLayerTest


class TestRavel(PytorchLayerTest):
def _prepare_input(self, shape, dtype="float32"):
return (np.random.randn(*shape).astype(dtype),)
Copy link

Copilot AI Mar 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[MEDIUM] _prepare_input uses np.random.randn, which bypasses the PytorchLayerTest RNG (self.random) used across the suite for deterministic, reproducible inputs (e.g., test_trilu.py, test_reshape.py). Consider switching to self.random.randn(..., dtype=dtype) (or equivalent) so the test is stable and consistent with other PyTorch layer tests.

Suggested change
return (np.random.randn(*shape).astype(dtype),)
return (self.random.randn(*shape).astype(dtype),)

Copilot uses AI. Check for mistakes.

def create_model(self):
import torch

class aten_ravel(torch.nn.Module):
def forward(self, x):
return torch.ravel(x)

return aten_ravel(), "aten::ravel"

    # Exercise aten::ravel conversion across ranks 1-4, including degenerate
    # shapes ([1], [1, 1, 1]), and across float/int dtypes.
    @pytest.mark.parametrize("shape", [
        [2, 3],
        [2, 3, 4],
        [2, 3, 4, 5],
        [1],
        [5],
        [1, 1, 1],
    ])
    @pytest.mark.parametrize("dtype", ["float32", "float64", "int32", "int64", "int8"])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_ravel(self, shape, dtype, ie_device, precision, ir_version):
        # Delegates to the framework harness: builds the model from
        # create_model(), feeds inputs from _prepare_input() via
        # kwargs_to_prepare_input, and compares PyTorch vs OpenVINO outputs.
        self._test(
            *self.create_model(),
            ie_device,
            precision,
            ir_version,
            kwargs_to_prepare_input={"shape": shape, "dtype": dtype},
        )
Loading