Commit bc60de8

renaming it back to Tensor

Parent: 9399e23

File tree: 9 files changed (+77, -79 lines)

deep_core/DEVELOPING.md (2 additions & 2 deletions)
```diff
@@ -2,7 +2,7 @@
 
 ## Design Principles
 
-- TensorPtr is a smart pointer, not a traditional tensor class, it points to data in memory allocated by the backend memory allocator
+- Tensor is a smart pointer, not a traditional tensor class, it points to data in memory allocated by the backend memory allocator
 - DeepNodeBase handles plugin loading automatically via parameters
 - All backends are plugins - no hard framework dependencies
 - Memory allocators enable zero-copy GPU integration
@@ -37,7 +37,7 @@ Key lifecycle callbacks to override:
 
 **Your node just needs to:**
 - Set up ROS interfaces (topics, services, actions)
-- Process incoming data using `run_inference(TensorPtr)`
+- Process incoming data using `run_inference(Tensor)`
 - Handle your specific business logic
 
 Don't forget: `RCLCPP_COMPONENTS_REGISTER_NODE(your_namespace::YourNode)`
```
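For orientation, a minimal sketch of a node written against the renamed API. Only `DeepNodeBase`, `run_inference(Tensor)`, and the registration macro appear in this commit; the class name, constructor inheritance, and callback wiring are assumptions.

```cpp
// Hypothetical node using the renamed API; only DeepNodeBase,
// run_inference(Tensor), and the registration macro come from this commit.
#include <utility>

#include "deep_core/deep_node_base.hpp"
#include "deep_core/types/tensor.hpp"
#include "rclcpp_components/register_node_macro.hpp"

namespace your_namespace
{

class YourNode : public DeepNodeBase
{
public:
  using DeepNodeBase::DeepNodeBase;  // inherit base constructors (assumed viable)

protected:
  // Invoked from a subscription callback once the input is packed into a Tensor.
  Tensor process(Tensor input)
  {
    // Renamed entry point: this was run_inference(TensorPtr) before the commit.
    return run_inference(std::move(input));
  }
};

}  // namespace your_namespace

RCLCPP_COMPONENTS_REGISTER_NODE(your_namespace::YourNode)
```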

deep_core/README.md (2 additions & 2 deletions)
```diff
@@ -5,13 +5,13 @@ Core abstractions for ML inference in ROS 2 lifecycle nodes.
 ## Overview
 
 Provides:
-- `TensorPtr`: Smart pointer for tensor data with custom memory allocators
+- `Tensor`: Smart pointer for tensor data with custom memory allocators
 - `DeepNodeBase`: Lifecycle node base class with plugin loading and optional bond support
 - Plugin interfaces for backend inference engines and memory management
 
 ## Key Components
 
-### TensorPtr
+### Tensor
 Multi-dimensional tensor smart pointer supporting:
 - Custom memory allocators (CPU/GPU/aligned memory)
 - View semantics (wrap existing data without copying)
```
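A short sketch of the two memory patterns listed above, using only the constructors shown in `tensor.hpp` later in this commit (the `demo` function and its allocator parameter are scaffolding, not part of the library):

```cpp
#include <memory>
#include <vector>

#include "deep_core/types/tensor.hpp"

void demo(std::shared_ptr<BackendMemoryAllocator> alloc)
{
  // Owned: memory comes from the backend allocator and is freed by ~Tensor().
  Tensor owned({1, 3, 224, 224}, DataType::FLOAT32, alloc);

  // View: wraps existing data without copying; the Tensor never frees it.
  float raw[12] = {};
  Tensor view(raw, std::vector<size_t>{3, 4}, DataType::FLOAT32);
}
```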

deep_core/include/deep_core/deep_node_base.hpp (1 addition & 1 deletion)
```diff
@@ -133,7 +133,7 @@ class DeepNodeBase : public rclcpp_lifecycle::LifecycleNode
    * @param inputs Input tensor for inference
    * @return Output tensor from inference
    */
-  TensorPtr run_inference(TensorPtr inputs);
+  Tensor run_inference(Tensor inputs);
 
   /**
    * @brief Check if a backend plugin is loaded
```
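Since the copy constructor deep-copies (per `tensor.hpp` below), passing an lvalue into the by-value `run_inference(Tensor)` copies the whole buffer. The updated test suite sidesteps this with a move:

```cpp
// Moving into the by-value parameter avoids a deep copy of the input buffer.
auto output = executor->run_inference(std::move(input));
```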

deep_core/include/deep_core/plugin_interfaces/backend_inference_executor.hpp (1 addition & 1 deletion)
```diff
@@ -47,7 +47,7 @@ class BackendInferenceExecutor
    * @param input Input tensor
    * @return Output tensor
    */
-  virtual TensorPtr run_inference(TensorPtr input) = 0;
+  virtual Tensor run_inference(Tensor input) = 0;
 
   /**
    * @brief Unload the currently loaded model
```
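For contrast, a hedged sketch of a concrete executor against the renamed interface, modeled on the `MockBackendExecutor` in `test_deep_core.cpp`. The `load_model` signature and the `EchoBackendExecutor` name are assumptions; the commit does not show the full declaration.

```cpp
#include <stdexcept>
#include <string>

#include "deep_core/plugin_interfaces/backend_inference_executor.hpp"

class EchoBackendExecutor : public BackendInferenceExecutor
{
public:
  bool load_model(const std::string & model_path) override  // signature assumed
  {
    model_loaded_ = true;  // a real backend would parse and load model_path
    return true;
  }

  Tensor run_inference(Tensor input) override
  {
    if (!model_loaded_) {
      throw std::runtime_error("No model loaded");
    }
    return input;  // placeholder; a real backend would produce a new output tensor
  }

private:
  bool model_loaded_{false};
};
```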

deep_core/include/deep_core/types/tensor.hpp (12 additions & 12 deletions)
```diff
@@ -51,65 +51,65 @@ size_t get_dtype_size(DataType dtype);
 /**
  * @brief A smart pointer for multi-dimensional tensor data with automatic memory management
  *
- * The TensorPtr class provides a lightweight smart pointer for multi-dimensional arrays
+ * The Tensor class provides a lightweight smart pointer for multi-dimensional arrays
  * with support for different data types. It handles memory allocation automatically
  * and supports both owned and borrowed memory patterns.
  */
-class TensorPtr
+class Tensor
 {
 public:
   /**
    * @brief Default constructor - creates an empty tensor
    */
-  TensorPtr();
+  Tensor();
 
   /**
    * @brief Create a new tensor with specified shape and data type
    * @param shape Dimensions of the tensor
    * @param dtype Data type of tensor elements
    */
-  TensorPtr(const std::vector<size_t> & shape, DataType dtype);
+  Tensor(const std::vector<size_t> & shape, DataType dtype);
 
   /**
    * @brief Create a new tensor with specified shape, data type, and allocator
    * @param shape Dimensions of the tensor
    * @param dtype Data type of tensor elements
    * @param allocator Memory allocator to use (uses CPU allocator if nullptr)
    */
-  TensorPtr(const std::vector<size_t> & shape, DataType dtype, std::shared_ptr<BackendMemoryAllocator> allocator);
+  Tensor(const std::vector<size_t> & shape, DataType dtype, std::shared_ptr<BackendMemoryAllocator> allocator);
 
   /**
    * @brief Wrap existing data in a tensor (non-owning)
    * @param data Pointer to existing data
    * @param shape Dimensions of the tensor
    * @param dtype Data type of tensor elements
    */
-  TensorPtr(void * data, const std::vector<size_t> & shape, DataType dtype);
+  Tensor(void * data, const std::vector<size_t> & shape, DataType dtype);
 
   /**
    * @brief Destructor - frees owned memory
    */
-  ~TensorPtr();
+  ~Tensor();
 
   /**
    * @brief Copy constructor - creates a deep copy
    */
-  TensorPtr(const TensorPtr & other);
+  Tensor(const Tensor & other);
 
   /**
    * @brief Copy assignment - creates a deep copy
    */
-  TensorPtr & operator=(const TensorPtr & other);
+  Tensor & operator=(const Tensor & other);
 
   /**
    * @brief Move constructor
    */
-  TensorPtr(TensorPtr && other) noexcept;
+  Tensor(Tensor && other) noexcept;
 
   /**
    * @brief Move assignment
    */
-  TensorPtr & operator=(TensorPtr && other) noexcept;
+  Tensor & operator=(Tensor && other) noexcept;
 
   /**
    * @brief Get tensor dimensions
@@ -201,7 +201,7 @@ class TensorPtr
    * @param new_shape New dimensions (total size must match)
    * @return New tensor view with different shape
    */
-  TensorPtr reshape(const std::vector<size_t> & new_shape) const;
+  Tensor reshape(const std::vector<size_t> & new_shape) const;
 
   /**
    * @brief Get total number of elements
```
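The renamed class keeps value semantics: copies are deep, moves transfer the buffer. A minimal usage sketch against the declarations above (the surrounding function and allocator parameter are scaffolding):

```cpp
#include <memory>
#include <utility>
#include <vector>

#include "deep_core/types/tensor.hpp"

void value_semantics(std::shared_ptr<BackendMemoryAllocator> alloc)
{
  Tensor a({2, 3, 4}, DataType::FLOAT32, alloc);  // owning, 24 floats

  Tensor b = a;             // copy constructor: deep copy of the buffer
  Tensor c = std::move(a);  // move constructor: steals the buffer from `a`

  b = c;                    // copy assignment: deep copy again
}
```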

deep_core/src/deep_node_base.cpp (1 addition & 1 deletion)
```diff
@@ -176,7 +176,7 @@ void DeepNodeBase::unload_model()
   }
 }
 
-TensorPtr DeepNodeBase::run_inference(TensorPtr inputs)
+Tensor DeepNodeBase::run_inference(Tensor inputs)
 {
   if (!plugin_) {
     throw std::runtime_error("No plugin loaded");
```

deep_core/src/tensor.cpp (17 additions & 19 deletions)
```diff
@@ -57,26 +57,24 @@ size_t get_dtype_size(DataType dtype)
   }
 }
 
-TensorPtr::TensorPtr()
+Tensor::Tensor()
 : dtype_(DataType::FLOAT32)
 , byte_size_(0)
 , data_(nullptr)
 , is_view_(false)
 , allocator_(nullptr)
 {}
 
-TensorPtr::TensorPtr(const std::vector<size_t> & shape, DataType dtype)
+Tensor::Tensor(const std::vector<size_t> & shape, DataType dtype)
 : shape_(shape)
 , dtype_(dtype)
 , is_view_(false)
 , allocator_(nullptr)
 {
-  throw std::runtime_error(
-    "TensorPtr construction requires an allocator. Use TensorPtr(shape, dtype, allocator) instead.");
+  throw std::runtime_error("Tensor construction requires an allocator. Use Tensor(shape, dtype, allocator) instead.");
 }
 
-TensorPtr::TensorPtr(
-  const std::vector<size_t> & shape, DataType dtype, std::shared_ptr<BackendMemoryAllocator> allocator)
+Tensor::Tensor(const std::vector<size_t> & shape, DataType dtype, std::shared_ptr<BackendMemoryAllocator> allocator)
 : shape_(shape)
 , dtype_(dtype)
 , is_view_(false)
@@ -94,7 +92,7 @@ TensorPtr::TensorPtr(
   allocate_memory();
 }
 
-TensorPtr::TensorPtr(void * data, const std::vector<size_t> & shape, DataType dtype)
+Tensor::Tensor(void * data, const std::vector<size_t> & shape, DataType dtype)
 : shape_(shape)
 , dtype_(dtype)
 , data_(data)
@@ -111,12 +109,12 @@ TensorPtr::TensorPtr(void * data, const std::vector<size_t> & shape, DataType dt
   byte_size_ = total_elements * get_dtype_size(dtype_);
 }
 
-TensorPtr::~TensorPtr()
+Tensor::~Tensor()
 {
   deallocate_memory();
 }
 
-TensorPtr::TensorPtr(const TensorPtr & other)
+Tensor::Tensor(const Tensor & other)
 : shape_(other.shape_)
 , strides_(other.strides_)
 , dtype_(other.dtype_)
@@ -136,7 +134,7 @@ TensorPtr::TensorPtr(const TensorPtr & other)
   }
 }
 
-TensorPtr & TensorPtr::operator=(const TensorPtr & other)
+Tensor & Tensor::operator=(const Tensor & other)
 {
   if (this != &other) {
     deallocate_memory();
@@ -162,7 +160,7 @@ TensorPtr & TensorPtr::operator=(const TensorPtr & other)
   return *this;
 }
 
-TensorPtr::TensorPtr(TensorPtr && other) noexcept
+Tensor::Tensor(Tensor && other) noexcept
 : shape_(std::move(other.shape_))
 , strides_(std::move(other.strides_))
 , dtype_(other.dtype_)
@@ -177,7 +175,7 @@ TensorPtr::TensorPtr(TensorPtr && other) noexcept
   other.allocator_ = nullptr;
 }
 
-TensorPtr & TensorPtr::operator=(TensorPtr && other) noexcept
+Tensor & Tensor::operator=(Tensor && other) noexcept
 {
   if (this != &other) {
     deallocate_memory();
@@ -198,7 +196,7 @@ TensorPtr & TensorPtr::operator=(TensorPtr && other) noexcept
   return *this;
 }
 
-void TensorPtr::calculate_strides()
+void Tensor::calculate_strides()
 {
   strides_.resize(shape_.size());
   if (shape_.empty()) return;
@@ -209,7 +207,7 @@ void TensorPtr::calculate_strides()
   }
 }
 
-void TensorPtr::allocate_memory()
+void Tensor::allocate_memory()
 {
   if (byte_size_ > 0 && !is_view_) {
     if (allocator_) {
@@ -223,7 +221,7 @@ void TensorPtr::allocate_memory()
   }
 }
 
-void TensorPtr::deallocate_memory()
+void Tensor::deallocate_memory()
 {
   if (!is_view_ && data_) {
     if (allocator_) {
@@ -235,7 +233,7 @@ void TensorPtr::deallocate_memory()
   }
 }
 
-TensorPtr TensorPtr::reshape(const std::vector<size_t> & new_shape) const
+Tensor Tensor::reshape(const std::vector<size_t> & new_shape) const
 {
   size_t new_total = std::accumulate(new_shape.begin(), new_shape.end(), 1UL, std::multiplies<size_t>());
   size_t current_total = std::accumulate(shape_.begin(), shape_.end(), 1UL, std::multiplies<size_t>());
@@ -248,15 +246,15 @@ TensorPtr TensorPtr::reshape(const std::vector<size_t> & new_shape) const
     throw std::runtime_error("Cannot reshape non-contiguous tensor");
   }
 
-  return TensorPtr(data_, new_shape, dtype_);
+  return Tensor(data_, new_shape, dtype_);
 }
 
-size_t TensorPtr::size() const
+size_t Tensor::size() const
 {
   return std::accumulate(shape_.begin(), shape_.end(), 1UL, std::multiplies<size_t>());
 }
 
-bool TensorPtr::is_contiguous() const
+bool Tensor::is_contiguous() const
 {
   if (shape_.empty()) return true;
```
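One behavior worth flagging in the implementation above: `reshape` returns `Tensor(data_, new_shape, dtype_)` via the non-owning view constructor, so the result borrows the original buffer. A hedged illustration (allocator scaffolding as before):

```cpp
// `flat` shares image's buffer (reshape uses the non-owning constructor),
// so it must not outlive `image`.
Tensor image({2, 3, 4}, DataType::FLOAT32, allocator);
Tensor flat = image.reshape({24});  // 2*3*4 == 24 elements, contiguous: OK
```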

deep_core/test/test_deep_core.cpp (14 additions & 14 deletions)
```diff
@@ -80,12 +80,12 @@ class MockMemoryAllocator : public BackendMemoryAllocator
   size_t allocated_bytes_{0};
 };
 
-TEST_CASE("TensorPtr construction with allocator", "[tensor]")
+TEST_CASE("Tensor construction with allocator", "[tensor]")
 {
   auto allocator = std::make_shared<MockMemoryAllocator>();
   std::vector<size_t> shape{2, 3, 4};
 
-  TensorPtr tensor(shape, DataType::FLOAT32, allocator);
+  Tensor tensor(shape, DataType::FLOAT32, allocator);
 
   REQUIRE(tensor.shape() == shape);
   REQUIRE(tensor.dtype() == DataType::FLOAT32);
@@ -94,20 +94,20 @@ TEST_CASE("TensorPtr construction with allocator", "[tensor]")
   REQUIRE(allocator->allocated_bytes() > 0);
 }
 
-TEST_CASE("TensorPtr construction without allocator throws", "[tensor]")
+TEST_CASE("Tensor construction without allocator throws", "[tensor]")
 {
   std::vector<size_t> shape{2, 3};
-  REQUIRE_THROWS_AS(TensorPtr(shape, DataType::FLOAT32), std::runtime_error);
+  REQUIRE_THROWS_AS(Tensor(shape, DataType::FLOAT32), std::runtime_error);
 }
 
 TEST_CASE("Different data types have correct sizes", "[tensor]")
 {
   auto allocator = std::make_shared<MockMemoryAllocator>();
 
-  TensorPtr float_tensor({10}, DataType::FLOAT32, allocator);
-  TensorPtr int32_tensor({10}, DataType::INT32, allocator);
-  TensorPtr int64_tensor({10}, DataType::INT64, allocator);
-  TensorPtr uint8_tensor({10}, DataType::UINT8, allocator);
+  Tensor float_tensor({10}, DataType::FLOAT32, allocator);
+  Tensor int32_tensor({10}, DataType::INT32, allocator);
+  Tensor int64_tensor({10}, DataType::INT64, allocator);
+  Tensor uint8_tensor({10}, DataType::UINT8, allocator);
 
   REQUIRE(float_tensor.size() == 10);
   REQUIRE(int32_tensor.size() == 10);
@@ -120,15 +120,15 @@ TEST_CASE("Empty shape throws exception", "[tensor]")
   auto allocator = std::make_shared<MockMemoryAllocator>();
   std::vector<size_t> empty_shape;
 
-  REQUIRE_THROWS_AS(TensorPtr(empty_shape, DataType::FLOAT32, allocator), std::invalid_argument);
+  REQUIRE_THROWS_AS(Tensor(empty_shape, DataType::FLOAT32, allocator), std::invalid_argument);
 }
 
 TEST_CASE("Large shape allocation", "[tensor]")
 {
   auto allocator = std::make_shared<MockMemoryAllocator>();
   std::vector<size_t> large_shape{100, 100, 3};
 
-  TensorPtr tensor(large_shape, DataType::UINT8, allocator);
+  Tensor tensor(large_shape, DataType::UINT8, allocator);
 
   REQUIRE(tensor.size() == 30000);
   REQUIRE(tensor.shape() == large_shape);
@@ -144,15 +144,15 @@ class MockBackendExecutor : public BackendInferenceExecutor
     return true;
   }
 
-  TensorPtr run_inference(TensorPtr input) override
+  Tensor run_inference(Tensor input) override
   {
     if (!model_loaded_) {
       throw std::runtime_error("No model loaded");
     }
 
     // Mock inference: return tensor with same shape but all zeros
     auto allocator = std::make_shared<MockMemoryAllocator>();
-    TensorPtr output(input.shape(), input.dtype(), allocator);
+    Tensor output(input.shape(), input.dtype(), allocator);
 
     // Calculate correct byte size based on data type
     size_t dtype_size = get_dtype_size(input.dtype());
@@ -242,7 +242,7 @@ TEST_CASE("Backend inference workflow", "[plugin][inference]")
 
   // Create input tensor
   std::vector<size_t> shape{1, 3, 224, 224};
-  TensorPtr input(shape, DataType::FLOAT32, allocator);
+  Tensor input(shape, DataType::FLOAT32, allocator);
 
   // Run inference
   auto output = executor->run_inference(std::move(input));
@@ -280,7 +280,7 @@ class TestInferenceNode : public DeepNodeBase
     return load_model(model_path);
   }
 
-  TensorPtr test_run_inference(TensorPtr input)
+  Tensor test_run_inference(Tensor input)
   {
     return run_inference(input);
   }
```
