THCTensor.hpp
#pragma once

// STOP!!! Thinking of including this header directly? Please
// read Note [TH abstraction violation]

#include <THC/THCTensor.h>
#include <TH/THTensor.hpp>
#include <THC/THCStorage.hpp>
#include <THC/THCGeneral.hpp>

#include <atomic>

#include <ATen/ATen.h>

// See [NOTE: nDimension vs nDimensionLegacyNoScalars vs nDimensionLegacyAll]
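// Roughly: nDimension reports the true number of dimensions (0 for scalars);
// nDimensionLegacyNoScalars reports scalars as 1-dimensional; and
// nDimensionLegacyAll follows the old TH convention, additionally reporting
// tensors that contain a zero-sized dimension as 0-dimensional.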
TORCH_CUDA_CU_API int THCTensor_nDimension(
    THCState* state,
    const THCTensor* self);
TORCH_CUDA_CU_API int THCTensor_nDimensionLegacyNoScalars(
    THCState* state,
    const THCTensor* self);
TORCH_CUDA_CU_API int THCTensor_nDimensionLegacyAll(
    THCState* state,
    const THCTensor* self);

TORCH_CUDA_CU_API int64_t
THCTensor_size(THCState* state, const THCTensor* self, int dim);
TORCH_CUDA_CU_API int64_t
THCTensor_sizeLegacyNoScalars(THCState* state, const THCTensor* self, int dim);
TORCH_CUDA_CU_API int64_t
THCTensor_stride(THCState* state, const THCTensor* self, int dim);
TORCH_CUDA_CU_API int64_t THCTensor_strideLegacyNoScalars(
    THCState* state,
    const THCTensor* self,
    int dim);

TORCH_CUDA_CU_API THCTensor* THCTensor_new(
    THCState* state,
    caffe2::TypeMeta type_meta);

TORCH_CUDA_CU_API void THCTensor_resize(
    THCState* state,
    THCTensor* tensor,
    at::IntArrayRef size,
    at::IntArrayRef stride);
TORCH_CUDA_CU_API void THCTensor_resizeNd(
    THCState* state,
    THCTensor* tensor,
    int nDimension,
    const int64_t* size,
    const int64_t* stride);
TORCH_CUDA_CU_API void THCTensor_resizeAs(
    THCState* state,
    THCTensor* tensor,
    THCTensor* src);

TORCH_CUDA_CU_API void THCTensor_set(
    THCState* state,
    THCTensor* self,
    THCTensor* src);
TORCH_CUDA_CU_API void THCTensor_setStorage(
    THCState* state,
    THCTensor* self,
    THCStorage* storage_,
    ptrdiff_t storageOffset_,
    at::IntArrayRef size_,
    at::IntArrayRef stride_);

TORCH_CUDA_CU_API void THCTensor_squeeze1d(
    THCState* state,
    THCTensor* self,
    THCTensor* src,
    int dimension_);
TORCH_CUDA_CU_API void THCTensor_unsqueeze1d(
    THCState* state,
    THCTensor* self,
    THCTensor* src,
    int dimension_);

TORCH_CUDA_CU_API bool THCTensor_allContiguous(
    THCState* state,
    THCTensor** inputs,
    int numInputs);
TORCH_CUDA_CU_API ptrdiff_t
THCTensor_nElement(THCState* state, const THCTensor* self);

TORCH_CUDA_CU_API void THCTensor_retain(THCState* state, THCTensor* self);
TORCH_CUDA_CU_API void THCTensor_free(THCState* state, THCTensor* self);

TORCH_CUDA_CU_API int THCTensor_getDevice(
    THCState* state,
    const THCTensor* tensor);
TORCH_CUDA_CU_API bool THCTensor_allSameDevice(
    THCState* state,
    THCTensor** inputs,
    int numInputs);

/* Can we use 32 bit math for indexing? */
TORCH_CUDA_CU_API bool THCTensor_canUse32BitIndexMath(
    THCState* state,
    const THCTensor* t,
    ptrdiff_t max_elem = INT32_MAX);
/* Are all tensors 32-bit indexable? */
TORCH_CUDA_CU_API bool THCTensor_all32BitIndexable(
    THCState* state,
    THCTensor** inputs,
    int numInputs);
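
// CUDA kernels often prefer 32-bit index arithmetic, which is cheaper than
// 64-bit on the GPU; the two predicates above report whether every element
// offset of the given tensor(s) fits within max_elem (INT32_MAX by default).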

TORCH_CUDA_CU_API void THCTensor_preserveReduceDimSemantics(
    THCState* state,
    THCTensor* tensor,
    int in_dims,
    int64_t dimension,
    int keepdim);

#include <THC/generic/THCTensor.hpp>
#include <THC/THCGenerateAllTypes.h>

#include <THC/generic/THCTensor.hpp>
#include <THC/THCGenerateComplexTypes.h>

#include <THC/generic/THCTensor.hpp>
#include <THC/THCGenerateBoolType.h>

#include <THC/generic/THCTensor.hpp>
#include <THC/THCGenerateBFloat16Type.h>
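
/*
 * Minimal usage sketch (illustrative only, not part of the upstream header):
 * assumes a valid THCState* obtained from the surrounding THC/ATen context.
 *
 *   THCTensor* t = THCTensor_new(state, caffe2::TypeMeta::Make<float>());
 *   THCTensor_resize(state, t, {4, 5}, {5, 1});   // contiguous 4x5 tensor
 *   ptrdiff_t n = THCTensor_nElement(state, t);   // 20 elements
 *   int dev = THCTensor_getDevice(state, t);      // device holding the storage
 *   THCTensor_free(state, t);                     // release the reference
 */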