Skip to content

Commit f231be2

Browse files
cyyever authored
and pytorchmergebot committed
Mark unused parameters in C++ code (pytorch#164912)
This PR adds unused parameter name comments in C++ declarations to improve code readability. Pull Request resolved: pytorch#164912 Approved by: https://github.com/Skylion007
1 parent a753ffa commit f231be2

File tree

102 files changed

+387
-357
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

102 files changed

+387
-357
lines changed

aten/src/ATen/Context.h

Lines changed: 22 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -226,15 +226,15 @@ class TORCH_API Context {
226226
bool userEnabledMkldnn() const;
227227
void setUserEnabledMkldnn(bool e);
228228
bool benchmarkCuDNN() const;
229-
void setBenchmarkCuDNN(bool);
229+
void setBenchmarkCuDNN(bool /*b*/);
230230
int benchmarkLimitCuDNN() const;
231-
void setBenchmarkLimitCuDNN(int);
231+
void setBenchmarkLimitCuDNN(int /*b*/);
232232
bool immediateMiopen() const;
233-
void setImmediateMiopen(bool);
233+
void setImmediateMiopen(bool /*b*/);
234234
bool deterministicCuDNN() const;
235-
void setDeterministicCuDNN(bool);
235+
void setDeterministicCuDNN(bool /*b*/);
236236
bool deterministicMkldnn() const;
237-
void setDeterministicMkldnn(bool);
237+
void setDeterministicMkldnn(bool /*b*/);
238238
bool userEnabledNNPACK() const;
239239
void setUserEnabledNNPACK(bool e);
240240

@@ -252,32 +252,32 @@ class TORCH_API Context {
252252
void setSDPPriorityOrder(const std::vector<int64_t>& order);
253253
std::array<at::SDPBackend, at::num_sdp_backends> sDPPriorityOrder();
254254

255-
void setSDPUseFlash(bool);
255+
void setSDPUseFlash(bool /*e*/);
256256
bool userEnabledFlashSDP() const;
257257

258-
void setSDPUseMemEfficient(bool);
258+
void setSDPUseMemEfficient(bool /*e*/);
259259
bool userEnabledMemEfficientSDP() const;
260260

261-
void setSDPUseMath(bool);
261+
void setSDPUseMath(bool /*e*/);
262262
bool userEnabledMathSDP() const;
263263

264-
void setSDPUseCuDNN(bool);
264+
void setSDPUseCuDNN(bool /*e*/);
265265
bool userEnabledCuDNNSDP() const;
266266

267-
void setAllowFP16BF16ReductionMathSDP(bool);
267+
void setAllowFP16BF16ReductionMathSDP(bool /*e*/);
268268
bool allowFP16BF16ReductionMathSDP() const;
269269

270-
void setSDPUseOverrideable(bool);
270+
void setSDPUseOverrideable(bool /*e*/);
271271
bool userEnabledOverrideableSDP() const;
272272

273273
at::LinalgBackend linalgPreferredBackend() const;
274-
void setLinalgPreferredBackend(at::LinalgBackend);
274+
void setLinalgPreferredBackend(at::LinalgBackend /*b*/);
275275

276276
at::BlasBackend blasPreferredBackend();
277-
void setBlasPreferredBackend(at::BlasBackend);
277+
void setBlasPreferredBackend(at::BlasBackend /*b*/);
278278

279279
at::ROCmFABackend getROCmFAPreferredBackend();
280-
void setROCmFAPreferredBackend(at::ROCmFABackend);
280+
void setROCmFAPreferredBackend(at::ROCmFABackend /*b*/);
281281

282282
// Note [Enabling Deterministic Operations]
283283
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -310,9 +310,9 @@ class TORCH_API Context {
310310

311311
bool deterministicAlgorithms() const;
312312
bool deterministicAlgorithmsWarnOnly() const;
313-
void setDeterministicAlgorithms(bool, bool);
313+
void setDeterministicAlgorithms(bool /*b*/, bool /*warn_only*/);
314314
bool deterministicFillUninitializedMemory() const;
315-
void setDeterministicFillUninitializedMemory(bool);
315+
void setDeterministicFillUninitializedMemory(bool /*b*/);
316316

317317
// Note [Writing Nondeterministic Operations]
318318
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -356,11 +356,11 @@ class TORCH_API Context {
356356
Float32Op op,
357357
Float32Precision p);
358358
bool allowTF32CuDNN(std::optional<Float32Op> op = std::nullopt) const;
359-
void setAllowTF32CuDNN(bool);
359+
void setAllowTF32CuDNN(bool /*b*/);
360360
bool allowTF32OneDNN() const;
361-
void setAllowTF32OneDNN(bool);
361+
void setAllowTF32OneDNN(bool /*b*/);
362362
bool allowTF32CuBLAS() const;
363-
void setAllowTF32CuBLAS(bool);
363+
void setAllowTF32CuBLAS(bool /*b*/);
364364
Float32MatmulPrecision float32MatmulPrecision() const;
365365
Float32Precision float32Precision(Float32Backend backend, Float32Op op) const;
366366
CuBLASReductionOption allowFP16ReductionCuBLAS() const;
@@ -372,7 +372,7 @@ class TORCH_API Context {
372372
bool allow_reduced_precision,
373373
bool allow_splitk = true);
374374
bool allowFP16AccumulationCuBLAS() const;
375-
void setAllowFP16AccumulationCuBLAS(bool);
375+
void setAllowFP16AccumulationCuBLAS(bool /*b*/);
376376

377377
// Matmuls can use a so-called "persistent" kernel which launches one CUDA
378378
// block for each SM on the GPU, and each block then iterates over multiple
@@ -384,7 +384,7 @@ class TORCH_API Context {
384384
// to make matmuls target only a subset of the SMs, so they can fully schedule
385385
// even next to a comms kernel, and only be a few percent slower.
386386
std::optional<int32_t> _SMCarveout_EXPERIMENTAL() const;
387-
void _setSMCarveout_EXPERIMENTAL(std::optional<int32_t>);
387+
void _setSMCarveout_EXPERIMENTAL(std::optional<int32_t> /*c*/);
388388

389389
at::QEngine qEngine() const;
390390
void setQEngine(at::QEngine e);
@@ -405,7 +405,7 @@ class TORCH_API Context {
405405
void setDefaultMobileCPUAllocator();
406406
void unsetDefaultMobileCPUAllocator();
407407
bool allowFP16ReductionCPU() const;
408-
void setAllowFP16ReductionCPU(bool);
408+
void setAllowFP16ReductionCPU(bool /*b*/);
409409

410410
// Preserved for BC
411411
void lazyInitCUDA() {

aten/src/ATen/MapAllocator.cpp

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -62,7 +62,7 @@ constexpr const char* unknown_eventname = "eventname not specified";
6262
#endif
6363
} // namespace (anonymous)
6464

65-
MapAllocator::MapAllocator(WithFd, std::string_view filename, int fd, int flags, size_t size)
65+
MapAllocator::MapAllocator(WithFd /*unused*/, std::string_view filename, int fd, int flags, size_t size)
6666
: filename_(filename.empty() ? unknown_filename : filename)
6767
, size_(0) // to be filled later
6868
#ifdef _WIN32
@@ -494,7 +494,7 @@ RefcountedMapAllocator::RefcountedMapAllocator(const char *filename, int flags,
494494

495495
initializeAlloc();
496496
}
497-
RefcountedMapAllocator::RefcountedMapAllocator(WithFd, const char *filename, int fd, int flags, size_t size)
497+
RefcountedMapAllocator::RefcountedMapAllocator(WithFd /*unused*/, const char *filename, int fd, int flags, size_t size)
498498
: RefcountedMapAllocatorArgCheck(flags)
499499
, MapAllocator(WITH_FD, filename, flags, fd, size + map_alloc_alignment) {
500500

@@ -614,7 +614,7 @@ at::DataPtr MapAllocator::makeDataPtr(std::string_view filename, int flags, size
614614
return {context->data(), context, &deleteMapAllocator, at::DeviceType::CPU};
615615
}
616616

617-
at::DataPtr MapAllocator::makeDataPtr(WithFd, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
617+
at::DataPtr MapAllocator::makeDataPtr(WithFd /*unused*/, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
618618
auto* context = new MapAllocator(WITH_FD, filename, fd, flags, size);
619619
if (actual_size_out) *actual_size_out = context->size();
620620
return {context->data(), context, &deleteMapAllocator, at::DeviceType::CPU};
@@ -626,7 +626,7 @@ at::DataPtr RefcountedMapAllocator::makeDataPtr(const char *filename, int flags,
626626
return {context->data(), context, &deleteRefcountedMapAllocator, at::DeviceType::CPU};
627627
}
628628

629-
at::DataPtr RefcountedMapAllocator::makeDataPtr(WithFd, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
629+
at::DataPtr RefcountedMapAllocator::makeDataPtr(WithFd /*unused*/, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
630630
auto* context = new RefcountedMapAllocator(WITH_FD, filename, fd, flags, size);
631631
if (actual_size_out) *actual_size_out = context->size() - map_alloc_alignment;
632632
return {context->data(), context, &deleteRefcountedMapAllocator, at::DeviceType::CPU};

aten/src/ATen/MapAllocator.h

Lines changed: 6 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -25,7 +25,7 @@ class TORCH_API MapAllocator {
2525
public:
2626
MapAllocator(std::string_view filename, int flags, size_t size);
2727
MapAllocator(
28-
WithFd,
28+
WithFd /*unused*/,
2929
std::string_view filename,
3030
int fd,
3131
int flags,
@@ -59,14 +59,14 @@ class TORCH_API MapAllocator {
5959
return flags_;
6060
}
6161

62-
static MapAllocator* fromDataPtr(const at::DataPtr&);
62+
static MapAllocator* fromDataPtr(const at::DataPtr& /*dptr*/);
6363
static at::DataPtr makeDataPtr(
6464
std::string_view filename,
6565
int flags,
6666
size_t size,
6767
size_t* actual_size_out);
6868
static at::DataPtr makeDataPtr(
69-
WithFd,
69+
WithFd /*unused*/,
7070
const char* filename,
7171
int fd,
7272
int flags,
@@ -105,13 +105,13 @@ class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck,
105105
public:
106106
RefcountedMapAllocator(const char* filename, int flags, size_t size);
107107
RefcountedMapAllocator(
108-
WithFd,
108+
WithFd /*unused*/,
109109
const char* filename,
110110
int fd,
111111
int flags,
112112
size_t size);
113113

114-
static RefcountedMapAllocator* fromDataPtr(const at::DataPtr&);
114+
static RefcountedMapAllocator* fromDataPtr(const at::DataPtr& /*dptr*/);
115115
RefcountedMapAllocator(const RefcountedMapAllocator&) = delete;
116116
RefcountedMapAllocator(RefcountedMapAllocator&&) = delete;
117117
RefcountedMapAllocator& operator=(const RefcountedMapAllocator&) = delete;
@@ -122,7 +122,7 @@ class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck,
122122
size_t size,
123123
size_t* actual_size_out);
124124
static at::DataPtr makeDataPtr(
125-
WithFd,
125+
WithFd /*unused*/,
126126
const char* filename,
127127
int fd,
128128
int flags,

aten/src/ATen/NestedTensorImpl.cpp

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -273,7 +273,7 @@ c10::SymInt NestedTensorImpl::sym_numel_custom() const {
273273
return NestedTensorImpl::numel_custom();
274274
}
275275

276-
c10::SymBool NestedTensorImpl::sym_is_contiguous_custom(MemoryFormat) const {
276+
c10::SymBool NestedTensorImpl::sym_is_contiguous_custom(MemoryFormat /*memory_format*/) const {
277277
return nested_tensor_impl_is_contiguous(this);
278278
}
279279
IntArrayRef NestedTensorImpl::sizes_custom() const {

aten/src/ATen/NestedTensorImpl.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,8 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
115115
// with real implementations
116116
int64_t numel_custom() const override;
117117
c10::SymInt sym_numel_custom() const override;
118-
c10::SymBool sym_is_contiguous_custom(MemoryFormat) const override;
118+
c10::SymBool sym_is_contiguous_custom(
119+
MemoryFormat /*memory_format*/) const override;
119120
int64_t size_custom(int64_t d) const override {
120121
return this->size(d);
121122
}

aten/src/ATen/Parallel.h

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -14,7 +14,7 @@ inline int64_t divup(int64_t x, int64_t y) {
1414
TORCH_API void init_num_threads();
1515

1616
// Sets the number of threads to be used in parallel region
17-
TORCH_API void set_num_threads(int);
17+
TORCH_API void set_num_threads(int /*nthreads*/);
1818

1919
// Returns the maximum number of threads that may be used in a parallel region
2020
TORCH_API int get_num_threads();
@@ -37,7 +37,7 @@ inline void lazy_init_num_threads() {
3737
}
3838
}
3939

40-
TORCH_API void set_thread_num(int);
40+
TORCH_API void set_thread_num(int /*id*/);
4141

4242
class TORCH_API ThreadIdGuard {
4343
public:
@@ -130,7 +130,7 @@ inline scalar_t parallel_reduce(
130130
TORCH_API std::string get_parallel_info();
131131

132132
// Sets number of threads used for inter-op parallelism
133-
TORCH_API void set_num_interop_threads(int);
133+
TORCH_API void set_num_interop_threads(int /*nthreads*/);
134134

135135
// Returns the number of threads used for inter-op parallelism
136136
TORCH_API size_t get_num_interop_threads();

aten/src/ATen/SparseCsrTensorImpl.cpp

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -252,7 +252,7 @@ void SparseCsrTensorImpl::set_stride(int64_t dim, int64_t new_stride) {
252252
void SparseCsrTensorImpl::set_storage_offset(int64_t storage_offset) {
253253
TORCH_CHECK(false, "Sparse ", at::sparse_csr::layoutToString(layout_, /*upper=*/true), " tensors do not have set_storage_offset.");
254254
}
255-
c10::SymBool SparseCsrTensorImpl::sym_is_contiguous_custom(MemoryFormat) const {
255+
c10::SymBool SparseCsrTensorImpl::sym_is_contiguous_custom(MemoryFormat /*memory_format*/) const {
256256
TORCH_CHECK(false, "Sparse ", at::sparse_csr::layoutToString(layout_, /*upper=*/true), " tensors do not have is_contiguous");
257257
}
258258
} // namespace at

aten/src/ATen/SparseCsrTensorImpl.h

Lines changed: 4 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -32,10 +32,10 @@ struct TORCH_API SparseCsrTensorImpl : public TensorImpl {
3232

3333
public:
3434
explicit SparseCsrTensorImpl(
35-
at::DispatchKeySet,
35+
at::DispatchKeySet /*key_set*/,
3636
at::Device device,
3737
Layout layout,
38-
const caffe2::TypeMeta);
38+
const caffe2::TypeMeta /*data_type*/);
3939

4040
void resize_(int64_t nnz, IntArrayRef size);
4141
void resize_and_clear_(
@@ -86,7 +86,8 @@ struct TORCH_API SparseCsrTensorImpl : public TensorImpl {
8686
protected:
8787
IntArrayRef strides_custom() const override;
8888
SymIntArrayRef sym_strides_custom() const override;
89-
SymBool sym_is_contiguous_custom(MemoryFormat) const override;
89+
SymBool sym_is_contiguous_custom(
90+
MemoryFormat /*memory_format*/) const override;
9091

9192
public:
9293
void set_size(int64_t dim, int64_t new_size) override;

aten/src/ATen/SparseTensorImpl.h

Lines changed: 5 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -46,7 +46,9 @@ struct TORCH_API SparseTensorImpl : public TensorImpl {
4646

4747
public:
4848
// Public for now...
49-
explicit SparseTensorImpl(at::DispatchKeySet, const caffe2::TypeMeta);
49+
explicit SparseTensorImpl(
50+
at::DispatchKeySet /*key_set*/,
51+
const caffe2::TypeMeta /*data_type*/);
5052

5153
void release_resources() override;
5254

@@ -384,8 +386,8 @@ struct TORCH_API SparseTensorImpl : public TensorImpl {
384386

385387
private:
386388
explicit SparseTensorImpl(
387-
at::DispatchKeySet,
388-
const caffe2::TypeMeta,
389+
at::DispatchKeySet /*key_set*/,
390+
const caffe2::TypeMeta /*data_type*/,
389391
at::Tensor indices,
390392
at::Tensor values);
391393

aten/src/ATen/TensorIndexing.h

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -112,10 +112,10 @@ TORCH_API std::ostream& operator<<(std::ostream& stream, const Slice& slice);
112112
// `torch.tensor([1, 2])`) | `torch::tensor({1, 2})`
113113
struct TORCH_API TensorIndex final {
114114
// Case 1: `at::indexing::None`
115-
TensorIndex(std::nullopt_t) : type_(TensorIndexType::None) {}
115+
TensorIndex(std::nullopt_t /*unused*/) : type_(TensorIndexType::None) {}
116116

117117
// Case 2: "..." / `at::indexing::Ellipsis`
118-
TensorIndex(at::indexing::EllipsisIndexType)
118+
TensorIndex(at::indexing::EllipsisIndexType /*unused*/)
119119
: type_(TensorIndexType::Ellipsis) {}
120120
TensorIndex(const char* str) : TensorIndex(at::indexing::Ellipsis) {
121121
TORCH_CHECK_VALUE(

0 commit comments

Comments (0)