This repository was archived by the owner on Feb 7, 2023. It is now read-only.

Commit b410c51

vgao1996 authored and facebook-github-bot committed
comment out unused parameters
Summary: This uses `clang-tidy` to comment out unused parameters (in functions, methods and lambdas) in fbcode. Cases that the tool failed to handle are fixed manually.

Reviewed By: igorsugak

Differential Revision: D5454343

fbshipit-source-id: 5dee339b4334e25e963891b519a5aa81fbf627b2
1 parent 7df0d66 commit b410c51
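
As context for the diffs below, the change follows the fix-it pattern of clang-tidy's misc-unused-parameters check: the name of an unused parameter is commented out, which silences -Wunused-parameter without altering the function's signature or its call sites. A minimal, self-contained sketch of the before/after pattern (the Process function is hypothetical, not from this commit):

#include <cstdio>

// Before the cleanup, `mode` was accepted for interface compatibility but
// never read, so -Wunused-parameter warned about it:
//
//   int Process(int value, int mode) { return value * 2; }
//
// After commenting the name out, the signature and all call sites are
// unchanged, because parameter names are not part of the function type.
int Process(int value, int /*mode*/) {
  return value * 2;
}

int main() {
  std::printf("%d\n", Process(21, /*mode=*/0));  // call site is identical
  return 0;
}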


63 files changed · +327 −260 lines

caffe2/binaries/make_image_db.cc

Lines changed: 4 additions & 2 deletions
@@ -37,8 +37,10 @@ CAFFE2_DEFINE_bool(warp, false, "If warp is set, warp the images to square.");
 namespace caffe2 {
 
 void ConvertImageDataset(
-    const string& input_folder, const string& list_filename,
-    const string& output_db_name, const bool shuffle) {
+    const string& input_folder,
+    const string& list_filename,
+    const string& output_db_name,
+    const bool /*shuffle*/) {
   std::ifstream list_file(list_filename);
   std::vector<std::pair<std::string, int> > lines;
   std::string filename;

caffe2/contrib/nervana/nervana_math_gpu.cc

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ void Gemm<float, CUDAContext, NervanaEngine>(
     const float beta,
     float* C,
     CUDAContext* context,
-    TensorProto::DataType math_type) {
+    TensorProto::DataType /*math_type*/) {
   // Note that cublas follows fortran order, so the order is different from
   // the cblas convention.
   int lda = (TransA == CblasNoTrans) ? K : M;

caffe2/contrib/prof/cuda_profile_ops.cc

Lines changed: 3 additions & 3 deletions
@@ -56,7 +56,7 @@ class CudaProfileInitializeOp : public OperatorBase {
     unlink(config_.c_str());
   }
 
-  virtual bool Run(int /* unused */ stream_id = 0) {
+  virtual bool Run(int /* unused */ /*stream_id*/ = 0) {
     // If this fails, check the contents of "output" for hints.
     CUDA_CHECK(
         cudaProfilerInitialize(config_.c_str(), output_.c_str(), cudaCSV));
@@ -73,7 +73,7 @@ class CudaProfileStartOp : public OperatorBase {
   CudaProfileStartOp(const OperatorDef& operator_def, Workspace* ws)
       : OperatorBase(operator_def, ws) {}
 
-  virtual bool Run(int /* unused */ stream_id = 0) {
+  virtual bool Run(int /* unused */ /*stream_id*/ = 0) {
     CUDA_ENFORCE(cudaProfilerStart());
     return true;
   }
@@ -84,7 +84,7 @@ class CudaProfileStopOp : public OperatorBase {
   CudaProfileStopOp(const OperatorDef& operator_def, Workspace* ws)
       : OperatorBase(operator_def, ws) {}
 
-  virtual bool Run(int /* unused */ stream_id = 0) {
+  virtual bool Run(int /* unused */ /*stream_id*/ = 0) {
     CUDA_ENFORCE(cudaProfilerStop());
     return true;
   }

caffe2/contrib/torch/torch_op.h

Lines changed: 1 addition & 1 deletion
@@ -56,7 +56,7 @@ class Torch final {
     return Traits::tensorTy;
   }
 
-  void setContext(Context* context) {}
+  void setContext(Context* /*context*/) {}
 
   void setTensor(typename Traits::Tensor* t, Blob* blob) {
     CAFFE_ENFORCE_EQ(tensorTy(*blob), Traits::tensorTy);

caffe2/contrib/transform/transform.h

Lines changed: 7 additions & 7 deletions
@@ -63,9 +63,9 @@ class Transform {
    * Given the current subgraph (ordered), should we append the new node at idx?
    */
   virtual bool PatternRule(
-      const transform::Graph& g,
-      const std::vector<int>& subgraph,
-      int idx) {
+      const transform::Graph& /*g*/,
+      const std::vector<int>& /*subgraph*/,
+      int /*idx*/) {
     CAFFE_NOT_IMPLEMENTED;
   }
 
@@ -74,8 +74,8 @@ class Transform {
    * Given a subgraph, can we accept it?
    */
   virtual bool ValidatorRule(
-      const transform::Graph& g,
-      const std::vector<int>& subgraph) {
+      const transform::Graph& /*g*/,
+      const std::vector<int>& /*subgraph*/) {
     CAFFE_NOT_IMPLEMENTED;
   }
 
@@ -84,8 +84,8 @@ class Transform {
    * upon the subgraph.
    */
   virtual bool ReplaceRule(
-      const std::vector<int>& subgraph,
-      transform::Graph* g_ptr) {
+      const std::vector<int>& /*subgraph*/,
+      transform::Graph* /*g_ptr*/) {
     CAFFE_NOT_IMPLEMENTED;
   }
 
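
The three base-class rules above only call CAFFE_NOT_IMPLEMENTED, so their parameter names can be commented out without affecting derived classes. A hedged sketch of how a concrete transform might override them, keeping only the names it actually uses (the DummyTransform class and its bodies are illustrative; only the signatures are taken from the diff above, and the include path is assumed from the file name shown):

#include <algorithm>
#include <vector>

#include "caffe2/contrib/transform/transform.h"  // assumed include path

namespace caffe2 {

// Illustrative subclass; the override bodies are placeholders.
class DummyTransform : public Transform {
 protected:
  // Uses `subgraph` and `idx`, so those names stay; `g` is unused and
  // commented out, matching the convention applied in this commit.
  bool PatternRule(
      const transform::Graph& /*g*/,
      const std::vector<int>& subgraph,
      int idx) override {
    // Append a node only if it is not already in the candidate subgraph.
    return std::find(subgraph.begin(), subgraph.end(), idx) == subgraph.end();
  }

  bool ValidatorRule(
      const transform::Graph& /*g*/,
      const std::vector<int>& subgraph) override {
    return !subgraph.empty();
  }

  bool ReplaceRule(
      const std::vector<int>& /*subgraph*/,
      transform::Graph* g_ptr) override {
    return g_ptr != nullptr;
  }
};

}  // namespace caffe2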

caffe2/contrib/warpctc/ctc_op.cpp

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@ namespace caffe2 {
 
 namespace detail {
 template <>
-ctcComputeInfo workspaceInfo<CPUContext>(const CPUContext& context) {
+ctcComputeInfo workspaceInfo<CPUContext>(const CPUContext& /*context*/) {
   ctcComputeInfo result;
   result.loc = CTC_CPU;
   result.num_threads = 1;

caffe2/core/blob_serialization.cc

Lines changed: 2 additions & 1 deletion
@@ -86,7 +86,8 @@ std::string Blob::Serialize(const string& name) const {
 // Specialization for StoreDeviceDetail for CPU - nothing needs to be done.
 template <>
 void TensorSerializer<CPUContext>::StoreDeviceDetail(
-    const Tensor<CPUContext>& input, TensorProto* proto) {}
+    const Tensor<CPUContext>& /*input*/,
+    TensorProto* /*proto*/) {}
 
 // The actual serialization registry objects.
 CAFFE_DEFINE_TYPED_REGISTRY(

caffe2/core/blob_serialization.h

Lines changed: 5 additions & 2 deletions
@@ -263,8 +263,11 @@ void TensorSerializer<Context>::SerializeWithChunkSize(
 
 template <class Context>
 void TensorSerializer<Context>::Serialize(
-    const Tensor<Context>& input, const string& name,
-    TensorProto* proto_ptr, size_t chunkBegin, int32_t chunkSize) {
+    const Tensor<Context>& input,
+    const string& /*name*/,
+    TensorProto* proto_ptr,
+    size_t chunkBegin,
+    int32_t chunkSize) {
   CAFFE_ENFORCE(
       chunkBegin <= input.size(),
       "Chunk begin is out of tensor: ",

caffe2/core/blob_serializer_base.h

Lines changed: 1 addition & 1 deletion
@@ -46,7 +46,7 @@ class BlobSerializerBase {
       const Blob& blob,
       const std::string& name,
       SerializationAcceptor acceptor,
-      int chunk_size) {
+      int /*chunk_size*/) {
     // Base implementation.
     Serialize(blob, name, acceptor);
   }

caffe2/core/blob_test.cc

Lines changed: 3 additions & 2 deletions
@@ -649,7 +649,7 @@ class VectorCursor : public db::Cursor {
     pos_ = 0;
   }
   ~VectorCursor() {}
-  void Seek(const string& key) override {}
+  void Seek(const string& /*key*/) override {}
   void SeekToFirst() override {}
   void Next() override {
     ++pos_;
@@ -790,7 +790,8 @@ TEST(CustomChunkSize, BigTensorSerialization) {
   tensor->mutable_data<float>();
   std::mutex mutex;
   int counter = 0;
-  auto acceptor = [&](const std::string& key, const std::string& value) {
+  auto acceptor = [&](const std::string& /*key*/,
+                      const std::string& /*value*/) {
     std::lock_guard<std::mutex> guard(mutex);
     counter++;
   };
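
For the lambda case called out in the summary, commenting out the parameter names leaves the closure's call signature intact, so it still converts to the same std::function type the serializer expects. A standalone sketch (the Acceptor alias is illustrative, loosely modeled on the acceptor in the test above):

#include <functional>
#include <iostream>
#include <string>

// Illustrative alias with the same shape as the acceptor used in the test.
using Acceptor = std::function<void(const std::string&, const std::string&)>;

int main() {
  int counter = 0;
  // Names are commented out, but the parameters remain part of the
  // signature, so conversion to Acceptor is unchanged.
  Acceptor acceptor = [&](const std::string& /*key*/,
                          const std::string& /*value*/) { counter++; };
  acceptor("chunk_0", "payload");
  std::cout << counter << "\n";  // prints 1
  return 0;
}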
