
Commit 65c26cb

Merge pull request #4174 from halide/srj-tidy
Remove unused 'using' decls to appease clang-tidy
2 parents 806f467 + feb095d commit 65c26cb
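
The declarations dropped by this commit are `using std::endl;` and `using std::map;`, which nothing else in CodeGen_PyTorch.cpp references. As a minimal sketch of the warning being silenced (the commit message only says "clang-tidy"; misc-unused-using-decls is assumed here, since it is the check that reports unused using-declarations; the file and function names below are invented for illustration):

// unused_using.cpp -- illustrative only, not part of the Halide sources.
// clang-tidy's misc-unused-using-decls check reports using-declarations
// whose introduced names are never used in the translation unit.
#include <iostream>
#include <map>
#include <sstream>
#include <string>

using std::endl;           // flagged: 'endl' is never used below
using std::map;            // flagged: 'map' is never used below
using std::ostringstream;  // OK: used in emit_header()
using std::string;         // OK: used in emit_header()

string emit_header() {
    ostringstream ss;
    // Writing "\n" directly means std::endl is never needed.
    ss << "#include \"torch/extension.h\"" << "\n";
    return ss.str();
}

int main() {
    std::cout << emit_header();
    return 0;
}

An invocation along the lines of `clang-tidy -checks=-*,misc-unused-using-decls unused_using.cpp --` (flags assumed) would report the first two declarations; deleting them, as this commit does for CodeGen_PyTorch.cpp, clears the report.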

File tree

1 file changed (+11, -13)


src/CodeGen_PyTorch.cpp (+11, -13)
@@ -17,8 +17,6 @@
 namespace Halide {
 namespace Internal {
 
-using std::endl;
-using std::map;
 using std::ostream;
 using std::ostringstream;
 using std::string;
@@ -47,7 +45,7 @@ CodeGen_PyTorch::CodeGen_PyTorch(ostream &s, Target t, std::string cpp_header) :
                 "UserContext feature to properly manage the GPU memory. "
                 "Please add \"-user_context\" to the generator's target options.\n";
         }
-        stream << "#include \"ATen/cuda/CUDAContext.h\"\n";
+        stream << "#include \"ATen/cuda/CUDAContext.h\"\n";
         stream << "#include \"HalidePyTorchCudaHelpers.h\"\n";
     }
 
@@ -92,7 +90,7 @@ void CodeGen_PyTorch::compile(const LoweredFunc &f, bool is_cuda) {
             continue;
         } else if (args[i].is_buffer()) {
             buffer_args.push_back(args[i]);
-            stream
+            stream
                 << type_to_pytorch_tensor(args[i].type, is_cuda)
                 << " &"
                 << c_print_name(args[i].name);
@@ -134,14 +132,14 @@ void CodeGen_PyTorch::compile(const LoweredFunc &f, bool is_cuda) {
         do_indent();
         stream
             << "HLPT_CHECK_CONTIGUOUS("
-            << c_print_name(buffer_args[i].name)
+            << c_print_name(buffer_args[i].name)
             << ");\n";
 
         if (is_cuda) {
             do_indent();
             stream
                 << "HLPT_CHECK_DEVICE("
-                << c_print_name(buffer_args[i].name)
+                << c_print_name(buffer_args[i].name)
                 << ", device_id);\n";
         }
     }
@@ -157,9 +155,9 @@ void CodeGen_PyTorch::compile(const LoweredFunc &f, bool is_cuda) {
         string tp = type_to_c_type(buffer_args[i].type, false);
         stream
             << "Buffer<" << tp << "> "
-            << c_print_name(buffer_args[i].name)
+            << c_print_name(buffer_args[i].name)
             << "_buffer = Halide::PyTorch::wrap<" << tp << ">("
-            << c_print_name(buffer_args[i].name)
+            << c_print_name(buffer_args[i].name)
            << ");\n"
            ;
    }
@@ -172,7 +170,7 @@ void CodeGen_PyTorch::compile(const LoweredFunc &f, bool is_cuda) {
    stream << "int err = " << simple_name << "(";
    for (size_t i = 0; i < args.size(); i++) {
        if (args[i].is_buffer()) {
-            stream
+            stream
                << c_print_name(args[i].name)
                << "_buffer";
        } else {
@@ -194,15 +192,15 @@ void CodeGen_PyTorch::compile(const LoweredFunc &f, bool is_cuda) {
    for (size_t i = 0; i < buffer_args.size(); i++) {
        if (buffer_args[i].is_buffer()) {
            do_indent();
-            stream
+            stream
                << "AT_ASSERTM(!"
                << c_print_name(buffer_args[i].name) << "_buffer.host_dirty(),"
                << "\"device not synchronized for buffer "
                << c_print_name(buffer_args[i].name)
                << ", make sure all update stages are excplicitly computed on GPU."
                <<"\");\n";
            do_indent();
-            stream
+            stream
                << c_print_name(buffer_args[i].name) << "_buffer"
                << ".device_detach_native();\n";
        }
@@ -260,7 +258,7 @@ void CodeGen_PyTorch::test() {
    {
        // TODO(mgharbi): test that Target("host-cuda") raises an exception since
        // we require the "user_context" feature when using CUDA
-
+
        CodeGen_PyTorch cg(source, Target("host"), "PyTorchTestOp.h");
        cg.compile(m);
 
@@ -270,7 +268,7 @@ void CodeGen_PyTorch::test() {
    string src = source.str() + "\n" + source_cuda.str();
 
    // The correct source concatenates CPU and GPU headers
-    string correct_src =
+    string correct_src =
        R"GOLDEN_CODE(#include "torch/extension.h"
 #include "HalideBuffer.h"
 #include "HalidePyTorchHelpers.h"
