
Commit e9f6045

cyyever authored and pytorchmergebot committed

[15/N] Fix extra warnings brought by clang-tidy-17 (pytorch#143100)

Fixes #ISSUE_NUMBER
Pull Request resolved: pytorch#143100
Approved by: https://github.com/Skylion007

1 parent 33dee72 · commit e9f6045

9 files changed (+46 -34 lines)

aten/src/ATen/functorch/BatchRulesBinaryOps.cpp (+1 -1)

@@ -313,7 +313,7 @@ static Tensor rrelu_with_noise_batch(
   auto [noise_value, noise_bdim] = unwrapTensorAtLevel(noise, cur_level);
   TORCH_CHECK(!noise_bdim.has_value(), "vmap: Attempted to vmap over 'noise' in torch.rrelu_with_noise. This is not supported.");
   auto res = rrelu_with_noise_batch_rule(self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, std::move(generator));
-  return makeBatched(std::get<0>(res), std::get<1>(res), cur_level);
+  return makeBatched(std::move(std::get<0>(res)), std::get<1>(res), cur_level);
 }

 static std::tuple<Tensor, std::optional<int64_t>> log_sigmoid_backward_batch_rule(
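The change above is clang-tidy's move-on-return suggestion for tuple elements. A minimal, self-contained sketch of the pattern (std::string stands in for at::Tensor; both are cheap to move but comparatively costly to copy):

#include <iostream>
#include <string>
#include <tuple>
#include <utility>

// std::get on an lvalue tuple yields an lvalue reference, so returning the
// element directly invokes the copy constructor. Casting with std::move lets
// the move constructor run instead; the tuple is dead after the return anyway.
std::string take_first(std::tuple<std::string, int>& res) {
  return std::move(std::get<0>(res));
}

int main() {
  std::tuple<std::string, int> res{std::string(1000, 'x'), 7};
  std::cout << take_first(res).size() << '\n';  // 1000; element was moved out
}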

aten/src/ATen/functorch/BatchRulesHelper.cpp (+1 -1)

@@ -199,7 +199,7 @@ std::tuple<Tensor, Tensor> _binary_pointwise_helper(
   tensor_ = maybePadToLogicalRank(tensor_, tensor_batch_dim, max_logical_rank);
   other_ = maybePadToLogicalRank(other_, other_batch_dim, max_logical_rank);

-  return std::make_tuple(tensor_, other_);
+  return std::make_tuple(std::move(tensor_), std::move(other_));
 }

 } // namespace at::functorch
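For context, a sketch of why the explicit moves matter in this return: std::make_tuple deduces value types and copy-constructs from lvalue arguments, and return-value optimization does not reach through the call, so locals that die at the return should be moved in (the stand-in types here are illustrative, not the functorch ones):

#include <string>
#include <tuple>
#include <utility>

std::tuple<std::string, std::string> pair_up() {
  std::string tensor_(1000, 't');
  std::string other_(1000, 'o');
  // Without std::move both strings are copied into the tuple; with it they
  // are moved, since the locals are not used after this statement.
  return std::make_tuple(std::move(tensor_), std::move(other_));
}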

aten/src/ATen/functorch/BatchRulesLinearAlgebra.cpp (+3 -3)

@@ -213,7 +213,7 @@ struct LinalgCheckMatrixUnaryRuleHelper<op_name, F, Func, typelist<A, T...>> {
       T... extra_args) {
     auto tensor_ = check_and_reshape_input(tensor, batch_dim);
     auto res = Func(std::move(tensor_), std::forward<T>(extra_args)...);
-    return std::make_tuple(std::get<0>(res), 0, std::get<1>(res), 0, std::get<2>(res), 0, std::get<3>(res), 0);
+    return std::make_tuple(std::move(std::get<0>(res)), 0, std::move(std::get<1>(res)), 0, std::move(std::get<2>(res)), 0, std::get<3>(res), 0);
   }
 };

@@ -279,8 +279,8 @@ threeOutputs linalg_lu_unpack_batch_rule(
     LU_bdim = 0;
   }

-  const auto res = at::lu_unpack(LU_, pivots_, unpack_data, unpack_pivots);
-  return std::make_tuple(std::get<0>(res), 0, std::get<1>(res), 0, std::get<2>(res), 0);
+  auto res = at::lu_unpack(LU_, pivots_, unpack_data, unpack_pivots);
+  return std::make_tuple(std::move(std::get<0>(res)), 0, std::move(std::get<1>(res)), 0, std::move(std::get<2>(res)), 0);
 }

 oneOutput linalg_lu_solve_batch_rule(
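The const-to-auto change in the second hunk is what makes the added moves effective: std::move on a const object still binds to the copy constructor, so with `const auto res` the casts would silently copy. A minimal sketch of the trap:

#include <string>
#include <tuple>
#include <utility>

std::tuple<std::string, std::string> source() {
  return {std::string(1000, 'p'), std::string(1000, 'l')};
}

std::string moves() {
  auto res = source();                 // non-const: elements are movable
  return std::move(std::get<0>(res));  // move constructor runs
}

std::string silently_copies() {
  const auto res = source();           // const propagates to the elements,
  return std::move(std::get<0>(res));  // so this "move" degrades to a copy
}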

aten/src/ATen/native/quantized/FakeQuantPerChannelAffine.cpp (+2 -2)

@@ -36,9 +36,9 @@ Tensor fake_quantize_per_channel_affine(
     int64_t axis,
     int64_t quant_min,
     int64_t quant_max) {
-  const auto res = at::fake_quantize_per_channel_affine_cachemask(
+  auto res = at::fake_quantize_per_channel_affine_cachemask(
       self, scale, zero_point, axis, quant_min, quant_max);
-  return std::get<0>(res);
+  return std::get<0>(std::move(res));
 }

 std::tuple<Tensor, Tensor> fake_quantize_per_channel_affine_cachemask(
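A sketch of the std::get overload this relies on: when the tuple expression is an rvalue, std::get returns an rvalue reference to the element, so the selected element is moved out and the rest of the tuple is simply destroyed:

#include <string>
#include <tuple>
#include <utility>

std::tuple<std::string, std::string> compute_with_mask() {
  return {std::string(1000, 'v'), std::string(1000, 'm')};
}

std::string value_only() {
  auto res = compute_with_mask();
  // std::get<0> on the rvalue std::move(res) returns std::string&&, so the
  // first element is move-constructed into the return value; the second is
  // discarded when res goes out of scope.
  return std::get<0>(std::move(res));
}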

aten/src/ATen/native/quantized/FakeQuantPerTensorAffine.cpp (+4 -4)

@@ -34,9 +34,9 @@ Tensor fake_quantize_per_tensor_affine(
     int64_t zero_point,
     int64_t quant_min,
     int64_t quant_max) {
-  const auto res = at::fake_quantize_per_tensor_affine_cachemask(
+  auto res = at::fake_quantize_per_tensor_affine_cachemask(
      self, scale, zero_point, quant_min, quant_max);
-  return std::get<0>(res);
+  return std::get<0>(std::move(res));
 }

 Tensor fake_quantize_per_tensor_affine(
@@ -45,9 +45,9 @@ Tensor fake_quantize_per_tensor_affine(
     const Tensor& zero_point,
     int64_t quant_min,
     int64_t quant_max) {
-  const auto res = at::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(
+  auto res = at::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(
      self, scale, zero_point, at::ones(1, self.options().dtype(at::kLong)), quant_min, quant_max);
-  return std::get<0>(res);
+  return std::get<0>(std::move(res));
 }

 /* Fake-quantizes the 'inputs' tensor, saving a mask for the backward pass.
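Same fix as in FakeQuantPerChannelAffine.cpp above: each cachemask helper returns an (output, mask) pair where the mask only feeds the backward pass, so moving the tuple into std::get (see the sketch after that file) lets the output Tensor be moved out while the unused mask is dropped.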

aten/src/ATen/native/quantized/cpu/conv_serialization.h (+18 -18)

@@ -86,7 +86,7 @@ using ConvParamsSerializationTypeV3 = std::tuple<
 // Parses any historical conv packed params format into
 // the current format.
 template <uint32_t kSpatialDim>
-ConvParamsSerializationTypeV3 parse_conv_serialized_state(c10::IValue v) {
+ConvParamsSerializationTypeV3 parse_conv_serialized_state(const c10::IValue& v) {

   // determine the version based on IValue contents
   int version = -1;
@@ -131,15 +131,15 @@ ConvParamsSerializationTypeV3 parse_conv_serialized_state(c10::IValue v) {
         dilation_x_kSpatialDim.size() + kSpatialDim + 3);
     config_vals.push_back(kSpatialDim);
     for (const auto i : c10::irange(stride_x_kSpatialDim.size())) {
-      auto stride = stride_x_kSpatialDim.get(i);
+      auto const& stride = stride_x_kSpatialDim.get(i);
       config_vals.push_back(stride[0].item<int16_t>());
     }
     for (const auto i : c10::irange(padding_x_kSpatialDim.size())) {
-      auto padding = padding_x_kSpatialDim.get(i);
+      auto const& padding = padding_x_kSpatialDim.get(i);
       config_vals.push_back(padding[0].item<int16_t>());
     }
     for (const auto i : c10::irange(dilation_x_kSpatialDim.size())) {
-      auto dilation = dilation_x_kSpatialDim.get(i);
+      auto const& dilation = dilation_x_kSpatialDim.get(i);
       config_vals.push_back(dilation[0].item<int16_t>());
     }
     // output_padding does not exist in v1, so we fill in a default value
@@ -283,13 +283,13 @@ ConvParamsSerializationTypeV3 serialize_conv(
 template <uint32_t kSpatialDim>
 c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> deserialize_conv(
     ConvParamsSerializationTypeV3 state) {
-  auto [version, config_vals, tensors] = state;
+  auto& [version, config_vals, tensors] = state;
   TORCH_INTERNAL_ASSERT(version == 3, "Unexpected serialized qconv version: ", version);

   TORCH_CHECK(tensors.size() == 3, "Wrong number of tensors", tensors.size());
-  std::optional<at::Tensor> weight = tensors[1];
-  std::optional<at::Tensor> bias = tensors[2];
-  TORCH_INTERNAL_ASSERT(weight, "Weight should always be present in serialized qconv.");
+  auto& weight = tensors[1];
+  auto& bias = tensors[2];
+  TORCH_INTERNAL_ASSERT(weight.has_value(), "Weight should always be present in serialized qconv.");

   torch::List<int64_t> stride, padding, output_padding, dilation;
   // skip kSpatialDim
@@ -340,8 +340,8 @@ c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> deserialize_conv(
       weight.value(), transpose, groups, output_padding);
   if (use_onednn) {
     return PackedConvWeightsOnednn<kSpatialDim>::prepack(
-        weight.value(),
-        bias,
+        std::move(weight.value()),
+        std::move(bias),
         stride,
         padding,
         output_padding,
@@ -352,8 +352,8 @@ c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> deserialize_conv(
   }
 #endif
   return PackedConvWeight<kSpatialDim>::prepack(
-      weight.value(),
-      bias,
+      std::move(weight.value()),
+      std::move(bias),
      stride,
      padding,
      output_padding,
@@ -367,8 +367,8 @@ c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> deserialize_conv(
 #ifdef USE_FBGEMM
   if (ctx.qEngine() == at::QEngine::FBGEMM) {
     return PackedConvWeight<kSpatialDim>::prepack(
-        weight.value(),
-        bias,
+        std::move(weight.value()),
+        std::move(bias),
         stride,
         padding,
         output_padding,
@@ -385,8 +385,8 @@ c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> deserialize_conv(
         "prepack/__setstate__: QNNPACK only supports Conv2d "
         "now.");
     return PackedConvWeightsQnnp<kSpatialDim>::prepack(
-        weight.value(),
-        bias,
+        std::move(weight.value()),
+        std::move(bias),
         stride,
         padding,
         output_padding,
@@ -399,8 +399,8 @@ c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> deserialize_conv(
 #if AT_MKLDNN_ENABLED()
   if (ctx.qEngine() == at::QEngine::ONEDNN) {
     return PackedConvWeightsOnednn<kSpatialDim>::prepack(
-        weight.value(),
-        bias,
+        std::move(weight.value()),
+        std::move(bias),
         stride,
         padding,
         output_padding,
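Two of the recurring patterns in this file, in one sketch (the State alias and helpers are illustrative stand-ins, not the PyTorch types): a read-only argument passed by const reference instead of by value, and a structured binding taken by reference so members of locally owned state can be moved out.

#include <string>
#include <tuple>
#include <utility>
#include <vector>

using State = std::tuple<int, std::vector<int>, std::vector<std::string>>;

// Pass by const reference: a by-value `State` parameter would copy both
// vectors on every call even though the function only reads them.
int version_of(const State& state) {
  return std::get<0>(state);
}

std::vector<std::string> unpack(State state) {  // state is owned here
  // `auto [v, cfg, tensors] = state;` would copy all three members out;
  // `auto&` binds references into state, so tensors can be moved from.
  auto& [v, cfg, tensors] = state;
  (void)v;
  (void)cfg;
  return std::move(tensors);
}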

aten/src/ATen/native/quantized/cpu/fused_obs_fake_quant.cpp (+2 -2)

@@ -235,7 +235,7 @@ at::Tensor fused_moving_avg_obs_fake_quant(
   if (self.sym_numel() == 0) {
     return self.clone();
   }
-  const auto res = at::_fused_moving_avg_obs_fq_helper(
+  auto res = at::_fused_moving_avg_obs_fq_helper(
       self,
       observer_on,
       fake_quant_on,
@@ -249,6 +249,6 @@ at::Tensor fused_moving_avg_obs_fake_quant(
       ch_axis,
       per_row_fake_quant,
       symmetric_quant);
-  return std::get<0>(res);
+  return std::get<0>(std::move(res));
 }
 } // namespace at::native
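The same two-part fix as in the FakeQuant files: the helper's result is no longer const (so its elements are movable), and the tuple is moved into std::get so the output is moved out rather than copied.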

torch/csrc/jit/python/pybind_utils.h (+14 -2)

@@ -79,6 +79,10 @@ class ToIValueAllowNumbersAsTensors {
 // type of its field 'torch::jit::PythonFunctionGuard::func_'
 struct VISIBILITY_HIDDEN PythonFunctionGuard {
   explicit PythonFunctionGuard(py::function func) : func_(std::move(func)) {}
+  PythonFunctionGuard(const PythonFunctionGuard&) = delete;
+  PythonFunctionGuard(PythonFunctionGuard&&) = delete;
+  PythonFunctionGuard& operator=(const PythonFunctionGuard&) = delete;
+  PythonFunctionGuard& operator=(PythonFunctionGuard&&) = delete;

   ~PythonFunctionGuard() {
     pybind11::gil_scoped_acquire ag;
@@ -112,6 +116,9 @@ struct VISIBILITY_HIDDEN PythonFutureWrapper

   explicit PythonFutureWrapper(const PythonFutureWrapper&) = delete;
   PythonFutureWrapper& operator=(const PythonFutureWrapper&) = delete;
+  PythonFutureWrapper(PythonFutureWrapper&&) = default;
+  PythonFutureWrapper& operator=(PythonFutureWrapper&&) = default;
+  ~PythonFutureWrapper() = default;

   bool done() {
     return fut->completed();
@@ -275,6 +282,9 @@ struct VISIBILITY_HIDDEN PythonAwaitWrapper

   explicit PythonAwaitWrapper(const PythonAwaitWrapper&) = delete;
   PythonAwaitWrapper& operator=(const PythonAwaitWrapper&) = delete;
+  PythonAwaitWrapper(PythonAwaitWrapper&&) = default;
+  PythonAwaitWrapper& operator=(PythonAwaitWrapper&&) = default;
+  ~PythonAwaitWrapper() = default;

   py::object wait() {
     py::gil_scoped_acquire acquire;
@@ -390,6 +400,7 @@ inline InferredType tryToInferType(py::handle input) {
     return InferredType(FloatType::get());
   } else if (PyComplex_CheckExact(input.ptr())) {
     return InferredType(ComplexType::get());
+    // NOLINTNEXTLINE(bugprone-branch-clone)
   } else if (py::isinstance<py::bytes>(input)) {
     // NOTE: We may need a ByteType in the future
     return InferredType(StringType::get());
@@ -855,9 +866,9 @@ inline py::object getScriptedClassOrError(const c10::NamedTypePtr& classType) {

 struct VISIBILITY_HIDDEN tuple_slice {
   /*implicit*/ tuple_slice(py::tuple tup_)
-      : tup(std::move(tup_)), b(0), e(tup.size()) {}
+      : tup(std::move(tup_)), b(0), e(static_cast<int64_t>(tup.size())) {}
   tuple_slice(py::tuple tup_, int64_t b_)
-      : tup(std::move(tup_)), b(b_), e(tup.size()) {}
+      : tup(std::move(tup_)), b(b_), e(static_cast<int64_t>(tup.size())) {}
   tuple_slice(py::tuple tup_, int64_t b_, int64_t e_)
       : tup(std::move(tup_)), b(b_), e(e_) {}
   py::detail::tuple_iterator begin() const {
@@ -1064,6 +1075,7 @@ inline Stack createStackForSchema(
   return stack;
 }

+// NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
 inline py::object createPyObjectForStack(Stack&& stack) {
   if (stack.empty()) {
     return py::none();
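Most of this file's additions answer clang-tidy's cppcoreguidelines-special-member-functions check: once a type user-declares any special member (PythonFunctionGuard's destructor reacquires the GIL before releasing func_), the remaining copy and move operations should be explicitly defaulted or deleted rather than left implicit. A minimal sketch of the rule with a stand-in guard type:

#include <cstdio>

// A user-declared destructor suppresses the implicit move operations and
// deprecates the implicit copies, so clang-tidy asks for all five special
// members to be spelled out. Deleting them is the safe default for a
// guard-style type that must not be duplicated or transplanted.
class FdGuard {
 public:
  explicit FdGuard(int fd) : fd_(fd) {}
  FdGuard(const FdGuard&) = delete;
  FdGuard(FdGuard&&) = delete;
  FdGuard& operator=(const FdGuard&) = delete;
  FdGuard& operator=(FdGuard&&) = delete;
  ~FdGuard() { std::printf("closing fd %d\n", fd_); }

 private:
  int fd_;
};

The static_cast<int64_t>(tup.size()) changes in the same file silence the sign-conversion warning for size_t-to-int64_t, and the two NOLINTNEXTLINE comments suppress checks (branch-clone, unmoved rvalue parameter) where the flagged code is intentional.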

torch/csrc/profiler/util.cpp (+1 -1)

@@ -541,7 +541,7 @@ std::unordered_map<std::string, std::string> saveNcclMeta(
     }
   }
   if (config.introspectOutputs) {
-    const auto outputs = fn.outputs();
+    const auto& outputs = fn.outputs();
     auto num_outputs = fn.num_outputs();
     if (checkFunctionOutputsForLogging(fn)) {
       // need to account for Stack mode where the outputs are at the end.
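A sketch of why the added & matters: when a getter returns a reference, binding the result to `const auto` deduces a value type and copies the whole container, whereas `const auto&` binds directly to the referenced object (the Record type here is illustrative, not the profiler API):

#include <string>
#include <vector>

class Record {
 public:
  const std::vector<std::string>& outputs() const { return outputs_; }

 private:
  std::vector<std::string> outputs_{"out0", "out1"};
};

void inspect(const Record& fn) {
  const auto copied = fn.outputs();   // deduces std::vector<...>: full copy
  const auto& viewed = fn.outputs();  // binds to the member: no copy
  (void)copied;
  (void)viewed;
}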
