Skip to content

Commit 79e8a69

Browse files
cyyever authored and pytorchmergebot committed
Enable move warnings for torch targets (pytorch#149923)
This PR enables more move warnings for torch targets and fixes some code. Pull Request resolved: pytorch#149923 Approved by: https://github.com/malfet
1 parent de68ddc commit 79e8a69

File tree

6 files changed

+40
-3
lines changed

6 files changed

+40
-3
lines changed

CMakeLists.txt

-1
Original file line number | Diff line number | Diff line change
@@ -1084,7 +1084,6 @@ if(NOT MSVC)
10841084
if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 13)
10851085
append_cxx_flag_if_supported("-Wno-dangling-reference" CMAKE_CXX_FLAGS)
10861086
append_cxx_flag_if_supported("-Wno-error=dangling-reference" CMAKE_CXX_FLAGS)
1087-
append_cxx_flag_if_supported("-Wno-error=redundant-move" CMAKE_CXX_FLAGS)
10881087
endif()
10891088
else()
10901089
# Define export functions for AOTI.

c10/macros/Macros.h

+10
Original file line number | Diff line number | Diff line change
@@ -508,4 +508,14 @@ __host__ __device__
508508

509509
#endif
510510

511+
// This macro is used to find older C++ compilers
512+
// that don't support move optimization for return values.
513+
514+
#if (defined(__GNUC__) && __GNUC__ < 13) || \
515+
(defined(__clang_major__) && __clang_major__ < 13)
516+
#define C10_RETURN_MOVE_IF_OLD_COMPILER 1
517+
#else
518+
#define C10_RETURN_MOVE_IF_OLD_COMPILER 0
519+
#endif
520+
511521
#endif // C10_MACROS_MACROS_H_

cmake/public/utils.cmake

+2-2
Original file line number | Diff line number | Diff line change
@@ -393,10 +393,10 @@ function(torch_compile_options libname)
393393
list(APPEND private_compile_options -Wunused-function)
394394
list(APPEND private_compile_options -Wunused-variable)
395395
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
396-
list(APPEND private_compile_options -Wunused-but-set-variable)
396+
list(APPEND private_compile_options -Wunused-but-set-variable -Wredundant-move)
397397
endif()
398398
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
399-
list(APPEND private_compile_options -Wunused-private-field -Wextra-semi -Wno-error=extra-semi)
399+
list(APPEND private_compile_options -Wunused-private-field -Wextra-semi -Wno-error=extra-semi -Wmove)
400400
else()
401401
list(APPEND private_compile_options
402402
# Considered to be flaky. See the discussion at

torch/csrc/jit/python/pybind_utils.cpp

+12
Original file line number | Diff line number | Diff line change
@@ -641,7 +641,11 @@ py::object toPyObject(IValue ivalue) {
641641
for (const auto i : c10::irange(list.size())) {
642642
t[i] = toPyObject(IValue{list.get(i)});
643643
}
644+
#if C10_RETURN_MOVE_IF_OLD_COMPILER
644645
return std::move(t);
646+
#else
647+
return t;
648+
#endif
645649
} else if (ivalue.isTuple()) {
646650
auto tuple = std::move(ivalue).toTuple();
647651
const auto& elements = tuple->elements();
@@ -676,7 +680,11 @@ py::object toPyObject(IValue ivalue) {
676680
.attr("_create_named_tuple")(
677681
t, unqualName, fieldNames, py::make_tuple(defaults));
678682
} else {
683+
#if C10_RETURN_MOVE_IF_OLD_COMPILER
679684
return std::move(t);
685+
#else
686+
return t;
687+
#endif
680688
}
681689
} else if (ivalue.isDevice()) {
682690
return py::cast(std::move(ivalue).toDevice());
@@ -689,7 +697,11 @@ py::object toPyObject(IValue ivalue) {
689697
py_dict[toPyObject(IValue{pair.key()})] =
690698
toPyObject(IValue{pair.value()});
691699
}
700+
#if C10_RETURN_MOVE_IF_OLD_COMPILER
692701
return std::move(py_dict);
702+
#else
703+
return py_dict;
704+
#endif
693705
} else if (ivalue.isRRef()) {
694706
#ifdef USE_RPC
695707
auto RRefPtr =

torch/csrc/jit/python/python_arg_flatten.cpp

+8
Original file line number | Diff line number | Diff line change
@@ -117,7 +117,11 @@ py::object cast_sequence(std::vector<py::object> objs) {
117117
for (const auto i : c10::irange(num_objs)) {
118118
sequence[i] = std::move(objs[i]);
119119
}
120+
#if C10_RETURN_MOVE_IF_OLD_COMPILER
120121
return std::move(sequence);
122+
#else
123+
return sequence;
124+
#endif
121125
}
122126

123127
py::object cast_dict(std::vector<py::object> objs) {
@@ -127,7 +131,11 @@ py::object cast_dict(std::vector<py::object> objs) {
127131
py::tuple obj = py::reinterpret_borrow<py::tuple>(objs[i]);
128132
sequence[obj[0]] = obj[1];
129133
}
134+
#if C10_RETURN_MOVE_IF_OLD_COMPILER
130135
return std::move(sequence);
136+
#else
137+
return sequence;
138+
#endif
131139
}
132140

133141
py::object unflatten_rec(

torch/csrc/jit/tensorexpr/kernel.cpp

+8
Original file line number | Diff line number | Diff line change
@@ -483,7 +483,11 @@ ExprHandle TensorExprKernel::getVarForShape(const c10::ShapeSymbol& ss) {
483483
if (it == shapeSymbolToVar_.end()) {
484484
VarHandle var("ss" + std::to_string(-value), kLong);
485485
shapeSymbolToVar_.emplace(value, var);
486+
#if C10_RETURN_MOVE_IF_OLD_COMPILER
486487
return std::move(var);
488+
#else
489+
return var;
490+
#endif
487491
}
488492
return it->second;
489493
}
@@ -1020,7 +1024,11 @@ ExprHandle TensorExprKernel::getStrideArg(
10201024
kLong);
10211025
strideArgToVar_[std::pair<size_t, size_t>(
10221026
tensor_input_index, stride_index)] = var;
1027+
#if C10_RETURN_MOVE_IF_OLD_COMPILER
10231028
return std::move(var);
1029+
#else
1030+
return var;
1031+
#endif
10241032
}
10251033
return it->second;
10261034
}

0 commit comments

Comments (0)