diff --git a/.clang-tidy b/.clang-tidy index ddcbd2e45648e2..eea5c737d0c496 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -42,7 +42,6 @@ modernize-*, -modernize-use-trailing-return-type, -modernize-use-nodiscard, performance-*, --performance-unnecessary-value-param, readability-container-size-empty, ' HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/(?!deploy/interpreter/cpython)).*$' diff --git a/aten/src/ATen/core/IListRef_test.cpp b/aten/src/ATen/core/IListRef_test.cpp index d86b9c4d2f0cc0..2875d05c392b2d 100644 --- a/aten/src/ATen/core/IListRef_test.cpp +++ b/aten/src/ATen/core/IListRef_test.cpp @@ -30,15 +30,10 @@ static std::vector<optional<at::Tensor>> get_unboxed_opt_tensor_vector() { static std::vector<at::Tensor> tensors; std::vector<optional<at::Tensor>> optional_tensors; constexpr size_t SIZE = 5; - for (size_t i = 0; i < SIZE * 2; i++) { - if (i % 2 == 0) { - if (tensors.size() + 1 < i / 2) { - tensors.push_back(at::empty({0})); - } - optional_tensors.emplace_back(tensors[i / 2]); - } else { - optional_tensors.emplace_back(); - } + for (size_t i = 0; i < SIZE; i++) { + tensors.push_back(at::empty({0})); + optional_tensors.emplace_back(tensors[i]); + optional_tensors.emplace_back(); } return optional_tensors; } diff --git a/aten/src/ATen/core/List.h b/aten/src/ATen/core/List.h index 011ecf160b8172..d1271dadec2ac2 100644 --- a/aten/src/ATen/core/List.h +++ b/aten/src/ATen/core/List.h @@ -78,7 +78,7 @@ class ListElementReference final { ListElementReference& operator=(const T& new_value) &&; // assigning another ref to this assigns the underlying value - ListElementReference& operator=(ListElementReference&& rhs) &&; + ListElementReference& operator=(ListElementReference&& rhs) && noexcept; const IValue& get() const& { return *iterator_; @@ -124,7 +124,7 @@ class ListIterator final { ListIterator(const ListIterator&) = default; ListIterator(ListIterator&&) noexcept = default; ListIterator& operator=(const ListIterator&) = default; - ListIterator& operator=(ListIterator&&) = default; + ListIterator& 
operator=(ListIterator&&) noexcept = default; ListIterator& operator++() { ++iterator_; diff --git a/aten/src/ATen/core/List_inl.h b/aten/src/ATen/core/List_inl.h index 97bafd95a0672d..110ef5747e5b0d 100644 --- a/aten/src/ATen/core/List_inl.h +++ b/aten/src/ATen/core/List_inl.h @@ -140,7 +140,7 @@ ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=( } template <class T, class Iterator> -ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(ListElementReference&& rhs) && { +ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(ListElementReference&& rhs) && noexcept { *iterator_ = *rhs.iterator_; return *this; } diff --git a/aten/src/ATen/core/ivalue.cpp b/aten/src/ATen/core/ivalue.cpp index 9c4fba46841502..d2936c5d73eed1 100644 --- a/aten/src/ATen/core/ivalue.cpp +++ b/aten/src/ATen/core/ivalue.cpp @@ -488,9 +488,9 @@ template <class T> std::ostream& printList( std::ostream& out, const T& list, - const std::string start, - const std::string finish, - IValueFormatter formatter) { + const std::string& start, + const std::string& finish, + const IValueFormatter& formatter) { out << start; for (const auto i : c10::irange(list.size())) { if (i > 0) { @@ -506,16 +506,16 @@ std::ostream& printList( std::ostream& printMaybeAnnotatedList( std::ostream& out, const IValue& the_list, - IValueFormatter formatter) { + const IValueFormatter& formatter) { auto list_elem_type = the_list.type()->containedType(0); if (the_list.toListRef().empty() || !elementTypeCanBeInferredFromMembers(list_elem_type)) { out << "annotate(" << the_list.type()->annotation_str() << ", "; - printList(out, the_list.toListRef(), "[", "]", std::move(formatter)); + printList(out, the_list.toListRef(), "[", "]", formatter); out << ")"; return out; } else { - return printList(out, the_list.toListRef(), "[", "]", std::move(formatter)); + return printList(out, the_list.toListRef(), "[", "]", formatter); } } @@ -523,7 +523,7 @@ template <typename Dict> std::ostream& printDict( std::ostream& out, const Dict& v, - IValueFormatter formatter) { + const 
IValueFormatter& formatter) { out << "{"; bool first = true; @@ -547,14 +547,14 @@ std::ostream& printDict( static std::ostream& printMaybeAnnotatedDict( std::ostream& out, const IValue& the_dict, - IValueFormatter formatter) { + const IValueFormatter& formatter) { auto value_type = the_dict.type()->castRaw<DictType>()->getValueType(); if (the_dict.toGenericDict().empty() || !elementTypeCanBeInferredFromMembers(value_type)) { out << "annotate(" << the_dict.type()->annotation_str() << ","; - printDict(out, the_dict.toGenericDict(), std::move(formatter)) << ")"; + printDict(out, the_dict.toGenericDict(), formatter) << ")"; } else { - return printDict(out, the_dict.toGenericDict(), std::move(formatter)); + return printDict(out, the_dict.toGenericDict(), formatter); } return out; } diff --git a/aten/src/ATen/core/ivalue_inl.h b/aten/src/ATen/core/ivalue_inl.h index 3b57b5bfd781a6..80faacc8c8b1b1 100644 --- a/aten/src/ATen/core/ivalue_inl.h +++ b/aten/src/ATen/core/ivalue_inl.h @@ -1919,7 +1919,7 @@ template < std::is_lvalue_reference<Args>..., guts::negation<std::is_constructible<IValue, Args>>...>::value, std::nullptr_t> = nullptr> -std::tuple<Args...> generic_to(IValue ivalue, _fake_type<std::tuple<Args...>>) { +std::tuple<Args...> generic_to(const IValue& ivalue, _fake_type<std::tuple<Args...>>) { const auto& vals = ivalue.toTupleRef().elements(); TORCH_CHECK(vals.size() == sizeof...(Args)); return detail::generic_to_tuple_impl<std::tuple<Args...>>(vals, Indices{}); diff --git a/c10/core/StorageImpl.h b/c10/core/StorageImpl.h index c42ddb905fcf25..c3471b8bcd0c12 100644 --- a/c10/core/StorageImpl.h +++ b/c10/core/StorageImpl.h @@ -55,7 +55,7 @@ struct C10_API StorageImpl : public c10::intrusive_ptr_target { StorageImpl( use_byte_size_t /*use_byte_size*/, - SymInt size_bytes, + const SymInt& size_bytes, at::Allocator* allocator, bool resizable) : StorageImpl( diff --git a/c10/core/TensorImpl.h b/c10/core/TensorImpl.h index 4db02bf0c2b928..8c0cdd489da59b 100644 --- a/c10/core/TensorImpl.h +++ b/c10/core/TensorImpl.h @@ -260,10 +260,11 @@ struct C10_API ExtraMeta { std::unique_ptr<c10::SymbolicShapeMeta> 
symbolic_shape_meta, std::unique_ptr<c10::NamedTensorMetaInterface> named_tensor_meta, intrusive_ptr<c10::BackendMeta> backend_meta, - c10::optional<std::string> custom_data_ptr_error_msg_ = c10::nullopt) + c10::optional<std::string> custom_data_ptr_error_msg = c10::nullopt) : symbolic_shape_meta_(std::move(symbolic_shape_meta)), named_tensor_meta_(std::move(named_tensor_meta)), - backend_meta_(std::move(backend_meta)) {} + backend_meta_(std::move(backend_meta)), + custom_data_ptr_error_msg_(std::move(custom_data_ptr_error_msg)) {} std::unique_ptr<ExtraMeta> clone() const { return std::make_unique<ExtraMeta>(*this); } diff --git a/functorch/csrc/dim/dim.cpp b/functorch/csrc/dim/dim.cpp index 62dfcb599eee08..af6a1aee158f86 100644 --- a/functorch/csrc/dim/dim.cpp +++ b/functorch/csrc/dim/dim.cpp @@ -880,7 +880,7 @@ mpy::obj<Tensor> Tensor::create_delayed(mpy::object op, mpy::vector_args args, S mpy::obj<Tensor> self = Tensor::create(); self->capture_levels(levels); self->has_device_ = has_device; - self->delayed_ = std::make_unique<DelayedOperator>(op, args); + self->delayed_ = std::make_unique<DelayedOperator>(std::move(op), args); return self; } @@ -1082,7 +1082,7 @@ PyObject* py_tree_flatten(PyObject *self, -mpy::object tree_map(Arena& A, std::function<mpy::object(mpy::handle)> fn, mpy::handle agg) { +mpy::object tree_map(Arena& A, const std::function<mpy::object(mpy::handle)>& fn, mpy::handle agg) { Slice<mpy::handle> elements; auto unflatten = tree_flatten(A, agg, elements); for (auto i : elements.enumerate()) { diff --git a/torch/csrc/jit/api/module.cpp b/torch/csrc/jit/api/module.cpp index 319ff292163774..4686014394b574 100644 --- a/torch/csrc/jit/api/module.cpp +++ b/torch/csrc/jit/api/module.cpp @@ -471,7 +471,7 @@ IValue Module::create_class(const c10::QualifiedName& name, Stack stack) const { Module freeze( const Module& module, - c10::optional<std::vector<std::string>> preserved_attrs, + const c10::optional<std::vector<std::string>>& preserved_attrs, bool optimize_numerics) { TORCH_CHECK( !module.hasattr("training") || !module.is_training(), diff --git a/torch/csrc/jit/api/module.h b/torch/csrc/jit/api/module.h index 1e5c408602a504..6224ae7fc9ec1c 100644 --- a/torch/csrc/jit/api/module.h +++ 
b/torch/csrc/jit/api/module.h @@ -90,6 +90,8 @@ struct TORCH_API Module : public Object { explicit Module(c10::QualifiedName class_name); Module(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type); Module() = default; + Module(const Module&) = default; + Module& operator=(const Module&) = default; Module( c10::QualifiedName, std::shared_ptr<CompilationUnit> cu, @@ -268,7 +270,7 @@ struct TORCH_API Module : public Object { } void set_delete_memory(std::shared_ptr<char> delete_mem) { - mem_to_delete_ = delete_mem; + mem_to_delete_ = std::move(delete_mem); } // A set of functions to maintain input shapes through torch.jit.save and @@ -279,11 +281,11 @@ struct TORCH_API Module : public Object { return; } auto c10_inputs = c10::impl::GenericList(AnyType::get()); - for (const IValue& value : inputs) { // Not checking whether this is traceable type as that is already checked // higher up in the stack and changing that would require a larger // restructuring. - c10_inputs.push_back(value); + for (IValue& value : inputs) { // Not checking whether this is traceable type as that is already checked // higher up in the stack and changing that would require a larger // restructuring. + c10_inputs.emplace_back(std::move(value)); } traced_inputs_.insert_or_assign(func_name, c10_inputs); } @@ -326,7 +328,8 @@ struct TORCH_API Module : public Object { // details. TORCH_API Module freeze( const Module& module, - c10::optional<std::vector<std::string>> preserved_attrs = c10::nullopt, + const c10::optional<std::vector<std::string>>& preserved_attrs = + c10::nullopt, bool optimize_numerics = true); // C++ equivalent api of `torch.jit.optimize_for_inference`. See documentation @@ -400,7 +403,7 @@ struct slot_iterator_impl { // slots of root bool return_module) // if true include root itself as the first thing // visited (used in modules()) - : cursors_({SlotCursor{root, return_module ? -1 : 0}}), + : cursors_({SlotCursor{std::move(root), return_module ? 
-1 : 0}}), recurse_(recurse) { // advance iterator to first valid element (or the end, if empty) while_not_valid_next(); @@ -543,7 +546,7 @@ struct slot_list_impl { } slot_list_impl(Module module, bool recurse, bool return_module) - : module_(module), + : module_(std::move(module)), recurse_(recurse), return_module_(return_module), size_(c10::nullopt) { diff --git a/torch/csrc/jit/api/object.h b/torch/csrc/jit/api/object.h index b32ad1f1d35d0d..15da200de26065 100644 --- a/torch/csrc/jit/api/object.h +++ b/torch/csrc/jit/api/object.h @@ -22,10 +22,10 @@ class ObjectAttributeError : public std::runtime_error { ObjectAttributeError(const std::string& what) : std::runtime_error(what) {} }; -// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) struct TORCH_API Object { Object() = default; - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Object(const Object&) = default; + Object& operator=(const Object&) = default; Object(ObjectPtr _ivalue) : _ivalue_(std::move(_ivalue)) {} Object(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type); Object( @@ -146,7 +146,9 @@ struct TORCH_API Object { setter = Method(_ivalue(), prop.setter); } return Property{ - prop.name, Method(_ivalue(), prop.getter), std::move(setter)}; + std::move(prop.name), + Method(_ivalue(), prop.getter), + std::move(setter)}; }); } diff --git a/torch/csrc/profiler/collection.cpp b/torch/csrc/profiler/collection.cpp index 3e6c06a7c26163..179ee69da86362 100644 --- a/torch/csrc/profiler/collection.cpp +++ b/torch/csrc/profiler/collection.cpp @@ -465,7 +465,7 @@ void materialize_vulkan( std::vector<std::shared_ptr<Result>>& out, AppendOnlyList<ExtraFields<EventType::Vulkan>::raw_event_t, BlockSize>& raw_events, - const std::function<time_t(approx_time_t)> time_converter, + const std::function<time_t(approx_time_t)>& time_converter, const uint64_t tid, const kineto::DeviceAndResource& kineto_info) { for (const auto& i : raw_events) { diff --git a/torch/csrc/utils/init.cpp b/torch/csrc/utils/init.cpp index d1c94b6629cd51..0f7ab000b467c4 100644 --- a/torch/csrc/utils/init.cpp +++ 
b/torch/csrc/utils/init.cpp @@ -40,13 +40,15 @@ void initThroughputBenchmarkBindings(PyObject* module) { // the GIL or not further down in the stack return self.runOnce(std::move(args), std::move(kwargs)); }) - .def("benchmark", [](ThroughputBenchmark& self, BenchmarkConfig config) { - // The benchmark always runs without the GIL. GIL will be used where - // needed. This will happen only in the nn.Module mode when manipulating - // inputs and running actual inference - pybind11::gil_scoped_release no_gil_guard; - return self.benchmark(config); - }); + .def( + "benchmark", + [](ThroughputBenchmark& self, const BenchmarkConfig& config) { + // The benchmark always runs without the GIL. GIL will be used where + // needed. This will happen only in the nn.Module mode when + // manipulating inputs and running actual inference + pybind11::gil_scoped_release no_gil_guard; + return self.benchmark(config); + }); } } // namespace throughput_benchmark diff --git a/torch/csrc/utils/throughput_benchmark.cpp b/torch/csrc/utils/throughput_benchmark.cpp index dbd89b9f5368ec..93b95024810b9f 100644 --- a/torch/csrc/utils/throughput_benchmark.cpp +++ b/torch/csrc/utils/throughput_benchmark.cpp @@ -39,7 +39,7 @@ py::object ThroughputBenchmark::runOnce(py::args&& args, py::kwargs&& kwargs) { } } -ThroughputBenchmark::ThroughputBenchmark(jit::Module script_module) +ThroughputBenchmark::ThroughputBenchmark(const jit::Module& script_module) : script_module_(script_module) {} ThroughputBenchmark::ThroughputBenchmark(py::object module) diff --git a/torch/csrc/utils/throughput_benchmark.h b/torch/csrc/utils/throughput_benchmark.h index a01d9e63c7c5b9..e540022e0f50ff 100644 --- a/torch/csrc/utils/throughput_benchmark.h +++ b/torch/csrc/utils/throughput_benchmark.h @@ -171,7 +171,7 @@ void ModuleBenchmark::addInput(py::args&& args, py::kwargs&& kwargs); */ class C10_HIDDEN ThroughputBenchmark { public: - explicit ThroughputBenchmark(jit::Module module); + explicit ThroughputBenchmark(const 
jit::Module& module); explicit ThroughputBenchmark(py::object module); // Add one more input example. This input example should be in the exact