Fix: Apply clang-tidy to c10/core (pytorch#90699)
Enables clang-tidy on 'c10/core'. Requested by @ezyang to extend clang-tidy coverage for better performance linting.

Pull Request resolved: pytorch#90699
Approved by: https://github.com/ezyang
Skylion007 authored and pytorchmergebot committed Dec 13, 2022
1 parent ff1bbc2 commit 96a36c9
Showing 6 changed files with 9 additions and 7 deletions.
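The commit message above mentions extending coverage for better performance linting. As a point of reference, here is a minimal, hypothetical C++ sketch (not taken from this diff) of the kind of pattern the performance-* clang-tidy checks flag once a directory is covered, such as performance-for-range-copy:

#include <cstddef>
#include <string>
#include <vector>

// Iterating a container of strings by value copies every element;
// a const reference avoids the per-element copies.
std::size_t total_length(const std::vector<std::string>& names) {
  std::size_t total = 0;
  // for (auto name : names) {      // would be flagged: copies each string
  for (const auto& name : names) {  // no per-element copy
    total += name.size();
  }
  return total;
}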
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -38,7 +38,7 @@ performance-*,
 -performance-noexcept-move-constructor,
 -performance-unnecessary-value-param,
 '
-HeaderFilterRegex: 'torch/csrc/(?!deploy/interpreter/cpython).*'
+HeaderFilterRegex: '(c10/(?!test)/|torch/csrc/(?!deploy/interpreter/cpython)).*'
 AnalyzeTemporaryDtors: false
 WarningsAsErrors: '*'
 CheckOptions:
3 changes: 3 additions & 0 deletions .lintrunner.toml
@@ -209,6 +209,8 @@ command = [
 [[linter]]
 code = 'CLANGTIDY'
 include_patterns = [
+    'c10/core/*.cpp',
+    'c10/core/**/*.cpp',
     'torch/csrc/fx/**/*.cpp',
     'torch/csrc/generic/**/*.cpp',
     'torch/csrc/onnx/**/*.cpp',
@@ -222,6 +224,7 @@ exclude_patterns = [
     # in a follow up PR.
     # /torch/csrc/generic/*.cpp is excluded because those files aren't actually built.
     # that are not easily converted to accepted c++
+    'c10/test/**/*.cpp',
     'torch/csrc/jit/passes/onnx/helper.cpp',
     'torch/csrc/jit/passes/onnx/shape_type_inference.cpp',
     'torch/csrc/jit/serialization/onnx.cpp',
2 changes: 1 addition & 1 deletion c10/core/DeviceType.cpp
@@ -50,7 +50,7 @@ std::string DeviceTypeName(DeviceType d, bool lower_case) {
     case DeviceType::IPU:
       return lower_case ? "ipu" : "IPU";
     case DeviceType::PrivateUse1:
-      return get_privateuse1_backend(/*lowercase=*/lower_case);
+      return get_privateuse1_backend(/*lower_case=*/lower_case);
     default:
       TORCH_CHECK(
           false,
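The comment rename above presumably satisfies clang-tidy's argument-comment convention (as enforced by the bugprone-argument-comment check): an inline /*name=*/ comment must match the declared parameter name. A minimal, hypothetical sketch of the pattern, not PyTorch code:

#include <string>

// Hypothetical stand-in for get_privateuse1_backend.
std::string backend_name(bool lower_case) {
  return lower_case ? "privateuse1" : "PRIVATEUSE1";
}

std::string demo() {
  // The argument comment matches the parameter name 'lower_case':
  return backend_name(/*lower_case=*/true);
  // backend_name(/*lowercase=*/true) would be reported as a mismatched
  // argument comment.
}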
4 changes: 2 additions & 2 deletions c10/core/TensorImpl.cpp
@@ -970,8 +970,8 @@ void TensorImpl::ShareExternalPointer(
 void clone_symvec(SymIntArrayRef src, SymDimVector& dst) {
   dst.clear();
   dst.reserve(src.size());
-  for (size_t i = 0; i < src.size(); i++) {
-    dst.emplace_back(src[i].clone());
+  for (const auto& i : src) {
+    dst.emplace_back(i.clone());
   }
 }

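The loop rewrite above is the standard index-loop to range-for conversion (the shape of a modernize-loop-convert fix in clang-tidy terms). A self-contained sketch with a hypothetical element type in place of c10::SymInt:

#include <vector>

// Hypothetical element type standing in for c10::SymInt.
struct Sym {
  int v = 0;
  Sym clone() const { return Sym{v}; }
};

// Same shape as clone_symvec above: clear, reserve, then copy via clone().
void clone_all(const std::vector<Sym>& src, std::vector<Sym>& dst) {
  dst.clear();
  dst.reserve(src.size());
  // Index-based form this replaces:
  //   for (size_t i = 0; i < src.size(); i++) dst.emplace_back(src[i].clone());
  for (const auto& s : src) {
    dst.emplace_back(s.clone());
  }
}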
2 changes: 1 addition & 1 deletion c10/core/impl/SizesAndStrides.cpp
@@ -27,8 +27,8 @@ void SizesAndStrides::resizeSlowPath(
   if (isInline()) {
     // CANNOT USE allocateOutOfLineStorage(newSize) HERE! WOULD
     // OVERWRITE inlineStorage_!
-    // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
     int64_t* tempStorage =
+        // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
         static_cast<int64_t*>(malloc(storageBytes(newSize)));
     TORCH_CHECK(
         tempStorage,
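The comment move above matters because NOLINTNEXTLINE only suppresses diagnostics on the single line that immediately follows it; when a statement is split across lines, the suppression has to sit directly above the line containing the flagged call. A minimal, hypothetical sketch:

#include <cstddef>
#include <cstdlib>

// Hypothetical allocator; the caller is responsible for calling std::free.
int* alloc_ints(std::size_t n) {
  int* p =
      // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
      static_cast<int*>(std::malloc(n * sizeof(int)));
  return p;
}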
3 changes: 1 addition & 2 deletions c10/core/impl/TorchDispatchModeTLS.cpp
@@ -21,8 +21,7 @@ const std::shared_ptr<SafePyObject> TorchDispatchModeTLS::pop_stack() {
   TORCH_CHECK(
       torchDispatchModeState.stack_.size() > 0,
       "trying to pop from empty mode stack");
-  const std::shared_ptr<SafePyObject> out =
-      torchDispatchModeState.stack_.back();
+  std::shared_ptr<SafePyObject> out = torchDispatchModeState.stack_.back();
   torchDispatchModeState.stack_.pop_back();

   if (torchDispatchModeState.stack_.size() == 0) {
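Dropping const on the local above is consistent with clang-tidy's performance guidance on const-qualified locals: a const object cannot be moved from, so handing it off degrades to a copy, which for a shared_ptr means an extra atomic reference-count bump. A minimal sketch with hypothetical names (the exact check that fired is not visible in this diff):

#include <cassert>
#include <memory>
#include <string>
#include <utility>

int main() {
  const std::shared_ptr<std::string> kept = std::make_shared<std::string>("mode");
  std::shared_ptr<std::string> a = std::move(kept);  // const source: silently copies
  assert(kept != nullptr);                           // 'kept' still owns the object

  std::shared_ptr<std::string> src = std::make_shared<std::string>("mode");
  std::shared_ptr<std::string> b = std::move(src);   // non-const: a real move
  assert(src == nullptr);                            // 'src' was emptied
  return 0;
}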
