[11/N] Fix extra warnings brought by clang-tidy-17 (pytorch#139599)
Follows pytorch#139385
Pull Request resolved: pytorch#139599
Approved by: https://github.com/sraikund16
cyyever authored and pytorchmergebot committed Nov 4, 2024
1 parent 3f248a5 commit 64d9ee8
Showing 26 changed files with 54 additions and 34 deletions.
3 changes: 2 additions & 1 deletion .clang-tidy
@@ -35,13 +35,14 @@ cppcoreguidelines-*,
hicpp-exception-baseclass,
hicpp-avoid-goto,
misc-*,
-misc-confusable-identifiers,
-misc-const-correctness,
-misc-include-cleaner,
-misc-use-anonymous-namespace,
-misc-unused-parameters,
-misc-no-recursion,
-misc-non-private-member-variables-in-classes,
-misc-confusable-identifiers,
-misc-unused-using-decls,
modernize-*,
-modernize-macro-to-enum,
-modernize-return-braced-init-list,
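An aside on one of the checks named in this list (not part of the diff): misc-unused-using-decls flags using-declarations that are never referenced in the translation unit. A minimal illustration:

#include <vector>

using std::vector;  // flagged by misc-unused-using-decls: 'vector' is never used below

int sum_first_two(const int* xs) {
  return xs[0] + xs[1];
}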
3 changes: 3 additions & 0 deletions .lintrunner.toml
@@ -227,6 +227,9 @@ exclude_patterns = [
'**/generated/**',
'**/*pb.h',
'**/*inl.h',
'aten/src/ATen/cpu/FlushDenormal.cpp',
'aten/src/ATen/cpu/Utils.cpp',
'aten/src/ATen/cpu/vml.h',
'aten/src/ATen/CPUFixedAllocator.h',
'aten/src/ATen/Parallel*.h',
'c10/xpu/**/*.h',
1 change: 1 addition & 0 deletions aten/src/ATen/core/CachingHostAllocator.h
@@ -40,6 +40,7 @@ struct alignas(64) FreeBlockList {

namespace {
// Max cached block sizes: (1 << MAX_SIZE_INDEX) bytes
// NOLINTNEXTLINE(misc-definitions-in-headers)
constexpr size_t MAX_SIZE_INDEX = 64;
}

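On the suppression above: misc-definitions-in-headers warns about entities defined (not just declared) in a header, since every includer emits a definition and the ODR can be violated. A constexpr variable in an unnamed namespace has internal linkage, so each translation unit legitimately gets its own copy; the commit therefore silences the check rather than restructuring the code. A hedged sketch with made-up names:

// sketch of a header, illustrative only
#include <cstddef>

int global_counter = 0;          // flagged: every includer defines it -> ODR violation at link time
inline int inline_counter = 0;   // fine: C++17 inline variable, one definition program-wide
namespace {
constexpr std::size_t kMaxIndex = 64;  // internal linkage per TU; clang-tidy still warns, hence NOLINT
}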
2 changes: 2 additions & 0 deletions aten/src/ATen/cuda/CUDASparseBlas.h
@@ -12,6 +12,7 @@
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDASparse.h>

// NOLINTBEGIN(misc-misplaced-const)
namespace at::cuda::sparse {

#define CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t) \
@@ -316,3 +317,4 @@ void bsrsm2_solve<c10::complex<double>>(
#endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE

} // namespace at::cuda::sparse
// NOLINTEND(misc-misplaced-const)
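On NOLINTBEGIN(misc-misplaced-const): the check fires when const is applied to a typedef or alias of a pointer type, because the const then binds to the pointer rather than the pointee, which is easy to misread. The cuSPARSE handle and descriptor types are pointer typedefs, which is presumably why the whole file is exempted. The core of the diagnostic in isolation:

using IntPtr = int*;

void write_through(const IntPtr p) {  // flagged: this means 'int* const p', not 'const int* p'
  *p = 42;                            // the pointee is still writable
}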
2 changes: 2 additions & 0 deletions aten/src/ATen/cuda/CUDASparseDescriptors.cpp
@@ -8,6 +8,7 @@
namespace at::cuda::sparse {

cusparseStatus_t destroyConstDnMat(const cusparseDnMatDescr* dnMatDescr) {
// NOLINTNEXTLINE(*const-cast)
return cusparseDestroyDnMat(const_cast<cusparseDnMatDescr*>(dnMatDescr));
}

@@ -83,6 +84,7 @@ cusparseDnMatDescr_t createRawDnMatDescriptor(const Tensor& input, int64_t batch
#endif

auto batch_stride = ndim > 2 && batch_offset >= 0 ? input_strides[ndim - 3] : 0;
// NOLINTNEXTLINE(*const-cast)
void* data_ptr = is_const ? const_cast<void*>(input.const_data_ptr()) : input.data_ptr();
void* values_ptr = static_cast<char*>(data_ptr) +
batch_offset * batch_stride * input.itemsize();
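On the *const-cast suppressions: several cuSPARSE entry points take non-const pointers even when they only read (or destroy) the object, so a const_cast at the call boundary is unavoidable and the NOLINT marks it as deliberate. A generic sketch of the same situation, with a hypothetical legacy function standing in for the C API (the NOLINT glob here would match checks such as cppcoreguidelines-pro-type-const-cast):

#include <cstddef>

// Stand-in for a legacy C-style API that only reads 'data' but is declared without const.
unsigned legacy_checksum(char* data, std::size_t len) {
  unsigned sum = 0;
  for (std::size_t i = 0; i < len; ++i) sum += static_cast<unsigned char>(data[i]);
  return sum;
}

unsigned checksum(const char* data, std::size_t len) {
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
  return legacy_checksum(const_cast<char*>(data), len);
}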
2 changes: 1 addition & 1 deletion aten/src/ATen/cudnn/AutocastRNN.cpp
@@ -18,7 +18,7 @@ Autocast wrapper for CuDNN RNNs (the weight reflattening needs special attention

// To be registered for the "_cudnn_rnn(...)" schema.
// _cudnn_rnn is autograd-exposed (test_autocast_cudnn_rnn in test_cuda.py includes a test to confirm)
std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor>
static std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor>
_cudnn_rnn_cast_reflatten(const Tensor & input,
TensorList weight,
int64_t weight_stride0,
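The only change here marks the file-local autocast wrapper static, giving it internal linkage - the usual fix when a warning points out a function that is defined in a .cpp but declared in no header. The pattern in miniature:

// some_file.cpp, illustrative
static int twice(int x) {        // internal linkage: callable only from this translation unit
  return 2 * x;
}

int use_twice(int x) { return twice(x); }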
13 changes: 8 additions & 5 deletions aten/src/ATen/cudnn/Descriptors.cpp
@@ -6,6 +6,7 @@
#include <iostream>
#include <sstream>

// NOLINTBEGIN(*c-arrays*)
namespace at::native {

namespace {
@@ -101,7 +102,7 @@ std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d) {
int nbDims = 0;
int dimA[CUDNN_DIM_MAX];
int strideA[CUDNN_DIM_MAX];
cudnnDataType_t dtype;
cudnnDataType_t dtype{};
cudnnGetTensorNdDescriptor(d.desc(), CUDNN_DIM_MAX, &dtype, &nbDims, dimA, strideA);
out << " type = " << cudnnTypeToString(dtype) << "\n";
out << " nbDims = " << nbDims << "\n";
@@ -143,7 +144,7 @@ void FilterDescriptor::set(const at::Tensor &t, const at::MemoryFormat memory_fo
size[i] = (int) 1;
}
dim = std::max(dim, pad);
cudnnTensorFormat_t filter_format;
cudnnTensorFormat_t filter_format{};
switch(memory_format) {
case at::MemoryFormat::Contiguous:
filter_format = CUDNN_TENSOR_NCHW;
@@ -155,7 +156,8 @@ void FilterDescriptor::set(const at::Tensor &t, const at::MemoryFormat memory_fo
default:
TORCH_INTERNAL_ASSERT(false, "unsupported memory_format for cuDNN filters");
}
set(getDataType(t), (int) dim, size, filter_format);
// NOLINTNEXTLINE(*narrowing-conversions)
set(getDataType(t), static_cast<int64_t>(dim), size, filter_format);
}

std::string cudnnMemoryFormatToString(cudnnTensorFormat_t tformat) {
@@ -175,8 +177,8 @@ std::ostream& operator<<(std::ostream & out, const FilterDescriptor& d) {
out << "FilterDescriptor " << static_cast<void*>(d.desc()) << "\n";
int nbDims = 0;
int dimA[CUDNN_DIM_MAX];
cudnnDataType_t dtype;
cudnnTensorFormat_t tformat;
cudnnDataType_t dtype{};
cudnnTensorFormat_t tformat{};
cudnnGetFilterNdDescriptor(d.desc(), CUDNN_DIM_MAX, &dtype, &tformat, &nbDims, dimA);
out << " type = " << cudnnTypeToString(dtype) << "\n";
out << " tensor_format = " << cudnnMemoryFormatToString(tformat) << "\n";
@@ -193,3 +195,4 @@ std::ostream& operator<<(std::ostream & out, const FilterDescriptor& d) {
void FilterDescriptor::print() { std::cout << *this; }

}
// NOLINTEND(*c-arrays*)
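Two recurring fixes in this file: the raw dimA[CUDNN_DIM_MAX]-style buffers handed to cuDNN are blanket-exempted from the C-array checks with NOLINTBEGIN(*c-arrays*), and enum out-parameters get {} so they are value-initialized rather than indeterminate if the following cuDNN query fails. The {} idiom on its own, with made-up types:

enum class Format { kUnset, kRowMajor, kColMajor };

bool query_format(Format* out) {  // stand-in for a C-style query that can fail
  (void)out;
  return false;                   // pretend the query failed and wrote nothing
}

Format current_format() {
  Format f{};                     // value-initialized to Format::kUnset (the zero enumerator)
  (void)query_format(&f);         // f is well-defined even if the query left it untouched
  return f;
}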
7 changes: 4 additions & 3 deletions aten/src/ATen/cudnn/Descriptors.h
@@ -92,6 +92,7 @@ struct DescriptorDeleter {
// initialized the first time you call set() or any other initializing
// function.
template <typename T, cudnnStatus_t (*ctor)(T**), cudnnStatus_t (*dtor)(T*)>
// NOLINTNEXTLINE(bugprone-exception-escape)
class TORCH_CUDA_CPP_API Descriptor {
public:
// TODO: Figure out why const-correctness doesn't work here
@@ -128,7 +129,7 @@ class TORCH_CUDA_CPP_API RNNDataDescriptor : public Descriptor<
void set(const at::Tensor &t, cudnnRNNDataLayout_t layout, int maxSeqLength, int batchSize, int vectorSize, const int* seqLengthArray);
private:
void set(cudnnDataType_t dataType, cudnnRNNDataLayout_t layout, int maxSeqLength, int batchSize, int vectorSize, const int* seqLengthArray) {
AT_CUDNN_CHECK(cudnnSetRNNDataDescriptor(mut_desc(), dataType, layout, maxSeqLength, batchSize, vectorSize, seqLengthArray, NULL));
AT_CUDNN_CHECK(cudnnSetRNNDataDescriptor(mut_desc(), dataType, layout, maxSeqLength, batchSize, vectorSize, seqLengthArray, nullptr));
}
};

@@ -224,6 +225,7 @@ struct TORCH_CUDA_CPP_API SpatialTransformerDescriptor
}
};

// NOLINTNEXTLINE(bugprone-exception-escape)
struct TORCH_CUDA_CPP_API DropoutDescriptor
: public Descriptor<
cudnnDropoutStruct,
@@ -244,9 +246,8 @@ struct TORCH_CUDA_CPP_API DropoutDescriptor
}

// Restore a dropout descriptor given a dropout probability and existing RNG state.
void set(cudnnHandle_t handle, float dropout, at::Tensor state_) {
void set(cudnnHandle_t handle, float dropout, const at::Tensor& state) {
TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout");
state = state_;
void *state_ptr = state.data_ptr();
size_t state_size = state.size(0);
// NB: The seed doesn't actually matter, so we give a dummy value
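Besides the NULL -> nullptr swap and the exception-escape suppressions, DropoutDescriptor::set now takes the RNG state tensor by const reference, the standard fix for a by-value parameter that is only read (performance-unnecessary-value-param in clang-tidy terms). The general shape, with std::string standing in for Tensor:

#include <string>

// flagged form: void log_name(std::string name);  // copies on every call
void log_name(const std::string& name) {           // fixed: borrowed, not copied
  if (!name.empty()) {
    // ... write the name somewhere ...
  }
}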
4 changes: 1 addition & 3 deletions aten/src/ATen/cudnn/Types.cpp
@@ -5,7 +5,7 @@
namespace at::native {

cudnnDataType_t getCudnnDataTypeFromScalarType(const at::ScalarType dtype) {
if (dtype == c10::kQInt8) {
if (dtype == c10::kQInt8 || dtype == at::kChar) {
return CUDNN_DATA_INT8;
} else if (dtype == at::kFloat) {
return CUDNN_DATA_FLOAT;
@@ -19,8 +19,6 @@ cudnnDataType_t getCudnnDataTypeFromScalarType(const at::ScalarType dtype) {
return CUDNN_DATA_INT32;
} else if (dtype == at::kByte) {
return CUDNN_DATA_UINT8;
} else if (dtype == at::kChar) {
return CUDNN_DATA_INT8;
}
std::string msg("getCudnnDataTypeFromScalarType() not supported for ");
msg += toString(dtype);
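This hunk folds two branches with identical results - kQInt8 and kChar both map to CUDNN_DATA_INT8 - into one condition, the usual cure for clang-tidy's duplicated-branch complaints. Schematically:

enum class Kind { kA, kB, kC };

int encode(Kind k) {
  // before the fix, 'k == Kind::kA' and 'k == Kind::kC' were separate branches returning the same value
  if (k == Kind::kA || k == Kind::kC) {
    return 1;   // one branch instead of two identical ones
  }
  return 0;
}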
14 changes: 7 additions & 7 deletions aten/src/ATen/native/nested/NestedTensorUtils.h
@@ -32,7 +32,7 @@ struct NestedTensorImpl;
// The following functions are used to construct nested tensors from buffers and
// metadata.

inline at::Tensor wrap_buffer(at::Tensor buffer, at::Tensor nested_sizes) {
inline at::Tensor wrap_buffer(const at::Tensor& buffer, const at::Tensor& nested_sizes) {
TORCH_CHECK(
buffer.dim() == 1,
"Expected given buffer to be 1dim, but got ",
@@ -41,19 +41,19 @@ inline at::Tensor wrap_buffer(at::Tensor buffer, at::Tensor nested_sizes) {
TORCH_CHECK(
buffer.is_contiguous(), "Expected given buffer to be contiguous.");
return at::detail::make_tensor<NestedTensorImpl>(
std::move(buffer), std::move(nested_sizes));
buffer, nested_sizes);
}

// TODO: Figure out if we need a non-moving wrap_buffer()
inline at::Tensor wrap_buffer(
at::Tensor buffer,
const at::Tensor& buffer,
at::Tensor nested_sizes,
at::Tensor nested_strides,
at::Tensor storage_offsets) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
buffer.is_contiguous(), "Given buffer must be contiguous.");
return at::detail::make_tensor<NestedTensorImpl>(
std::move(buffer),
buffer,
std::move(nested_sizes),
std::move(nested_strides),
std::move(storage_offsets));
@@ -95,9 +95,9 @@ inline at::Tensor create_nested_view_tensor(
return at::detail::make_tensor<NestedTensorImpl>(
c10::TensorImpl::VIEW,
base,
nested_sizes,
nested_strides,
storage_offsets);
std::move(nested_sizes),
std::move(nested_strides),
std::move(storage_offsets));
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

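The wrap_buffer/create_nested_view_tensor changes clean up move semantics: std::move applied to a const reference silently degrades to a copy, so parameters that are only borrowed become const references without the std::move, while the by-value parameters that really are consumed keep (or gain) it. The rule in isolation:

#include <string>
#include <utility>

std::string sink;

void store_from_const_ref(const std::string& s) {
  sink = std::move(s);   // misleading: moving a const ref still copies (performance-move-const-arg)
}

void store_from_value(std::string s) {
  sink = std::move(s);   // fine: 's' is a mutable by-value parameter, the move is real
}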
4 changes: 2 additions & 2 deletions aten/src/ATen/templates/TensorBody.h
@@ -195,7 +195,7 @@ class TORCH_API Tensor: public TensorBase {
//
// TODO: temporarily disabled

Tensor& operator=(const TensorBase& x) & {
Tensor& operator=(const TensorBase& x) & noexcept {
impl_ = x.getIntrusivePtr();
return *this;
}
@@ -204,7 +204,7 @@
return *this;
}

Tensor& operator=(const Tensor &x) & {
Tensor& operator=(const Tensor &x) & noexcept {
return operator=(static_cast<const TensorBase&>(x));
}
Tensor& operator=(Tensor &&x) & noexcept {
8 changes: 5 additions & 3 deletions c10/util/intrusive_ptr.h
@@ -664,15 +664,17 @@ struct MaybeOwnedTraits<c10::intrusive_ptr<T>> {
toDestroy.release();
}

static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
static const owned_type& referenceFromBorrow(
const borrow_type& borrow) noexcept {
return borrow;
}

static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
static const owned_type* pointerFromBorrow(
const borrow_type& borrow) noexcept {
return &borrow;
}

static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
static bool debugBorrowIsValid(const borrow_type& /*borrow*/) noexcept {
return true;
}
};
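The TensorBody.h and intrusive_ptr.h hunks add noexcept to functions whose bodies plainly cannot throw - returning a reference or the address of an existing object, or copy-assigning a handle whose copy is itself non-throwing - so callers and the optimizer can rely on the guarantee. A small sketch:

struct Borrowed {
  int value = 0;
};

const Borrowed& reference_from(const Borrowed& b) noexcept {  // just returns the reference
  return b;
}

const Borrowed* pointer_from(const Borrowed& b) noexcept {    // just takes an address
  return &b;
}

struct Handle {
  int id = 0;
  Handle& operator=(const Handle& other) & noexcept {  // copying an int cannot throw
    id = other.id;
    return *this;
  }
};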
1 change: 1 addition & 0 deletions tools/onnx/templates/rules.h.in
@@ -1,4 +1,5 @@
#pragma once
#include <cstdint>

/**
${generated_comment}
1 change: 1 addition & 0 deletions torch/csrc/autograd/python_autograd.h
@@ -1,5 +1,6 @@
#ifndef THP_AUTOGRAD_H
#define THP_AUTOGRAD_H
#include <torch/csrc/utils/pythoncapi_compat.h>

PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject* unused);
void THPAutograd_initFunctions();
1 change: 1 addition & 0 deletions torch/csrc/autograd/python_fft_functions.h
@@ -1,4 +1,5 @@
#pragma once
#include <torch/csrc/utils/pythoncapi_compat.h>

namespace torch::autograd {

1 change: 1 addition & 0 deletions torch/csrc/autograd/python_linalg_functions.h
@@ -1,4 +1,5 @@
#pragma once
#include <torch/csrc/utils/pythoncapi_compat.h>

namespace torch::autograd {

1 change: 1 addition & 0 deletions torch/csrc/autograd/python_sparse_functions.h
@@ -1,4 +1,5 @@
#pragma once
#include <torch/csrc/utils/pythoncapi_compat.h>

namespace torch::autograd {

2 changes: 1 addition & 1 deletion torch/csrc/autograd/python_special_functions.h
@@ -1,5 +1,5 @@
#pragma once

#include <torch/csrc/utils/pythoncapi_compat.h>
namespace torch::autograd {

void initSpecialFunctions(PyObject* module);
1 change: 1 addition & 0 deletions torch/csrc/cuda/Module.h
@@ -1,5 +1,6 @@
#ifndef THCP_CUDA_MODULE_INC
#define THCP_CUDA_MODULE_INC
#include <torch/csrc/utils/pythoncapi_compat.h>

PyObject* THCPModule_getDevice_wrap(PyObject* self);
PyObject* THCPModule_setDevice_wrap(PyObject* self, PyObject* arg);
2 changes: 2 additions & 0 deletions torch/csrc/cuda/nccl.cpp
@@ -273,6 +273,7 @@ struct NcclCommList {
devices.data()));
}
NcclCommList(NcclCommList&& foo) = default;
// NOLINTNEXTLINE(bugprone-exception-escape)
~NcclCommList() {
if (comms) {
for (const auto i : c10::irange(ndevices)) {
@@ -457,6 +458,7 @@ AutoNcclGroup::AutoNcclGroup(ncclComm_t comm, bool comm_nonblocking)
#endif
}

// NOLINTNEXTLINE(bugprone-exception-escape)
AutoNcclGroup::~AutoNcclGroup() noexcept(false) {
#if defined(NCCL_MAJOR) && (NCCL_MAJOR >= 2)
if (comm_nonblocking_ && comm_ != nullptr) {
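Both suppressions here name bugprone-exception-escape: a destructor is implicitly noexcept, so any potentially-throwing call inside it (the NCCL error-check helpers) is flagged; the commit keeps the existing behavior and marks the sites as intentional instead. The diagnostic in its simplest form:

#include <stdexcept>

void check_status(int status) {
  if (status != 0) {
    throw std::runtime_error("call failed");  // may throw
  }
}

struct Handle {
  int status = 0;
  // NOLINTNEXTLINE(bugprone-exception-escape)
  ~Handle() {           // implicitly noexcept: a throw escaping here would call std::terminate
    check_status(status);
  }
};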
3 changes: 1 addition & 2 deletions torch/csrc/cuda/shared/cudnn.cpp
@@ -4,7 +4,6 @@
#if defined(USE_CUDNN) || defined(USE_ROCM)
#include <torch/csrc/utils/pybind.h>

#include <array>
#include <tuple>

namespace {
@@ -22,7 +21,7 @@ version_tuple getCompileVersion() {

version_tuple getRuntimeVersion() {
#ifndef USE_STATIC_CUDNN
int major, minor, patch;
int major = 0, minor = 0, patch = 0;
cudnnGetProperty(MAJOR_VERSION, &major);
cudnnGetProperty(MINOR_VERSION, &minor);
cudnnGetProperty(PATCH_LEVEL, &patch);
2 changes: 1 addition & 1 deletion torch/csrc/profiler/collection.h
@@ -378,7 +378,7 @@ struct TORCH_API Result : public std::enable_shared_from_this<Result> {
}

template <typename T, typename Fn>
void visit_if_base(Fn&& fn) const {
void visit_if_base(const Fn& fn) const {
visit([&](const auto& extra_fields) {
using extra_fields_t = typename std::remove_cv_t<
typename std::remove_reference_t<decltype(extra_fields)>>;
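visit_if_base took its callable as Fn&& - a forwarding reference - but never std::forward-ed it, which newer clang-tidy flags (cppcoreguidelines-missing-std-forward); since the callable is only invoked, a plain const Fn& is the simpler signature. In miniature:

template <typename Fn>
void call_twice_old(Fn&& fn) {   // flagged: 'fn' is a forwarding reference but is never forwarded
  fn();
  fn();
}

template <typename Fn>
void call_twice(const Fn& fn) {  // the callable is only invoked, never consumed, so borrow it
  fn();
  fn();
}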
2 changes: 1 addition & 1 deletion torch/csrc/profiler/kineto_client_interface.h
@@ -6,6 +6,6 @@
namespace torch {

// declare global_kineto_init for libtorch_cpu.so to call
TORCH_API void global_kineto_init(void);
TORCH_API void global_kineto_init();

} // namespace torch
2 changes: 1 addition & 1 deletion torch/csrc/profiler/kineto_shim.cpp
@@ -222,7 +222,7 @@ bool collectivesProfilerExists() {

#ifdef USE_KINETO
static const std::string setTraceID(const std::string& trace_id) {
if (trace_id == "") {
if (trace_id.empty()) {
return "";
}
std::stringstream configss;
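trace_id == "" becomes trace_id.empty(): empty() states the intent directly and does not rely on string comparison for what is really an emptiness test. For example:

#include <string>

bool has_trace_id(const std::string& trace_id) {
  return !trace_id.empty();   // preferred over 'trace_id != ""'
}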
4 changes: 2 additions & 2 deletions torch/csrc/profiler/orchestration/observer.cpp
@@ -39,15 +39,15 @@ ProfilerConfig::ProfilerConfig(
bool with_flops,
bool with_modules,
ExperimentalConfig experimental_config,
const std::string& trace_id)
std::string trace_id)
: state{state},
experimental_config{std::move(experimental_config)},
report_input_shapes{report_input_shapes},
profile_memory{profile_memory},
with_stack{with_stack},
with_flops{with_flops},
with_modules{with_modules},
trace_id{trace_id} {}
trace_id{std::move(trace_id)} {}

bool ProfilerConfig::disabled() const {
return state == torch::profiler::impl::ProfilerState::Disabled;
2 changes: 1 addition & 1 deletion torch/csrc/profiler/orchestration/observer.h
@@ -104,7 +104,7 @@ struct TORCH_API ProfilerConfig {
bool with_flops = false,
bool with_modules = false,
ExperimentalConfig experimental_config = ExperimentalConfig(),
const std::string& trace_id = "");
std::string trace_id = "");

bool disabled() const;
bool global() const;
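The ProfilerConfig change - by-value std::string trace_id plus std::move into the member, in both the declaration above and the definition in observer.cpp - is the modernize-pass-by-value idiom: an rvalue argument is moved all the way into the member, and an lvalue still costs the same single copy as before. The pattern on its own:

#include <string>
#include <utility>

struct Config {
  explicit Config(std::string trace_id)        // take by value...
      : trace_id_(std::move(trace_id)) {}      // ...and move it into the member

  std::string trace_id_;
};

// Config from_rvalue{std::string{"run-42"}};  // the argument is moved, never deep-copied
// std::string id = "run-42";
// Config from_lvalue{id};                     // one copy into the parameter, then a move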
