Skip to content

Commit

Permalink
[BE] Fix signed-unsigned warnings (pytorch#48848)
Browse files Browse the repository at this point in the history
Summary:
Switch to range-based for loops where possible.
Replace `ptrdiff_t` (a signed type) with `size_t` (an unsigned type) for loop counters compared against container sizes.

Pull Request resolved: pytorch#48848

Reviewed By: walterddr

Differential Revision: D25338250

Pulled By: malfet

fbshipit-source-id: e840618b113b8bc0d8bb067c2fdf06e3ec9233d4
  • Loading branch information
malfet authored and facebook-github-bot committed Dec 5, 2020
1 parent 55b9373 commit 6317e0b
Show file tree
Hide file tree
Showing 9 changed files with 20 additions and 22 deletions.
4 changes: 2 additions & 2 deletions aten/src/ATen/BatchingRegistrations.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -941,8 +941,8 @@ Tensor new_empty_strided_batching_rule(
size.size(), ") must match dimensionality of strides (",
stride.size(), ")");
auto storage_size = native::storage_size_for(size, stride);
for (int64_t idx = 0; idx < physical_strides.size(); ++idx) {
physical_strides[idx] *= storage_size;
for (auto& physical_stride : physical_strides) {
physical_stride *= storage_size;
}

// physical_strides = [B1 * B2 * S, B2 * S, S] + strides
Expand Down
4 changes: 2 additions & 2 deletions aten/src/ATen/NamedTensorUtils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -264,11 +264,11 @@ static std::vector<Dimname> compute_dot_product_outnames(
}
std::vector<Dimname> outnames(num_outnames, Dimname::wildcard());
int64_t index = 0;
for (int64_t j = 0; j < tensor_names.size(); ++j) {
for (size_t j = 0; j < tensor_names.size(); ++j) {
if (j == tensor_dotted_dim) continue;
outnames[index++] = tensor_names[j];
}
for (int64_t j = 0; j < other_names.size(); ++j) {
for (size_t j = 0; j < other_names.size(); ++j) {
if (j == other_dotted_dim) continue;
outnames[index++] = other_names[j];
}
Expand Down
4 changes: 2 additions & 2 deletions aten/src/ATen/TensorIterator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -939,8 +939,8 @@ TensorIterator TensorIterator::reduce_op(Tensor& out1, Tensor& out2, const Tenso
}

void TensorIteratorBase::populate_operands(TensorIteratorConfig& config) {
for (int i = 0; i < config.tensors_.size(); i++) {
operands_.emplace_back(std::move(config.tensors_[i]));
for (auto& tensor: config.tensors_) {
operands_.emplace_back(std::move(tensor));
}
num_outputs_ = config.num_outputs_;
}
Expand Down
4 changes: 2 additions & 2 deletions aten/src/ATen/TensorNames.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -61,10 +61,10 @@ TensorNames::TensorNames(ArrayRef<Dimname> names, int64_t start, int64_t end) {
}

TensorNames& TensorNames::unifyFromRightInplace(const TensorNames& other, const char* op_name) {
int64_t size_diff = std::labs(names_.size() - other.names_.size());
size_t size_diff = std::labs(names_.size() - other.names_.size());

if (names_.size() > other.names_.size()) {
for (int64_t idx = size_diff; idx < names_.size(); ++idx) {
for (size_t idx = size_diff; idx < names_.size(); ++idx) {
names_[idx] = names_[idx].unify(other.names_[idx - size_diff], op_name);
}
} else {
Expand Down
8 changes: 4 additions & 4 deletions aten/src/ATen/native/Convolution.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -177,13 +177,13 @@ auto ConvParams::needs_64bit_indexing_no_split(const at::Tensor& input, const at
int64_t outsize = 1;
if (transposed) {
std::vector<int64_t> o = conv_input_size(input.sizes(), weight.sizes(), padding, output_padding, stride, dilation, groups);
for (int64_t i = 1; i < o.size(); i++) {
outsize *= o[i];
for (const auto& e: o) {
outsize *= e;
}
} else {
std::vector<int64_t> o = conv_output_size(input.sizes(), weight.sizes(), padding, stride, dilation);
for (int64_t i = 1; i < o.size(); i++) {
outsize *= o[i];
for (const auto& e: o) {
outsize *= e;
}
}
return outsize > int_max;
Expand Down
2 changes: 1 addition & 1 deletion aten/src/ATen/native/ForeachOpsKernels.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -201,7 +201,7 @@ std::vector<Tensor> foreach_tensor_##NAME##_slow(TensorList tensors1, TensorList
\
std::vector<Tensor> result; \
result.reserve(tensors1.size()); \
for (int i = 0; i < tensors1.size(); i++) { \
for (size_t i = 0; i < tensors1.size(); i++) { \
result.emplace_back(at::NAME(tensors1[i], tensors2[i])); \
} \
\
Expand Down
3 changes: 1 addition & 2 deletions aten/src/TH/generic/THStorage.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -115,10 +115,9 @@ void THStorage_(resizeBytes)(THStorage* storage, ptrdiff_t size_bytes) {

void THStorage_(fill)(THStorage *storage, scalar_t value)
{
ptrdiff_t i;
auto type_meta = caffe2::TypeMeta::Make<scalar_t>();
size_t numel = storage->nbytes() / type_meta.itemsize();
for (i = 0; i < numel; i++)
for (size_t i = 0; i < numel; i++)
THStorage_(data)(storage)[i] = value;
}

Expand Down
5 changes: 2 additions & 3 deletions aten/src/TH/generic/THStorageCopy.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ void THStorage_(copy)(THStorage *storage, THStorage *src)
scalar_t *scalar_src = THStorage_(data)(src);
scalar_t *data = THStorage_(data)(storage);
uint64_t numel = storage->nbytes() / sizeof(scalar_t);
for (ptrdiff_t i = 0; i < numel; ++i) {
for (uint64_t i = 0; i < numel; ++i) {
data[i] = scalar_src[i];
}
}
Expand All @@ -19,11 +19,10 @@ void THStorage_(copy)(THStorage *storage, THStorage *src)
#define IMPLEMENT_THStorage_COPY(TYPENAMESRC) \
void THStorage_(copy##TYPENAMESRC)( \
THStorage * storage, TH##TYPENAMESRC##Storage * src) { \
ptrdiff_t i; \
auto data = THStorage_(data)(storage); \
auto src_data = TH##TYPENAMESRC##Storage_data(src); \
uint64_t numel = storage->nbytes() / sizeof(scalar_t); \
for (i = 0; i < numel; i++) \
for (uint64_t i = 0; i < numel; i++) \
data[i] = static_cast<scalar_t>(src_data[i]); \
}

Expand Down
8 changes: 4 additions & 4 deletions caffe2/serialize/crc_alt.h
Original file line number Diff line number Diff line change
Expand Up @@ -680,12 +680,12 @@ uint32_t crc32_combine(uint32_t crcA, uint32_t crcB, size_t lengthB)

// put operator for one zero bit in odd
odd[0] = Polynomial; // CRC-32 polynomial
for (int i = 1; i < CrcBits; i++)
for (uint32_t i = 1; i < CrcBits; i++)
odd[i] = 1 << (i - 1);

// put operator for two zero bits in even
// same as gf2_matrix_square(even, odd);
for (int i = 0; i < CrcBits; i++)
for (uint32_t i = 0; i < CrcBits; i++)
{
uint32_t vec = odd[i];
even[i] = 0;
Expand All @@ -695,7 +695,7 @@ uint32_t crc32_combine(uint32_t crcA, uint32_t crcB, size_t lengthB)
}
// put operator for four zero bits in odd
// same as gf2_matrix_square(odd, even);
for (int i = 0; i < CrcBits; i++)
for (uint32_t i = 0; i < CrcBits; i++)
{
uint32_t vec = even[i];
odd[i] = 0;
Expand All @@ -711,7 +711,7 @@ uint32_t crc32_combine(uint32_t crcA, uint32_t crcB, size_t lengthB)
for (; lengthB > 0; lengthB >>= 1)
{
// same as gf2_matrix_square(a, b);
for (int i = 0; i < CrcBits; i++)
for (uint32_t i = 0; i < CrcBits; i++)
{
uint32_t vec = b[i];
a[i] = 0;
Expand Down

0 comments on commit 6317e0b

Please sign in to comment.