
Improve error messages for tensor size errors
Differential Revision: D67665846

Pull Request resolved: #7453
GregoryComer authored Jan 7, 2025
1 parent a29b208 commit b09f8e4
Showing 4 changed files with 64 additions and 9 deletions.
32 changes: 32 additions & 0 deletions runtime/core/exec_aten/util/dim_order_util.h
@@ -9,6 +9,8 @@
#pragma once

#include <cstdint>
+#include <cstdio>
+#include <cstring>

#include <executorch/runtime/core/error.h>
#include <executorch/runtime/platform/assert.h>
@@ -257,6 +259,36 @@ ET_NODISCARD inline Error stride_to_dim_order(
}
return Error::Ok;
}

+/**
+* Print a string representation of an ArrayRef of tensor sizes into a
+* user-provided string buffer. If the user buffer is too small, the string
+* will be truncated. The output is of the format (1,2,3,4).
+*
+* Note that we cannot use ArrayRef here due to a circular dependency (see
+* above comments).
+*/
+template <class SizesType>
+inline void sizes_to_string(
+char* output,
+size_t output_size,
+SizesType* sizes,
+size_t rank) {
+auto remaining_size = output_size;
+for (auto i = 0; remaining_size > 0 && i < rank; i++) {
+snprintf(
+output,
+remaining_size,
+"%s%zd",
+i == 0 ? "(" : ",",
+static_cast<size_t>(sizes[i]));
+auto len = strlen(output);
+output += len;
+remaining_size -= len;
+}
+snprintf(output, remaining_size, ")");
+}

} // namespace runtime
} // namespace executorch

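For context, the new helper can be exercised on its own. A minimal usage sketch (not part of this commit; the include path, buffer size, and shape values are assumptions for illustration):

#include <array>
#include <cstdint>
#include <cstdio>

#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>

void print_shape_example() {
  // Hypothetical 4-D shape; SizesType is deduced from the pointer type.
  int32_t sizes[] = {1, 3, 224, 224};
  std::array<char, 32> buf;
  executorch::runtime::sizes_to_string(buf.data(), buf.size(), sizes, 4);
  // Prints "(1,3,224,224)"; a smaller buffer would truncate the string.
  printf("%s\n", buf.data());
}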
36 changes: 29 additions & 7 deletions runtime/core/portable_type/tensor_impl.cpp
@@ -90,24 +90,46 @@ Error TensorImpl::internal_resize_contiguous(ArrayRef<SizesType> new_sizes) {
if (dim_ == 0) {
return Error::Ok;
}

switch (shape_dynamism_) {
case TensorShapeDynamism::STATIC:
-ET_CHECK_OR_RETURN_ERROR(
-std::equal(sizes_, sizes_ + dim_, new_sizes.begin()),
-NotSupported,
-"Attempted to resize a static tensor");
+if (!std::equal(sizes_, sizes_ + dim_, new_sizes.begin())) {
+#ifdef ET_LOG_ENABLED
+std::array<char, 5> old_sizes_str, new_sizes_str;
+
+executorch::runtime::sizes_to_string(
+old_sizes_str.data(),
+old_sizes_str.size(),
+sizes().data(),
+sizes().size());
+executorch::runtime::sizes_to_string(
+new_sizes_str.data(),
+new_sizes_str.size(),
+new_sizes.data(),
+new_sizes.size());
+#endif
+
+ET_CHECK_OR_RETURN_ERROR(
+false,
+NotSupported,
+"Attempted to resize a static tensor. Expected shape %s, but received %s.",
+old_sizes_str.data(),
+new_sizes_str.data())
+}

break;
case TensorShapeDynamism::DYNAMIC_BOUND:
// TODO(T175194371): Unbounded dynamic tensor resizing is not yet
// supported: treat them as upper-bounded.
case TensorShapeDynamism::DYNAMIC_UNBOUND: {
const auto new_numel = compute_numel(new_sizes.data(), dim_);

ET_CHECK_OR_RETURN_ERROR(
new_numel <= numel_bound_,
NotSupported,
"Attempted to resize a bounded tensor with capacity of %zu elements to %zu elements.",
new_numel,
numel_bound_);
"Attempted to resize a bounded tensor with a maximum capacity of %zu elements to %zu elements.",
numel_bound_,
new_numel);

if (strides_ && dim_order_) {
auto error =
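The guard above keeps the formatting work out of builds that cannot emit the message. A minimal sketch of the same pattern outside of TensorImpl (illustrative only; the function name, buffer size, and the use of fprintf for reporting are assumptions, and it reuses the helper added in this commit):

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>

// Called once a shape mismatch has already been detected; returns false so the
// caller can propagate an error, and formats the shapes only when logging is on.
bool report_shape_mismatch(const int32_t* expected, const int32_t* actual, size_t rank) {
#ifdef ET_LOG_ENABLED
  std::array<char, 32> expected_str, actual_str;
  executorch::runtime::sizes_to_string(
      expected_str.data(), expected_str.size(), expected, rank);
  executorch::runtime::sizes_to_string(
      actual_str.data(), actual_str.size(), actual, rank);
  fprintf(stderr, "Expected shape %s, but received %s.\n",
          expected_str.data(), actual_str.data());
#else
  (void)expected;  // Silence unused-parameter warnings when logging is compiled out.
  (void)actual;
  (void)rank;
#endif
  return false;
}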
2 changes: 1 addition & 1 deletion runtime/executor/method.cpp
@@ -1061,7 +1061,7 @@ Error Method::execute_instruction() {
// We know that instr_args_as_KernelCall is non-null because it was
// checked at init time.
auto op_index = instruction->instr_args_as_KernelCall()->op_index();
-auto op = serialization_plan_->operators()->Get(op_index);
+ET_UNUSED auto op = serialization_plan_->operators()->Get(op_index);
ET_LOG(
Error,
"KernelCall failed at instruction %zu:%zu in operator %s.%s: 0x%x",
3 changes: 2 additions & 1 deletion runtime/kernel/operator_registry.cpp
@@ -74,7 +74,8 @@ Error register_kernels_internal(const Span<const Kernel> kernels) {
return Error::Internal;
}
// for debugging purpose
-const char* lib_name = et_pal_get_shared_library_name(kernels.data());
+ET_UNUSED const char* lib_name =
+et_pal_get_shared_library_name(kernels.data());

for (const auto& kernel : kernels) {
// Linear search. This is fine if the number of kernels is small.
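The ET_UNUSED annotation here and in method.cpp exists because lib_name and op feed only into logging calls; in builds where logging is compiled out they would otherwise trigger unused-variable warnings (and break builds that use -Werror). A standard-C++ sketch of the same idea (ET_UNUSED is an ExecuTorch macro whose definition is not shown in this diff; [[maybe_unused]] is assumed here as an equivalent for illustration):

#include <cstdio>

void register_example() {
  // Kept around for debugging; only referenced when logging is compiled in.
  [[maybe_unused]] const char* lib_name = "example_kernel_lib";
#ifdef ET_LOG_ENABLED
  printf("Registering kernels from library %s\n", lib_name);
#endif
}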
