Skip to content
Draft
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 41 additions & 2 deletions devtools/bundled_program/bundled_program.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
#include <executorch/devtools/bundled_program/schema/bundled_program_schema_generated.h>
#include <executorch/runtime/core/event_tracer_hooks.h>
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
#include <executorch/runtime/core/memory_allocator.h>
#include <executorch/runtime/executor/method.h>
#include <executorch/runtime/platform/log.h>
Expand Down Expand Up @@ -56,7 +57,9 @@ at::Tensor tensor_like(bundled_program_flatbuffer::Tensor* bundled_tensor) {
at::dtype(static_cast<ScalarType>(bundled_tensor->scalar_type())));

// Validate data buffer exists and has sufficient size
ET_CHECK(bundled_tensor->data() != nullptr);
ET_CHECK(
bundled_tensor->data() != nullptr,
"Tensor flatbuffer is missing its data field");
ET_CHECK_MSG(
bundled_tensor->data()->size() >= ret_tensor.nbytes(),
"Tensor data buffer too small: got %zu bytes, need %zu bytes",
Expand All @@ -77,11 +80,47 @@ TensorImpl impl_like(bundled_program_flatbuffer::Tensor* bundled_tensor) {
ScalarType scalar_type =
static_cast<ScalarType>(bundled_tensor->scalar_type());
Comment on lines 80 to 81
Copy link

Copilot AI Feb 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Consider adding validation for the scalar_type before using it in elementSize(). While elementSize() does check for invalid types, an early validation with a more specific error message would be more consistent with the pattern in runtime/executor/tensor_parser_portable.cpp:46-50. This would make debugging easier if a malicious file provides an invalid scalar type.

Copilot uses AI. Check for mistakes.
ssize_t dim = bundled_tensor->sizes()->size();

// Validate dimension count
ET_CHECK(
dim <= static_cast<ssize_t>(kMaxDim),
"Tensor rank too large: got %zd, max allowed %zu",
dim,
kMaxDim);

executorch::aten::SizesType* sizes = bundled_tensor->mutable_sizes()->data();
void* data = bundled_tensor->mutable_data()->data();
executorch::aten::DimOrderType* dim_order =
bundled_tensor->mutable_dim_order()->data();

// Calculate expected tensor size in bytes
size_t numel = 1;
for (ssize_t i = 0; i < dim; i++) {
ET_CHECK_MSG(
sizes[i] >= 0,
"Tensor has negative size at dimension %zd: %d",
static_cast<ssize_t>(i),
static_cast<int>(sizes[i]));
size_t new_numel;
ET_CHECK_MSG(
!c10::mul_overflows(numel, static_cast<size_t>(sizes[i]), &new_numel),
"Integer overflow calculating tensor numel at dim %zd",
static_cast<size_t>(i));
numel = new_numel;
}
size_t expected_bytes = numel * executorch::runtime::elementSize(scalar_type);
Copy link

Copilot AI Feb 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The multiplication of numel by elementSize can also overflow. After fixing the overflow checks for numel calculation, you should also add an overflow check for the final multiplication at this line using c10::mul_overflows, similar to the pattern in runtime/executor/method_meta.cpp:76-82.

Copilot uses AI. Check for mistakes.

// Validate data buffer exists and has sufficient size
ET_CHECK(
bundled_tensor->data() != nullptr,
"Tensor flatbuffer is missing its data field");
ET_CHECK_MSG(
bundled_tensor->data()->size() >= expected_bytes,
"Tensor data buffer too small: got %zu bytes, need %zu bytes",
static_cast<size_t>(bundled_tensor->data()->size()),
static_cast<size_t>(expected_bytes));

void* data = bundled_tensor->mutable_data()->data();

// The strides of created tensorimpl will only be actually used when
// comparison (`tensor_are_close` below). To eliminate the usage of memory
// allocator, here we set the initial strides as null and reconstruct the
Expand Down
Loading