- 
                Notifications
    
You must be signed in to change notification settings  - Fork 953
 
Porting Dynamic_Update_Slice operator from TFLite #3246
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from 19 commits
87dbda9
              fa5fd01
              0105bf8
              c3231d4
              9abd7fa
              dd994fa
              cb6be01
              780c702
              b9ea8b1
              04faaa8
              f27ec75
              cf694b5
              32162b2
              0b0f771
              38b259e
              29e40f6
              c4007aa
              ae96a48
              665cf67
              af40c6e
              File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | 
|---|---|---|
| @@ -0,0 +1,241 @@ | ||
| /* Copyright 2025 The TensorFlow Authors. All Rights Reserved. | ||
| 
     | 
||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||
| you may not use this file except in compliance with the License. | ||
| You may obtain a copy of the License at | ||
| 
     | 
||
| http://www.apache.org/licenses/LICENSE-2.0 | ||
| 
     | 
||
| Unless required by applicable law or agreed to in writing, software | ||
| distributed under the License is distributed on an "AS IS" BASIS, | ||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| See the License for the specific language governing permissions and | ||
| limitations under the License. | ||
| ==============================================================================*/ | ||
| #include "tensorflow/lite/micro/kernels/dynamic_update_slice.h" | ||
| 
     | 
||
| #include "tensorflow/lite/c/builtin_op_data.h" | ||
| #include "tensorflow/lite/c/common.h" | ||
| #include "tensorflow/lite/kernels/internal/common.h" | ||
| #include "tensorflow/lite/kernels/internal/quantization_util.h" | ||
| #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" | ||
| #include "tensorflow/lite/kernels/kernel_util.h" | ||
| #include "tensorflow/lite/kernels/op_macros.h" | ||
| #include "tensorflow/lite/micro/kernels/kernel_util.h" | ||
| #include "tensorflow/lite/micro/micro_log.h" | ||
| #include "tensorflow/lite/micro/micro_utils.h" | ||
| 
     | 
||
| namespace tflite { | ||
| 
     | 
||
| constexpr int kMaxDimensions = 6; | ||
| 
     | 
||
| namespace { | ||
| 
     | 
||
| TfLiteStatus CalculateClampedStartIndices( | ||
| int num_dims, const int64_t* raw_indices_data, | ||
| const int32_t* input_dims_data, const int32_t* update_dims_data, | ||
| int32_t* clamped_start_indices_output) { | ||
| for (int i = 0; i < num_dims; ++i) { | ||
| clamped_start_indices_output[i] = static_cast<int32_t>( | ||
| std::min<int64_t>(std::max<int64_t>(0, raw_indices_data[i]), | ||
| input_dims_data[i] - update_dims_data[i])); | ||
| } | ||
| return kTfLiteOk; | ||
| } | ||
| 
     | 
||
| // Recursive helper for N-dimensional slice update. | ||
| template <typename T> | ||
| TfLiteStatus UpdateSliceRecursive(int current_dim, int max_dims, | ||
| const int32_t* output_strides, | ||
| const int32_t* update_strides, | ||
| const int32_t* update_dims_data, | ||
| const T* update_tensor_data, | ||
| const int32_t* clamped_start_indices, | ||
| T* output_tensor_data) { | ||
| if (current_dim == max_dims) { | ||
| return kTfLiteOk; | ||
| } | ||
| 
         
      Comment on lines
    
      +55
     to 
      +57
    
   
  There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I guess this is unreachable?  | 
||
| 
     | 
||
| output_tensor_data += | ||
| clamped_start_indices[current_dim] * output_strides[current_dim]; | ||
| 
     | 
||
| if (current_dim == max_dims - 1) { | ||
| std::memcpy(output_tensor_data, update_tensor_data, | ||
| update_dims_data[max_dims - 1] * sizeof(T)); | ||
| } else { | ||
| for (int i = 0; i < update_dims_data[current_dim]; ++i) { | ||
| UpdateSliceRecursive<T>(current_dim + 1, max_dims, output_strides, | ||
| update_strides, update_dims_data, | ||
| update_tensor_data, clamped_start_indices, | ||
| output_tensor_data); | ||
| 
     | 
||
| output_tensor_data += output_strides[current_dim]; | ||
| update_tensor_data += update_strides[current_dim]; | ||
| } | ||
| } | ||
| return kTfLiteOk; | ||
| } | ||
| 
     | 
||
| // Main dispatch function for Eval, templated on data type. | ||
| template <typename T> | ||
| TfLiteStatus EvalImpl(const TfLiteEvalTensor* operand_eval, | ||
| const TfLiteEvalTensor* update_eval, | ||
| const int64_t* indices_eval, | ||
| TfLiteEvalTensor* output_eval) { | ||
| const RuntimeShape operand_shape = | ||
| tflite::micro::GetTensorShape(operand_eval); | ||
| const RuntimeShape update_shape = tflite::micro::GetTensorShape(update_eval); | ||
| const T* update_tensor_data = tflite::micro::GetTensorData<T>(update_eval); | ||
| T* output_tensor_data = tflite::micro::GetTensorData<T>(output_eval); | ||
| 
     | 
||
| const int num_dims = operand_shape.DimensionsCount(); | ||
| if (operand_shape.FlatSize() == update_shape.FlatSize()) { | ||
| std::memcpy(output_tensor_data, update_tensor_data, | ||
| ElementCount(*operand_eval->dims) * sizeof(T)); | ||
| return kTfLiteOk; | ||
| } | ||
| 
     | 
||
| if (num_dims > kMaxDimensions) { | ||
| MicroPrintf( | ||
| "DYNAMIC_UPDATE_SLICE: Operand rank %d exceeds max supported %d.", | ||
| num_dims, kMaxDimensions); | ||
| return kTfLiteError; | ||
| } | ||
| 
     | 
||
| if (operand_eval->data.data != output_eval->data.data) { | ||
| std::memcpy(output_eval->data.data, operand_eval->data.data, | ||
| ElementCount(*operand_eval->dims) * sizeof(T)); | ||
| } | ||
| 
     | 
||
| // If update tensor is empty, no actual update is needed after operand copy. | ||
| if (ElementCount(*update_eval->dims) == 0) { | ||
| return kTfLiteOk; | ||
| } | ||
| 
     | 
||
| // Calculate clamped start indices (stack-allocated) | ||
| int32_t clamped_start_indices[kMaxDimensions]; | ||
| TF_LITE_ENSURE_STATUS(CalculateClampedStartIndices( | ||
| num_dims, indices_eval, operand_shape.DimsData(), update_shape.DimsData(), | ||
| clamped_start_indices)); | ||
| 
     | 
||
| // Calculate strides (stack-allocated) | ||
| int32_t output_stride[kMaxDimensions]; | ||
| int32_t update_stride[kMaxDimensions]; | ||
| output_stride[num_dims - 1] = 1; | ||
| update_stride[num_dims - 1] = 1; | ||
| for (int i = num_dims - 2; i >= 0; --i) { | ||
| output_stride[i] = output_stride[i + 1] * operand_shape.Dims(i + 1); | ||
| update_stride[i] = update_stride[i + 1] * update_shape.Dims(i + 1); | ||
| } | ||
| 
     | 
||
| // Perform the N-dimensional update | ||
| // The recursive function needs base pointers and initial offsets. | ||
| return UpdateSliceRecursive<T>( | ||
| /*current_dim=*/0, num_dims, output_stride, update_stride, | ||
| update_shape.DimsData(), update_tensor_data, clamped_start_indices, | ||
| output_tensor_data); | ||
| } | ||
| 
     | 
||
// Validates the DYNAMIC_UPDATE_SLICE node at prepare time:
//   input 0 (operand) and input 1 (update) must share a type and rank, with
//   every update dimension <= the matching operand dimension; input 2
//   (start_indices) must be a 1-D int32/int64 vector with one entry per
//   operand dimension.
// NOTE(review): the TF_LITE_ENSURE* macros return early on failure, which
// skips the DeallocateTempTfLiteTensor calls below — confirm whether temp
// tensors may leak on a malformed model.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  MicroContext* micro_context = GetMicroContext(context);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  // Use MicroContext to allocate temporary tensors for inspection
  // This is a robust pattern shown in EMBEDDING_LOOKUP.
  TfLiteTensor* operand = micro_context->AllocateTempInputTensor(
      node, kDynamicUpdateSliceOperandTensor);
  TF_LITE_ENSURE(context, operand != nullptr);

  TfLiteTensor* update = micro_context->AllocateTempInputTensor(
      node, kDynamicUpdateSliceUpdateTensor);
  TF_LITE_ENSURE(context, update != nullptr);

  TfLiteTensor* start_indices = micro_context->AllocateTempInputTensor(
      node, kDynamicUpdateSliceStartIndicesTensor);
  TF_LITE_ENSURE(context, start_indices != nullptr);

  TfLiteTensor* output = micro_context->AllocateTempOutputTensor(
      node, kDynamicUpdateSliceOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);

  // Type checks
  TF_LITE_ENSURE_TYPES_EQ(context, operand->type, update->type);
  TF_LITE_ENSURE(context, start_indices->type == kTfLiteInt32 ||
                              start_indices->type == kTfLiteInt64);

  // start_indices must be a 1-D vector with one start per operand dimension.
  TF_LITE_ENSURE_EQ(context, NumDimensions(start_indices), 1);
  TF_LITE_ENSURE_EQ(context, SizeOfDimension(start_indices, 0),
                    NumDimensions(operand));

  TF_LITE_ENSURE_EQ(context, NumDimensions(update), NumDimensions(operand));
  // Check that update dimensions are not larger than operand dimensions
  for (int i = 0; i < NumDimensions(operand); ++i) {
    TF_LITE_ENSURE(context,
                   SizeOfDimension(update, i) <= SizeOfDimension(operand, i));
  }
  // NOTE(review): presumably a defensive re-assignment — reviewers asked why
  // the converter has not already set the output type; confirm whether this
  // write to temp-tensor metadata has any lasting effect.
  output->type = operand->type;

  // Deallocate temporary tensors
  micro_context->DeallocateTempTfLiteTensor(operand);
  micro_context->DeallocateTempTfLiteTensor(update);
  micro_context->DeallocateTempTfLiteTensor(start_indices);
  micro_context->DeallocateTempTfLiteTensor(
      output);  // Output tensor metadata also temp

  return kTfLiteOk;
}
| 
     | 
||
| TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { | ||
| const TfLiteEvalTensor* operand_eval = tflite::micro::GetEvalInput( | ||
| context, node, kDynamicUpdateSliceOperandTensor); | ||
| const TfLiteEvalTensor* update_eval = tflite::micro::GetEvalInput( | ||
| context, node, kDynamicUpdateSliceUpdateTensor); | ||
| const TfLiteEvalTensor* indices_eval = tflite::micro::GetEvalInput( | ||
| context, node, kDynamicUpdateSliceStartIndicesTensor); | ||
| TfLiteEvalTensor* output_eval = tflite::micro::GetEvalOutput( | ||
| context, node, kDynamicUpdateSliceOutputTensor); | ||
| 
     | 
||
| const auto& input_shape = tflite::micro::GetTensorShape(operand_eval); | ||
| const int input_dims = input_shape.DimensionsCount(); | ||
| int64_t indices_data_i64[kMaxDimensions]; | ||
| if (indices_eval->type == kTfLiteInt32) { | ||
| for (int i = 0; i < input_dims; i++) | ||
| indices_data_i64[i] = static_cast<int64_t>(indices_eval->data.i32[i]); | ||
| } else if (indices_eval->type == kTfLiteInt64) { | ||
| for (int i = 0; i < input_dims; i++) | ||
| indices_data_i64[i] = indices_eval->data.i64[i]; | ||
| } else { | ||
| TF_LITE_KERNEL_LOG(context, | ||
| "DynamicUpdateSlice only currently supports " | ||
| "int32 or int64 indices type, got %d.", | ||
| indices_eval->type); | ||
| return kTfLiteError; | ||
| } | ||
| // Dispatch based on tensor type | ||
| switch (operand_eval->type) { | ||
| case kTfLiteFloat32: | ||
| return EvalImpl<float>(operand_eval, update_eval, indices_data_i64, | ||
| output_eval); | ||
| case kTfLiteInt8: | ||
| return EvalImpl<int8_t>(operand_eval, update_eval, indices_data_i64, | ||
| output_eval); | ||
| case kTfLiteInt32: | ||
| return EvalImpl<int32_t>(operand_eval, update_eval, indices_data_i64, | ||
| output_eval); | ||
| default: | ||
| MicroPrintf("DYNAMIC_UPDATE_SLICE: Operand type %s not supported.", | ||
| TfLiteTypeGetName(operand_eval->type)); | ||
| return kTfLiteError; | ||
| } | ||
| return kTfLiteOk; | ||
| } | ||
| 
     | 
||
| } // namespace | ||
| 
     | 
||
| TFLMRegistration Register_DYNAMIC_UPDATE_SLICE() { | ||
| return tflite::micro::RegisterOp(/*init=*/nullptr, /*prepare=*/Prepare, | ||
| /*invoke=*/Eval); | ||
| } | ||
| 
     | 
||
| } // namespace tflite | ||
| Original file line number | Diff line number | Diff line change | 
|---|---|---|
| @@ -0,0 +1,36 @@ | ||
| /* Copyright 2025 The TensorFlow Authors. All Rights Reserved. | ||
| 
     | 
||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||
| you may not use this file except in compliance with the License. | ||
| You may obtain a copy of the License at | ||
| 
     | 
||
| http://www.apache.org/licenses/LICENSE-2.0 | ||
| 
     | 
||
| Unless required by applicable law or agreed to in writing, software | ||
| distributed under the License is distributed on an "AS IS" BASIS, | ||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| See the License for the specific language governing permissions and | ||
| limitations under the License. | ||
| ==============================================================================*/ | ||
#ifndef TENSORFLOW_LITE_MICRO_KERNELS_DYNAMIC_UPDATE_SLICE_H_
#define TENSORFLOW_LITE_MICRO_KERNELS_DYNAMIC_UPDATE_SLICE_H_

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/micro/micro_common.h"

namespace tflite {

// Tensor indices for the DYNAMIC_UPDATE_SLICE op, following the XLA
// DynamicUpdateSlice operand order.
constexpr int kDynamicUpdateSliceOperandTensor = 0;  // Tensor being updated.
constexpr int kDynamicUpdateSliceUpdateTensor = 1;   // Replacement slice.
constexpr int kDynamicUpdateSliceStartIndicesTensor = 2;  // 1-D start indices.
constexpr int kDynamicUpdateSliceOutputTensor = 0;

// NOTE(review): no definition of this symbol is visible in the kernel source
// (its Prepare lives in an anonymous namespace) — confirm this declaration is
// needed or remove it.
TfLiteStatus PrepareDynamicUpdateSlice(TfLiteContext* context,
                                       TfLiteNode* node);

// Returns the kernel registration used to add DYNAMIC_UPDATE_SLICE to a
// MicroMutableOpResolver.
TFLMRegistration Register_DYNAMIC_UPDATE_SLICE();

}  // namespace tflite

#endif  // TENSORFLOW_LITE_MICRO_KERNELS_DYNAMIC_UPDATE_SLICE_H_
Uh oh!
There was an error while loading. Please reload this page.