/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_UTIL_H_
#define TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_UTIL_H_

#include <cstdint>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {
namespace micro {

// Returns a mutable tensor for a given input index. is_variable must be
// checked during prepare when the full TfLiteTensor is available.
inline TfLiteEvalTensor* GetMutableEvalInput(const TfLiteContext* context,
                                             const TfLiteNode* node,
                                             int index) {
  TFLITE_DCHECK(context != nullptr);
  TFLITE_DCHECK(node != nullptr);
  return context->GetEvalTensor(context, node->inputs->data[index]);
}

// Returns the TfLiteEvalTensor struct for a given input index in a node.
inline const TfLiteEvalTensor* GetEvalInput(const TfLiteContext* context,
                                            const TfLiteNode* node, int index) {
  return GetMutableEvalInput(context, node, index);
}

// Returns the TfLiteEvalTensor struct for a given output index in a node.
inline TfLiteEvalTensor* GetEvalOutput(const TfLiteContext* context,
                                       const TfLiteNode* node, int index) {
  TFLITE_DCHECK(context != nullptr);
  TFLITE_DCHECK(node != nullptr);
  return context->GetEvalTensor(context, node->outputs->data[index]);
}

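// Illustrative sketch (not part of this header's API): a kernel's Eval
// function would typically fetch its eval tensors with the helpers above,
// e.g. assuming a single-input, single-output node:
//
//   TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
//     const TfLiteEvalTensor* input = GetEvalInput(context, node, 0);
//     TfLiteEvalTensor* output = GetEvalOutput(context, node, 0);
//     ...
//     return kTfLiteOk;
//   }
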
// Returns data for a TfLiteEvalTensor struct.
template <typename T>
T* GetTensorData(TfLiteEvalTensor* tensor) {
  return tensor != nullptr ? reinterpret_cast<T*>(tensor->data.raw) : nullptr;
}

// Returns const data for a TfLiteEvalTensor struct.
template <typename T>
const T* GetTensorData(const TfLiteEvalTensor* tensor) {
  TFLITE_DCHECK(tensor != nullptr);
  return reinterpret_cast<const T*>(tensor->data.raw);
}

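// Illustrative sketch (assumes the tensors hold int8 data): typed access to
// an eval tensor's buffer goes through GetTensorData, e.g.
//
//   const int8_t* input_data = GetTensorData<int8_t>(input);
//   int8_t* output_data = GetTensorData<int8_t>(output);
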
// Returns the shape of a TfLiteEvalTensor struct.
inline const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor) {
  if (tensor == nullptr) {
    return RuntimeShape();
  }
  TfLiteIntArray* dims = tensor->dims;
  const int dims_size = dims->size;
  const int32_t* dims_data = reinterpret_cast<const int32_t*>(dims->data);
  return RuntimeShape(dims_size, dims_data);
}

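// Illustrative sketch: the element count of an eval tensor can be derived
// from its RuntimeShape, e.g.
//
//   const RuntimeShape shape = GetTensorShape(input);
//   const int flat_size = shape.FlatSize();
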
// Returns true if the given tensors have the same shape.
bool HaveSameShapes(const TfLiteEvalTensor* input1,
                    const TfLiteEvalTensor* input2);

}  // namespace micro
}  // namespace tflite

#endif  // TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_UTIL_H_