
Refactor the Tensorflow Lite Micro library so that Tensorflow Lite compiles cleanly on the Windows platform

QingChuanWS · 5 years ago · commit 5ac2d0ff77
100 changed files with 2339 additions and 7751 deletions
  1. README.md (+7 -9)
  2. README_en.md (+5 -7)
  3. examples/SConscript (+8 -5)
  4. examples/audio_main.cc (+1 -1)
  5. fixedpoint/SConscript (+0 -9)
  6. flatbuffers/SConscript (+0 -9)
  7. flatbuffers/code_generators.h (+0 -231)
  8. flatbuffers/flatc.h (+0 -100)
  9. flatbuffers/flatc_pch.h (+0 -39)
  10. flatbuffers/flexbuffers.h (+0 -1618)
  11. flatbuffers/grpc.h (+0 -330)
  12. flatbuffers/hash.h (+0 -127)
  13. flatbuffers/idl.h (+0 -1136)
  14. flatbuffers/minireflect.h (+0 -408)
  15. flatbuffers/pch.h (+0 -38)
  16. flatbuffers/pch/SConscript (+0 -9)
  17. flatbuffers/pch/flatc_pch.h (+0 -39)
  18. flatbuffers/pch/pch.h (+0 -38)
  19. flatbuffers/reflection.h (+0 -500)
  20. flatbuffers/reflection_generated.h (+0 -1216)
  21. flatbuffers/registry.h (+0 -127)
  22. flatbuffers/util.h (+0 -683)
  23. ruy/SConscript (+0 -15)
  24. ruy/profiler/SConscript (+0 -9)
  25. ruy/profiler/instrumentation.cc (+0 -132)
  26. tensorflow/SConscript (+0 -0)
  27. tensorflow/core/public/version.h (+2 -2)
  28. tensorflow/lite/SConscript (+0 -0)
  29. tensorflow/lite/c/SConscript (+29 -0)
  30. tensorflow/lite/c/builtin_op_data.h (+8 -3)
  31. tensorflow/lite/c/common.c (+3 -1)
  32. tensorflow/lite/c/common.h (+185 -30)
  33. tensorflow/lite/core/SConscript (+0 -0)
  34. tensorflow/lite/core/api/SConscript (+29 -0)
  35. tensorflow/lite/core/api/error_reporter.cc (+1 -1)
  36. tensorflow/lite/core/api/error_reporter.h (+0 -0)
  37. tensorflow/lite/core/api/flatbuffer_conversions.cc (+1115 -471)
  38. tensorflow/lite/core/api/flatbuffer_conversions.h (+253 -0)
  39. tensorflow/lite/core/api/op_resolver.cc (+3 -3)
  40. tensorflow/lite/core/api/op_resolver.h (+3 -3)
  41. tensorflow/lite/core/api/profiler.h (+0 -0)
  42. tensorflow/lite/core/api/tensor_utils.cc (+2 -2)
  43. tensorflow/lite/core/api/tensor_utils.h (+1 -1)
  44. tensorflow/lite/experimental/SConscript (+0 -0)
  45. tensorflow/lite/experimental/microfrontend/SConscript (+0 -0)
  46. tensorflow/lite/experimental/microfrontend/lib/SConscript (+28 -0)
  47. tensorflow/lite/experimental/microfrontend/lib/bits.h (+0 -0)
  48. tensorflow/lite/experimental/microfrontend/lib/fft.cc (+3 -2)
  49. tensorflow/lite/experimental/microfrontend/lib/fft.h (+0 -0)
  50. tensorflow/lite/experimental/microfrontend/lib/fft_util.cc (+2 -2)
  51. tensorflow/lite/experimental/microfrontend/lib/fft_util.h (+1 -1)
  52. tensorflow/lite/experimental/microfrontend/lib/filterbank.c (+2 -2)
  53. tensorflow/lite/experimental/microfrontend/lib/filterbank.h (+1 -1)
  54. tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c (+1 -1)
  55. tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h (+1 -1)
  56. tensorflow/lite/experimental/microfrontend/lib/frontend.c (+2 -2)
  57. tensorflow/lite/experimental/microfrontend/lib/frontend.h (+6 -6)
  58. tensorflow/lite/experimental/microfrontend/lib/frontend_util.c (+2 -2)
  59. tensorflow/lite/experimental/microfrontend/lib/frontend_util.h (+7 -7)
  60. tensorflow/lite/experimental/microfrontend/lib/log_lut.c (+1 -1)
  61. tensorflow/lite/experimental/microfrontend/lib/log_lut.h (+0 -0)
  62. tensorflow/lite/experimental/microfrontend/lib/log_scale.c (+3 -3)
  63. tensorflow/lite/experimental/microfrontend/lib/log_scale.h (+0 -0)
  64. tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c (+1 -1)
  65. tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h (+1 -1)
  66. tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c (+1 -1)
  67. tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h (+0 -0)
  68. tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c (+1 -1)
  69. tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h (+1 -1)
  70. tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c (+2 -2)
  71. tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h (+0 -0)
  72. tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c (+1 -1)
  73. tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h (+1 -1)
  74. tensorflow/lite/experimental/microfrontend/lib/window.c (+1 -1)
  75. tensorflow/lite/experimental/microfrontend/lib/window.h (+0 -0)
  76. tensorflow/lite/experimental/microfrontend/lib/window_util.c (+1 -1)
  77. tensorflow/lite/experimental/microfrontend/lib/window_util.h (+1 -1)
  78. tensorflow/lite/kernels/SConscript (+29 -0)
  79. tensorflow/lite/kernels/internal/common.h (+73 -54)
  80. tensorflow/lite/kernels/internal/compatibility.h (+4 -2)
  81. tensorflow/lite/kernels/internal/cppmath.h (+1 -1)
  82. tensorflow/lite/kernels/internal/max.h (+1 -1)
  83. tensorflow/lite/kernels/internal/min.h (+1 -1)
  84. tensorflow/lite/kernels/internal/optimized/neon_check.h (+0 -0)
  85. tensorflow/lite/kernels/internal/quantization_util.cc (+7 -7)
  86. tensorflow/lite/kernels/internal/quantization_util.h (+3 -3)
  87. tensorflow/lite/kernels/internal/reference/add.h (+106 -71)
  88. tensorflow/lite/kernels/internal/reference/arg_min_max.h (+1 -1)
  89. tensorflow/lite/kernels/internal/reference/binary_function.h (+3 -3)
  90. tensorflow/lite/kernels/internal/reference/ceil.h (+1 -1)
  91. tensorflow/lite/kernels/internal/reference/comparisons.h (+26 -26)
  92. tensorflow/lite/kernels/internal/reference/concatenation.h (+10 -10)
  93. tensorflow/lite/kernels/internal/reference/conv.h (+24 -22)
  94. tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h (+3 -3)
  95. tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h (+47 -47)
  96. tensorflow/lite/kernels/internal/reference/dequantize.h (+9 -9)
  97. tensorflow/lite/kernels/internal/reference/floor.h (+1 -1)
  98. tensorflow/lite/kernels/internal/reference/fully_connected.h (+71 -70)
  99. tensorflow/lite/kernels/internal/reference/hard_swish.h (+166 -0)
  100. tensorflow/lite/kernels/internal/reference/integer_ops/add.h (+25 -23)

+ 7 - 9
README.md

@@ -22,10 +22,8 @@ The Tensorflow Lite Micro software package (TFLu) targets the RT-Thread real-time operating system
 | ---- | :--- |
 | docs  | Documentation directory |
 | examples | Tensorflow Lite Micro examples |
-| fixedpoint | Google Fixedpoint fixed-point quantization library |
-| flatbuffers | Google FlatBuffers model interpretation library |
-| ruy | Google Ruy matrix computation acceleration library |
-| tflite | Google Tensorflow Lite Micro inference framework |
+| third_party | Third-party libraries that Tensorflow Lite Micro depends on |
+| tensorflow | Google Tensorflow Lite Micro inference framework |
 
 ### 1.2 License
 
@@ -51,7 +49,7 @@ RT-Thread online packages
 
 After successfully downloading the Tensorflow Lite Micro package:
 
-- First, use menuconfig in RT-Thread's env tool to configure the features; the configuration options in menuconfig are:
+- First, use menuconfig in RT-Thread's env tool to configure the features; the configuration options in menuconfig are:
 
 ```
 RT-Thread online packages
@@ -62,25 +60,25 @@ RT-Thread online packages
             Select Tensorflow Lite Operations Type (Using Tensorflow Lite reference operations)  --->
 ```
 
-Among them, Select Offical Example has two options:
+Among them, Select Offical Example has two options:
 
 ```
 (X) Enable Tensorflow Lite Micro audio example
 ( ) No Tensorflow Lite Micro example
 ```
 
-Note: audio example runs the official bundled speech example, while No example does not integrate the example files and uses only the standard Tensorflow Lite Micro framework.
+Note: audio example runs the official bundled speech example, while No example does not integrate the example files and uses only the standard Tensorflow Lite Micro framework.
 
 - If you selected the speech example, copy the audio_main.cc file from the example folder into your project's Application directory, then build and flash/download to see the result.
 
-Select Tensorflow Lite Operations Type has two options:
+Select Tensorflow Lite Operations Type has two options:
 
 ```
 (X) Using Tensorflow Lite reference operations
 ( ) Using Tensorflow Lite CMSIS NN operations 
 ```
 
-Note: reference operations use TFLMicro's generic operators (the operators are isolated from the platform, so portability is good), while CMSIS NN operations use the CMSIS library to accelerate operators on platforms with ARM cores. **For important notes, see Part 4!!**
+Note: reference operations use TFLMicro's generic operators (the operators are isolated from the platform, so portability is good), while CMSIS NN operations use the CMSIS library to accelerate operators on platforms with ARM cores. **For important notes, see Part 4!!**
 
 - The Tensorflow Lite Micro framework is fairly complex and exposes many kinds of APIs. Please first read [introduction.md](introduction.md) in the docs, then work through [user-guide.md](user-guide.md) to learn the basic on-device deep-learning deployment workflow. With that foundation, you can start developing your own on-device deployment tasks.
 

+ 5 - 7
README_en.md

@@ -4,7 +4,7 @@
 
 ## 1. Introduction
 
-The Tensorflow Lite Micro software package (TFLu) is an embedded reasoning framework for RT-Thread real-time operating system transplantation. It mainly solves the problem of deployment based on the Tensorflow Lite framework in embedded systems with resource constraints such as resources, power consumption, and performance.
+The Tensorflow Lite Micro software package (TFLu) is an embedded inference framework for RT-Thread real-time operating system transplantation. It mainly solves the problem of deployment based on the Tensorflow Lite framework in embedded systems with resource constraints such as resources, power consumption, and performance.
 
 Platforms currently planned to be optimized:
 
@@ -21,10 +21,8 @@ Platforms currently planned to be optimized:
 | ----------- | :----------------------------------------------------------- |
 | docs        | Document                                                     |
 | examples    | Tensorflow Lite Micro offical audio demo                     |
-| fixedpoint  | Fixed-point quantization library required by Tensorflow Lite Micro library |
-| flatbuffers | Model interpretation library flatbuffer required by Tensorflow Lite Micro library |
-| ruy         | Matrix acceleration library required by Tensorflow Lite Micro library ruy |
 | tensorflow  | Tensorflow Lite Micro library                                |
+| third_party | Third party libraries on which tensorflow Lite micro depends |
 
 
 ### 1.2 License
@@ -89,9 +87,9 @@ Note: Reference operation is a general-purpose operator using TFLMicro (the oper
 
 ## 4. Matters needing attention
 
-- About ʻUsing Tensorflow Lite CMSIS NN operations` option:
-  - At present, CMSIS's optimization of operators is mainly for the calculation and optimization of cores above ARM Cortex M4 (which are equipped with DSP, FPU and other hardware acceleration components).It is not recommended to apply this option to MCUs below M4.
-  - Currently, the operator optimization of CMSIS only supports M series MCUs, A series, R series does not recommend this option.
+- About `Using Tensorflow Lite CMSIS NN operations` option:
+  - At present, CMSIS's optimization of operators is mainly for the calculation and optimization of cores above ARM Cortex M4 (which are equipped with DSP, FPU and other hardware acceleration components). It is not recommended to apply this option to MCUs below M4.
+  - Currently, the operator optimization of CMSIS only supports M series MCUs, ARM A series, R series does not recommend this option.
   - At present, the CMSIS NN operator is still in the testing stage, and there may be problems.
 - This software package occupies 16KB RAM space at runtime, and the built-in speech recognition case occupies a total of 22KB memory at runtime. **Please pay attention to modify the size of the main function stack and the memory management algorithm through menuconfig!**
 

+ 8 - 5
tflite/kernels/internal/SConscript → examples/SConscript

@@ -1,17 +1,20 @@
+import rtconfig
 from building import *
 
-cwd = GetCurrentDir()
-src = Glob('*.c') + Glob('*.cc')
+cwd  = GetCurrentDir()
+src  = Glob('*.cc')
+
+#.
 root =  str(Dir('#'))
 packages = os.path.join(root, 'packages')
 file_list = os.listdir(packages)
 for f in file_list:
     if(f.split('-')[0] == 'TensorflowLiteMicro'):
-        tflite = os.path.join(packages, f)
+        tflm_pkg = os.path.join(packages, f)
         break
 
-CPPPATH = [cwd, tflite]
+CPPPATH = [tflm_pkg]
 
-group = DefineGroup('lite', src, depend = ['PKG_USING_TENSORFLOWLITEMICRO'], CPPPATH = CPPPATH)
+group = DefineGroup('Applications', src, depend = [''], CPPPATH = CPPPATH)
 
 Return('group')
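
The rewritten examples/SConscript above locates the TFLM package root by scanning RT-Thread's packages directory for a folder whose name begins with `TensorflowLiteMicro`, since env checks packages out under versioned directory names that are not known in advance. A minimal standalone sketch of that lookup logic (the `find_tflm_root` helper and its fallback behavior are illustrative assumptions, not part of the package):

```python
import os

def find_tflm_root(packages_dir):
    """Return the first entry under packages_dir whose name starts with
    'TensorflowLiteMicro', mirroring the loop in examples/SConscript."""
    if not os.path.isdir(packages_dir):
        return None
    for name in os.listdir(packages_dir):
        # The SConscript strips any version suffix by splitting on '-'.
        if name.split('-')[0] == 'TensorflowLiteMicro':
            return os.path.join(packages_dir, name)
    return None

# Hypothetical usage: point CPPPATH at the package root so that includes
# like "tensorflow/lite/micro/..." resolve, matching the new include path
# used by examples/audio_main.cc.
tflm_pkg = find_tflm_root('packages')
CPPPATH = [tflm_pkg] if tflm_pkg else []
```

Note that the group is now registered under 'Applications' with a blank dependency instead of PKG_USING_TENSORFLOWLITEMICRO, so the copied example sources build as ordinary application code.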

+ 1 - 1
examples/audio_main.cc

@@ -16,7 +16,7 @@ limitations under the License.
 #include <rtthread.h>
 #include <rtdevice.h>
 #include <board.h>
-#include "tflite/micro/examples/micro_speech/main_functions.h"
+#include "tensorflow/lite/micro/examples/micro_speech/main_functions.h"
 
 // This is the default main used on systems that have the standard C entry
 // point. Other devices (for example FreeRTOS or ESP32) that have different

+ 0 - 9
fixedpoint/SConscript

@@ -1,9 +0,0 @@
-from building import *
-
-cwd     = GetCurrentDir()
-src     = Glob('*.c') + Glob('*.cc')
-CPPPATH = [cwd]
-
-group = DefineGroup('fixedpoint', src, depend = ['PKG_USING_TENSORFLOWLITEMICRO'], CPPPATH = CPPPATH)
-
-Return('group')

+ 0 - 9
flatbuffers/SConscript

@@ -1,9 +0,0 @@
-from building import *
-
-cwd     = GetCurrentDir()
-src     = Glob('*.c') + Glob('*.cc')
-CPPPATH = [cwd]
-
-group = DefineGroup('flatbuffers', src, depend = ['PKG_USING_TENSORFLOWLITEMICRO'], CPPPATH = CPPPATH)
-
-Return('group')

+ 0 - 231
flatbuffers/code_generators.h

@@ -1,231 +0,0 @@
-/*
- * Copyright 2014 Google Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLATBUFFERS_CODE_GENERATORS_H_
-#define FLATBUFFERS_CODE_GENERATORS_H_
-
-#include <map>
-#include <sstream>
-
-#include "idl.h"
-
-namespace flatbuffers {
-
-// Utility class to assist in generating code through use of text templates.
-//
-// Example code:
-//   CodeWriter code("\t");
-//   code.SetValue("NAME", "Foo");
-//   code += "void {{NAME}}() { printf("%s", "{{NAME}}"); }";
-//   code.SetValue("NAME", "Bar");
-//   code += "void {{NAME}}() { printf("%s", "{{NAME}}"); }";
-//   std::cout << code.ToString() << std::endl;
-//
-// Output:
-//  void Foo() { printf("%s", "Foo"); }
-//  void Bar() { printf("%s", "Bar"); }
-class CodeWriter {
- public:
-  CodeWriter(std::string pad = std::string())
-      : pad_(pad), cur_ident_lvl_(0), ignore_ident_(false) {}
-
-  // Clears the current "written" code.
-  void Clear() {
-    stream_.str("");
-    stream_.clear();
-  }
-
-  // Associates a key with a value.  All subsequent calls to operator+=, where
-  // the specified key is contained in {{ and }} delimiters will be replaced by
-  // the given value.
-  void SetValue(const std::string &key, const std::string &value) {
-    value_map_[key] = value;
-  }
-
-  std::string GetValue(const std::string &key) const {
-    const auto it = value_map_.find(key);
-    return it == value_map_.end() ? "" : it->second;
-  }
-
-  // Appends the given text to the generated code as well as a newline
-  // character.  Any text within {{ and }} delimiters is replaced by values
-  // previously stored in the CodeWriter by calling SetValue above.  The newline
-  // will be suppressed if the text ends with the \\ character.
-  void operator+=(std::string text);
-
-  // Returns the current contents of the CodeWriter as a std::string.
-  std::string ToString() const { return stream_.str(); }
-
-  // Increase ident level for writing code
-  void IncrementIdentLevel() { cur_ident_lvl_++; }
-  // Decrease ident level for writing code
-  void DecrementIdentLevel() {
-    if (cur_ident_lvl_) cur_ident_lvl_--;
-  }
-
-  void SetPadding(const std::string &padding) { pad_ = padding; }
-
- private:
-  std::map<std::string, std::string> value_map_;
-  std::stringstream stream_;
-  std::string pad_;
-  int cur_ident_lvl_;
-  bool ignore_ident_;
-
-  // Add ident padding (tab or space) based on ident level
-  void AppendIdent(std::stringstream &stream);
-};
-
-class BaseGenerator {
- public:
-  virtual bool generate() = 0;
-
-  static std::string NamespaceDir(const Parser &parser, const std::string &path,
-                                  const Namespace &ns);
-
-  std::string GeneratedFileName(const std::string &path,
-                                const std::string &file_name,
-                                const IDLOptions &options) const;
-
- protected:
-  BaseGenerator(const Parser &parser, const std::string &path,
-                const std::string &file_name, std::string qualifying_start,
-                std::string qualifying_separator, std::string default_extension)
-      : parser_(parser),
-        path_(path),
-        file_name_(file_name),
-        qualifying_start_(qualifying_start),
-        qualifying_separator_(qualifying_separator),
-        default_extension_(default_extension) {}
-  virtual ~BaseGenerator() {}
-
-  // No copy/assign.
-  BaseGenerator &operator=(const BaseGenerator &);
-  BaseGenerator(const BaseGenerator &);
-
-  std::string NamespaceDir(const Namespace &ns) const;
-
-  static const char *FlatBuffersGeneratedWarning();
-
-  static std::string FullNamespace(const char *separator, const Namespace &ns);
-
-  static std::string LastNamespacePart(const Namespace &ns);
-
-  // tracks the current namespace for early exit in WrapInNameSpace
-  // c++, java and csharp returns a different namespace from
-  // the following default (no early exit, always fully qualify),
-  // which works for js and php
-  virtual const Namespace *CurrentNameSpace() const { return nullptr; }
-
-  // Ensure that a type is prefixed with its namespace even within
-  // its own namespace to avoid conflict between generated method
-  // names and similarly named classes or structs
-  std::string WrapInNameSpace(const Namespace *ns,
-                              const std::string &name) const;
-
-  std::string WrapInNameSpace(const Definition &def) const;
-
-  std::string GetNameSpace(const Definition &def) const;
-
-  const Parser &parser_;
-  const std::string &path_;
-  const std::string &file_name_;
-  const std::string qualifying_start_;
-  const std::string qualifying_separator_;
-  const std::string default_extension_;
-};
-
-struct CommentConfig {
-  const char *first_line;
-  const char *content_line_prefix;
-  const char *last_line;
-};
-
-extern void GenComment(const std::vector<std::string> &dc,
-                       std::string *code_ptr, const CommentConfig *config,
-                       const char *prefix = "");
-
-class FloatConstantGenerator {
- public:
-  virtual ~FloatConstantGenerator() {}
-  std::string GenFloatConstant(const FieldDef &field) const;
-
- private:
-  virtual std::string Value(double v, const std::string &src) const = 0;
-  virtual std::string Inf(double v) const = 0;
-  virtual std::string NaN(double v) const = 0;
-
-  virtual std::string Value(float v, const std::string &src) const = 0;
-  virtual std::string Inf(float v) const = 0;
-  virtual std::string NaN(float v) const = 0;
-
-  template<typename T>
-  std::string GenFloatConstantImpl(const FieldDef &field) const;
-};
-
-class SimpleFloatConstantGenerator : public FloatConstantGenerator {
- public:
-  SimpleFloatConstantGenerator(const char *nan_number,
-                               const char *pos_inf_number,
-                               const char *neg_inf_number);
-
- private:
-  std::string Value(double v,
-                    const std::string &src) const FLATBUFFERS_OVERRIDE;
-  std::string Inf(double v) const FLATBUFFERS_OVERRIDE;
-  std::string NaN(double v) const FLATBUFFERS_OVERRIDE;
-
-  std::string Value(float v, const std::string &src) const FLATBUFFERS_OVERRIDE;
-  std::string Inf(float v) const FLATBUFFERS_OVERRIDE;
-  std::string NaN(float v) const FLATBUFFERS_OVERRIDE;
-
-  const std::string nan_number_;
-  const std::string pos_inf_number_;
-  const std::string neg_inf_number_;
-};
-
-// C++, C#, Java like generator.
-class TypedFloatConstantGenerator : public FloatConstantGenerator {
- public:
-  TypedFloatConstantGenerator(const char *double_prefix,
-                              const char *single_prefix, const char *nan_number,
-                              const char *pos_inf_number,
-                              const char *neg_inf_number = "");
-
- private:
-  std::string Value(double v,
-                    const std::string &src) const FLATBUFFERS_OVERRIDE;
-  std::string Inf(double v) const FLATBUFFERS_OVERRIDE;
-
-  std::string NaN(double v) const FLATBUFFERS_OVERRIDE;
-
-  std::string Value(float v, const std::string &src) const FLATBUFFERS_OVERRIDE;
-  std::string Inf(float v) const FLATBUFFERS_OVERRIDE;
-  std::string NaN(float v) const FLATBUFFERS_OVERRIDE;
-
-  std::string MakeNaN(const std::string &prefix) const;
-  std::string MakeInf(bool neg, const std::string &prefix) const;
-
-  const std::string double_prefix_;
-  const std::string single_prefix_;
-  const std::string nan_number_;
-  const std::string pos_inf_number_;
-  const std::string neg_inf_number_;
-};
-
-}  // namespace flatbuffers
-
-#endif  // FLATBUFFERS_CODE_GENERATORS_H_

+ 0 - 100
flatbuffers/flatc.h

@@ -1,100 +0,0 @@
-/*
- * Copyright 2017 Google Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLATBUFFERS_FLATC_H_
-#define FLATBUFFERS_FLATC_H_
-
-#include <functional>
-#include <limits>
-#include <string>
-
-#include "flatbuffers.h"
-#include "idl.h"
-#include "util.h"
-
-namespace flatbuffers {
-
-extern void LogCompilerWarn(const std::string &warn);
-extern void LogCompilerError(const std::string &err);
-
-class FlatCompiler {
- public:
-  // Output generator for the various programming languages and formats we
-  // support.
-  struct Generator {
-    typedef bool (*GenerateFn)(const flatbuffers::Parser &parser,
-                               const std::string &path,
-                               const std::string &file_name);
-    typedef std::string (*MakeRuleFn)(const flatbuffers::Parser &parser,
-                                      const std::string &path,
-                                      const std::string &file_name);
-
-    GenerateFn generate;
-    const char *generator_opt_short;
-    const char *generator_opt_long;
-    const char *lang_name;
-    bool schema_only;
-    GenerateFn generateGRPC;
-    flatbuffers::IDLOptions::Language lang;
-    const char *generator_help;
-    MakeRuleFn make_rule;
-  };
-
-  typedef void (*WarnFn)(const FlatCompiler *flatc, const std::string &warn,
-                         bool show_exe_name);
-
-  typedef void (*ErrorFn)(const FlatCompiler *flatc, const std::string &err,
-                          bool usage, bool show_exe_name);
-
-  // Parameters required to initialize the FlatCompiler.
-  struct InitParams {
-    InitParams()
-        : generators(nullptr),
-          num_generators(0),
-          warn_fn(nullptr),
-          error_fn(nullptr) {}
-
-    const Generator *generators;
-    size_t num_generators;
-    WarnFn warn_fn;
-    ErrorFn error_fn;
-  };
-
-  explicit FlatCompiler(const InitParams &params) : params_(params) {}
-
-  int Compile(int argc, const char **argv);
-
-  std::string GetUsageString(const char *program_name) const;
-
- private:
-  void ParseFile(flatbuffers::Parser &parser, const std::string &filename,
-                 const std::string &contents,
-                 std::vector<const char *> &include_directories) const;
-
-  void LoadBinarySchema(Parser &parser, const std::string &filename,
-                        const std::string &contents);
-
-  void Warn(const std::string &warn, bool show_exe_name = true) const;
-
-  void Error(const std::string &err, bool usage = true,
-             bool show_exe_name = true) const;
-
-  InitParams params_;
-};
-
-}  // namespace flatbuffers
-
-#endif  // FLATBUFFERS_FLATC_H_

+ 0 - 39
flatbuffers/flatc_pch.h

@@ -1,39 +0,0 @@
-/*
- * Copyright 2017 Google Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLATBUFFERS_FLATC_PCH_H_
-#define FLATBUFFERS_FLATC_PCH_H_
-
-// stl
-#include <cmath>
-#include <sstream>
-#include <cassert>
-#include <unordered_set>
-#include <unordered_map>
-#include <iostream>
-#include <functional>
-#include <set>
-#include <iterator>
-#include <tuple>
-
-// flatbuffers
-#include "pch.h"
-#include "code_generators.h"
-#include "flatbuffers.h"
-#include "flexbuffers.h"
-#include "idl.h"
-
-#endif // FLATBUFFERS_FLATC_PCH_H_

+ 0 - 1618
flatbuffers/flexbuffers.h

@@ -1,1618 +0,0 @@
-/*
- * Copyright 2017 Google Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLATBUFFERS_FLEXBUFFERS_H_
-#define FLATBUFFERS_FLEXBUFFERS_H_
-
-#include <map>
-// Used to select STL variant.
-#include "base.h"
-// We use the basic binary writing functions from the regular FlatBuffers.
-#include "util.h"
-
-#ifdef _MSC_VER
-#  include <intrin.h>
-#endif
-
-#if defined(_MSC_VER)
-#  pragma warning(push)
-#  pragma warning(disable : 4127)  // C4127: conditional expression is constant
-#endif
-
-namespace flexbuffers {
-
-class Reference;
-class Map;
-
-// These are used in the lower 2 bits of a type field to determine the size of
-// the elements (and or size field) of the item pointed to (e.g. vector).
-enum BitWidth {
-  BIT_WIDTH_8 = 0,
-  BIT_WIDTH_16 = 1,
-  BIT_WIDTH_32 = 2,
-  BIT_WIDTH_64 = 3,
-};
-
-// These are used as the upper 6 bits of a type field to indicate the actual
-// type.
-enum Type {
-  FBT_NULL = 0,
-  FBT_INT = 1,
-  FBT_UINT = 2,
-  FBT_FLOAT = 3,
-  // Types above stored inline, types below store an offset.
-  FBT_KEY = 4,
-  FBT_STRING = 5,
-  FBT_INDIRECT_INT = 6,
-  FBT_INDIRECT_UINT = 7,
-  FBT_INDIRECT_FLOAT = 8,
-  FBT_MAP = 9,
-  FBT_VECTOR = 10,      // Untyped.
-  FBT_VECTOR_INT = 11,  // Typed any size (stores no type table).
-  FBT_VECTOR_UINT = 12,
-  FBT_VECTOR_FLOAT = 13,
-  FBT_VECTOR_KEY = 14,
-  // DEPRECATED, use FBT_VECTOR or FBT_VECTOR_KEY instead.
-  // Read test.cpp/FlexBuffersDeprecatedTest() for details on why.
-  FBT_VECTOR_STRING_DEPRECATED = 15,
-  FBT_VECTOR_INT2 = 16,  // Typed tuple (no type table, no size field).
-  FBT_VECTOR_UINT2 = 17,
-  FBT_VECTOR_FLOAT2 = 18,
-  FBT_VECTOR_INT3 = 19,  // Typed triple (no type table, no size field).
-  FBT_VECTOR_UINT3 = 20,
-  FBT_VECTOR_FLOAT3 = 21,
-  FBT_VECTOR_INT4 = 22,  // Typed quad (no type table, no size field).
-  FBT_VECTOR_UINT4 = 23,
-  FBT_VECTOR_FLOAT4 = 24,
-  FBT_BLOB = 25,
-  FBT_BOOL = 26,
-  FBT_VECTOR_BOOL =
-      36,  // To Allow the same type of conversion of type to vector type
-};
-
-inline bool IsInline(Type t) { return t <= FBT_FLOAT || t == FBT_BOOL; }
-
-inline bool IsTypedVectorElementType(Type t) {
-  return (t >= FBT_INT && t <= FBT_STRING) || t == FBT_BOOL;
-}
-
-inline bool IsTypedVector(Type t) {
-  return (t >= FBT_VECTOR_INT && t <= FBT_VECTOR_STRING_DEPRECATED) ||
-         t == FBT_VECTOR_BOOL;
-}
-
-inline bool IsFixedTypedVector(Type t) {
-  return t >= FBT_VECTOR_INT2 && t <= FBT_VECTOR_FLOAT4;
-}
-
-inline Type ToTypedVector(Type t, size_t fixed_len = 0) {
-  FLATBUFFERS_ASSERT(IsTypedVectorElementType(t));
-  switch (fixed_len) {
-    case 0: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT);
-    case 2: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT2);
-    case 3: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT3);
-    case 4: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT4);
-    default: FLATBUFFERS_ASSERT(0); return FBT_NULL;
-  }
-}
-
-inline Type ToTypedVectorElementType(Type t) {
-  FLATBUFFERS_ASSERT(IsTypedVector(t));
-  return static_cast<Type>(t - FBT_VECTOR_INT + FBT_INT);
-}
-
-inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len) {
-  FLATBUFFERS_ASSERT(IsFixedTypedVector(t));
-  auto fixed_type = t - FBT_VECTOR_INT2;
-  *len = static_cast<uint8_t>(fixed_type / 3 +
-                              2);  // 3 types each, starting from length 2.
-  return static_cast<Type>(fixed_type % 3 + FBT_INT);
-}
-
-// TODO: implement proper support for 8/16bit floats, or decide not to
-// support them.
-typedef int16_t half;
-typedef int8_t quarter;
-
-// TODO: can we do this without conditionals using intrinsics or inline asm
-// on some platforms? Given branch prediction the method below should be
-// decently quick, but it is the most frequently executed function.
-// We could do an (unaligned) 64-bit read if we ifdef out the platforms for
-// which that doesn't work (or where we'd read into un-owned memory).
-template<typename R, typename T1, typename T2, typename T4, typename T8>
-R ReadSizedScalar(const uint8_t *data, uint8_t byte_width) {
-  return byte_width < 4
-             ? (byte_width < 2
-                    ? static_cast<R>(flatbuffers::ReadScalar<T1>(data))
-                    : static_cast<R>(flatbuffers::ReadScalar<T2>(data)))
-             : (byte_width < 8
-                    ? static_cast<R>(flatbuffers::ReadScalar<T4>(data))
-                    : static_cast<R>(flatbuffers::ReadScalar<T8>(data)));
-}
-
-inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width) {
-  return ReadSizedScalar<int64_t, int8_t, int16_t, int32_t, int64_t>(
-      data, byte_width);
-}
-
-inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width) {
-  // This is the "hottest" function (all offset lookups use this), so worth
-  // optimizing if possible.
-  // TODO: GCC apparently replaces memcpy by a rep movsb, but only if count is a
-  // constant, which here it isn't. Test if memcpy is still faster than
-  // the conditionals in ReadSizedScalar. Can also use inline asm.
-  // clang-format off
-  #if defined(_MSC_VER) && (defined(_M_X64) || defined _M_IX86)
-    uint64_t u = 0;
-    __movsb(reinterpret_cast<uint8_t *>(&u),
-            reinterpret_cast<const uint8_t *>(data), byte_width);
-    return flatbuffers::EndianScalar(u);
-  #else
-    return ReadSizedScalar<uint64_t, uint8_t, uint16_t, uint32_t, uint64_t>(
-             data, byte_width);
-  #endif
-  // clang-format on
-}
-
-inline double ReadDouble(const uint8_t *data, uint8_t byte_width) {
-  return ReadSizedScalar<double, quarter, half, float, double>(data,
-                                                               byte_width);
-}
-
-inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width) {
-  return offset - ReadUInt64(offset, byte_width);
-}
-
-template<typename T> const uint8_t *Indirect(const uint8_t *offset) {
-  return offset - flatbuffers::ReadScalar<T>(offset);
-}
-
-inline BitWidth WidthU(uint64_t u) {
-#define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width)                   \
-  {                                                                     \
-    if (!((u) & ~((1ULL << (width)) - 1ULL))) return BIT_WIDTH_##width; \
-  }
-  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 8);
-  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 16);
-  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 32);
-#undef FLATBUFFERS_GET_FIELD_BIT_WIDTH
-  return BIT_WIDTH_64;
-}
-
-inline BitWidth WidthI(int64_t i) {
-  auto u = static_cast<uint64_t>(i) << 1;
-  return WidthU(i >= 0 ? u : ~u);
-}
-
-inline BitWidth WidthF(double f) {
-  return static_cast<double>(static_cast<float>(f)) == f ? BIT_WIDTH_32
-                                                         : BIT_WIDTH_64;
-}
-
-// Base class of all types below.
-// Points into the data buffer and allows access to one type.
-class Object {
- public:
-  Object(const uint8_t *data, uint8_t byte_width)
-      : data_(data), byte_width_(byte_width) {}
-
- protected:
-  const uint8_t *data_;
-  uint8_t byte_width_;
-};
-
-// Object that has a size, obtained either from size prefix, or elsewhere.
-class Sized : public Object {
- public:
-  // Size prefix.
-  Sized(const uint8_t *data, uint8_t byte_width)
-      : Object(data, byte_width), size_(read_size()) {}
-  // Manual size.
-  Sized(const uint8_t *data, uint8_t byte_width, size_t sz)
-      : Object(data, byte_width), size_(sz) {}
-  size_t size() const { return size_; }
-  // Access size stored in `byte_width_` bytes before data_ pointer.
-  size_t read_size() const {
-    return static_cast<size_t>(ReadUInt64(data_ - byte_width_, byte_width_));
-  }
-
- protected:
-  size_t size_;
-};
-
-class String : public Sized {
- public:
-  // Size prefix.
-  String(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}
-  // Manual size.
-  String(const uint8_t *data, uint8_t byte_width, size_t sz)
-      : Sized(data, byte_width, sz) {}
-
-  size_t length() const { return size(); }
-  const char *c_str() const { return reinterpret_cast<const char *>(data_); }
-  std::string str() const { return std::string(c_str(), size()); }
-
-  static String EmptyString() {
-    static const char *empty_string = "";
-    return String(reinterpret_cast<const uint8_t *>(empty_string), 1, 0);
-  }
-  bool IsTheEmptyString() const { return data_ == EmptyString().data_; }
-};
-
-class Blob : public Sized {
- public:
-  Blob(const uint8_t *data_buf, uint8_t byte_width)
-      : Sized(data_buf, byte_width) {}
-
-  static Blob EmptyBlob() {
-    static const uint8_t empty_blob[] = { 0 /*len*/ };
-    return Blob(empty_blob + 1, 1);
-  }
-  bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; }
-  const uint8_t *data() const { return data_; }
-};
-
-class Vector : public Sized {
- public:
-  Vector(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}
-
-  Reference operator[](size_t i) const;
-
-  static Vector EmptyVector() {
-    static const uint8_t empty_vector[] = { 0 /*len*/ };
-    return Vector(empty_vector + 1, 1);
-  }
-  bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; }
-};
-
-class TypedVector : public Sized {
- public:
-  TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type)
-      : Sized(data, byte_width), type_(element_type) {}
-
-  Reference operator[](size_t i) const;
-
-  static TypedVector EmptyTypedVector() {
-    static const uint8_t empty_typed_vector[] = { 0 /*len*/ };
-    return TypedVector(empty_typed_vector + 1, 1, FBT_INT);
-  }
-  bool IsTheEmptyVector() const {
-    return data_ == TypedVector::EmptyTypedVector().data_;
-  }
-
-  Type ElementType() { return type_; }
-
-  friend Reference;
-
- private:
-  Type type_;
-
-  friend Map;
-};
-
-class FixedTypedVector : public Object {
- public:
-  FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type,
-                   uint8_t len)
-      : Object(data, byte_width), type_(element_type), len_(len) {}
-
-  Reference operator[](size_t i) const;
-
-  static FixedTypedVector EmptyFixedTypedVector() {
-    static const uint8_t fixed_empty_vector[] = { 0 /* unused */ };
-    return FixedTypedVector(fixed_empty_vector, 1, FBT_INT, 0);
-  }
-  bool IsTheEmptyFixedTypedVector() const {
-    return data_ == FixedTypedVector::EmptyFixedTypedVector().data_;
-  }
-
-  Type ElementType() { return type_; }
-  uint8_t size() { return len_; }
-
- private:
-  Type type_;
-  uint8_t len_;
-};
-
-class Map : public Vector {
- public:
-  Map(const uint8_t *data, uint8_t byte_width) : Vector(data, byte_width) {}
-
-  Reference operator[](const char *key) const;
-  Reference operator[](const std::string &key) const;
-
-  Vector Values() const { return Vector(data_, byte_width_); }
-
-  TypedVector Keys() const {
-    const size_t num_prefixed_fields = 3;
-    auto keys_offset = data_ - byte_width_ * num_prefixed_fields;
-    return TypedVector(Indirect(keys_offset, byte_width_),
-                       static_cast<uint8_t>(
-                           ReadUInt64(keys_offset + byte_width_, byte_width_)),
-                       FBT_KEY);
-  }
-
-  static Map EmptyMap() {
-    static const uint8_t empty_map[] = {
-      0 /*keys_len*/, 0 /*keys_offset*/, 1 /*keys_width*/, 0 /*len*/
-    };
-    return Map(empty_map + 4, 1);
-  }
-
-  bool IsTheEmptyMap() const { return data_ == EmptyMap().data_; }
-};
-
-template<typename T>
-void AppendToString(std::string &s, T &&v, bool keys_quoted) {
-  s += "[ ";
-  for (size_t i = 0; i < v.size(); i++) {
-    if (i) s += ", ";
-    v[i].ToString(true, keys_quoted, s);
-  }
-  s += " ]";
-}
-
-class Reference {
- public:
-  Reference()
-      : data_(nullptr),
-        parent_width_(0),
-        byte_width_(BIT_WIDTH_8),
-        type_(FBT_NULL) {}
-
-  Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width,
-            Type type)
-      : data_(data),
-        parent_width_(parent_width),
-        byte_width_(byte_width),
-        type_(type) {}
-
-  Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type)
-      : data_(data), parent_width_(parent_width) {
-    byte_width_ = 1U << static_cast<BitWidth>(packed_type & 3);
-    type_ = static_cast<Type>(packed_type >> 2);
-  }
-
-  Type GetType() const { return type_; }
-
-  bool IsNull() const { return type_ == FBT_NULL; }
-  bool IsBool() const { return type_ == FBT_BOOL; }
-  bool IsInt() const { return type_ == FBT_INT || type_ == FBT_INDIRECT_INT; }
-  bool IsUInt() const {
-    return type_ == FBT_UINT || type_ == FBT_INDIRECT_UINT;
-  }
-  bool IsIntOrUint() const { return IsInt() || IsUInt(); }
-  bool IsFloat() const {
-    return type_ == FBT_FLOAT || type_ == FBT_INDIRECT_FLOAT;
-  }
-  bool IsNumeric() const { return IsIntOrUint() || IsFloat(); }
-  bool IsString() const { return type_ == FBT_STRING; }
-  bool IsKey() const { return type_ == FBT_KEY; }
-  bool IsVector() const { return type_ == FBT_VECTOR || type_ == FBT_MAP; }
-  bool IsUntypedVector() const { return type_ == FBT_VECTOR; }
-  bool IsTypedVector() const { return flexbuffers::IsTypedVector(type_); }
-  bool IsFixedTypedVector() const {
-    return flexbuffers::IsFixedTypedVector(type_);
-  }
-  bool IsAnyVector() const {
-    return (IsTypedVector() || IsFixedTypedVector() || IsVector());
-  }
-  bool IsMap() const { return type_ == FBT_MAP; }
-  bool IsBlob() const { return type_ == FBT_BLOB; }
-  bool AsBool() const {
-    return (type_ == FBT_BOOL ? ReadUInt64(data_, parent_width_)
-                              : AsUInt64()) != 0;
-  }
-
-  // Reads any type as a int64_t. Never fails, does most sensible conversion.
-  // Truncates floats, strings are attempted to be parsed for a number,
-  // vectors/maps return their size. Returns 0 if all else fails.
-  int64_t AsInt64() const {
-    if (type_ == FBT_INT) {
-      // A fast path for the common case.
-      return ReadInt64(data_, parent_width_);
-    } else
-      switch (type_) {
-        case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
-        case FBT_UINT: return ReadUInt64(data_, parent_width_);
-        case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
-        case FBT_FLOAT:
-          return static_cast<int64_t>(ReadDouble(data_, parent_width_));
-        case FBT_INDIRECT_FLOAT:
-          return static_cast<int64_t>(ReadDouble(Indirect(), byte_width_));
-        case FBT_NULL: return 0;
-        case FBT_STRING: return flatbuffers::StringToInt(AsString().c_str());
-        case FBT_VECTOR: return static_cast<int64_t>(AsVector().size());
-        case FBT_BOOL: return ReadInt64(data_, parent_width_);
-        default:
-          // Convert other things to int.
-          return 0;
-      }
-  }
-
-  // TODO: could specialize these to not use AsInt64() if that saves
-  // extension ops in generated code, and use a faster op than ReadInt64.
-  int32_t AsInt32() const { return static_cast<int32_t>(AsInt64()); }
-  int16_t AsInt16() const { return static_cast<int16_t>(AsInt64()); }
-  int8_t AsInt8() const { return static_cast<int8_t>(AsInt64()); }
-
-  uint64_t AsUInt64() const {
-    if (type_ == FBT_UINT) {
-      // A fast path for the common case.
-      return ReadUInt64(data_, parent_width_);
-    } else
-      switch (type_) {
-        case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
-        case FBT_INT: return ReadInt64(data_, parent_width_);
-        case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
-        case FBT_FLOAT:
-          return static_cast<uint64_t>(ReadDouble(data_, parent_width_));
-        case FBT_INDIRECT_FLOAT:
-          return static_cast<uint64_t>(ReadDouble(Indirect(), byte_width_));
-        case FBT_NULL: return 0;
-        case FBT_STRING: return flatbuffers::StringToUInt(AsString().c_str());
-        case FBT_VECTOR: return static_cast<uint64_t>(AsVector().size());
-        case FBT_BOOL: return ReadUInt64(data_, parent_width_);
-        default:
-          // Convert other things to uint.
-          return 0;
-      }
-  }
-
-  uint32_t AsUInt32() const { return static_cast<uint32_t>(AsUInt64()); }
-  uint16_t AsUInt16() const { return static_cast<uint16_t>(AsUInt64()); }
-  uint8_t AsUInt8() const { return static_cast<uint8_t>(AsUInt64()); }
-
-  double AsDouble() const {
-    if (type_ == FBT_FLOAT) {
-      // A fast path for the common case.
-      return ReadDouble(data_, parent_width_);
-    } else
-      switch (type_) {
-        case FBT_INDIRECT_FLOAT: return ReadDouble(Indirect(), byte_width_);
-        case FBT_INT:
-          return static_cast<double>(ReadInt64(data_, parent_width_));
-        case FBT_UINT:
-          return static_cast<double>(ReadUInt64(data_, parent_width_));
-        case FBT_INDIRECT_INT:
-          return static_cast<double>(ReadInt64(Indirect(), byte_width_));
-        case FBT_INDIRECT_UINT:
-          return static_cast<double>(ReadUInt64(Indirect(), byte_width_));
-        case FBT_NULL: return 0.0;
-        case FBT_STRING: {
-          double d;
-          flatbuffers::StringToNumber(AsString().c_str(), &d);
-          return d;
-        }
-        case FBT_VECTOR: return static_cast<double>(AsVector().size());
-        case FBT_BOOL:
-          return static_cast<double>(ReadUInt64(data_, parent_width_));
-        default:
-          // Convert strings and other things to float.
-          return 0;
-      }
-  }
-
-  float AsFloat() const { return static_cast<float>(AsDouble()); }
-
-  const char *AsKey() const {
-    if (type_ == FBT_KEY || type_ == FBT_STRING) {
-      return reinterpret_cast<const char *>(Indirect());
-    } else {
-      return "";
-    }
-  }
-
-  // This function returns the empty string if you try to read something that
-  // is not a string or key.
-  String AsString() const {
-    if (type_ == FBT_STRING) {
-      return String(Indirect(), byte_width_);
-    } else if (type_ == FBT_KEY) {
-      auto key = Indirect();
-      return String(key, byte_width_,
-                    strlen(reinterpret_cast<const char *>(key)));
-    } else {
-      return String::EmptyString();
-    }
-  }
-
-  // Unlike AsString(), this will convert any type to a std::string.
-  std::string ToString() const {
-    std::string s;
-    ToString(false, false, s);
-    return s;
-  }
-
-  // Convert any type to a JSON-like string. strings_quoted determines if
-  // string values at the top level receive "" quotes (inside other values
-  // they always do). keys_quoted determines if keys are quoted, at any level.
-  // TODO(wvo): add further options to have indentation/newlines.
-  void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const {
-    if (type_ == FBT_STRING) {
-      String str(Indirect(), byte_width_);
-      if (strings_quoted) {
-        flatbuffers::EscapeString(str.c_str(), str.length(), &s, true, false);
-      } else {
-        s.append(str.c_str(), str.length());
-      }
-    } else if (IsKey()) {
-      auto str = AsKey();
-      if (keys_quoted) {
-        flatbuffers::EscapeString(str, strlen(str), &s, true, false);
-      } else {
-        s += str;
-      }
-    } else if (IsInt()) {
-      s += flatbuffers::NumToString(AsInt64());
-    } else if (IsUInt()) {
-      s += flatbuffers::NumToString(AsUInt64());
-    } else if (IsFloat()) {
-      s += flatbuffers::NumToString(AsDouble());
-    } else if (IsNull()) {
-      s += "null";
-    } else if (IsBool()) {
-      s += AsBool() ? "true" : "false";
-    } else if (IsMap()) {
-      s += "{ ";
-      auto m = AsMap();
-      auto keys = m.Keys();
-      auto vals = m.Values();
-      for (size_t i = 0; i < keys.size(); i++) {
-        keys[i].ToString(true, keys_quoted, s);
-        s += ": ";
-        vals[i].ToString(true, keys_quoted, s);
-        if (i < keys.size() - 1) s += ", ";
-      }
-      s += " }";
-    } else if (IsVector()) {
-      AppendToString<Vector>(s, AsVector(), keys_quoted);
-    } else if (IsTypedVector()) {
-      AppendToString<TypedVector>(s, AsTypedVector(), keys_quoted);
-    } else if (IsFixedTypedVector()) {
-      AppendToString<FixedTypedVector>(s, AsFixedTypedVector(), keys_quoted);
-    } else if (IsBlob()) {
-      auto blob = AsBlob();
-      flatbuffers::EscapeString(reinterpret_cast<const char *>(blob.data()),
-                                blob.size(), &s, true, false);
-    } else {
-      s += "(?)";
-    }
-  }
-
-  // This function returns the empty blob if you try to read a not-blob.
-  // Strings can be viewed as blobs too.
-  Blob AsBlob() const {
-    if (type_ == FBT_BLOB || type_ == FBT_STRING) {
-      return Blob(Indirect(), byte_width_);
-    } else {
-      return Blob::EmptyBlob();
-    }
-  }
-
-  // This function returns the empty vector if you try to read a not-vector.
-  // Maps can be viewed as vectors too.
-  Vector AsVector() const {
-    if (type_ == FBT_VECTOR || type_ == FBT_MAP) {
-      return Vector(Indirect(), byte_width_);
-    } else {
-      return Vector::EmptyVector();
-    }
-  }
-
-  TypedVector AsTypedVector() const {
-    if (IsTypedVector()) {
-      auto tv =
-          TypedVector(Indirect(), byte_width_, ToTypedVectorElementType(type_));
-      if (tv.type_ == FBT_STRING) {
-        // These can't be accessed as strings, since we don't know the bit-width
-        // of the size field, see the declaration of
-        // FBT_VECTOR_STRING_DEPRECATED above for details.
-        // We change the type here to be keys, which are a subtype of strings,
-        // and will ignore the size field. This will truncate strings with
-        // embedded nulls.
-        tv.type_ = FBT_KEY;
-      }
-      return tv;
-    } else {
-      return TypedVector::EmptyTypedVector();
-    }
-  }
-
-  FixedTypedVector AsFixedTypedVector() const {
-    if (IsFixedTypedVector()) {
-      uint8_t len = 0;
-      auto vtype = ToFixedTypedVectorElementType(type_, &len);
-      return FixedTypedVector(Indirect(), byte_width_, vtype, len);
-    } else {
-      return FixedTypedVector::EmptyFixedTypedVector();
-    }
-  }
-
-  Map AsMap() const {
-    if (type_ == FBT_MAP) {
-      return Map(Indirect(), byte_width_);
-    } else {
-      return Map::EmptyMap();
-    }
-  }
-
-  template<typename T> T As() const;
-
-  // Experimental: Mutation functions.
-  // These allow scalars in an already created buffer to be updated in-place.
-  // Since by default scalars are stored in the smallest possible space,
-  // the new value may not fit, in which case these functions return false.
-  // To avoid this, you can construct the values you intend to mutate using
-  // Builder::ForceMinimumBitWidth.
-  bool MutateInt(int64_t i) {
-    if (type_ == FBT_INT) {
-      return Mutate(data_, i, parent_width_, WidthI(i));
-    } else if (type_ == FBT_INDIRECT_INT) {
-      return Mutate(Indirect(), i, byte_width_, WidthI(i));
-    } else if (type_ == FBT_UINT) {
-      auto u = static_cast<uint64_t>(i);
-      return Mutate(data_, u, parent_width_, WidthU(u));
-    } else if (type_ == FBT_INDIRECT_UINT) {
-      auto u = static_cast<uint64_t>(i);
-      return Mutate(Indirect(), u, byte_width_, WidthU(u));
-    } else {
-      return false;
-    }
-  }
-
-  bool MutateBool(bool b) {
-    return type_ == FBT_BOOL && Mutate(data_, b, parent_width_, BIT_WIDTH_8);
-  }
-
-  bool MutateUInt(uint64_t u) {
-    if (type_ == FBT_UINT) {
-      return Mutate(data_, u, parent_width_, WidthU(u));
-    } else if (type_ == FBT_INDIRECT_UINT) {
-      return Mutate(Indirect(), u, byte_width_, WidthU(u));
-    } else if (type_ == FBT_INT) {
-      auto i = static_cast<int64_t>(u);
-      return Mutate(data_, i, parent_width_, WidthI(i));
-    } else if (type_ == FBT_INDIRECT_INT) {
-      auto i = static_cast<int64_t>(u);
-      return Mutate(Indirect(), i, byte_width_, WidthI(i));
-    } else {
-      return false;
-    }
-  }
-
-  bool MutateFloat(float f) {
-    if (type_ == FBT_FLOAT) {
-      return MutateF(data_, f, parent_width_, BIT_WIDTH_32);
-    } else if (type_ == FBT_INDIRECT_FLOAT) {
-      return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32);
-    } else {
-      return false;
-    }
-  }
-
-  bool MutateFloat(double d) {
-    if (type_ == FBT_FLOAT) {
-      return MutateF(data_, d, parent_width_, WidthF(d));
-    } else if (type_ == FBT_INDIRECT_FLOAT) {
-      return MutateF(Indirect(), d, byte_width_, WidthF(d));
-    } else {
-      return false;
-    }
-  }
-
-  bool MutateString(const char *str, size_t len) {
-    auto s = AsString();
-    if (s.IsTheEmptyString()) return false;
-    // This is very strict, could allow shorter strings, but that creates
-    // garbage.
-    if (s.length() != len) return false;
-    memcpy(const_cast<char *>(s.c_str()), str, len);
-    return true;
-  }
-  bool MutateString(const char *str) { return MutateString(str, strlen(str)); }
-  bool MutateString(const std::string &str) {
-    return MutateString(str.data(), str.length());
-  }
-
- private:
-  const uint8_t *Indirect() const {
-    return flexbuffers::Indirect(data_, parent_width_);
-  }
-
-  template<typename T>
-  bool Mutate(const uint8_t *dest, T t, size_t byte_width,
-              BitWidth value_width) {
-    auto fits = static_cast<size_t>(static_cast<size_t>(1U) << value_width) <=
-                byte_width;
-    if (fits) {
-      t = flatbuffers::EndianScalar(t);
-      memcpy(const_cast<uint8_t *>(dest), &t, byte_width);
-    }
-    return fits;
-  }
-
-  template<typename T>
-  bool MutateF(const uint8_t *dest, T t, size_t byte_width,
-               BitWidth value_width) {
-    if (byte_width == sizeof(double))
-      return Mutate(dest, static_cast<double>(t), byte_width, value_width);
-    if (byte_width == sizeof(float))
-      return Mutate(dest, static_cast<float>(t), byte_width, value_width);
-    FLATBUFFERS_ASSERT(false);
-    return false;
-  }
-
-  const uint8_t *data_;
-  uint8_t parent_width_;
-  uint8_t byte_width_;
-  Type type_;
-};
-
-// Template specialization for As().
-template<> inline bool Reference::As<bool>() const { return AsBool(); }
-
-template<> inline int8_t Reference::As<int8_t>() const { return AsInt8(); }
-template<> inline int16_t Reference::As<int16_t>() const { return AsInt16(); }
-template<> inline int32_t Reference::As<int32_t>() const { return AsInt32(); }
-template<> inline int64_t Reference::As<int64_t>() const { return AsInt64(); }
-
-template<> inline uint8_t Reference::As<uint8_t>() const { return AsUInt8(); }
-template<> inline uint16_t Reference::As<uint16_t>() const {
-  return AsUInt16();
-}
-template<> inline uint32_t Reference::As<uint32_t>() const {
-  return AsUInt32();
-}
-template<> inline uint64_t Reference::As<uint64_t>() const {
-  return AsUInt64();
-}
-
-template<> inline double Reference::As<double>() const { return AsDouble(); }
-template<> inline float Reference::As<float>() const { return AsFloat(); }
-
-template<> inline String Reference::As<String>() const { return AsString(); }
-template<> inline std::string Reference::As<std::string>() const {
-  return AsString().str();
-}
-
-template<> inline Blob Reference::As<Blob>() const { return AsBlob(); }
-template<> inline Vector Reference::As<Vector>() const { return AsVector(); }
-template<> inline TypedVector Reference::As<TypedVector>() const {
-  return AsTypedVector();
-}
-template<> inline FixedTypedVector Reference::As<FixedTypedVector>() const {
-  return AsFixedTypedVector();
-}
-template<> inline Map Reference::As<Map>() const { return AsMap(); }
-
-inline uint8_t PackedType(BitWidth bit_width, Type type) {
-  return static_cast<uint8_t>(bit_width | (type << 2));
-}
-
-inline uint8_t NullPackedType() { return PackedType(BIT_WIDTH_8, FBT_NULL); }
-
-// Vector accessors.
-// Note: if you try to access outside of bounds, you get a Null value back
-// instead. Normally this would be an assert, but since this is "dynamically
-// typed" data, you may not want that (someone sends you a 2d vector and you
-// wanted 3d).
-// The Null converts seamlessly into a default value for any other type.
-// TODO(wvo): Could introduce an #ifdef that makes this into an assert?
-inline Reference Vector::operator[](size_t i) const {
-  auto len = size();
-  if (i >= len) return Reference(nullptr, 1, NullPackedType());
-  auto packed_type = (data_ + len * byte_width_)[i];
-  auto elem = data_ + i * byte_width_;
-  return Reference(elem, byte_width_, packed_type);
-}
-
-inline Reference TypedVector::operator[](size_t i) const {
-  auto len = size();
-  if (i >= len) return Reference(nullptr, 1, NullPackedType());
-  auto elem = data_ + i * byte_width_;
-  return Reference(elem, byte_width_, 1, type_);
-}
-
-inline Reference FixedTypedVector::operator[](size_t i) const {
-  if (i >= len_) return Reference(nullptr, 1, NullPackedType());
-  auto elem = data_ + i * byte_width_;
-  return Reference(elem, byte_width_, 1, type_);
-}
-
-template<typename T> int KeyCompare(const void *key, const void *elem) {
-  auto str_elem = reinterpret_cast<const char *>(
-      Indirect<T>(reinterpret_cast<const uint8_t *>(elem)));
-  auto skey = reinterpret_cast<const char *>(key);
-  return strcmp(skey, str_elem);
-}
-
-inline Reference Map::operator[](const char *key) const {
-  auto keys = Keys();
-  // We can't pass keys.byte_width_ to the comparison function, so we have
-  // to pick the right one ahead of time.
-  int (*comp)(const void *, const void *) = nullptr;
-  switch (keys.byte_width_) {
-    case 1: comp = KeyCompare<uint8_t>; break;
-    case 2: comp = KeyCompare<uint16_t>; break;
-    case 4: comp = KeyCompare<uint32_t>; break;
-    case 8: comp = KeyCompare<uint64_t>; break;
-  }
-  auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp);
-  if (!res) return Reference(nullptr, 1, NullPackedType());
-  auto i = (reinterpret_cast<uint8_t *>(res) - keys.data_) / keys.byte_width_;
-  return (*static_cast<const Vector *>(this))[i];
-}
-
-inline Reference Map::operator[](const std::string &key) const {
-  return (*this)[key.c_str()];
-}
-
-inline Reference GetRoot(const uint8_t *buffer, size_t size) {
-  // See Finish() below for the serialization counterpart of this.
-  // The root starts at the end of the buffer, so we parse backwards from there.
-  auto end = buffer + size;
-  auto byte_width = *--end;
-  auto packed_type = *--end;
-  end -= byte_width;  // The root data item.
-  return Reference(end, byte_width, packed_type);
-}
-
-inline Reference GetRoot(const std::vector<uint8_t> &buffer) {
-  return GetRoot(flatbuffers::vector_data(buffer), buffer.size());
-}
-
-// Flags that configure how the Builder behaves.
-// The "Share" flags determine if the Builder automatically tries to pool
-// this type. Pooling can reduce the size of serialized data if there are
-// multiple maps of the same kind, at the expense of slightly slower
-// serialization (the cost of lookups) and more memory use (std::set).
-// By default this is on for keys, but off for strings.
-// Turn keys off if you have e.g. only one map.
-// Turn strings on if you expect many non-unique string values.
-// Additionally, sharing key vectors can save space if you have maps with
-// identical field populations.
-enum BuilderFlag {
-  BUILDER_FLAG_NONE = 0,
-  BUILDER_FLAG_SHARE_KEYS = 1,
-  BUILDER_FLAG_SHARE_STRINGS = 2,
-  BUILDER_FLAG_SHARE_KEYS_AND_STRINGS = 3,
-  BUILDER_FLAG_SHARE_KEY_VECTORS = 4,
-  BUILDER_FLAG_SHARE_ALL = 7,
-};
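-
-// Example (a minimal sketch): enable string pooling in addition to the
-// default key pooling, trading some build speed and memory for a smaller
-// buffer.
-//
-//   flexbuffers::Builder fbb(512,
-//                            flexbuffers::BUILDER_FLAG_SHARE_KEYS_AND_STRINGS);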
-
-class Builder FLATBUFFERS_FINAL_CLASS {
- public:
-  Builder(size_t initial_size = 256,
-          BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS)
-      : buf_(initial_size),
-        finished_(false),
-        flags_(flags),
-        force_min_bit_width_(BIT_WIDTH_8),
-        key_pool(KeyOffsetCompare(buf_)),
-        string_pool(StringOffsetCompare(buf_)) {
-    buf_.clear();
-  }
-
-  /// @brief Get the serialized buffer (after you call `Finish()`).
-  /// @return Returns a vector owned by this class.
-  const std::vector<uint8_t> &GetBuffer() const {
-    Finished();
-    return buf_;
-  }
-
-  // Size of the buffer. Does not include unfinished values.
-  size_t GetSize() const { return buf_.size(); }
-
-  // Reset all state so we can re-use the buffer.
-  void Clear() {
-    buf_.clear();
-    stack_.clear();
-    finished_ = false;
-    // flags_ remains as-is.
-    force_min_bit_width_ = BIT_WIDTH_8;
-    key_pool.clear();
-    string_pool.clear();
-  }
-
-  // All value constructing functions below have two versions: one that
-  // takes a key (for placement inside a map) and one that doesn't (for inside
-  // vectors and elsewhere).
-
-  void Null() { stack_.push_back(Value()); }
-  void Null(const char *key) {
-    Key(key);
-    Null();
-  }
-
-  void Int(int64_t i) { stack_.push_back(Value(i, FBT_INT, WidthI(i))); }
-  void Int(const char *key, int64_t i) {
-    Key(key);
-    Int(i);
-  }
-
-  void UInt(uint64_t u) { stack_.push_back(Value(u, FBT_UINT, WidthU(u))); }
-  void UInt(const char *key, uint64_t u) {
-    Key(key);
-    UInt(u);
-  }
-
-  void Float(float f) { stack_.push_back(Value(f)); }
-  void Float(const char *key, float f) {
-    Key(key);
-    Float(f);
-  }
-
-  void Double(double f) { stack_.push_back(Value(f)); }
-  void Double(const char *key, double d) {
-    Key(key);
-    Double(d);
-  }
-
-  void Bool(bool b) { stack_.push_back(Value(b)); }
-  void Bool(const char *key, bool b) {
-    Key(key);
-    Bool(b);
-  }
-
-  void IndirectInt(int64_t i) { PushIndirect(i, FBT_INDIRECT_INT, WidthI(i)); }
-  void IndirectInt(const char *key, int64_t i) {
-    Key(key);
-    IndirectInt(i);
-  }
-
-  void IndirectUInt(uint64_t u) {
-    PushIndirect(u, FBT_INDIRECT_UINT, WidthU(u));
-  }
-  void IndirectUInt(const char *key, uint64_t u) {
-    Key(key);
-    IndirectUInt(u);
-  }
-
-  void IndirectFloat(float f) {
-    PushIndirect(f, FBT_INDIRECT_FLOAT, BIT_WIDTH_32);
-  }
-  void IndirectFloat(const char *key, float f) {
-    Key(key);
-    IndirectFloat(f);
-  }
-
-  void IndirectDouble(double f) {
-    PushIndirect(f, FBT_INDIRECT_FLOAT, WidthF(f));
-  }
-  void IndirectDouble(const char *key, double d) {
-    Key(key);
-    IndirectDouble(d);
-  }
-
-  size_t Key(const char *str, size_t len) {
-    auto sloc = buf_.size();
-    WriteBytes(str, len + 1);
-    if (flags_ & BUILDER_FLAG_SHARE_KEYS) {
-      auto it = key_pool.find(sloc);
-      if (it != key_pool.end()) {
-        // Already in the buffer. Remove key we just serialized, and use
-        // existing offset instead.
-        buf_.resize(sloc);
-        sloc = *it;
-      } else {
-        key_pool.insert(sloc);
-      }
-    }
-    stack_.push_back(Value(static_cast<uint64_t>(sloc), FBT_KEY, BIT_WIDTH_8));
-    return sloc;
-  }
-
-  size_t Key(const char *str) { return Key(str, strlen(str)); }
-  size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); }
-
-  size_t String(const char *str, size_t len) {
-    auto reset_to = buf_.size();
-    auto sloc = CreateBlob(str, len, 1, FBT_STRING);
-    if (flags_ & BUILDER_FLAG_SHARE_STRINGS) {
-      StringOffset so(sloc, len);
-      auto it = string_pool.find(so);
-      if (it != string_pool.end()) {
-        // Already in the buffer. Remove string we just serialized, and use
-        // existing offset instead.
-        buf_.resize(reset_to);
-        sloc = it->first;
-        stack_.back().u_ = sloc;
-      } else {
-        string_pool.insert(so);
-      }
-    }
-    return sloc;
-  }
-  size_t String(const char *str) { return String(str, strlen(str)); }
-  size_t String(const std::string &str) {
-    return String(str.c_str(), str.size());
-  }
-  void String(const flexbuffers::String &str) {
-    String(str.c_str(), str.length());
-  }
-
-  void String(const char *key, const char *str) {
-    Key(key);
-    String(str);
-  }
-  void String(const char *key, const std::string &str) {
-    Key(key);
-    String(str);
-  }
-  void String(const char *key, const flexbuffers::String &str) {
-    Key(key);
-    String(str);
-  }
-
-  size_t Blob(const void *data, size_t len) {
-    return CreateBlob(data, len, 0, FBT_BLOB);
-  }
-  size_t Blob(const std::vector<uint8_t> &v) {
-    return CreateBlob(flatbuffers::vector_data(v), v.size(), 0, FBT_BLOB);
-  }
-
-  // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String),
-  // e.g. Vector etc. Also in overloaded versions.
-  // Also some FlatBuffers types?
-
-  size_t StartVector() { return stack_.size(); }
-  size_t StartVector(const char *key) {
-    Key(key);
-    return stack_.size();
-  }
-  size_t StartMap() { return stack_.size(); }
-  size_t StartMap(const char *key) {
-    Key(key);
-    return stack_.size();
-  }
-
-  // TODO(wvo): allow this to specify an alignment greater than the natural
-  // alignment.
-  size_t EndVector(size_t start, bool typed, bool fixed) {
-    auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed);
-    // Remove temp elements and return vector.
-    stack_.resize(start);
-    stack_.push_back(vec);
-    return static_cast<size_t>(vec.u_);
-  }
-
-  size_t EndMap(size_t start) {
-    // We should have interleaved keys and values on the stack.
-    // Make sure it is an even number:
-    auto len = stack_.size() - start;
-    FLATBUFFERS_ASSERT(!(len & 1));
-    len /= 2;
-    // Make sure keys are all strings:
-    for (auto key = start; key < stack_.size(); key += 2) {
-      FLATBUFFERS_ASSERT(stack_[key].type_ == FBT_KEY);
-    }
-    // Now sort values, so later we can do a binary search lookup.
-    // We want to sort 2 array elements at a time.
-    struct TwoValue {
-      Value key;
-      Value val;
-    };
-    // TODO(wvo): strict aliasing?
-    // TODO(wvo): allow the caller to indicate the data is already sorted
-    // for maximum efficiency? With an assert to check sortedness to make sure
-    // we're not breaking binary search.
-    // Or, we can track if the map is sorted as keys are added, which would
-    // be quite cheap (cheaper than checking it here), so we can skip this
-    // step automatically when applicable, and encourage people to write in
-    // sorted fashion.
-    // std::sort is typically already a lot faster on sorted data though.
-    auto dict =
-        reinterpret_cast<TwoValue *>(flatbuffers::vector_data(stack_) + start);
-    std::sort(dict, dict + len,
-              [&](const TwoValue &a, const TwoValue &b) -> bool {
-                auto as = reinterpret_cast<const char *>(
-                    flatbuffers::vector_data(buf_) + a.key.u_);
-                auto bs = reinterpret_cast<const char *>(
-                    flatbuffers::vector_data(buf_) + b.key.u_);
-                auto comp = strcmp(as, bs);
-                // If this assertion hits, you've added two keys with the same
-                // value to this map.
-                // TODO: Have to check for pointer equality, as some sort
-                // implementations apparently call this function with the
-                // same element?? Why?
-                FLATBUFFERS_ASSERT(comp || &a == &b);
-                return comp < 0;
-              });
-    // First create a vector out of all keys.
-    // TODO(wvo): if BUILDER_FLAG_SHARE_KEY_VECTORS is set, see if we can
-    // share the first vector.
-    auto keys = CreateVector(start, len, 2, true, false);
-    auto vec = CreateVector(start + 1, len, 2, false, false, &keys);
-    // Remove temp elements and return map.
-    stack_.resize(start);
-    stack_.push_back(vec);
-    return static_cast<size_t>(vec.u_);
-  }
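-
-  // Example (a minimal sketch, assuming a Builder `fbb`): keys can be pushed
-  // in any order, since EndMap() sorts them for binary search.
-  //
-  //   auto start = fbb.StartMap();
-  //   fbb.Int("answer", 42);
-  //   fbb.String("greeting", "world");
-  //   fbb.EndMap(start);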
-
-  template<typename F> size_t Vector(F f) {
-    auto start = StartVector();
-    f();
-    return EndVector(start, false, false);
-  }
-  template<typename F, typename T> size_t Vector(F f, T &state) {
-    auto start = StartVector();
-    f(state);
-    return EndVector(start, false, false);
-  }
-  template<typename F> size_t Vector(const char *key, F f) {
-    auto start = StartVector(key);
-    f();
-    return EndVector(start, false, false);
-  }
-  template<typename F, typename T>
-  size_t Vector(const char *key, F f, T &state) {
-    auto start = StartVector(key);
-    f(state);
-    return EndVector(start, false, false);
-  }
-
-  template<typename T> void Vector(const T *elems, size_t len) {
-    if (flatbuffers::is_scalar<T>::value) {
-      // This path should be a lot quicker and use less space.
-      ScalarVector(elems, len, false);
-    } else {
-      auto start = StartVector();
-      for (size_t i = 0; i < len; i++) Add(elems[i]);
-      EndVector(start, false, false);
-    }
-  }
-  template<typename T>
-  void Vector(const char *key, const T *elems, size_t len) {
-    Key(key);
-    Vector(elems, len);
-  }
-  template<typename T> void Vector(const std::vector<T> &vec) {
-    Vector(flatbuffers::vector_data(vec), vec.size());
-  }
-
-  template<typename F> size_t TypedVector(F f) {
-    auto start = StartVector();
-    f();
-    return EndVector(start, true, false);
-  }
-  template<typename F, typename T> size_t TypedVector(F f, T &state) {
-    auto start = StartVector();
-    f(state);
-    return EndVector(start, true, false);
-  }
-  template<typename F> size_t TypedVector(const char *key, F f) {
-    auto start = StartVector(key);
-    f();
-    return EndVector(start, true, false);
-  }
-  template<typename F, typename T>
-  size_t TypedVector(const char *key, F f, T &state) {
-    auto start = StartVector(key);
-    f(state);
-    return EndVector(start, true, false);
-  }
-
-  template<typename T> size_t FixedTypedVector(const T *elems, size_t len) {
-    // We only support a few fixed vector lengths. For anything bigger, use a
-    // regular typed vector.
-    FLATBUFFERS_ASSERT(len >= 2 && len <= 4);
-    // And only scalar values.
-    static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
-    return ScalarVector(elems, len, true);
-  }
-
-  template<typename T>
-  size_t FixedTypedVector(const char *key, const T *elems, size_t len) {
-    Key(key);
-    return FixedTypedVector(elems, len);
-  }
-
-  template<typename F> size_t Map(F f) {
-    auto start = StartMap();
-    f();
-    return EndMap(start);
-  }
-  template<typename F, typename T> size_t Map(F f, T &state) {
-    auto start = StartMap();
-    f(state);
-    return EndMap(start);
-  }
-  template<typename F> size_t Map(const char *key, F f) {
-    auto start = StartMap(key);
-    f();
-    return EndMap(start);
-  }
-  template<typename F, typename T> size_t Map(const char *key, F f, T &state) {
-    auto start = StartMap(key);
-    f(state);
-    return EndMap(start);
-  }
-  template<typename T> void Map(const std::map<std::string, T> &map) {
-    auto start = StartMap();
-    for (auto it = map.begin(); it != map.end(); ++it)
-      Add(it->first.c_str(), it->second);
-    EndMap(start);
-  }
-
-  // If you wish to share a value explicitly (a value not shared automatically
-  // through one of the BUILDER_FLAG_SHARE_* flags), or if you wish to turn
-  // those flags off for performance reasons and still do some explicit
-  // sharing, you can do so with these functions. For example:
-  // builder.IndirectDouble(M_PI);
-  // auto id = builder.LastValue();  // Remember where we stored it.
-  // .. more code goes here ..
-  // builder.ReuseValue(id);  // Refers to same double by offset.
-  // LastValue works regardless of whether the value has a key or not.
-  // Works on any data type.
-  struct Value;
-  Value LastValue() { return stack_.back(); }
-  void ReuseValue(Value v) { stack_.push_back(v); }
-  void ReuseValue(const char *key, Value v) {
-    Key(key);
-    ReuseValue(v);
-  }
-
-  // Overloaded Add that tries to call the correct function above.
-  void Add(int8_t i) { Int(i); }
-  void Add(int16_t i) { Int(i); }
-  void Add(int32_t i) { Int(i); }
-  void Add(int64_t i) { Int(i); }
-  void Add(uint8_t u) { UInt(u); }
-  void Add(uint16_t u) { UInt(u); }
-  void Add(uint32_t u) { UInt(u); }
-  void Add(uint64_t u) { UInt(u); }
-  void Add(float f) { Float(f); }
-  void Add(double d) { Double(d); }
-  void Add(bool b) { Bool(b); }
-  void Add(const char *str) { String(str); }
-  void Add(const std::string &str) { String(str); }
-  void Add(const flexbuffers::String &str) { String(str); }
-
-  template<typename T> void Add(const std::vector<T> &vec) { Vector(vec); }
-
-  template<typename T> void Add(const char *key, const T &t) {
-    Key(key);
-    Add(t);
-  }
-
-  template<typename T> void Add(const std::map<std::string, T> &map) {
-    Map(map);
-  }
-
-  template<typename T> void operator+=(const T &t) { Add(t); }
-
-  // This function is useful in combination with the Mutate* functions above.
-  // It forces elements of vectors and maps to have a minimum size, such that
-  // they can later be updated without failing.
-  // Call with no arguments to reset.
-  void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) {
-    force_min_bit_width_ = bw;
-  }
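-
-  // Example (a minimal sketch, assuming a Builder `fbb`): write elements at
-  // least 32 bits wide so they can later be mutated to any 32-bit value.
-  //
-  //   fbb.ForceMinimumBitWidth(flexbuffers::BIT_WIDTH_32);
-  //   fbb.Vector([&]() { fbb.Int(1); fbb.Int(2); });
-  //   fbb.ForceMinimumBitWidth();  // Reset to the default.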
-
-  void Finish() {
-    // If you hit this assert, you likely have objects that were never included
-    // in a parent. You need to have exactly one root to finish a buffer.
-    // Check your Start/End calls are matched, and all objects are inside
-    // some other object.
-    FLATBUFFERS_ASSERT(stack_.size() == 1);
-
-    // Write root value.
-    auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0));
-    WriteAny(stack_[0], byte_width);
-    // Write root type.
-    Write(stack_[0].StoredPackedType(), 1);
-    // Write root size. Normally determined by parent, but root has no parent :)
-    Write(byte_width, 1);
-
-    finished_ = true;
-  }
-
- private:
-  void Finished() const {
-    // If you get this assert, you're attempting to access a buffer
-    // which hasn't been finished yet. Be sure to call
-    // Builder::Finish with your root object.
-    FLATBUFFERS_ASSERT(finished_);
-  }
-
-  // Align to prepare for writing a scalar with a certain size.
-  uint8_t Align(BitWidth alignment) {
-    auto byte_width = 1U << alignment;
-    buf_.insert(buf_.end(), flatbuffers::PaddingBytes(buf_.size(), byte_width),
-                0);
-    return static_cast<uint8_t>(byte_width);
-  }
-
-  void WriteBytes(const void *val, size_t size) {
-    buf_.insert(buf_.end(), reinterpret_cast<const uint8_t *>(val),
-                reinterpret_cast<const uint8_t *>(val) + size);
-  }
-
-  template<typename T> void Write(T val, size_t byte_width) {
-    FLATBUFFERS_ASSERT(sizeof(T) >= byte_width);
-    val = flatbuffers::EndianScalar(val);
-    WriteBytes(&val, byte_width);
-  }
-
-  void WriteDouble(double f, uint8_t byte_width) {
-    switch (byte_width) {
-      case 8: Write(f, byte_width); break;
-      case 4: Write(static_cast<float>(f), byte_width); break;
-      // case 2: Write(static_cast<half>(f), byte_width); break;
-      // case 1: Write(static_cast<quarter>(f), byte_width); break;
-      default: FLATBUFFERS_ASSERT(0);
-    }
-  }
-
-  void WriteOffset(uint64_t o, uint8_t byte_width) {
-    auto reloff = buf_.size() - o;
-    FLATBUFFERS_ASSERT(byte_width == 8 || reloff < 1ULL << (byte_width * 8));
-    Write(reloff, byte_width);
-  }
-
-  template<typename T> void PushIndirect(T val, Type type, BitWidth bit_width) {
-    auto byte_width = Align(bit_width);
-    auto iloc = buf_.size();
-    Write(val, byte_width);
-    stack_.push_back(Value(static_cast<uint64_t>(iloc), type, bit_width));
-  }
-
-  static BitWidth WidthB(size_t byte_width) {
-    switch (byte_width) {
-      case 1: return BIT_WIDTH_8;
-      case 2: return BIT_WIDTH_16;
-      case 4: return BIT_WIDTH_32;
-      case 8: return BIT_WIDTH_64;
-      default: FLATBUFFERS_ASSERT(false); return BIT_WIDTH_64;
-    }
-  }
-
-  template<typename T> static Type GetScalarType() {
-    static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
-    return flatbuffers::is_floating_point<T>::value
-               ? FBT_FLOAT
-               : flatbuffers::is_same<T, bool>::value
-                     ? FBT_BOOL
-                     : (flatbuffers::is_unsigned<T>::value ? FBT_UINT
-                                                           : FBT_INT);
-  }
-
- public:
-  // This was really intended to be private, except for LastValue/ReuseValue.
-  struct Value {
-    union {
-      int64_t i_;
-      uint64_t u_;
-      double f_;
-    };
-
-    Type type_;
-
-    // For scalars: of itself, for vector: of its elements, for string: length.
-    BitWidth min_bit_width_;
-
-    Value() : i_(0), type_(FBT_NULL), min_bit_width_(BIT_WIDTH_8) {}
-
-    Value(bool b)
-        : u_(static_cast<uint64_t>(b)),
-          type_(FBT_BOOL),
-          min_bit_width_(BIT_WIDTH_8) {}
-
-    Value(int64_t i, Type t, BitWidth bw)
-        : i_(i), type_(t), min_bit_width_(bw) {}
-    Value(uint64_t u, Type t, BitWidth bw)
-        : u_(u), type_(t), min_bit_width_(bw) {}
-
-    Value(float f) : f_(f), type_(FBT_FLOAT), min_bit_width_(BIT_WIDTH_32) {}
-    Value(double f) : f_(f), type_(FBT_FLOAT), min_bit_width_(WidthF(f)) {}
-
-    uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
-      return PackedType(StoredWidth(parent_bit_width_), type_);
-    }
-
-    BitWidth ElemWidth(size_t buf_size, size_t elem_index) const {
-      if (IsInline(type_)) {
-        return min_bit_width_;
-      } else {
-        // We have an absolute offset, but want to store a relative offset
-        // elem_index elements beyond the current buffer end. Since whether
-        // the relative offset fits in a certain byte_width depends on
-        // the size of the elements before it (and their alignment), we have
-        // to test for each size in turn.
-        for (size_t byte_width = 1;
-             byte_width <= sizeof(flatbuffers::largest_scalar_t);
-             byte_width *= 2) {
-          // Where are we going to write this offset?
-          auto offset_loc = buf_size +
-                            flatbuffers::PaddingBytes(buf_size, byte_width) +
-                            elem_index * byte_width;
-          // Compute relative offset.
-          auto offset = offset_loc - u_;
-          // Does it fit?
-          auto bit_width = WidthU(offset);
-          if (static_cast<size_t>(static_cast<size_t>(1U) << bit_width) ==
-              byte_width)
-            return bit_width;
-        }
-        FLATBUFFERS_ASSERT(false);  // Must match one of the sizes above.
-        return BIT_WIDTH_64;
-      }
-    }
-
-    BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
-      if (IsInline(type_)) {
-        return (std::max)(min_bit_width_, parent_bit_width_);
-      } else {
-        return min_bit_width_;
-      }
-    }
-  };
-
- private:
-  void WriteAny(const Value &val, uint8_t byte_width) {
-    switch (val.type_) {
-      case FBT_NULL:
-      case FBT_INT: Write(val.i_, byte_width); break;
-      case FBT_BOOL:
-      case FBT_UINT: Write(val.u_, byte_width); break;
-      case FBT_FLOAT: WriteDouble(val.f_, byte_width); break;
-      default: WriteOffset(val.u_, byte_width); break;
-    }
-  }
-
-  size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type) {
-    auto bit_width = WidthU(len);
-    auto byte_width = Align(bit_width);
-    Write<uint64_t>(len, byte_width);
-    auto sloc = buf_.size();
-    WriteBytes(data, len + trailing);
-    stack_.push_back(Value(static_cast<uint64_t>(sloc), type, bit_width));
-    return sloc;
-  }
-
-  template<typename T>
-  size_t ScalarVector(const T *elems, size_t len, bool fixed) {
-    auto vector_type = GetScalarType<T>();
-    auto byte_width = sizeof(T);
-    auto bit_width = WidthB(byte_width);
-    // If you get this assert, you're trying to write a vector with a size
-    // field that is bigger than the scalars you're trying to write (e.g. a
-    // byte vector > 255 elements). For such types, write a "blob" instead.
-    // TODO: instead of asserting, could write a vector with larger elements,
-    // though that would be wasteful.
-    FLATBUFFERS_ASSERT(WidthU(len) <= bit_width);
-    Align(bit_width);
-    if (!fixed) Write<uint64_t>(len, byte_width);
-    auto vloc = buf_.size();
-    for (size_t i = 0; i < len; i++) Write(elems[i], byte_width);
-    stack_.push_back(Value(static_cast<uint64_t>(vloc),
-                           ToTypedVector(vector_type, fixed ? len : 0),
-                           bit_width));
-    return vloc;
-  }
-
-  Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed,
-                     bool fixed, const Value *keys = nullptr) {
-    FLATBUFFERS_ASSERT(
-        !fixed ||
-        typed);  // typed=false, fixed=true combination is not supported.
-    // Figure out smallest bit width we can store this vector with.
-    auto bit_width = (std::max)(force_min_bit_width_, WidthU(vec_len));
-    auto prefix_elems = 1;
-    if (keys) {
-      // If this vector is part of a map, we prefix it with an offset to the
-      // keys vector, and that vector's byte width.
-      bit_width = (std::max)(bit_width, keys->ElemWidth(buf_.size(), 0));
-      prefix_elems += 2;
-    }
-    Type vector_type = FBT_KEY;
-    // Check bit widths and types for all elements.
-    for (size_t i = start; i < stack_.size(); i += step) {
-      auto elem_width =
-          stack_[i].ElemWidth(buf_.size(), i - start + prefix_elems);
-      bit_width = (std::max)(bit_width, elem_width);
-      if (typed) {
-        if (i == start) {
-          vector_type = stack_[i].type_;
-        } else {
-          // If you get this assert, you are writing a typed vector with
-          // elements that are not all the same type.
-          FLATBUFFERS_ASSERT(vector_type == stack_[i].type_);
-        }
-      }
-    }
-    // If you get this assert, your fixed types are not one of:
-    // Int / UInt / Float / Key.
-    FLATBUFFERS_ASSERT(!fixed || IsTypedVectorElementType(vector_type));
-    auto byte_width = Align(bit_width);
-    // Write vector. First the keys offset and width if available, then the
-    // size.
-    if (keys) {
-      WriteOffset(keys->u_, byte_width);
-      Write<uint64_t>(1ULL << keys->min_bit_width_, byte_width);
-    }
-    if (!fixed) Write<uint64_t>(vec_len, byte_width);
-    // Then the actual data.
-    auto vloc = buf_.size();
-    for (size_t i = start; i < stack_.size(); i += step) {
-      WriteAny(stack_[i], byte_width);
-    }
-    // Then the types.
-    if (!typed) {
-      for (size_t i = start; i < stack_.size(); i += step) {
-        buf_.push_back(stack_[i].StoredPackedType(bit_width));
-      }
-    }
-    return Value(static_cast<uint64_t>(vloc),
-                 keys ? FBT_MAP
-                      : (typed ? ToTypedVector(vector_type, fixed ? vec_len : 0)
-                               : FBT_VECTOR),
-                 bit_width);
-  }
-
-  // You shouldn't really be copying instances of this class.
-  Builder(const Builder &);
-  Builder &operator=(const Builder &);
-
-  std::vector<uint8_t> buf_;
-  std::vector<Value> stack_;
-
-  bool finished_;
-
-  BuilderFlag flags_;
-
-  BitWidth force_min_bit_width_;
-
-  struct KeyOffsetCompare {
-    explicit KeyOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
-    bool operator()(size_t a, size_t b) const {
-      auto stra =
-          reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + a);
-      auto strb =
-          reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + b);
-      return strcmp(stra, strb) < 0;
-    }
-    const std::vector<uint8_t> *buf_;
-  };
-
-  typedef std::pair<size_t, size_t> StringOffset;
-  struct StringOffsetCompare {
-    explicit StringOffsetCompare(const std::vector<uint8_t> &buf)
-        : buf_(&buf) {}
-    bool operator()(const StringOffset &a, const StringOffset &b) const {
-      auto stra = reinterpret_cast<const char *>(
-          flatbuffers::vector_data(*buf_) + a.first);
-      auto strb = reinterpret_cast<const char *>(
-          flatbuffers::vector_data(*buf_) + b.first);
-      return strncmp(stra, strb, (std::min)(a.second, b.second) + 1) < 0;
-    }
-    const std::vector<uint8_t> *buf_;
-  };
-
-  typedef std::set<size_t, KeyOffsetCompare> KeyOffsetMap;
-  typedef std::set<StringOffset, StringOffsetCompare> StringOffsetMap;
-
-  KeyOffsetMap key_pool;
-  StringOffsetMap string_pool;
-};
-
-}  // namespace flexbuffers
-
-#if defined(_MSC_VER)
-#  pragma warning(pop)
-#endif
-
-#endif  // FLATBUFFERS_FLEXBUFFERS_H_

+ 0 - 330
flatbuffers/grpc.h

@@ -1,330 +0,0 @@
-/*
- * Copyright 2014 Google Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLATBUFFERS_GRPC_H_
-#define FLATBUFFERS_GRPC_H_
-
-// Helper functionality to glue FlatBuffers and GRPC.
-
-#include "flatbuffers/flatbuffers.h"
-#include "grpc++/support/byte_buffer.h"
-#include "grpc/byte_buffer_reader.h"
-
-namespace flatbuffers {
-namespace grpc {
-
-// Message is a typed wrapper around a buffer that manages the underlying
-// `grpc_slice` and also provides flatbuffers-specific helpers such as `Verify`
-// and `GetRoot`. Since it is backed by a `grpc_slice`, the underlying buffer
-// is refcounted and ownership is managed automatically.
-template<class T> class Message {
- public:
-  Message() : slice_(grpc_empty_slice()) {}
-
-  Message(grpc_slice slice, bool add_ref)
-      : slice_(add_ref ? grpc_slice_ref(slice) : slice) {}
-
-  Message &operator=(const Message &other) = delete;
-
-  Message(Message &&other) : slice_(other.slice_) {
-    other.slice_ = grpc_empty_slice();
-  }
-
-  Message(const Message &other) = delete;
-
-  Message &operator=(Message &&other) {
-    grpc_slice_unref(slice_);
-    slice_ = other.slice_;
-    other.slice_ = grpc_empty_slice();
-    return *this;
-  }
-
-  ~Message() { grpc_slice_unref(slice_); }
-
-  const uint8_t *mutable_data() const { return GRPC_SLICE_START_PTR(slice_); }
-
-  const uint8_t *data() const { return GRPC_SLICE_START_PTR(slice_); }
-
-  size_t size() const { return GRPC_SLICE_LENGTH(slice_); }
-
-  bool Verify() const {
-    Verifier verifier(data(), size());
-    return verifier.VerifyBuffer<T>(nullptr);
-  }
-
-  T *GetMutableRoot() { return flatbuffers::GetMutableRoot<T>(mutable_data()); }
-
-  const T *GetRoot() const { return flatbuffers::GetRoot<T>(data()); }
-
-  // This is only intended for serializer use, or if you know what you're
-  // doing.
-  const grpc_slice &BorrowSlice() const { return slice_; }
-
- private:
-  grpc_slice slice_;
-};
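-
-// Example (a minimal sketch; `MyTable` stands in for a generated FlatBuffers
-// table type):
-//
-//   flatbuffers::grpc::Message<MyTable> msg = /* received via gRPC */;
-//   if (msg.Verify()) {
-//     const MyTable *table = msg.GetRoot();
-//   }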
-
-class MessageBuilder;
-
-// SliceAllocator is a gRPC-specific allocator that uses the `grpc_slice`
-// refcounted slices to manage memory ownership. This makes it easy and
-// efficient to transfer buffers to gRPC.
-class SliceAllocator : public Allocator {
- public:
-  SliceAllocator() : slice_(grpc_empty_slice()) {}
-
-  SliceAllocator(const SliceAllocator &other) = delete;
-  SliceAllocator &operator=(const SliceAllocator &other) = delete;
-
-  SliceAllocator(SliceAllocator &&other) : slice_(grpc_empty_slice()) {
-    // default-construct and swap idiom
-    swap(other);
-  }
-
-  SliceAllocator &operator=(SliceAllocator &&other) {
-    // move-construct and swap idiom
-    SliceAllocator temp(std::move(other));
-    swap(temp);
-    return *this;
-  }
-
-  void swap(SliceAllocator &other) {
-    using std::swap;
-    swap(slice_, other.slice_);
-  }
-
-  virtual ~SliceAllocator() { grpc_slice_unref(slice_); }
-
-  virtual uint8_t *allocate(size_t size) override {
-    FLATBUFFERS_ASSERT(GRPC_SLICE_IS_EMPTY(slice_));
-    slice_ = grpc_slice_malloc(size);
-    return GRPC_SLICE_START_PTR(slice_);
-  }
-
-  virtual void deallocate(uint8_t *p, size_t size) override {
-    FLATBUFFERS_ASSERT(p == GRPC_SLICE_START_PTR(slice_));
-    FLATBUFFERS_ASSERT(size == GRPC_SLICE_LENGTH(slice_));
-    grpc_slice_unref(slice_);
-    slice_ = grpc_empty_slice();
-  }
-
-  virtual uint8_t *reallocate_downward(uint8_t *old_p, size_t old_size,
-                                       size_t new_size, size_t in_use_back,
-                                       size_t in_use_front) override {
-    FLATBUFFERS_ASSERT(old_p == GRPC_SLICE_START_PTR(slice_));
-    FLATBUFFERS_ASSERT(old_size == GRPC_SLICE_LENGTH(slice_));
-    FLATBUFFERS_ASSERT(new_size > old_size);
-    grpc_slice old_slice = slice_;
-    grpc_slice new_slice = grpc_slice_malloc(new_size);
-    uint8_t *new_p = GRPC_SLICE_START_PTR(new_slice);
-    memcpy_downward(old_p, old_size, new_p, new_size, in_use_back,
-                    in_use_front);
-    slice_ = new_slice;
-    grpc_slice_unref(old_slice);
-    return new_p;
-  }
-
- private:
-  grpc_slice &get_slice(uint8_t *p, size_t size) {
-    FLATBUFFERS_ASSERT(p == GRPC_SLICE_START_PTR(slice_));
-    FLATBUFFERS_ASSERT(size == GRPC_SLICE_LENGTH(slice_));
-    return slice_;
-  }
-
-  grpc_slice slice_;
-
-  friend class MessageBuilder;
-};
-
-// SliceAllocatorMember is a hack to ensure that the MessageBuilder's
-// slice_allocator_ member is constructed before the FlatBufferBuilder, since
-// the allocator is used in the FlatBufferBuilder ctor.
-namespace detail {
-struct SliceAllocatorMember {
-  SliceAllocator slice_allocator_;
-};
-}  // namespace detail
-
-// MessageBuilder is a gRPC-specific FlatBufferBuilder that uses SliceAllocator
-// to allocate gRPC buffers.
-class MessageBuilder : private detail::SliceAllocatorMember,
-                       public FlatBufferBuilder {
- public:
-  explicit MessageBuilder(uoffset_t initial_size = 1024)
-      : FlatBufferBuilder(initial_size, &slice_allocator_, false) {}
-
-  MessageBuilder(const MessageBuilder &other) = delete;
-  MessageBuilder &operator=(const MessageBuilder &other) = delete;
-
-  MessageBuilder(MessageBuilder &&other)
-      : FlatBufferBuilder(1024, &slice_allocator_, false) {
-    // Default construct and swap idiom.
-    Swap(other);
-  }
-
-  /// Create a MessageBuilder from a FlatBufferBuilder.
-  explicit MessageBuilder(FlatBufferBuilder &&src,
-                          void (*dealloc)(void *,
-                                          size_t) = &DefaultAllocator::dealloc)
-      : FlatBufferBuilder(1024, &slice_allocator_, false) {
-    src.Swap(*this);
-    src.SwapBufAllocator(*this);
-    if (buf_.capacity()) {
-      uint8_t *buf = buf_.scratch_data();  // pointer to memory
-      size_t capacity = buf_.capacity();   // size of memory
-      slice_allocator_.slice_ = grpc_slice_new_with_len(buf, capacity, dealloc);
-    } else {
-      slice_allocator_.slice_ = grpc_empty_slice();
-    }
-  }
-
-  /// Move-assign a FlatBufferBuilder to a MessageBuilder.
-  /// Only FlatBufferBuilder with default allocator (basically, nullptr) is
-  /// supported.
-  MessageBuilder &operator=(FlatBufferBuilder &&src) {
-    // Move construct a temporary and swap
-    MessageBuilder temp(std::move(src));
-    Swap(temp);
-    return *this;
-  }
-
-  MessageBuilder &operator=(MessageBuilder &&other) {
-    // Move construct a temporary and swap
-    MessageBuilder temp(std::move(other));
-    Swap(temp);
-    return *this;
-  }
-
-  void Swap(MessageBuilder &other) {
-    slice_allocator_.swap(other.slice_allocator_);
-    FlatBufferBuilder::Swap(other);
-    // After swapping the FlatBufferBuilder, we swap the allocator back, which
-    // restores the original allocator in place. This is necessary because
-    // MessageBuilder's allocator is its own member (SliceAllocatorMember). The
-    // allocator passed to FlatBufferBuilder::vector_downward must point to this
-    // member.
-    buf_.swap_allocator(other.buf_);
-  }
-
-  // Releases the ownership of the buffer pointer.
-  // Returns the size, offset, and the original grpc_slice that
-  // allocated the buffer. Also see grpc_slice_unref().
-  uint8_t *ReleaseRaw(size_t &size, size_t &offset, grpc_slice &slice) {
-    uint8_t *buf = FlatBufferBuilder::ReleaseRaw(size, offset);
-    slice = slice_allocator_.slice_;
-    slice_allocator_.slice_ = grpc_empty_slice();
-    return buf;
-  }
-
-  ~MessageBuilder() {}
-
-  // GetMessage extracts the subslice of the buffer corresponding to the
-  // flatbuffers-encoded region and wraps it in a `Message<T>` to handle buffer
-  // ownership.
-  template<class T> Message<T> GetMessage() {
-    auto buf_data = buf_.scratch_data();  // pointer to memory
-    auto buf_size = buf_.capacity();      // size of memory
-    auto msg_data = buf_.data();          // pointer to msg
-    auto msg_size = buf_.size();          // size of msg
-    // Do some sanity checks on data/size
-    FLATBUFFERS_ASSERT(msg_data);
-    FLATBUFFERS_ASSERT(msg_size);
-    FLATBUFFERS_ASSERT(msg_data >= buf_data);
-    FLATBUFFERS_ASSERT(msg_data + msg_size <= buf_data + buf_size);
-    // Calculate offsets from the buffer start
-    auto begin = msg_data - buf_data;
-    auto end = begin + msg_size;
-    // Get the slice we are working with (no refcount change)
-    grpc_slice slice = slice_allocator_.get_slice(buf_data, buf_size);
-    // Extract a subslice of the existing slice (increment refcount)
-    grpc_slice subslice = grpc_slice_sub(slice, begin, end);
-    // Wrap the subslice in a `Message<T>`, but don't increment refcount
-    Message<T> msg(subslice, false);
-    return msg;
-  }
-
-  template<class T> Message<T> ReleaseMessage() {
-    Message<T> msg = GetMessage<T>();
-    Reset();
-    return msg;
-  }
-
- private:
-  // SliceAllocator slice_allocator_;  // part of SliceAllocatorMember
-};
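-
-// Example (a minimal sketch; `MyTable`/`CreateMyTable` stand in for generated
-// code):
-//
-//   flatbuffers::grpc::MessageBuilder mb;
-//   auto root = CreateMyTable(mb /*, fields... */);
-//   mb.Finish(root);
-//   auto msg = mb.ReleaseMessage<MyTable>();  // No copy: the slice is handed
-//                                             // over to the Message.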
-
-}  // namespace grpc
-}  // namespace flatbuffers
-
-namespace grpc {
-
-template<class T> class SerializationTraits<flatbuffers::grpc::Message<T>> {
- public:
-  static grpc::Status Serialize(const flatbuffers::grpc::Message<T> &msg,
-                                grpc_byte_buffer **buffer, bool *own_buffer) {
-    // We are passed in a `Message<T>`, which is a wrapper around a
-    // `grpc_slice`. We extract it here using `BorrowSlice()`. The const cast
-    // is necessary because the `grpc_raw_byte_buffer_create` func expects
-    // non-const slices in order to increment their refcounts.
-    grpc_slice *slice = const_cast<grpc_slice *>(&msg.BorrowSlice());
-    // Now use `grpc_raw_byte_buffer_create` to package the single slice into a
-    // `grpc_byte_buffer`, incrementing the refcount in the process.
-    *buffer = grpc_raw_byte_buffer_create(slice, 1);
-    *own_buffer = true;
-    return grpc::Status::OK;
-  }
-
-  // Deserialize by pulling the data out of the grpc_byte_buffer.
-  static grpc::Status Deserialize(grpc_byte_buffer *buffer,
-                                  flatbuffers::grpc::Message<T> *msg) {
-    if (!buffer) {
-      return ::grpc::Status(::grpc::StatusCode::INTERNAL, "No payload");
-    }
-    // Check if this is a single uncompressed slice.
-    if ((buffer->type == GRPC_BB_RAW) &&
-        (buffer->data.raw.compression == GRPC_COMPRESS_NONE) &&
-        (buffer->data.raw.slice_buffer.count == 1)) {
-      // If it is, then we can reference the `grpc_slice` directly.
-      grpc_slice slice = buffer->data.raw.slice_buffer.slices[0];
-      // We wrap a `Message<T>` around the slice, incrementing the refcount.
-      *msg = flatbuffers::grpc::Message<T>(slice, true);
-    } else {
-      // Otherwise, we need to use `grpc_byte_buffer_reader_readall` to read
-      // `buffer` into a single contiguous `grpc_slice`. The gRPC reader gives
-      // us back a new slice with the refcount already incremented.
-      grpc_byte_buffer_reader reader;
-      grpc_byte_buffer_reader_init(&reader, buffer);
-      grpc_slice slice = grpc_byte_buffer_reader_readall(&reader);
-      grpc_byte_buffer_reader_destroy(&reader);
-      // We wrap a `Message<T>` around the slice, but don't increment refcount
-      *msg = flatbuffers::grpc::Message<T>(slice, false);
-    }
-    grpc_byte_buffer_destroy(buffer);
-#if FLATBUFFERS_GRPC_DISABLE_AUTO_VERIFICATION
-    return ::grpc::Status::OK;
-#else
-    if (msg->Verify()) {
-      return ::grpc::Status::OK;
-    } else {
-      return ::grpc::Status(::grpc::StatusCode::INTERNAL,
-                            "Message verification failed");
-    }
-#endif
-  }
-};
-
-}  // namespace grpc
-
-#endif  // FLATBUFFERS_GRPC_H_

+ 0 - 127
flatbuffers/hash.h

@@ -1,127 +0,0 @@
-/*
- * Copyright 2015 Google Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLATBUFFERS_HASH_H_
-#define FLATBUFFERS_HASH_H_
-
-#include <cstdint>
-#include <cstring>
-
-#include "flatbuffers.h"
-
-namespace flatbuffers {
-
-template<typename T> struct FnvTraits {
-  static const T kFnvPrime;
-  static const T kOffsetBasis;
-};
-
-template<> struct FnvTraits<uint32_t> {
-  static const uint32_t kFnvPrime = 0x01000193;
-  static const uint32_t kOffsetBasis = 0x811C9DC5;
-};
-
-template<> struct FnvTraits<uint64_t> {
-  static const uint64_t kFnvPrime = 0x00000100000001b3ULL;
-  static const uint64_t kOffsetBasis = 0xcbf29ce484222645ULL;
-};
-
-template<typename T> T HashFnv1(const char *input) {
-  T hash = FnvTraits<T>::kOffsetBasis;
-  for (const char *c = input; *c; ++c) {
-    hash *= FnvTraits<T>::kFnvPrime;
-    hash ^= static_cast<unsigned char>(*c);
-  }
-  return hash;
-}
-
-template<typename T> T HashFnv1a(const char *input) {
-  T hash = FnvTraits<T>::kOffsetBasis;
-  for (const char *c = input; *c; ++c) {
-    hash ^= static_cast<unsigned char>(*c);
-    hash *= FnvTraits<T>::kFnvPrime;
-  }
-  return hash;
-}
-
-template<> inline uint16_t HashFnv1<uint16_t>(const char *input) {
-  uint32_t hash = HashFnv1<uint32_t>(input);
-  return (hash >> 16) ^ (hash & 0xffff);
-}
-
-template<> inline uint16_t HashFnv1a<uint16_t>(const char *input) {
-  uint32_t hash = HashFnv1a<uint32_t>(input);
-  return (hash >> 16) ^ (hash & 0xffff);
-}
-
-template<typename T> struct NamedHashFunction {
-  const char *name;
-
-  typedef T (*HashFunction)(const char *);
-  HashFunction function;
-};
-
-const NamedHashFunction<uint16_t> kHashFunctions16[] = {
-  { "fnv1_16", HashFnv1<uint16_t> },
-  { "fnv1a_16", HashFnv1a<uint16_t> },
-};
-
-const NamedHashFunction<uint32_t> kHashFunctions32[] = {
-  { "fnv1_32", HashFnv1<uint32_t> },
-  { "fnv1a_32", HashFnv1a<uint32_t> },
-};
-
-const NamedHashFunction<uint64_t> kHashFunctions64[] = {
-  { "fnv1_64", HashFnv1<uint64_t> },
-  { "fnv1a_64", HashFnv1a<uint64_t> },
-};
-
-inline NamedHashFunction<uint16_t>::HashFunction FindHashFunction16(
-    const char *name) {
-  std::size_t size = sizeof(kHashFunctions16) / sizeof(kHashFunctions16[0]);
-  for (std::size_t i = 0; i < size; ++i) {
-    if (std::strcmp(name, kHashFunctions16[i].name) == 0) {
-      return kHashFunctions16[i].function;
-    }
-  }
-  return nullptr;
-}
-
-inline NamedHashFunction<uint32_t>::HashFunction FindHashFunction32(
-    const char *name) {
-  std::size_t size = sizeof(kHashFunctions32) / sizeof(kHashFunctions32[0]);
-  for (std::size_t i = 0; i < size; ++i) {
-    if (std::strcmp(name, kHashFunctions32[i].name) == 0) {
-      return kHashFunctions32[i].function;
-    }
-  }
-  return nullptr;
-}
-
-inline NamedHashFunction<uint64_t>::HashFunction FindHashFunction64(
-    const char *name) {
-  std::size_t size = sizeof(kHashFunctions64) / sizeof(kHashFunctions64[0]);
-  for (std::size_t i = 0; i < size; ++i) {
-    if (std::strcmp(name, kHashFunctions64[i].name) == 0) {
-      return kHashFunctions64[i].function;
-    }
-  }
-  return nullptr;
-}
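-
-// Example (a minimal sketch): the same FNV-1a hash computed directly and via
-// the name-based lookup.
-//
-//   uint32_t h1 = flatbuffers::HashFnv1a<uint32_t>("Monster");
-//   auto fn = flatbuffers::FindHashFunction32("fnv1a_32");
-//   uint32_t h2 = fn ? fn("Monster") : 0;  // h1 == h2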
-
-}  // namespace flatbuffers
-
-#endif  // FLATBUFFERS_HASH_H_

+ 0 - 1136
flatbuffers/idl.h

@@ -1,1136 +0,0 @@
-/*
- * Copyright 2014 Google Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLATBUFFERS_IDL_H_
-#define FLATBUFFERS_IDL_H_
-
-#include <map>
-#include <memory>
-#include <stack>
-
-#include "base.h"
-#include "flatbuffers.h"
-#include "flexbuffers.h"
-#include "hash.h"
-#include "reflection.h"
-
-#if !defined(FLATBUFFERS_CPP98_STL)
-#  include <functional>
-#endif  // !defined(FLATBUFFERS_CPP98_STL)
-
-// This file defines the data types representing a parsed IDL (Interface
-// Definition Language) / schema file.
-
-// Limits maximum depth of nested objects.
-// Prevents stack overflow while parsing flatbuffers or json.
-#if !defined(FLATBUFFERS_MAX_PARSING_DEPTH)
-#  define FLATBUFFERS_MAX_PARSING_DEPTH 64
-#endif
-
-namespace flatbuffers {
-
-// The order of these matters for Is*() functions below.
-// Additionally, Parser::ParseType assumes bool..string is a contiguous range
-// of type tokens.
-// clang-format off
-#define FLATBUFFERS_GEN_TYPES_SCALAR(TD) \
-  TD(NONE,   "",       uint8_t,  byte,   byte,    byte,   uint8,   u8,   UByte, UInt8) \
-  TD(UTYPE,  "",       uint8_t,  byte,   byte,    byte,   uint8,   u8,   UByte, UInt8) /* begin scalar/int */ \
-  TD(BOOL,   "bool",   uint8_t,  boolean,bool,    bool,   bool,    bool, Boolean, Bool) \
-  TD(CHAR,   "byte",   int8_t,   byte,   int8,    sbyte,  int8,    i8,   Byte, Int8) \
-  TD(UCHAR,  "ubyte",  uint8_t,  byte,   byte,    byte,   uint8,   u8,   UByte, UInt8) \
-  TD(SHORT,  "short",  int16_t,  short,  int16,   short,  int16,   i16,  Short, Int16) \
-  TD(USHORT, "ushort", uint16_t, short,  uint16,  ushort, uint16,  u16,  UShort, UInt16) \
-  TD(INT,    "int",    int32_t,  int,    int32,   int,    int32,   i32,  Int, Int32) \
-  TD(UINT,   "uint",   uint32_t, int,    uint32,  uint,   uint32,  u32,  UInt, UInt32) \
-  TD(LONG,   "long",   int64_t,  long,   int64,   long,   int64,   i64,  Long, Int64) \
-  TD(ULONG,  "ulong",  uint64_t, long,   uint64,  ulong,  uint64,  u64,  ULong, UInt64) /* end int */ \
-  TD(FLOAT,  "float",  float,    float,  float32, float,  float32, f32,  Float, Float32) /* begin float */ \
-  TD(DOUBLE, "double", double,   double, float64, double, float64, f64,  Double, Double) /* end float/scalar */
-#define FLATBUFFERS_GEN_TYPES_POINTER(TD) \
-  TD(STRING, "string", Offset<void>, int, int, StringOffset, int, unused, Int, Offset<String>) \
-  TD(VECTOR, "",       Offset<void>, int, int, VectorOffset, int, unused, Int, Offset<UOffset>) \
-  TD(STRUCT, "",       Offset<void>, int, int, int,          int, unused, Int, Offset<UOffset>) \
-  TD(UNION,  "",       Offset<void>, int, int, int,          int, unused, Int, Offset<UOffset>)
-#define FLATBUFFERS_GEN_TYPE_ARRAY(TD) \
-  TD(ARRAY,  "",       int,          int, int, int,          int, unused, Int, Offset<UOffset>)
-// The fields are:
-// - enum
-// - FlatBuffers schema type.
-// - C++ type.
-// - Java type.
-// - Go type.
-// - C# / .Net type.
-// - Python type.
-// - Rust type.
-// - Kotlin type.
-// - Swift type.
-
-// using these macros, we can now write code dealing with types just once, e.g.
-
-/*
-switch (type) {
-  #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, \
-                         RTYPE, KTYPE) \
-    case BASE_TYPE_ ## ENUM: \
-      // do something specific to CTYPE here
-    FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
-  #undef FLATBUFFERS_TD
-}
-*/
-
-// If not all FLATBUFFERS_GEN_() arguments are necessary for the implementation
-// of FLATBUFFERS_TD, you can use a variadic macro (with __VA_ARGS__ if needed).
-// In the above example, only CTYPE is used to generate the code, so it can be
-// rewritten:
-
-/*
-switch (type) {
-  #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
-    case BASE_TYPE_ ## ENUM: \
-      // do something specific to CTYPE here
-    FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
-  #undef FLATBUFFERS_TD
-}
-*/
-
-#define FLATBUFFERS_GEN_TYPES(TD) \
-        FLATBUFFERS_GEN_TYPES_SCALAR(TD) \
-        FLATBUFFERS_GEN_TYPES_POINTER(TD) \
-        FLATBUFFERS_GEN_TYPE_ARRAY(TD)
-
-// Create an enum for all the types above.
-#ifdef __GNUC__
-__extension__  // Stop GCC complaining about trailing comma with -Wpedantic.
-#endif
-enum BaseType {
-  #define FLATBUFFERS_TD(ENUM, ...) \
-    BASE_TYPE_ ## ENUM,
-    FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
-  #undef FLATBUFFERS_TD
-};
-
-#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
-  static_assert(sizeof(CTYPE) <= sizeof(largest_scalar_t), \
-                "define largest_scalar_t as " #CTYPE);
-  FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
-#undef FLATBUFFERS_TD
-
-inline bool IsScalar (BaseType t) { return t >= BASE_TYPE_UTYPE &&
-                                           t <= BASE_TYPE_DOUBLE; }
-inline bool IsInteger(BaseType t) { return t >= BASE_TYPE_UTYPE &&
-                                           t <= BASE_TYPE_ULONG; }
-inline bool IsFloat  (BaseType t) { return t == BASE_TYPE_FLOAT ||
-                                           t == BASE_TYPE_DOUBLE; }
-inline bool IsLong   (BaseType t) { return t == BASE_TYPE_LONG ||
-                                           t == BASE_TYPE_ULONG; }
-inline bool IsBool   (BaseType t) { return t == BASE_TYPE_BOOL; }
-inline bool IsOneByte(BaseType t) { return t >= BASE_TYPE_UTYPE &&
-                                           t <= BASE_TYPE_UCHAR; }
-
-inline bool IsUnsigned(BaseType t) {
-  return (t == BASE_TYPE_UTYPE)  || (t == BASE_TYPE_UCHAR) ||
-         (t == BASE_TYPE_USHORT) || (t == BASE_TYPE_UINT)  ||
-         (t == BASE_TYPE_ULONG);
-}
-
-// clang-format on
-
-extern const char *const kTypeNames[];
-extern const char kTypeSizes[];
-
-inline size_t SizeOf(BaseType t) { return kTypeSizes[t]; }
-
-struct StructDef;
-struct EnumDef;
-class Parser;
-
-// Represents any type in the IDL, which is a combination of the BaseType
-// and additional information for vectors/structs.
-struct Type {
-  explicit Type(BaseType _base_type = BASE_TYPE_NONE, StructDef *_sd = nullptr,
-                EnumDef *_ed = nullptr, uint16_t _fixed_length = 0)
-      : base_type(_base_type),
-        element(BASE_TYPE_NONE),
-        struct_def(_sd),
-        enum_def(_ed),
-        fixed_length(_fixed_length) {}
-
-  bool operator==(const Type &o) {
-    return base_type == o.base_type && element == o.element &&
-           struct_def == o.struct_def && enum_def == o.enum_def;
-  }
-
-  Type VectorType() const {
-    return Type(element, struct_def, enum_def, fixed_length);
-  }
-
-  Offset<reflection::Type> Serialize(FlatBufferBuilder *builder) const;
-
-  bool Deserialize(const Parser &parser, const reflection::Type *type);
-
-  BaseType base_type;
-  BaseType element;       // only set if t == BASE_TYPE_VECTOR
-  StructDef *struct_def;  // only set if t or element == BASE_TYPE_STRUCT
-  EnumDef *enum_def;      // set if t == BASE_TYPE_UNION / BASE_TYPE_UTYPE,
-                          // or for an integral type derived from an enum.
-  uint16_t fixed_length;  // only set if t == BASE_TYPE_ARRAY
-};
-
-// Represents a parsed scalar value, its type, and field offset.
-struct Value {
-  Value()
-      : constant("0"),
-        offset(static_cast<voffset_t>(~(static_cast<voffset_t>(0U)))) {}
-  Type type;
-  std::string constant;
-  voffset_t offset;
-};
-
-// Helper class that retains the original order of a set of identifiers and
-// also provides quick lookup.
-template<typename T> class SymbolTable {
- public:
-  ~SymbolTable() {
-    for (auto it = vec.begin(); it != vec.end(); ++it) { delete *it; }
-  }
-
-  bool Add(const std::string &name, T *e) {
-    vector_emplace_back(&vec, e);
-    auto it = dict.find(name);
-    if (it != dict.end()) return true;
-    dict[name] = e;
-    return false;
-  }
-
-  void Move(const std::string &oldname, const std::string &newname) {
-    auto it = dict.find(oldname);
-    if (it != dict.end()) {
-      auto obj = it->second;
-      dict.erase(it);
-      dict[newname] = obj;
-    } else {
-      FLATBUFFERS_ASSERT(false);
-    }
-  }
-
-  T *Lookup(const std::string &name) const {
-    auto it = dict.find(name);
-    return it == dict.end() ? nullptr : it->second;
-  }
-
- public:
-  std::map<std::string, T *> dict;  // quick lookup
-  std::vector<T *> vec;             // Used to iterate in order of insertion
-};
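-
-// Example (a minimal sketch): Add() reports a name collision by returning
-// true; the dict keeps the first entry, but every pointer is retained in vec
-// (and deleted in the destructor).
-//
-//   SymbolTable<StructDef> table;
-//   bool existed = table.Add("Monster", new StructDef());  // false first time.
-//   StructDef *def = table.Lookup("Monster");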
-
-// A name space, as set in the schema.
-struct Namespace {
-  Namespace() : from_table(0) {}
-
-  // Given a (potentially unqualified) name, return the "fully qualified" name
-  // which has a full namespaced descriptor.
-  // With max_components you can request less than the number of components
-  // the current namespace has.
-  std::string GetFullyQualifiedName(const std::string &name,
-                                    size_t max_components = 1000) const;
-
-  std::vector<std::string> components;
-  size_t from_table;  // Part of the namespace corresponds to a message/table.
-};
-
-inline bool operator<(const Namespace &a, const Namespace &b) {
-  size_t min_size = std::min(a.components.size(), b.components.size());
-  for (size_t i = 0; i < min_size; ++i) {
-    if (a.components[i] != b.components[i])
-      return a.components[i] < b.components[i];
-  }
-  return a.components.size() < b.components.size();
-}
-
-// Base class for all definition types (fields, structs, enums).
-struct Definition {
-  Definition()
-      : generated(false),
-        defined_namespace(nullptr),
-        serialized_location(0),
-        index(-1),
-        refcount(1) {}
-
-  flatbuffers::Offset<
-      flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>>
-  SerializeAttributes(FlatBufferBuilder *builder, const Parser &parser) const;
-
-  bool DeserializeAttributes(Parser &parser,
-                             const Vector<Offset<reflection::KeyValue>> *attrs);
-
-  std::string name;
-  std::string file;
-  std::vector<std::string> doc_comment;
-  SymbolTable<Value> attributes;
-  bool generated;  // did we already output code for this definition?
-  Namespace *defined_namespace;  // Where it was defined.
-
-  // For use with Serialize()
-  uoffset_t serialized_location;
-  int index;  // Inside the vector it is stored.
-  int refcount;
-};
-
-struct FieldDef : public Definition {
-  FieldDef()
-      : deprecated(false),
-        required(false),
-        key(false),
-        shared(false),
-        native_inline(false),
-        flexbuffer(false),
-        nested_flatbuffer(NULL),
-        padding(0) {}
-
-  Offset<reflection::Field> Serialize(FlatBufferBuilder *builder, uint16_t id,
-                                      const Parser &parser) const;
-
-  bool Deserialize(Parser &parser, const reflection::Field *field);
-
-  Value value;
-  bool deprecated;  // Field is allowed to be present in old data, but can't
-                    // be written in new data nor accessed in new code.
-  bool required;    // Field must always be present.
-  bool key;         // Field functions as a key for creating sorted vectors.
-  bool shared;  // Field will be using string pooling (i.e. CreateSharedString)
-                // as default serialization behavior if field is a string.
-  bool native_inline;  // Field will be defined inline (instead of as a pointer)
-                       // for native tables if field is a struct.
-  bool flexbuffer;     // This field contains FlexBuffer data.
-  StructDef *nested_flatbuffer;  // This field contains nested FlatBuffer data.
-  size_t padding;                // Bytes to always pad after this field.
-};
-
-struct StructDef : public Definition {
-  StructDef()
-      : fixed(false),
-        predecl(true),
-        sortbysize(true),
-        has_key(false),
-        minalign(1),
-        bytesize(0) {}
-
-  void PadLastField(size_t min_align) {
-    auto padding = PaddingBytes(bytesize, min_align);
-    bytesize += padding;
-    if (fields.vec.size()) fields.vec.back()->padding = padding;
-  }
-
-  Offset<reflection::Object> Serialize(FlatBufferBuilder *builder,
-                                       const Parser &parser) const;
-
-  bool Deserialize(Parser &parser, const reflection::Object *object);
-
-  SymbolTable<FieldDef> fields;
-
-  bool fixed;       // If it's a struct, not a table.
-  bool predecl;     // If it's used before it was defined.
-  bool sortbysize;  // Whether fields come in the declaration or size order.
-  bool has_key;     // It has a key field.
-  size_t minalign;  // What the whole object needs to be aligned to.
-  size_t bytesize;  // Size if fixed.
-
-  flatbuffers::unique_ptr<std::string> original_location;
-};
-
-struct EnumDef;
-struct EnumValBuilder;
-
-struct EnumVal {
-  Offset<reflection::EnumVal> Serialize(FlatBufferBuilder *builder,
-                                        const Parser &parser) const;
-
-  bool Deserialize(const Parser &parser, const reflection::EnumVal *val);
-
-  uint64_t GetAsUInt64() const { return static_cast<uint64_t>(value); }
-  int64_t GetAsInt64() const { return value; }
-  bool IsZero() const { return 0 == value; }
-  bool IsNonZero() const { return !IsZero(); }
-
-  std::string name;
-  std::vector<std::string> doc_comment;
-  Type union_type;
-
- private:
-  friend EnumDef;
-  friend EnumValBuilder;
-  friend bool operator==(const EnumVal &lhs, const EnumVal &rhs);
-
-  EnumVal(const std::string &_name, int64_t _val) : name(_name), value(_val) {}
-  EnumVal() : value(0) {}
-
-  int64_t value;
-};
-
-struct EnumDef : public Definition {
-  EnumDef() : is_union(false), uses_multiple_type_instances(false) {}
-
-  Offset<reflection::Enum> Serialize(FlatBufferBuilder *builder,
-                                     const Parser &parser) const;
-
-  bool Deserialize(Parser &parser, const reflection::Enum *values);
-
-  template<typename T> void ChangeEnumValue(EnumVal *ev, T new_val);
-  void SortByValue();
-  void RemoveDuplicates();
-
-  std::string AllFlags() const;
-  const EnumVal *MinValue() const;
-  const EnumVal *MaxValue() const;
-  // Returns the number of integer steps from v1 to v2.
-  uint64_t Distance(const EnumVal *v1, const EnumVal *v2) const;
-  // Returns the number of integer steps from Min to Max.
-  uint64_t Distance() const { return Distance(MinValue(), MaxValue()); }
-
-  EnumVal *ReverseLookup(int64_t enum_idx,
-                         bool skip_union_default = false) const;
-  EnumVal *FindByValue(const std::string &constant) const;
-
-  std::string ToString(const EnumVal &ev) const {
-    return IsUInt64() ? NumToString(ev.GetAsUInt64())
-                      : NumToString(ev.GetAsInt64());
-  }
-
-  size_t size() const { return vals.vec.size(); }
-
-  const std::vector<EnumVal *> &Vals() const { return vals.vec; }
-
-  const EnumVal *Lookup(const std::string &enum_name) const {
-    return vals.Lookup(enum_name);
-  }
-
-  bool is_union;
-  // Type is a union which uses type aliases where at least one type is
-  // available under two different names.
-  bool uses_multiple_type_instances;
-  Type underlying_type;
-
- private:
-  bool IsUInt64() const {
-    return (BASE_TYPE_ULONG == underlying_type.base_type);
-  }
-
-  friend EnumValBuilder;
-  SymbolTable<EnumVal> vals;
-};
-
-inline bool IsStruct(const Type &type) {
-  return type.base_type == BASE_TYPE_STRUCT && type.struct_def->fixed;
-}
-
-inline bool IsUnion(const Type &type) {
-  return type.enum_def != nullptr && type.enum_def->is_union;
-}
-
-inline bool IsVector(const Type &type) {
-  return type.base_type == BASE_TYPE_VECTOR;
-}
-
-inline bool IsArray(const Type &type) {
-  return type.base_type == BASE_TYPE_ARRAY;
-}
-
-inline bool IsSeries(const Type &type) {
-  return IsVector(type) || IsArray(type);
-}
-
-inline bool IsEnum(const Type &type) {
-  return type.enum_def != nullptr && IsInteger(type.base_type);
-}
-
-inline size_t InlineSize(const Type &type) {
-  return IsStruct(type)
-             ? type.struct_def->bytesize
-             : (IsArray(type)
-                    ? InlineSize(type.VectorType()) * type.fixed_length
-                    : SizeOf(type.base_type));
-}
-
-inline size_t InlineAlignment(const Type &type) {
-  if (IsStruct(type)) {
-    return type.struct_def->minalign;
-  } else if (IsArray(type)) {
-    return IsStruct(type.VectorType()) ? type.struct_def->minalign
-                                       : SizeOf(type.element);
-  } else {
-    return SizeOf(type.base_type);
-  }
-}
-inline bool operator==(const EnumVal &lhs, const EnumVal &rhs) {
-  return lhs.value == rhs.value;
-}
-inline bool operator!=(const EnumVal &lhs, const EnumVal &rhs) {
-  return !(lhs == rhs);
-}
-
-inline bool EqualByName(const Type &a, const Type &b) {
-  return a.base_type == b.base_type && a.element == b.element &&
-         (a.struct_def == b.struct_def ||
-          a.struct_def->name == b.struct_def->name) &&
-         (a.enum_def == b.enum_def || a.enum_def->name == b.enum_def->name);
-}
-
-struct RPCCall : public Definition {
-  Offset<reflection::RPCCall> Serialize(FlatBufferBuilder *builder,
-                                        const Parser &parser) const;
-
-  bool Deserialize(Parser &parser, const reflection::RPCCall *call);
-
-  StructDef *request, *response;
-};
-
-struct ServiceDef : public Definition {
-  Offset<reflection::Service> Serialize(FlatBufferBuilder *builder,
-                                        const Parser &parser) const;
-  bool Deserialize(Parser &parser, const reflection::Service *service);
-
-  SymbolTable<RPCCall> calls;
-};
-
-// Container of options that may apply to any of the source/text generators.
-struct IDLOptions {
-  // Use flexbuffers instead for binary and text generation
-  bool use_flexbuffers;
-  bool strict_json;
-  bool skip_js_exports;
-  bool use_goog_js_export_format;
-  bool use_ES6_js_export_format;
-  bool output_default_scalars_in_json;
-  int indent_step;
-  bool output_enum_identifiers;
-  bool prefixed_enums;
-  bool scoped_enums;
-  bool include_dependence_headers;
-  bool mutable_buffer;
-  bool one_file;
-  bool proto_mode;
-  bool proto_oneof_union;
-  bool generate_all;
-  bool skip_unexpected_fields_in_json;
-  bool generate_name_strings;
-  bool generate_object_based_api;
-  bool gen_compare;
-  std::string cpp_object_api_pointer_type;
-  std::string cpp_object_api_string_type;
-  bool cpp_object_api_string_flexible_constructor;
-  bool gen_nullable;
-  bool java_checkerframework;
-  bool gen_generated;
-  std::string object_prefix;
-  std::string object_suffix;
-  bool union_value_namespacing;
-  bool allow_non_utf8;
-  bool natural_utf8;
-  std::string include_prefix;
-  bool keep_include_path;
-  bool binary_schema_comments;
-  bool binary_schema_builtins;
-  bool binary_schema_gen_embed;
-  bool skip_flatbuffers_import;
-  std::string go_import;
-  std::string go_namespace;
-  bool reexport_ts_modules;
-  bool js_ts_short_names;
-  bool protobuf_ascii_alike;
-  bool size_prefixed;
-  std::string root_type;
-  bool force_defaults;
-  bool java_primitive_has_method;
-  bool cs_gen_json_serializer;
-  std::vector<std::string> cpp_includes;
-  std::string cpp_std;
-  std::string proto_namespace_suffix;
-  std::string filename_suffix;
-  std::string filename_extension;
-
-  // Possible options for the more general generator below.
-  enum Language {
-    kJava = 1 << 0,
-    kCSharp = 1 << 1,
-    kGo = 1 << 2,
-    kCpp = 1 << 3,
-    kJs = 1 << 4,
-    kPython = 1 << 5,
-    kPhp = 1 << 6,
-    kJson = 1 << 7,
-    kBinary = 1 << 8,
-    kTs = 1 << 9,
-    kJsonSchema = 1 << 10,
-    kDart = 1 << 11,
-    kLua = 1 << 12,
-    kLobster = 1 << 13,
-    kRust = 1 << 14,
-    kKotlin = 1 << 15,
-    kSwift = 1 << 16,
-    kMAX
-  };
-
-  Language lang;
-
-  enum MiniReflect { kNone, kTypes, kTypesAndNames };
-
-  MiniReflect mini_reflect;
-
-  // The corresponding language bit will be set if a language is included
-  // for code generation.
-  unsigned long lang_to_generate;
-
-  // If set (default behavior), empty string fields will be set to nullptr to
-  // make the flatbuffer more compact.
-  bool set_empty_strings_to_null;
-
-  // If set (default behavior), empty vector fields will be set to nullptr to
-  // make the flatbuffer more compact.
-  bool set_empty_vectors_to_null;
-
-  IDLOptions()
-      : use_flexbuffers(false),
-        strict_json(false),
-        skip_js_exports(false),
-        use_goog_js_export_format(false),
-        use_ES6_js_export_format(false),
-        output_default_scalars_in_json(false),
-        indent_step(2),
-        output_enum_identifiers(true),
-        prefixed_enums(true),
-        scoped_enums(false),
-        include_dependence_headers(true),
-        mutable_buffer(false),
-        one_file(false),
-        proto_mode(false),
-        proto_oneof_union(false),
-        generate_all(false),
-        skip_unexpected_fields_in_json(false),
-        generate_name_strings(false),
-        generate_object_based_api(false),
-        gen_compare(false),
-        cpp_object_api_pointer_type("std::unique_ptr"),
-        cpp_object_api_string_flexible_constructor(false),
-        gen_nullable(false),
-        java_checkerframework(false),
-        gen_generated(false),
-        object_suffix("T"),
-        union_value_namespacing(true),
-        allow_non_utf8(false),
-        natural_utf8(false),
-        keep_include_path(false),
-        binary_schema_comments(false),
-        binary_schema_builtins(false),
-        binary_schema_gen_embed(false),
-        skip_flatbuffers_import(false),
-        reexport_ts_modules(true),
-        js_ts_short_names(false),
-        protobuf_ascii_alike(false),
-        size_prefixed(false),
-        force_defaults(false),
-        java_primitive_has_method(false),
-        cs_gen_json_serializer(false),
-        filename_suffix("_generated"),
-        filename_extension(),
-        lang(IDLOptions::kJava),
-        mini_reflect(IDLOptions::kNone),
-        lang_to_generate(0),
-        set_empty_strings_to_null(true),
-        set_empty_vectors_to_null(true) {}
-};
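
// A hedged sketch of configuring these options; the values chosen here are
// purely illustrative, and Parser (which accepts an IDLOptions in its
// constructor) is declared further down in this header:
//
//   IDLOptions opts;
//   opts.lang_to_generate |= IDLOptions::kCpp;  // request C++ code generation
//   opts.generate_object_based_api = true;      // also emit the native "T" API
//   opts.strict_json = true;                    // quote field names in JSON
//   Parser parser(opts);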
-
-// This encapsulates where the parser is in the current source file.
-struct ParserState {
-  ParserState()
-      : cursor_(nullptr),
-        line_start_(nullptr),
-        line_(0),
-        token_(-1),
-        attr_is_trivial_ascii_string_(true) {}
-
- protected:
-  void ResetState(const char *source) {
-    cursor_ = source;
-    line_ = 0;
-    MarkNewLine();
-  }
-
-  void MarkNewLine() {
-    line_start_ = cursor_;
-    line_ += 1;
-  }
-
-  int64_t CursorPosition() const {
-    FLATBUFFERS_ASSERT(cursor_ && line_start_ && cursor_ >= line_start_);
-    return static_cast<int64_t>(cursor_ - line_start_);
-  }
-
-  const char *cursor_;
-  const char *line_start_;
-  int line_;  // the current line being parsed
-  int token_;
-
-  // Flag: text in attribute_ is a true ASCII string without escape
-  // sequences. Only printable ASCII (without [\t\r\n]).
-  // Used for number-in-string (and base64 strings in the future).
-  bool attr_is_trivial_ascii_string_;
-  std::string attribute_;
-  std::vector<std::string> doc_comment_;
-};
-
-// A way to make error propagation less error prone by requiring values to be
-// checked.
-// Once you create a value of this type you must either:
-// - Call Check() on it.
-// - Copy or assign it to another value.
-// Failure to do so leads to an assert.
-// This guarantees that, when used as a return value, it cannot be ignored.
-class CheckedError {
- public:
-  explicit CheckedError(bool error)
-      : is_error_(error), has_been_checked_(false) {}
-
-  CheckedError &operator=(const CheckedError &other) {
-    is_error_ = other.is_error_;
-    has_been_checked_ = false;
-    other.has_been_checked_ = true;
-    return *this;
-  }
-
-  CheckedError(const CheckedError &other) {
-    *this = other;  // Use assignment operator.
-  }
-
-  ~CheckedError() { FLATBUFFERS_ASSERT(has_been_checked_); }
-
-  bool Check() {
-    has_been_checked_ = true;
-    return is_error_;
-  }
-
- private:
-  bool is_error_;
-  mutable bool has_been_checked_;
-};
-
-// Additionally, in GCC we can get these errors statically, for extra
-// assurance:
-// clang-format off
-#ifdef __GNUC__
-#define FLATBUFFERS_CHECKED_ERROR CheckedError \
-          __attribute__((warn_unused_result))
-#else
-#define FLATBUFFERS_CHECKED_ERROR CheckedError
-#endif
-// clang-format on
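
// A sketch of the consumption contract described above; Step and Run are
// hypothetical helpers, and only CheckedError itself comes from this header:
//
//   CheckedError Step() { return CheckedError(false); }
//
//   CheckedError Run() {
//     auto ce = Step();
//     if (ce.Check()) return ce;   // checked here, then propagated upward
//     return CheckedError(false);  // success; the caller must Check() this too
//   }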
-
-class Parser : public ParserState {
- public:
-  explicit Parser(const IDLOptions &options = IDLOptions())
-      : current_namespace_(nullptr),
-        empty_namespace_(nullptr),
-        flex_builder_(256, flexbuffers::BUILDER_FLAG_SHARE_ALL),
-        root_struct_def_(nullptr),
-        opts(options),
-        uses_flexbuffers_(false),
-        source_(nullptr),
-        anonymous_counter(0),
-        recurse_protection_counter(0) {
-    if (opts.force_defaults) { builder_.ForceDefaults(true); }
-    // Start out with the empty namespace being current.
-    empty_namespace_ = new Namespace();
-    namespaces_.push_back(empty_namespace_);
-    current_namespace_ = empty_namespace_;
-    known_attributes_["deprecated"] = true;
-    known_attributes_["required"] = true;
-    known_attributes_["key"] = true;
-    known_attributes_["shared"] = true;
-    known_attributes_["hash"] = true;
-    known_attributes_["id"] = true;
-    known_attributes_["force_align"] = true;
-    known_attributes_["bit_flags"] = true;
-    known_attributes_["original_order"] = true;
-    known_attributes_["nested_flatbuffer"] = true;
-    known_attributes_["csharp_partial"] = true;
-    known_attributes_["streaming"] = true;
-    known_attributes_["idempotent"] = true;
-    known_attributes_["cpp_type"] = true;
-    known_attributes_["cpp_ptr_type"] = true;
-    known_attributes_["cpp_ptr_type_get"] = true;
-    known_attributes_["cpp_str_type"] = true;
-    known_attributes_["cpp_str_flex_ctor"] = true;
-    known_attributes_["native_inline"] = true;
-    known_attributes_["native_custom_alloc"] = true;
-    known_attributes_["native_type"] = true;
-    known_attributes_["native_default"] = true;
-    known_attributes_["flexbuffer"] = true;
-    known_attributes_["private"] = true;
-  }
-
-  ~Parser() {
-    for (auto it = namespaces_.begin(); it != namespaces_.end(); ++it) {
-      delete *it;
-    }
-  }
-
-  // Parse the string containing either schema or JSON data, which will
-  // populate the SymbolTables or the FlatBufferBuilder above.
-  // include_paths is used to resolve any include statements, and typically
-  // should at least include the project path (where you loaded source_ from).
-  // include_paths must be nullptr-terminated if specified.
-  // If include_paths is nullptr, it will attempt to load from the current
-  // directory.
-  // If the source was loaded from a file and isn't an include file,
-  // supply its name in source_filename.
-  // All paths specified in this call must be in POSIX format; if you accept
-  // paths from user input, please call PosixPath on them first.
-  bool Parse(const char *_source, const char **include_paths = nullptr,
-             const char *source_filename = nullptr);
-
-  // Set the root type. May override the one set in the schema.
-  bool SetRootType(const char *name);
-
-  // Mark all definitions as already having code generated.
-  void MarkGenerated();
-
-  // Get the files recursively included by the given file. The returned
-  // container will have at least the given file.
-  std::set<std::string> GetIncludedFilesRecursive(
-      const std::string &file_name) const;
-
-  // Fills builder_ with a binary version of the schema parsed.
-  // See reflection/reflection.fbs
-  void Serialize();
-
-  // Deserialize a schema buffer
-  bool Deserialize(const uint8_t *buf, const size_t size);
-
-  // Fills the internal structures as if the schema passed had been loaded by
-  // parsing with Parse, except that included filenames will not be populated.
-  bool Deserialize(const reflection::Schema *schema);
-
-  Type *DeserializeType(const reflection::Type *type);
-
-  // Checks that the schema represented by this parser is a safe evolution
-  // of the schema provided. Returns a non-empty error on any problems.
-  std::string ConformTo(const Parser &base);
-
-  // Similar to Parse(), but now only accepts JSON to be parsed into a
-  // FlexBuffer.
-  bool ParseFlexBuffer(const char *source, const char *source_filename,
-                       flexbuffers::Builder *builder);
-
-  StructDef *LookupStruct(const std::string &id) const;
-
-  std::string UnqualifiedName(const std::string &fullQualifiedName);
-
-  FLATBUFFERS_CHECKED_ERROR Error(const std::string &msg);
-
- private:
-  void Message(const std::string &msg);
-  void Warning(const std::string &msg);
-  FLATBUFFERS_CHECKED_ERROR ParseHexNum(int nibbles, uint64_t *val);
-  FLATBUFFERS_CHECKED_ERROR Next();
-  FLATBUFFERS_CHECKED_ERROR SkipByteOrderMark();
-  bool Is(int t) const;
-  bool IsIdent(const char *id) const;
-  FLATBUFFERS_CHECKED_ERROR Expect(int t);
-  std::string TokenToStringId(int t) const;
-  EnumDef *LookupEnum(const std::string &id);
-  FLATBUFFERS_CHECKED_ERROR ParseNamespacing(std::string *id,
-                                             std::string *last);
-  FLATBUFFERS_CHECKED_ERROR ParseTypeIdent(Type &type);
-  FLATBUFFERS_CHECKED_ERROR ParseType(Type &type);
-  FLATBUFFERS_CHECKED_ERROR AddField(StructDef &struct_def,
-                                     const std::string &name, const Type &type,
-                                     FieldDef **dest);
-  FLATBUFFERS_CHECKED_ERROR ParseField(StructDef &struct_def);
-  FLATBUFFERS_CHECKED_ERROR ParseString(Value &val, bool use_string_pooling);
-  FLATBUFFERS_CHECKED_ERROR ParseComma();
-  FLATBUFFERS_CHECKED_ERROR ParseAnyValue(Value &val, FieldDef *field,
-                                          size_t parent_fieldn,
-                                          const StructDef *parent_struct_def,
-                                          uoffset_t count,
-                                          bool inside_vector = false);
-  template<typename F>
-  FLATBUFFERS_CHECKED_ERROR ParseTableDelimiters(size_t &fieldn,
-                                                 const StructDef *struct_def,
-                                                 F body);
-  FLATBUFFERS_CHECKED_ERROR ParseTable(const StructDef &struct_def,
-                                       std::string *value, uoffset_t *ovalue);
-  void SerializeStruct(const StructDef &struct_def, const Value &val);
-  void SerializeStruct(FlatBufferBuilder &builder, const StructDef &struct_def,
-                       const Value &val);
-  template<typename F>
-  FLATBUFFERS_CHECKED_ERROR ParseVectorDelimiters(uoffset_t &count, F body);
-  FLATBUFFERS_CHECKED_ERROR ParseVector(const Type &type, uoffset_t *ovalue,
-                                        FieldDef *field, size_t fieldn);
-  FLATBUFFERS_CHECKED_ERROR ParseArray(Value &array);
-  FLATBUFFERS_CHECKED_ERROR ParseNestedFlatbuffer(
-      Value &val, FieldDef *field, size_t fieldn,
-      const StructDef *parent_struct_def);
-  FLATBUFFERS_CHECKED_ERROR ParseMetaData(SymbolTable<Value> *attributes);
-  FLATBUFFERS_CHECKED_ERROR TryTypedValue(const std::string *name, int dtoken,
-                                          bool check, Value &e, BaseType req,
-                                          bool *destmatch);
-  FLATBUFFERS_CHECKED_ERROR ParseHash(Value &e, FieldDef *field);
-  FLATBUFFERS_CHECKED_ERROR TokenError();
-  FLATBUFFERS_CHECKED_ERROR ParseSingleValue(const std::string *name, Value &e,
-                                             bool check_now);
-  FLATBUFFERS_CHECKED_ERROR ParseFunction(const std::string *name, Value &e);
-  FLATBUFFERS_CHECKED_ERROR ParseEnumFromString(const Type &type,
-                                                std::string *result);
-  StructDef *LookupCreateStruct(const std::string &name,
-                                bool create_if_new = true,
-                                bool definition = false);
-  FLATBUFFERS_CHECKED_ERROR ParseEnum(bool is_union, EnumDef **dest);
-  FLATBUFFERS_CHECKED_ERROR ParseNamespace();
-  FLATBUFFERS_CHECKED_ERROR StartStruct(const std::string &name,
-                                        StructDef **dest);
-  FLATBUFFERS_CHECKED_ERROR StartEnum(const std::string &name, bool is_union,
-                                      EnumDef **dest);
-  FLATBUFFERS_CHECKED_ERROR ParseDecl();
-  FLATBUFFERS_CHECKED_ERROR ParseService();
-  FLATBUFFERS_CHECKED_ERROR ParseProtoFields(StructDef *struct_def,
-                                             bool isextend, bool inside_oneof);
-  FLATBUFFERS_CHECKED_ERROR ParseProtoOption();
-  FLATBUFFERS_CHECKED_ERROR ParseProtoKey();
-  FLATBUFFERS_CHECKED_ERROR ParseProtoDecl();
-  FLATBUFFERS_CHECKED_ERROR ParseProtoCurliesOrIdent();
-  FLATBUFFERS_CHECKED_ERROR ParseTypeFromProtoType(Type *type);
-  FLATBUFFERS_CHECKED_ERROR SkipAnyJsonValue();
-  FLATBUFFERS_CHECKED_ERROR ParseFlexBufferValue(flexbuffers::Builder *builder);
-  FLATBUFFERS_CHECKED_ERROR StartParseFile(const char *source,
-                                           const char *source_filename);
-  FLATBUFFERS_CHECKED_ERROR ParseRoot(const char *_source,
-                                      const char **include_paths,
-                                      const char *source_filename);
-  FLATBUFFERS_CHECKED_ERROR DoParse(const char *_source,
-                                    const char **include_paths,
-                                    const char *source_filename,
-                                    const char *include_filename);
-  FLATBUFFERS_CHECKED_ERROR CheckClash(std::vector<FieldDef *> &fields,
-                                       StructDef *struct_def,
-                                       const char *suffix, BaseType baseType);
-
-  bool SupportsAdvancedUnionFeatures() const;
-  bool SupportsAdvancedArrayFeatures() const;
-  Namespace *UniqueNamespace(Namespace *ns);
-
-  FLATBUFFERS_CHECKED_ERROR RecurseError();
-  template<typename F> CheckedError Recurse(F f);
-
- public:
-  SymbolTable<Type> types_;
-  SymbolTable<StructDef> structs_;
-  SymbolTable<EnumDef> enums_;
-  SymbolTable<ServiceDef> services_;
-  std::vector<Namespace *> namespaces_;
-  Namespace *current_namespace_;
-  Namespace *empty_namespace_;
-  std::string error_;  // User readable error_ if Parse() == false
-
-  FlatBufferBuilder builder_;  // any data contained in the file
-  flexbuffers::Builder flex_builder_;
-  flexbuffers::Reference flex_root_;
-  StructDef *root_struct_def_;
-  std::string file_identifier_;
-  std::string file_extension_;
-
-  std::map<std::string, std::string> included_files_;
-  std::map<std::string, std::set<std::string>> files_included_per_file_;
-  std::vector<std::string> native_included_files_;
-
-  std::map<std::string, bool> known_attributes_;
-
-  IDLOptions opts;
-  bool uses_flexbuffers_;
-
- private:
-  const char *source_;
-
-  std::string file_being_parsed_;
-
-  std::vector<std::pair<Value, FieldDef *>> field_stack_;
-
-  int anonymous_counter;
-  int recurse_protection_counter;
-};
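
// A usage sketch for the Parser above; the schema literal and include
// directory are hypothetical, while Parse, error_, and opts are the members
// declared in this class:
//
//   flatbuffers::Parser parser;
//   const char *include_dirs[] = { "schemas/", nullptr };  // nullptr-terminated
//   if (!parser.Parse("table Monster { hp: int; } root_type Monster;",
//                     include_dirs, "monster.fbs")) {
//     printf("%s\n", parser.error_.c_str());  // human-readable diagnostics
//   }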
-
-// Utility functions for multiple generators:
-
-extern std::string MakeCamel(const std::string &in, bool first = true);
-
-extern std::string MakeScreamingCamel(const std::string &in);
-
-// Generate text (JSON) from a given FlatBuffer, and a given Parser
-// object that has been populated with the corresponding schema.
-// If indent_step is 0, no indentation will be generated. Additionally,
-// if it is less than 0, no linefeeds will be generated either.
-// See idl_gen_text.cpp.
-// strict_json adds "quotes" around field names if true.
-// If the flatbuffer cannot be encoded in JSON (e.g., it contains non-UTF-8
-// byte arrays in String values), returns false.
-extern bool GenerateTextFromTable(const Parser &parser, const void *table,
-                                  const std::string &tablename,
-                                  std::string *text);
-extern bool GenerateText(const Parser &parser, const void *flatbuffer,
-                         std::string *text);
-extern bool GenerateTextFile(const Parser &parser, const std::string &path,
-                             const std::string &file_name);
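
// A hedged sketch of the text generator above: parser is assumed to have
// parsed the matching schema already, and buf to point at a finished
// FlatBuffer (both are placeholders here):
//
//   std::string jsongen;
//   if (!flatbuffers::GenerateText(parser, buf, &jsongen)) {
//     // Encoding failed, e.g. a String value held non-UTF-8 bytes.
//   }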
-
-// Generate binary files from a given FlatBuffer, and a given Parser
-// object that has been populated with the corresponding schema.
-// See code_generators.cpp.
-extern bool GenerateBinary(const Parser &parser, const std::string &path,
-                           const std::string &file_name);
-
-// Generate a C++ header from the definitions in the Parser object.
-// See idl_gen_cpp.
-extern bool GenerateCPP(const Parser &parser, const std::string &path,
-                        const std::string &file_name);
-
-// Generate C# files from the definitions in the Parser object.
-// See idl_gen_csharp.cpp.
-extern bool GenerateCSharp(const Parser &parser, const std::string &path,
-                           const std::string &file_name);
-
-extern bool GenerateDart(const Parser &parser, const std::string &path,
-                         const std::string &file_name);
-
-// Generate Java files from the definitions in the Parser object.
-// See idl_gen_java.cpp.
-extern bool GenerateJava(const Parser &parser, const std::string &path,
-                         const std::string &file_name);
-
-// Generate JavaScript or TypeScript code from the definitions in the Parser
-// object. See idl_gen_js.
-extern bool GenerateJSTS(const Parser &parser, const std::string &path,
-                         const std::string &file_name);
-
-// Generate Go files from the definitions in the Parser object.
-// See idl_gen_go.cpp.
-extern bool GenerateGo(const Parser &parser, const std::string &path,
-                       const std::string &file_name);
-
-// Generate Php code from the definitions in the Parser object.
-// See idl_gen_php.
-extern bool GeneratePhp(const Parser &parser, const std::string &path,
-                        const std::string &file_name);
-
-// Generate Python files from the definitions in the Parser object.
-// See idl_gen_python.cpp.
-extern bool GeneratePython(const Parser &parser, const std::string &path,
-                           const std::string &file_name);
-
-// Generate Lobster files from the definitions in the Parser object.
-// See idl_gen_lobster.cpp.
-extern bool GenerateLobster(const Parser &parser, const std::string &path,
-                            const std::string &file_name);
-
-// Generate Lua files from the definitions in the Parser object.
-// See idl_gen_lua.cpp.
-extern bool GenerateLua(const Parser &parser, const std::string &path,
-                        const std::string &file_name);
-
-// Generate Rust files from the definitions in the Parser object.
-// See idl_gen_rust.cpp.
-extern bool GenerateRust(const Parser &parser, const std::string &path,
-                         const std::string &file_name);
-
-// Generate a JSON schema file.
-// See idl_gen_json_schema.cpp.
-extern bool GenerateJsonSchema(const Parser &parser, const std::string &path,
-                               const std::string &file_name);
-
-extern bool GenerateKotlin(const Parser &parser, const std::string &path,
-                           const std::string &file_name);
-
-// Generate Swift classes.
-// See idl_gen_swift.cpp
-extern bool GenerateSwift(const Parser &parser, const std::string &path,
-                          const std::string &file_name);
-
-// Generate a schema file from the internal representation, useful after
-// parsing a .proto schema.
-extern std::string GenerateFBS(const Parser &parser,
-                               const std::string &file_name);
-extern bool GenerateFBS(const Parser &parser, const std::string &path,
-                        const std::string &file_name);
-
-// Generate a make rule for the generated JavaScript or TypeScript code.
-// See idl_gen_js.cpp.
-extern std::string JSTSMakeRule(const Parser &parser, const std::string &path,
-                                const std::string &file_name);
-
-// Generate a make rule for the generated C++ header.
-// See idl_gen_cpp.cpp.
-extern std::string CPPMakeRule(const Parser &parser, const std::string &path,
-                               const std::string &file_name);
-
-// Generate a make rule for the generated Dart code.
-// See idl_gen_dart.cpp.
-extern std::string DartMakeRule(const Parser &parser, const std::string &path,
-                                const std::string &file_name);
-
-// Generate a make rule for the generated Rust code.
-// See idl_gen_rust.cpp.
-extern std::string RustMakeRule(const Parser &parser, const std::string &path,
-                                const std::string &file_name);
-
-// Generate a make rule for generated Java or C# files.
-// See code_generators.cpp.
-extern std::string JavaCSharpMakeRule(const Parser &parser,
-                                      const std::string &path,
-                                      const std::string &file_name);
-
-// Generate a make rule for the generated text (JSON) files.
-// See idl_gen_text.cpp.
-extern std::string TextMakeRule(const Parser &parser, const std::string &path,
-                                const std::string &file_names);
-
-// Generate a make rule for the generated binary files.
-// See code_generators.cpp.
-extern std::string BinaryMakeRule(const Parser &parser, const std::string &path,
-                                  const std::string &file_name);
-
-// Generate GRPC C++ interfaces.
-// See idl_gen_grpc.cpp.
-bool GenerateCppGRPC(const Parser &parser, const std::string &path,
-                     const std::string &file_name);
-
-// Generate GRPC Go interfaces.
-// See idl_gen_grpc.cpp.
-bool GenerateGoGRPC(const Parser &parser, const std::string &path,
-                    const std::string &file_name);
-
-// Generate GRPC Java classes.
-// See idl_gen_grpc.cpp
-bool GenerateJavaGRPC(const Parser &parser, const std::string &path,
-                      const std::string &file_name);
-
-// Generate GRPC Python interfaces.
-// See idl_gen_grpc.cpp.
-bool GeneratePythonGRPC(const Parser &parser, const std::string &path,
-                        const std::string &file_name);
-
-// Generate GRPC Swift interfaces.
-// See idl_gen_grpc.cpp.
-extern bool GenerateSwiftGRPC(const Parser &parser, const std::string &path,
-                              const std::string &file_name);
-
-}  // namespace flatbuffers
-
-#endif  // FLATBUFFERS_IDL_H_

+ 0 - 408
flatbuffers/minireflect.h

@@ -1,408 +0,0 @@
-/*
- * Copyright 2017 Google Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLATBUFFERS_MINIREFLECT_H_
-#define FLATBUFFERS_MINIREFLECT_H_
-
-#include "flatbuffers.h"
-#include "util.h"
-
-namespace flatbuffers {
-
-// Utilities that can be used with the "mini reflection" tables present
-// in generated code with --reflect-types (only types) or --reflect-names
-// (also names).
-// This allows basic reflection functionality such as pretty-printing
-// that does not require the use of the schema parser or loading of binary
-// schema files at runtime (reflection.h).
-
-// For any of the functions below that take `const TypeTable *`, you pass
-// `FooTypeTable()` if the type of the root is `Foo`.
-
-// First, a generic iterator that can be used by multiple algorithms.
-
-struct IterationVisitor {
-  // These mark the scope of a table or struct.
-  virtual void StartSequence() {}
-  virtual void EndSequence() {}
-  // Called for each field regardless of whether it is present or not.
-  // If not present, val == nullptr. set_idx is the index among the fields
-  // that are actually set.
-  virtual void Field(size_t /*field_idx*/, size_t /*set_idx*/,
-                     ElementaryType /*type*/, bool /*is_vector*/,
-                     const TypeTable * /*type_table*/, const char * /*name*/,
-                     const uint8_t * /*val*/) {}
-  // Called for a value that is actually present, after a field, or as part
-  // of a vector.
-  virtual void UType(uint8_t, const char *) {}
-  virtual void Bool(bool) {}
-  virtual void Char(int8_t, const char *) {}
-  virtual void UChar(uint8_t, const char *) {}
-  virtual void Short(int16_t, const char *) {}
-  virtual void UShort(uint16_t, const char *) {}
-  virtual void Int(int32_t, const char *) {}
-  virtual void UInt(uint32_t, const char *) {}
-  virtual void Long(int64_t) {}
-  virtual void ULong(uint64_t) {}
-  virtual void Float(float) {}
-  virtual void Double(double) {}
-  virtual void String(const String *) {}
-  virtual void Unknown(const uint8_t *) {}  // From a future version.
-  // These mark the scope of a vector.
-  virtual void StartVector() {}
-  virtual void EndVector() {}
-  virtual void Element(size_t /*i*/, ElementaryType /*type*/,
-                       const TypeTable * /*type_table*/,
-                       const uint8_t * /*val*/) {}
-  virtual ~IterationVisitor() {}
-};
-
-inline size_t InlineSize(ElementaryType type, const TypeTable *type_table) {
-  switch (type) {
-    case ET_UTYPE:
-    case ET_BOOL:
-    case ET_CHAR:
-    case ET_UCHAR: return 1;
-    case ET_SHORT:
-    case ET_USHORT: return 2;
-    case ET_INT:
-    case ET_UINT:
-    case ET_FLOAT:
-    case ET_STRING: return 4;
-    case ET_LONG:
-    case ET_ULONG:
-    case ET_DOUBLE: return 8;
-    case ET_SEQUENCE:
-      switch (type_table->st) {
-        case ST_TABLE:
-        case ST_UNION: return 4;
-        case ST_STRUCT:
-          return static_cast<size_t>(type_table->values[type_table->num_elems]);
-        default: FLATBUFFERS_ASSERT(false); return 1;
-      }
-    default: FLATBUFFERS_ASSERT(false); return 1;
-  }
-}
-
-inline int64_t LookupEnum(int64_t enum_val, const int64_t *values,
-                          size_t num_values) {
-  if (!values) return enum_val;
-  for (size_t i = 0; i < num_values; i++) {
-    if (enum_val == values[i]) return static_cast<int64_t>(i);
-  }
-  return -1;  // Unknown enum value.
-}
-
-template<typename T> const char *EnumName(T tval, const TypeTable *type_table) {
-  if (!type_table || !type_table->names) return nullptr;
-  auto i = LookupEnum(static_cast<int64_t>(tval), type_table->values,
-                      type_table->num_elems);
-  if (i >= 0 && i < static_cast<int64_t>(type_table->num_elems)) {
-    return type_table->names[i];
-  }
-  return nullptr;
-}
-
-void IterateObject(const uint8_t *obj, const TypeTable *type_table,
-                   IterationVisitor *visitor);
-
-inline void IterateValue(ElementaryType type, const uint8_t *val,
-                         const TypeTable *type_table, const uint8_t *prev_val,
-                         soffset_t vector_index, IterationVisitor *visitor) {
-  switch (type) {
-    case ET_UTYPE: {
-      auto tval = ReadScalar<uint8_t>(val);
-      visitor->UType(tval, EnumName(tval, type_table));
-      break;
-    }
-    case ET_BOOL: {
-      visitor->Bool(ReadScalar<uint8_t>(val) != 0);
-      break;
-    }
-    case ET_CHAR: {
-      auto tval = ReadScalar<int8_t>(val);
-      visitor->Char(tval, EnumName(tval, type_table));
-      break;
-    }
-    case ET_UCHAR: {
-      auto tval = ReadScalar<uint8_t>(val);
-      visitor->UChar(tval, EnumName(tval, type_table));
-      break;
-    }
-    case ET_SHORT: {
-      auto tval = ReadScalar<int16_t>(val);
-      visitor->Short(tval, EnumName(tval, type_table));
-      break;
-    }
-    case ET_USHORT: {
-      auto tval = ReadScalar<uint16_t>(val);
-      visitor->UShort(tval, EnumName(tval, type_table));
-      break;
-    }
-    case ET_INT: {
-      auto tval = ReadScalar<int32_t>(val);
-      visitor->Int(tval, EnumName(tval, type_table));
-      break;
-    }
-    case ET_UINT: {
-      auto tval = ReadScalar<uint32_t>(val);
-      visitor->UInt(tval, EnumName(tval, type_table));
-      break;
-    }
-    case ET_LONG: {
-      visitor->Long(ReadScalar<int64_t>(val));
-      break;
-    }
-    case ET_ULONG: {
-      visitor->ULong(ReadScalar<uint64_t>(val));
-      break;
-    }
-    case ET_FLOAT: {
-      visitor->Float(ReadScalar<float>(val));
-      break;
-    }
-    case ET_DOUBLE: {
-      visitor->Double(ReadScalar<double>(val));
-      break;
-    }
-    case ET_STRING: {
-      val += ReadScalar<uoffset_t>(val);
-      visitor->String(reinterpret_cast<const String *>(val));
-      break;
-    }
-    case ET_SEQUENCE: {
-      switch (type_table->st) {
-        case ST_TABLE:
-          val += ReadScalar<uoffset_t>(val);
-          IterateObject(val, type_table, visitor);
-          break;
-        case ST_STRUCT: IterateObject(val, type_table, visitor); break;
-        case ST_UNION: {
-          val += ReadScalar<uoffset_t>(val);
-          FLATBUFFERS_ASSERT(prev_val);
-          auto union_type = *prev_val;  // Always a uint8_t.
-          if (vector_index >= 0) {
-            auto type_vec = reinterpret_cast<const Vector<uint8_t> *>(prev_val);
-            union_type = type_vec->Get(static_cast<uoffset_t>(vector_index));
-          }
-          auto type_code_idx =
-              LookupEnum(union_type, type_table->values, type_table->num_elems);
-          if (type_code_idx >= 0 &&
-              type_code_idx < static_cast<int32_t>(type_table->num_elems)) {
-            auto type_code = type_table->type_codes[type_code_idx];
-            switch (type_code.base_type) {
-              case ET_SEQUENCE: {
-                auto ref = type_table->type_refs[type_code.sequence_ref]();
-                IterateObject(val, ref, visitor);
-                break;
-              }
-              case ET_STRING:
-                visitor->String(reinterpret_cast<const String *>(val));
-                break;
-              default: visitor->Unknown(val);
-            }
-          } else {
-            visitor->Unknown(val);
-          }
-          break;
-        }
-        case ST_ENUM: FLATBUFFERS_ASSERT(false); break;
-      }
-      break;
-    }
-    default: {
-      visitor->Unknown(val);
-      break;
-    }
-  }
-}
-
-inline void IterateObject(const uint8_t *obj, const TypeTable *type_table,
-                          IterationVisitor *visitor) {
-  visitor->StartSequence();
-  const uint8_t *prev_val = nullptr;
-  size_t set_idx = 0;
-  for (size_t i = 0; i < type_table->num_elems; i++) {
-    auto type_code = type_table->type_codes[i];
-    auto type = static_cast<ElementaryType>(type_code.base_type);
-    auto is_vector = type_code.is_vector != 0;
-    auto ref_idx = type_code.sequence_ref;
-    const TypeTable *ref = nullptr;
-    if (ref_idx >= 0) { ref = type_table->type_refs[ref_idx](); }
-    auto name = type_table->names ? type_table->names[i] : nullptr;
-    const uint8_t *val = nullptr;
-    if (type_table->st == ST_TABLE) {
-      val = reinterpret_cast<const Table *>(obj)->GetAddressOf(
-          FieldIndexToOffset(static_cast<voffset_t>(i)));
-    } else {
-      val = obj + type_table->values[i];
-    }
-    visitor->Field(i, set_idx, type, is_vector, ref, name, val);
-    if (val) {
-      set_idx++;
-      if (is_vector) {
-        val += ReadScalar<uoffset_t>(val);
-        auto vec = reinterpret_cast<const Vector<uint8_t> *>(val);
-        visitor->StartVector();
-        auto elem_ptr = vec->Data();
-        for (size_t j = 0; j < vec->size(); j++) {
-          visitor->Element(j, type, ref, elem_ptr);
-          IterateValue(type, elem_ptr, ref, prev_val, static_cast<soffset_t>(j),
-                       visitor);
-          elem_ptr += InlineSize(type, ref);
-        }
-        visitor->EndVector();
-      } else {
-        IterateValue(type, val, ref, prev_val, -1, visitor);
-      }
-    }
-    prev_val = val;
-  }
-  visitor->EndSequence();
-}
-
-inline void IterateFlatBuffer(const uint8_t *buffer,
-                              const TypeTable *type_table,
-                              IterationVisitor *callback) {
-  IterateObject(GetRoot<uint8_t>(buffer), type_table, callback);
-}
-
-// Outputting a FlatBuffer to a string. Tries to conform as closely as
-// possible to JSON / the output generated by idl_gen_text.cpp.
-
-struct ToStringVisitor : public IterationVisitor {
-  std::string s;   // The output string being built.
-  std::string d;   // Delimiter emitted between fields / elements.
-  bool q;          // Whether to quote field names.
-  std::string in;  // The indent string for a single level.
-  size_t indent_level;
-  bool vector_delimited;
-  ToStringVisitor(std::string delimiter, bool quotes, std::string indent,
-                  bool vdelimited = true)
-      : d(delimiter),
-        q(quotes),
-        in(indent),
-        indent_level(0),
-        vector_delimited(vdelimited) {}
-  ToStringVisitor(std::string delimiter)
-      : d(delimiter),
-        q(false),
-        in(""),
-        indent_level(0),
-        vector_delimited(true) {}
-
-  void append_indent() {
-    for (size_t i = 0; i < indent_level; i++) { s += in; }
-  }
-
-  void StartSequence() {
-    s += "{";
-    s += d;
-    indent_level++;
-  }
-  void EndSequence() {
-    s += d;
-    indent_level--;
-    append_indent();
-    s += "}";
-  }
-  void Field(size_t /*field_idx*/, size_t set_idx, ElementaryType /*type*/,
-             bool /*is_vector*/, const TypeTable * /*type_table*/,
-             const char *name, const uint8_t *val) {
-    if (!val) return;
-    if (set_idx) {
-      s += ",";
-      s += d;
-    }
-    append_indent();
-    if (name) {
-      if (q) s += "\"";
-      s += name;
-      if (q) s += "\"";
-      s += ": ";
-    }
-  }
-  template<typename T> void Named(T x, const char *name) {
-    if (name) {
-      if (q) s += "\"";
-      s += name;
-      if (q) s += "\"";
-    } else {
-      s += NumToString(x);
-    }
-  }
-  void UType(uint8_t x, const char *name) { Named(x, name); }
-  void Bool(bool x) { s += x ? "true" : "false"; }
-  void Char(int8_t x, const char *name) { Named(x, name); }
-  void UChar(uint8_t x, const char *name) { Named(x, name); }
-  void Short(int16_t x, const char *name) { Named(x, name); }
-  void UShort(uint16_t x, const char *name) { Named(x, name); }
-  void Int(int32_t x, const char *name) { Named(x, name); }
-  void UInt(uint32_t x, const char *name) { Named(x, name); }
-  void Long(int64_t x) { s += NumToString(x); }
-  void ULong(uint64_t x) { s += NumToString(x); }
-  void Float(float x) { s += NumToString(x); }
-  void Double(double x) { s += NumToString(x); }
-  void String(const struct String *str) {
-    EscapeString(str->c_str(), str->size(), &s, true, false);
-  }
-  void Unknown(const uint8_t *) { s += "(?)"; }
-  void StartVector() {
-    s += "[";
-    if (vector_delimited) {
-      s += d;
-      indent_level++;
-      append_indent();
-    } else {
-      s += " ";
-    }
-  }
-  void EndVector() {
-    if (vector_delimited) {
-      s += d;
-      indent_level--;
-      append_indent();
-    } else {
-      s += " ";
-    }
-    s += "]";
-  }
-  void Element(size_t i, ElementaryType /*type*/,
-               const TypeTable * /*type_table*/, const uint8_t * /*val*/) {
-    if (i) {
-      s += ",";
-      if (vector_delimited) {
-        s += d;
-        append_indent();
-      } else {
-        s += " ";
-      }
-    }
-  }
-};
-
-inline std::string FlatBufferToString(const uint8_t *buffer,
-                                      const TypeTable *type_table,
-                                      bool multi_line = false,
-                                      bool vector_delimited = true) {
-  ToStringVisitor tostring_visitor(multi_line ? "\n" : " ", false, "",
-                                   vector_delimited);
-  IterateFlatBuffer(buffer, type_table, &tostring_visitor);
-  return tostring_visitor.s;
-}
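
// A usage sketch, assuming a table type Monster compiled with --reflect-names
// so that a generated MonsterTypeTable() accessor exists, and a builder that
// holds a finished buffer (all hypothetical names):
//
//   std::string text = flatbuffers::FlatBufferToString(
//       builder.GetBufferPointer(), MonsterTypeTable(), /*multi_line=*/true);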
-
-}  // namespace flatbuffers
-
-#endif  // FLATBUFFERS_MINIREFLECT_H_

+ 0 - 38
flatbuffers/pch.h

@@ -1,38 +0,0 @@
-/*
- * Copyright 2017 Google Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLATBUFFERS_PCH_H_
-#define FLATBUFFERS_PCH_H_
-
-// stl
-#include <cstdint>
-#include <cstring>
-#include <algorithm>
-#include <list>
-#include <string>
-#include <utility>
-#include <iomanip>
-#include <map>
-#include <memory>
-#include <limits>
-#include <stack>
-#include <vector>
-#include <type_traits>
-
-// flatbuffers
-#include "util.h"
-
-#endif // FLATBUFFERS_PCH_H_

+ 0 - 9
flatbuffers/pch/SConscript

@@ -1,9 +0,0 @@
-from building import *
-
-cwd     = GetCurrentDir()
-src     = Glob('*.c') + Glob('*.cc')
-CPPPATH = [cwd]
-
-group = DefineGroup('Flatbuffers', src, depend = ['PKG_USING_TENSORFLOWLITEMICRO'], CPPPATH = CPPPATH)
-
-Return('group')

+ 0 - 39
flatbuffers/pch/flatc_pch.h

@@ -1,39 +0,0 @@
-/*
- * Copyright 2017 Google Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLATBUFFERS_FLATC_PCH_H_
-#define FLATBUFFERS_FLATC_PCH_H_
-
-// stl
-#include <cmath>
-#include <sstream>
-#include <cassert>
-#include <unordered_set>
-#include <unordered_map>
-#include <iostream>
-#include <functional>
-#include <set>
-#include <iterator>
-#include <tuple>
-
-// flatbuffers
-#include "packages/TensorflowLiteMicro/flatbuffers/pch/pch.h"
-#include "packages/TensorflowLiteMicro/flatbuffers/code_generators.h"
-#include "packages/TensorflowLiteMicro/flatbuffers/flatbuffers.h"
-#include "packages/TensorflowLiteMicro/flatbuffers/flexbuffers.h"
-#include "packages/TensorflowLiteMicro/flatbuffers/idl.h"
-
-#endif // FLATBUFFERS_FLATC_PCH_H_

+ 0 - 38
flatbuffers/pch/pch.h

@@ -1,38 +0,0 @@
-/*
- * Copyright 2017 Google Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLATBUFFERS_PCH_H_
-#define FLATBUFFERS_PCH_H_
-
-// stl
-#include <cstdint>
-#include <cstring>
-#include <algorithm>
-#include <list>
-#include <string>
-#include <utility>
-#include <iomanip>
-#include <map>
-#include <memory>
-#include <limits>
-#include <stack>
-#include <vector>
-#include <type_traits>
-
-// flatbuffers
-#include "packages/TensorflowLiteMicro/flatbuffers/util.h"
-
-#endif // FLATBUFFERS_PCH_H_

+ 0 - 500
flatbuffers/reflection.h

@@ -1,500 +0,0 @@
-/*
- * Copyright 2015 Google Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLATBUFFERS_REFLECTION_H_
-#define FLATBUFFERS_REFLECTION_H_
-
-// This is somewhat of a circular dependency because flatc (and thus this
-// file) is needed to generate this header in the first place.
-// Should normally not be a problem since it can be generated by the
-// previous version of flatc whenever this code needs to change.
-// See reflection/generate_code.sh
-#include "reflection_generated.h"
-
-// Helper functionality for reflection.
-
-namespace flatbuffers {
-
-// ------------------------- GETTERS -------------------------
-
-inline bool IsScalar(reflection::BaseType t) {
-  return t >= reflection::UType && t <= reflection::Double;
-}
-inline bool IsInteger(reflection::BaseType t) {
-  return t >= reflection::UType && t <= reflection::ULong;
-}
-inline bool IsFloat(reflection::BaseType t) {
-  return t == reflection::Float || t == reflection::Double;
-}
-inline bool IsLong(reflection::BaseType t) {
-  return t == reflection::Long || t == reflection::ULong;
-}
-
-// Size of a basic type; don't use with structs.
-inline size_t GetTypeSize(reflection::BaseType base_type) {
-  // This needs to correspond to the BaseType enum.
-  static size_t sizes[] = {
-    0, // None
-    1, // UType
-    1, // Bool
-    1, // Byte
-    1, // UByte
-    2, // Short
-    2, // UShort
-    4, // Int
-    4, // UInt
-    8, // Long
-    8, // ULong
-    4, // Float
-    8, // Double
-    4, // String
-    4, // Vector
-    4, // Obj
-    4, // Union
-    0, // Array. Only used in structs. 0 was chosen to prevent out-of-bounds errors.
-
-    0  // MaxBaseType. This must be kept the last entry in this array.
-    };
-  static_assert(sizeof(sizes) / sizeof(size_t) == reflection::MaxBaseType + 1,
-                "Size of sizes[] array does not match the count of BaseType enum values.");
-  return sizes[base_type];
-}
-
-// Same as above, but now correctly returns the size of a struct if
-// the field (or vector element) is a struct.
-inline size_t GetTypeSizeInline(reflection::BaseType base_type, int type_index,
-                                const reflection::Schema &schema) {
-  if (base_type == reflection::Obj &&
-      schema.objects()->Get(type_index)->is_struct()) {
-    return schema.objects()->Get(type_index)->bytesize();
-  } else {
-    return GetTypeSize(base_type);
-  }
-}
-
-// Get the root, regardless of what type it is.
-inline Table *GetAnyRoot(uint8_t *flatbuf) {
-  return GetMutableRoot<Table>(flatbuf);
-}
-inline const Table *GetAnyRoot(const uint8_t *flatbuf) {
-  return GetRoot<Table>(flatbuf);
-}
-
-// Get a field's default, if you know it's an integer, and its exact type.
-template<typename T> T GetFieldDefaultI(const reflection::Field &field) {
-  FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
-  return static_cast<T>(field.default_integer());
-}
-
-// Get a field's default, if you know it's floating point and its exact type.
-template<typename T> T GetFieldDefaultF(const reflection::Field &field) {
-  FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
-  return static_cast<T>(field.default_real());
-}
-
-// Get a field, if you know it's an integer, and its exact type.
-template<typename T>
-T GetFieldI(const Table &table, const reflection::Field &field) {
-  FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
-  return table.GetField<T>(field.offset(),
-                           static_cast<T>(field.default_integer()));
-}
-
-// Get a field, if you know it's floating point and its exact type.
-template<typename T>
-T GetFieldF(const Table &table, const reflection::Field &field) {
-  FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
-  return table.GetField<T>(field.offset(),
-                           static_cast<T>(field.default_real()));
-}
-
-// Get a field, if you know it's a string.
-inline const String *GetFieldS(const Table &table,
-                               const reflection::Field &field) {
-  FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::String);
-  return table.GetPointer<const String *>(field.offset());
-}
-
-// Get a field, if you know it's a vector.
-template<typename T>
-Vector<T> *GetFieldV(const Table &table, const reflection::Field &field) {
-  FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Vector &&
-                     sizeof(T) == GetTypeSize(field.type()->element()));
-  return table.GetPointer<Vector<T> *>(field.offset());
-}
-
-// Get a field, if you know it's a vector, generically.
-// To actually access elements, use the return value together with
-// field.type()->element() in any of GetAnyVectorElemI below etc.
-inline VectorOfAny *GetFieldAnyV(const Table &table,
-                                 const reflection::Field &field) {
-  return table.GetPointer<VectorOfAny *>(field.offset());
-}
-
-// Get a field, if you know it's a table.
-inline Table *GetFieldT(const Table &table, const reflection::Field &field) {
-  FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Obj ||
-                     field.type()->base_type() == reflection::Union);
-  return table.GetPointer<Table *>(field.offset());
-}
-
-// Get a field, if you know it's a struct.
-inline const Struct *GetFieldStruct(const Table &table,
-                                    const reflection::Field &field) {
-  // TODO: This does NOT check if the field is a table or struct, but we'd need
-  // access to the schema to check the is_struct flag.
-  FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Obj);
-  return table.GetStruct<const Struct *>(field.offset());
-}
-
-// Get a structure's field, if you know it's a struct.
-inline const Struct *GetFieldStruct(const Struct &structure,
-                                    const reflection::Field &field) {
-  FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Obj);
-  return structure.GetStruct<const Struct *>(field.offset());
-}
-
-// Raw helper functions used below: get any value in memory as a 64bit int, a
-// double or a string.
-// All scalars get static_cast to an int64_t, strings use strtoull, every other
-// data type returns 0.
-int64_t GetAnyValueI(reflection::BaseType type, const uint8_t *data);
-// All scalars static cast to double, strings use strtod, every other data
-// type is 0.0.
-double GetAnyValueF(reflection::BaseType type, const uint8_t *data);
-// All scalars converted using stringstream, strings as-is, and all other
-// data types provide some level of debug-pretty-printing.
-std::string GetAnyValueS(reflection::BaseType type, const uint8_t *data,
-                         const reflection::Schema *schema, int type_index);
-
-// Get any table field as a 64bit int, regardless of what type it is.
-inline int64_t GetAnyFieldI(const Table &table,
-                            const reflection::Field &field) {
-  auto field_ptr = table.GetAddressOf(field.offset());
-  return field_ptr ? GetAnyValueI(field.type()->base_type(), field_ptr)
-                   : field.default_integer();
-}
-
-// Get any table field as a double, regardless of what type it is.
-inline double GetAnyFieldF(const Table &table, const reflection::Field &field) {
-  auto field_ptr = table.GetAddressOf(field.offset());
-  return field_ptr ? GetAnyValueF(field.type()->base_type(), field_ptr)
-                   : field.default_real();
-}
-
-// Get any table field as a string, regardless of what type it is.
-// You may pass nullptr for the schema if you don't care to have fields that
-// are of table type pretty-printed.
-inline std::string GetAnyFieldS(const Table &table,
-                                const reflection::Field &field,
-                                const reflection::Schema *schema) {
-  auto field_ptr = table.GetAddressOf(field.offset());
-  return field_ptr ? GetAnyValueS(field.type()->base_type(), field_ptr, schema,
-                                  field.type()->index())
-                   : "";
-}
-
-// Get any struct field as a 64bit int, regardless of what type it is.
-inline int64_t GetAnyFieldI(const Struct &st, const reflection::Field &field) {
-  return GetAnyValueI(field.type()->base_type(),
-                      st.GetAddressOf(field.offset()));
-}
-
-// Get any struct field as a double, regardless of what type it is.
-inline double GetAnyFieldF(const Struct &st, const reflection::Field &field) {
-  return GetAnyValueF(field.type()->base_type(),
-                      st.GetAddressOf(field.offset()));
-}
-
-// Get any struct field as a string, regardless of what type it is.
-inline std::string GetAnyFieldS(const Struct &st,
-                                const reflection::Field &field) {
-  return GetAnyValueS(field.type()->base_type(),
-                      st.GetAddressOf(field.offset()), nullptr, -1);
-}
-
-// Get any vector element as a 64bit int, regardless of what type it is.
-inline int64_t GetAnyVectorElemI(const VectorOfAny *vec,
-                                 reflection::BaseType elem_type, size_t i) {
-  return GetAnyValueI(elem_type, vec->Data() + GetTypeSize(elem_type) * i);
-}
-
-// Get any vector element as a double, regardless of what type it is.
-inline double GetAnyVectorElemF(const VectorOfAny *vec,
-                                reflection::BaseType elem_type, size_t i) {
-  return GetAnyValueF(elem_type, vec->Data() + GetTypeSize(elem_type) * i);
-}
-
-// Get any vector element as a string, regardless of what type it is.
-inline std::string GetAnyVectorElemS(const VectorOfAny *vec,
-                                     reflection::BaseType elem_type, size_t i) {
-  return GetAnyValueS(elem_type, vec->Data() + GetTypeSize(elem_type) * i,
-                      nullptr, -1);
-}
-
-// Get a vector element that's a table/string/vector from a generic vector.
-// Pass Table/String/VectorOfAny as template parameter.
-// Warning: does no typechecking.
-template<typename T>
-T *GetAnyVectorElemPointer(const VectorOfAny *vec, size_t i) {
-  auto elem_ptr = vec->Data() + sizeof(uoffset_t) * i;
-  return reinterpret_cast<T *>(elem_ptr + ReadScalar<uoffset_t>(elem_ptr));
-}
-
-// Get the inline-address of a vector element. Useful for Structs (pass Struct
-// as template arg), or for addressing a range of scalars in-line.
-// Get elem_size from GetTypeSizeInline().
-// Note: data is little-endian on all platforms; use EndianScalar() instead of
-// raw pointer access with scalars.
-template<typename T>
-T *GetAnyVectorElemAddressOf(const VectorOfAny *vec, size_t i,
-                             size_t elem_size) {
-  return reinterpret_cast<T *>(vec->Data() + elem_size * i);
-}
-
-// Similarly, for elements of tables.
-template<typename T>
-T *GetAnyFieldAddressOf(const Table &table, const reflection::Field &field) {
-  return reinterpret_cast<T *>(table.GetAddressOf(field.offset()));
-}
-
-// Similarly, for elements of structs.
-template<typename T>
-T *GetAnyFieldAddressOf(const Struct &st, const reflection::Field &field) {
-  return reinterpret_cast<T *>(st.GetAddressOf(field.offset()));
-}
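
// A minimal sketch of type-erased field access (hypothetical names; assumes
// `schema` was loaded from a compiled .bfbs file elsewhere):
inline std::string ExampleFieldAsText(const Table &table,
                                      const reflection::Field &field,
                                      const reflection::Schema &schema) {
  // Any field, scalar or not, can be rendered as text; integers could
  // equally be read with GetAnyFieldI(table, field).
  return GetAnyFieldS(table, field, &schema);
}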
-
-// ------------------------- SETTERS -------------------------
-
-// Set any scalar field, if you know its exact type.
-template<typename T>
-bool SetField(Table *table, const reflection::Field &field, T val) {
-  reflection::BaseType type = field.type()->base_type();
-  if (!IsScalar(type)) { return false; }
-  FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(type));
-  T def;
-  if (IsInteger(type)) {
-    def = GetFieldDefaultI<T>(field);
-  } else {
-    FLATBUFFERS_ASSERT(IsFloat(type));
-    def = GetFieldDefaultF<T>(field);
-  }
-  return table->SetField(field.offset(), val, def);
-}
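
// A minimal sketch (hypothetical names; assumes the field is declared as a
// 16-bit integer in the schema, otherwise the size assertion fires):
inline bool ExampleSetHp(Table *monster, const reflection::Field &hp_field) {
  // Returns false for non-scalar fields, or when the field is absent from
  // the table and a non-default value cannot be stored in-place.
  return SetField<int16_t>(monster, hp_field, 200);
}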
-
-// Raw helper functions used below: set any value in memory as a 64bit int, a
-// double or a string.
-// These work for all scalar values, but do nothing for other data types.
-// To set a string, see SetString below.
-void SetAnyValueI(reflection::BaseType type, uint8_t *data, int64_t val);
-void SetAnyValueF(reflection::BaseType type, uint8_t *data, double val);
-void SetAnyValueS(reflection::BaseType type, uint8_t *data, const char *val);
-
-// Set any table field as a 64bit int, regardless of what type it is.
-inline bool SetAnyFieldI(Table *table, const reflection::Field &field,
-                         int64_t val) {
-  auto field_ptr = table->GetAddressOf(field.offset());
-  if (!field_ptr) return val == GetFieldDefaultI<int64_t>(field);
-  SetAnyValueI(field.type()->base_type(), field_ptr, val);
-  return true;
-}
-
-// Set any table field as a double, regardless of what type it is.
-inline bool SetAnyFieldF(Table *table, const reflection::Field &field,
-                         double val) {
-  auto field_ptr = table->GetAddressOf(field.offset());
-  if (!field_ptr) return val == GetFieldDefaultF<double>(field);
-  SetAnyValueF(field.type()->base_type(), field_ptr, val);
-  return true;
-}
-
-// Set any table field as a string, regardless of what type it is.
-inline bool SetAnyFieldS(Table *table, const reflection::Field &field,
-                         const char *val) {
-  auto field_ptr = table->GetAddressOf(field.offset());
-  if (!field_ptr) return false;
-  SetAnyValueS(field.type()->base_type(), field_ptr, val);
-  return true;
-}
-
-// Set any struct field as a 64bit int, regardless of what type it is.
-inline void SetAnyFieldI(Struct *st, const reflection::Field &field,
-                         int64_t val) {
-  SetAnyValueI(field.type()->base_type(), st->GetAddressOf(field.offset()),
-               val);
-}
-
-// Set any struct field as a double, regardless of what type it is.
-inline void SetAnyFieldF(Struct *st, const reflection::Field &field,
-                         double val) {
-  SetAnyValueF(field.type()->base_type(), st->GetAddressOf(field.offset()),
-               val);
-}
-
-// Set any struct field as a string, regardless of what type it is.
-inline void SetAnyFieldS(Struct *st, const reflection::Field &field,
-                         const char *val) {
-  SetAnyValueS(field.type()->base_type(), st->GetAddressOf(field.offset()),
-               val);
-}
-
-// Set any vector element as a 64bit int, regardless of what type it is.
-inline void SetAnyVectorElemI(VectorOfAny *vec, reflection::BaseType elem_type,
-                              size_t i, int64_t val) {
-  SetAnyValueI(elem_type, vec->Data() + GetTypeSize(elem_type) * i, val);
-}
-
-// Set any vector element as a double, regardless of what type it is.
-inline void SetAnyVectorElemF(VectorOfAny *vec, reflection::BaseType elem_type,
-                              size_t i, double val) {
-  SetAnyValueF(elem_type, vec->Data() + GetTypeSize(elem_type) * i, val);
-}
-
-// Set any vector element as a string, regardless of what type it is.
-inline void SetAnyVectorElemS(VectorOfAny *vec, reflection::BaseType elem_type,
-                              size_t i, const char *val) {
-  SetAnyValueS(elem_type, vec->Data() + GetTypeSize(elem_type) * i, val);
-}
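
// A minimal sketch of type-erased mutation (hypothetical names; the field
// must already be present in the table for the setter to report success):
inline bool ExampleDoubleField(Table *table, const reflection::Field &field) {
  double v = GetAnyFieldF(*table, field);      // read as double...
  return SetAnyFieldF(table, field, v * 2.0);  // ...write back, any scalar type
}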
-
-// ------------------------- RESIZING SETTERS -------------------------
-
-// "smart" pointer for use with resizing vectors: turns a pointer inside
-// a vector into a relative offset, such that it is not affected by resizes.
-template<typename T, typename U> class pointer_inside_vector {
- public:
-  pointer_inside_vector(T *ptr, std::vector<U> &vec)
-      : offset_(reinterpret_cast<uint8_t *>(ptr) -
-                reinterpret_cast<uint8_t *>(flatbuffers::vector_data(vec))),
-        vec_(vec) {}
-
-  T *operator*() const {
-    return reinterpret_cast<T *>(
-        reinterpret_cast<uint8_t *>(flatbuffers::vector_data(vec_)) + offset_);
-  }
-  T *operator->() const { return operator*(); }
-
- private:
-  size_t offset_;
-  std::vector<U> &vec_;
-};
-
-// Helper to create the above easily without specifying template args.
-template<typename T, typename U>
-pointer_inside_vector<T, U> piv(T *ptr, std::vector<U> &vec) {
-  return pointer_inside_vector<T, U>(ptr, vec);
-}
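
// A minimal sketch (hypothetical names): hold a stable handle to data inside
// a std::vector across an operation that may reallocate its storage.
inline void ExamplePiv(Table *ptr, std::vector<uint8_t> &buf) {
  auto p = piv(ptr, buf);               // stores a relative offset, not the pointer
  buf.reserve(buf.capacity() * 2 + 1);  // reallocates; raw `ptr` is now stale
  Table *still_valid = *p;              // re-derived from the current vector data
  (void)still_valid;
}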
-
-inline const char *UnionTypeFieldSuffix() { return "_type"; }
-
-// Helper to figure out the actual table type a union refers to.
-inline const reflection::Object &GetUnionType(
-    const reflection::Schema &schema, const reflection::Object &parent,
-    const reflection::Field &unionfield, const Table &table) {
-  auto enumdef = schema.enums()->Get(unionfield.type()->index());
-  // TODO: this is clumsy and slow, but no other way to find it?
-  auto type_field = parent.fields()->LookupByKey(
-      (unionfield.name()->str() + UnionTypeFieldSuffix()).c_str());
-  FLATBUFFERS_ASSERT(type_field);
-  auto union_type = GetFieldI<uint8_t>(table, *type_field);
-  auto enumval = enumdef->values()->LookupByKey(union_type);
-  return *enumval->object();
-}
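
// A minimal sketch (hypothetical names): resolve which table type a union
// field currently holds before descending into it.
inline const char *ExampleUnionTypeName(const reflection::Schema &schema,
                                        const reflection::Object &parent,
                                        const reflection::Field &union_field,
                                        const Table &table) {
  return GetUnionType(schema, parent, union_field, table).name()->c_str();
}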
-
-// Changes the contents of a string inside a FlatBuffer. FlatBuffer must
-// live inside a std::vector so we can resize the buffer if needed.
-// "str" must live inside "flatbuf" and may be invalidated after this call.
-// If your FlatBuffer's root table is not the schema's root table, you should
-// pass in your root_table type as well.
-void SetString(const reflection::Schema &schema, const std::string &val,
-               const String *str, std::vector<uint8_t> *flatbuf,
-               const reflection::Object *root_table = nullptr);
-
-// Resizes a flatbuffers::Vector inside a FlatBuffer. FlatBuffer must
-// live inside a std::vector so we can resize the buffer if needed.
-// "vec" must live inside "flatbuf" and may be invalidated after this call.
-// If your FlatBuffer's root table is not the schema's root table, you should
-// pass in your root_table type as well.
-uint8_t *ResizeAnyVector(const reflection::Schema &schema, uoffset_t newsize,
-                         const VectorOfAny *vec, uoffset_t num_elems,
-                         uoffset_t elem_size, std::vector<uint8_t> *flatbuf,
-                         const reflection::Object *root_table = nullptr);
-
-template<typename T>
-void ResizeVector(const reflection::Schema &schema, uoffset_t newsize, T val,
-                  const Vector<T> *vec, std::vector<uint8_t> *flatbuf,
-                  const reflection::Object *root_table = nullptr) {
-  auto delta_elem = static_cast<int>(newsize) - static_cast<int>(vec->size());
-  auto newelems = ResizeAnyVector(
-      schema, newsize, reinterpret_cast<const VectorOfAny *>(vec), vec->size(),
-      static_cast<uoffset_t>(sizeof(T)), flatbuf, root_table);
-  // Set new elements to "val".
-  for (int i = 0; i < delta_elem; i++) {
-    auto loc = newelems + i * sizeof(T);
-    auto is_scalar = flatbuffers::is_scalar<T>::value;
-    if (is_scalar) {
-      WriteScalar(loc, val);
-    } else {  // struct
-      *reinterpret_cast<T *>(loc) = val;
-    }
-  }
-}
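
// A minimal sketch (hypothetical names): grow an in-buffer vector of shorts
// by four elements, zero-filling the new tail. As documented above, `vec` is
// invalidated by the call.
inline void ExampleGrowByFour(const reflection::Schema &schema,
                              const Vector<uint16_t> *vec,
                              std::vector<uint8_t> *flatbuf) {
  ResizeVector<uint16_t>(schema, vec->size() + 4, uint16_t(0), vec, flatbuf);
}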
-
-// Adds any new data (in the form of a new FlatBuffer) to an existing
-// FlatBuffer. This can be used when any of the above methods are not
-// sufficient, in particular for adding new tables and new fields.
-// This is potentially slightly less efficient than a FlatBuffer constructed
-// in one piece, since the new FlatBuffer doesn't share any vtables with the
-// existing one.
-// The returned pointer can then be stored in an existing buffer using
-// Vector::MutateOffset or SetFieldT below.
-const uint8_t *AddFlatBuffer(std::vector<uint8_t> &flatbuf,
-                             const uint8_t *newbuf, size_t newlen);
-
-inline bool SetFieldT(Table *table, const reflection::Field &field,
-                      const uint8_t *val) {
-  FLATBUFFERS_ASSERT(sizeof(uoffset_t) ==
-                     GetTypeSize(field.type()->base_type()));
-  return table->SetPointer(field.offset(), val);
-}
-
-// ------------------------- COPYING -------------------------
-
-// Generic copying of tables from a FlatBuffer into a FlatBuffer builder.
-// Can be used to do any kind of merging/selecting you may want to do out
-// of existing buffers. Also useful to reconstruct a whole buffer if the
-// above resizing functionality has introduced garbage in a buffer you want
-// to remove.
-// Note: this does not deal with DAGs correctly. If the table passed forms a
-// DAG, the copy will be a tree instead (with duplicates). Strings can be
-// shared, however, by passing true for use_string_pooling.
-
-Offset<const Table *> CopyTable(FlatBufferBuilder &fbb,
-                                const reflection::Schema &schema,
-                                const reflection::Object &objectdef,
-                                const Table &table,
-                                bool use_string_pooling = false);
-
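// A minimal sketch (hypothetical names; assumes the schema declares a
// root_table): defragment a buffer that in-place resizing has left with
// garbage by deep-copying its root into a fresh builder.
inline DetachedBuffer ExampleCompact(const reflection::Schema &schema,
                                     const Table &root) {
  FlatBufferBuilder fbb;
  auto root_offset = CopyTable(fbb, schema, *schema.root_table(), root,
                               /*use_string_pooling=*/true);
  fbb.Finish(root_offset);
  return fbb.Release();
}
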
-// Verifies the provided flatbuffer using reflection.
-// root should point to the root type for this flatbuffer.
-// buf should point to the start of flatbuffer data.
-// length specifies the size of the flatbuffer data.
-bool Verify(const reflection::Schema &schema, const reflection::Object &root,
-            const uint8_t *buf, size_t length, uoffset_t max_depth = 64,
-            uoffset_t max_tables = 1000000);
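
// A minimal sketch (hypothetical names; assumes the schema declares a
// root_table): verify an untrusted buffer before using any accessor above.
inline bool ExampleVerify(const reflection::Schema &schema, const uint8_t *buf,
                          size_t len) {
  return Verify(schema, *schema.root_table(), buf, len);
}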
-
-}  // namespace flatbuffers
-
-#endif  // FLATBUFFERS_REFLECTION_H_

+ 0 - 1216
flatbuffers/reflection_generated.h

@@ -1,1216 +0,0 @@
-// automatically generated by the FlatBuffers compiler, do not modify
-
-
-#ifndef FLATBUFFERS_GENERATED_REFLECTION_REFLECTION_H_
-#define FLATBUFFERS_GENERATED_REFLECTION_REFLECTION_H_
-
-#include "flatbuffers.h"
-
-namespace reflection {
-
-struct Type;
-struct TypeBuilder;
-
-struct KeyValue;
-struct KeyValueBuilder;
-
-struct EnumVal;
-struct EnumValBuilder;
-
-struct Enum;
-struct EnumBuilder;
-
-struct Field;
-struct FieldBuilder;
-
-struct Object;
-struct ObjectBuilder;
-
-struct RPCCall;
-struct RPCCallBuilder;
-
-struct Service;
-struct ServiceBuilder;
-
-struct Schema;
-struct SchemaBuilder;
-
-enum BaseType {
-  None = 0,
-  UType = 1,
-  Bool = 2,
-  Byte = 3,
-  UByte = 4,
-  Short = 5,
-  UShort = 6,
-  Int = 7,
-  UInt = 8,
-  Long = 9,
-  ULong = 10,
-  Float = 11,
-  Double = 12,
-  String = 13,
-  Vector = 14,
-  Obj = 15,
-  Union = 16,
-  Array = 17,
-  MaxBaseType = 18
-};
-
-inline const BaseType (&EnumValuesBaseType())[19] {
-  static const BaseType values[] = {
-    None,
-    UType,
-    Bool,
-    Byte,
-    UByte,
-    Short,
-    UShort,
-    Int,
-    UInt,
-    Long,
-    ULong,
-    Float,
-    Double,
-    String,
-    Vector,
-    Obj,
-    Union,
-    Array,
-    MaxBaseType
-  };
-  return values;
-}
-
-inline const char * const *EnumNamesBaseType() {
-  static const char * const names[20] = {
-    "None",
-    "UType",
-    "Bool",
-    "Byte",
-    "UByte",
-    "Short",
-    "UShort",
-    "Int",
-    "UInt",
-    "Long",
-    "ULong",
-    "Float",
-    "Double",
-    "String",
-    "Vector",
-    "Obj",
-    "Union",
-    "Array",
-    "MaxBaseType",
-    nullptr
-  };
-  return names;
-}
-
-inline const char *EnumNameBaseType(BaseType e) {
-  if (flatbuffers::IsOutRange(e, None, MaxBaseType)) return "";
-  const size_t index = static_cast<size_t>(e);
-  return EnumNamesBaseType()[index];
-}
-
-struct Type FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef TypeBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_BASE_TYPE = 4,
-    VT_ELEMENT = 6,
-    VT_INDEX = 8,
-    VT_FIXED_LENGTH = 10
-  };
-  reflection::BaseType base_type() const {
-    return static_cast<reflection::BaseType>(GetField<int8_t>(VT_BASE_TYPE, 0));
-  }
-  reflection::BaseType element() const {
-    return static_cast<reflection::BaseType>(GetField<int8_t>(VT_ELEMENT, 0));
-  }
-  int32_t index() const {
-    return GetField<int32_t>(VT_INDEX, -1);
-  }
-  uint16_t fixed_length() const {
-    return GetField<uint16_t>(VT_FIXED_LENGTH, 0);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int8_t>(verifier, VT_BASE_TYPE) &&
-           VerifyField<int8_t>(verifier, VT_ELEMENT) &&
-           VerifyField<int32_t>(verifier, VT_INDEX) &&
-           VerifyField<uint16_t>(verifier, VT_FIXED_LENGTH) &&
-           verifier.EndTable();
-  }
-};
-
-struct TypeBuilder {
-  typedef Type Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_base_type(reflection::BaseType base_type) {
-    fbb_.AddElement<int8_t>(Type::VT_BASE_TYPE, static_cast<int8_t>(base_type), 0);
-  }
-  void add_element(reflection::BaseType element) {
-    fbb_.AddElement<int8_t>(Type::VT_ELEMENT, static_cast<int8_t>(element), 0);
-  }
-  void add_index(int32_t index) {
-    fbb_.AddElement<int32_t>(Type::VT_INDEX, index, -1);
-  }
-  void add_fixed_length(uint16_t fixed_length) {
-    fbb_.AddElement<uint16_t>(Type::VT_FIXED_LENGTH, fixed_length, 0);
-  }
-  explicit TypeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<Type> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Type>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Type> CreateType(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    reflection::BaseType base_type = reflection::None,
-    reflection::BaseType element = reflection::None,
-    int32_t index = -1,
-    uint16_t fixed_length = 0) {
-  TypeBuilder builder_(_fbb);
-  builder_.add_index(index);
-  builder_.add_fixed_length(fixed_length);
-  builder_.add_element(element);
-  builder_.add_base_type(base_type);
-  return builder_.Finish();
-}
-
-struct KeyValue FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef KeyValueBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_KEY = 4,
-    VT_VALUE = 6
-  };
-  const flatbuffers::String *key() const {
-    return GetPointer<const flatbuffers::String *>(VT_KEY);
-  }
-  bool KeyCompareLessThan(const KeyValue *o) const {
-    return *key() < *o->key();
-  }
-  int KeyCompareWithValue(const char *val) const {
-    return strcmp(key()->c_str(), val);
-  }
-  const flatbuffers::String *value() const {
-    return GetPointer<const flatbuffers::String *>(VT_VALUE);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffsetRequired(verifier, VT_KEY) &&
-           verifier.VerifyString(key()) &&
-           VerifyOffset(verifier, VT_VALUE) &&
-           verifier.VerifyString(value()) &&
-           verifier.EndTable();
-  }
-};
-
-struct KeyValueBuilder {
-  typedef KeyValue Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_key(flatbuffers::Offset<flatbuffers::String> key) {
-    fbb_.AddOffset(KeyValue::VT_KEY, key);
-  }
-  void add_value(flatbuffers::Offset<flatbuffers::String> value) {
-    fbb_.AddOffset(KeyValue::VT_VALUE, value);
-  }
-  explicit KeyValueBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<KeyValue> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<KeyValue>(end);
-    fbb_.Required(o, KeyValue::VT_KEY);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<KeyValue> CreateKeyValue(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::String> key = 0,
-    flatbuffers::Offset<flatbuffers::String> value = 0) {
-  KeyValueBuilder builder_(_fbb);
-  builder_.add_value(value);
-  builder_.add_key(key);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<KeyValue> CreateKeyValueDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const char *key = nullptr,
-    const char *value = nullptr) {
-  auto key__ = key ? _fbb.CreateString(key) : 0;
-  auto value__ = value ? _fbb.CreateString(value) : 0;
-  return reflection::CreateKeyValue(
-      _fbb,
-      key__,
-      value__);
-}
-
-struct EnumVal FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef EnumValBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_NAME = 4,
-    VT_VALUE = 6,
-    VT_OBJECT = 8,
-    VT_UNION_TYPE = 10,
-    VT_DOCUMENTATION = 12
-  };
-  const flatbuffers::String *name() const {
-    return GetPointer<const flatbuffers::String *>(VT_NAME);
-  }
-  int64_t value() const {
-    return GetField<int64_t>(VT_VALUE, 0);
-  }
-  bool KeyCompareLessThan(const EnumVal *o) const {
-    return value() < o->value();
-  }
-  int KeyCompareWithValue(int64_t val) const {
-    return static_cast<int>(value() > val) - static_cast<int>(value() < val);
-  }
-  const reflection::Object *object() const {
-    return GetPointer<const reflection::Object *>(VT_OBJECT);
-  }
-  const reflection::Type *union_type() const {
-    return GetPointer<const reflection::Type *>(VT_UNION_TYPE);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_DOCUMENTATION);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffsetRequired(verifier, VT_NAME) &&
-           verifier.VerifyString(name()) &&
-           VerifyField<int64_t>(verifier, VT_VALUE) &&
-           VerifyOffset(verifier, VT_OBJECT) &&
-           verifier.VerifyTable(object()) &&
-           VerifyOffset(verifier, VT_UNION_TYPE) &&
-           verifier.VerifyTable(union_type()) &&
-           VerifyOffset(verifier, VT_DOCUMENTATION) &&
-           verifier.VerifyVector(documentation()) &&
-           verifier.VerifyVectorOfStrings(documentation()) &&
-           verifier.EndTable();
-  }
-};
-
-struct EnumValBuilder {
-  typedef EnumVal Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
-    fbb_.AddOffset(EnumVal::VT_NAME, name);
-  }
-  void add_value(int64_t value) {
-    fbb_.AddElement<int64_t>(EnumVal::VT_VALUE, value, 0);
-  }
-  void add_object(flatbuffers::Offset<reflection::Object> object) {
-    fbb_.AddOffset(EnumVal::VT_OBJECT, object);
-  }
-  void add_union_type(flatbuffers::Offset<reflection::Type> union_type) {
-    fbb_.AddOffset(EnumVal::VT_UNION_TYPE, union_type);
-  }
-  void add_documentation(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation) {
-    fbb_.AddOffset(EnumVal::VT_DOCUMENTATION, documentation);
-  }
-  explicit EnumValBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<EnumVal> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<EnumVal>(end);
-    fbb_.Required(o, EnumVal::VT_NAME);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<EnumVal> CreateEnumVal(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::String> name = 0,
-    int64_t value = 0,
-    flatbuffers::Offset<reflection::Object> object = 0,
-    flatbuffers::Offset<reflection::Type> union_type = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation = 0) {
-  EnumValBuilder builder_(_fbb);
-  builder_.add_value(value);
-  builder_.add_documentation(documentation);
-  builder_.add_union_type(union_type);
-  builder_.add_object(object);
-  builder_.add_name(name);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<EnumVal> CreateEnumValDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const char *name = nullptr,
-    int64_t value = 0,
-    flatbuffers::Offset<reflection::Object> object = 0,
-    flatbuffers::Offset<reflection::Type> union_type = 0,
-    const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr) {
-  auto name__ = name ? _fbb.CreateString(name) : 0;
-  auto documentation__ = documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
-  return reflection::CreateEnumVal(
-      _fbb,
-      name__,
-      value,
-      object,
-      union_type,
-      documentation__);
-}
-
-struct Enum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef EnumBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_NAME = 4,
-    VT_VALUES = 6,
-    VT_IS_UNION = 8,
-    VT_UNDERLYING_TYPE = 10,
-    VT_ATTRIBUTES = 12,
-    VT_DOCUMENTATION = 14
-  };
-  const flatbuffers::String *name() const {
-    return GetPointer<const flatbuffers::String *>(VT_NAME);
-  }
-  bool KeyCompareLessThan(const Enum *o) const {
-    return *name() < *o->name();
-  }
-  int KeyCompareWithValue(const char *val) const {
-    return strcmp(name()->c_str(), val);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<reflection::EnumVal>> *values() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::EnumVal>> *>(VT_VALUES);
-  }
-  bool is_union() const {
-    return GetField<uint8_t>(VT_IS_UNION, 0) != 0;
-  }
-  const reflection::Type *underlying_type() const {
-    return GetPointer<const reflection::Type *>(VT_UNDERLYING_TYPE);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *attributes() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *>(VT_ATTRIBUTES);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_DOCUMENTATION);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffsetRequired(verifier, VT_NAME) &&
-           verifier.VerifyString(name()) &&
-           VerifyOffsetRequired(verifier, VT_VALUES) &&
-           verifier.VerifyVector(values()) &&
-           verifier.VerifyVectorOfTables(values()) &&
-           VerifyField<uint8_t>(verifier, VT_IS_UNION) &&
-           VerifyOffsetRequired(verifier, VT_UNDERLYING_TYPE) &&
-           verifier.VerifyTable(underlying_type()) &&
-           VerifyOffset(verifier, VT_ATTRIBUTES) &&
-           verifier.VerifyVector(attributes()) &&
-           verifier.VerifyVectorOfTables(attributes()) &&
-           VerifyOffset(verifier, VT_DOCUMENTATION) &&
-           verifier.VerifyVector(documentation()) &&
-           verifier.VerifyVectorOfStrings(documentation()) &&
-           verifier.EndTable();
-  }
-};
-
-struct EnumBuilder {
-  typedef Enum Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
-    fbb_.AddOffset(Enum::VT_NAME, name);
-  }
-  void add_values(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::EnumVal>>> values) {
-    fbb_.AddOffset(Enum::VT_VALUES, values);
-  }
-  void add_is_union(bool is_union) {
-    fbb_.AddElement<uint8_t>(Enum::VT_IS_UNION, static_cast<uint8_t>(is_union), 0);
-  }
-  void add_underlying_type(flatbuffers::Offset<reflection::Type> underlying_type) {
-    fbb_.AddOffset(Enum::VT_UNDERLYING_TYPE, underlying_type);
-  }
-  void add_attributes(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes) {
-    fbb_.AddOffset(Enum::VT_ATTRIBUTES, attributes);
-  }
-  void add_documentation(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation) {
-    fbb_.AddOffset(Enum::VT_DOCUMENTATION, documentation);
-  }
-  explicit EnumBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<Enum> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Enum>(end);
-    fbb_.Required(o, Enum::VT_NAME);
-    fbb_.Required(o, Enum::VT_VALUES);
-    fbb_.Required(o, Enum::VT_UNDERLYING_TYPE);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Enum> CreateEnum(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::String> name = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::EnumVal>>> values = 0,
-    bool is_union = false,
-    flatbuffers::Offset<reflection::Type> underlying_type = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation = 0) {
-  EnumBuilder builder_(_fbb);
-  builder_.add_documentation(documentation);
-  builder_.add_attributes(attributes);
-  builder_.add_underlying_type(underlying_type);
-  builder_.add_values(values);
-  builder_.add_name(name);
-  builder_.add_is_union(is_union);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Enum> CreateEnumDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const char *name = nullptr,
-    std::vector<flatbuffers::Offset<reflection::EnumVal>> *values = nullptr,
-    bool is_union = false,
-    flatbuffers::Offset<reflection::Type> underlying_type = 0,
-    std::vector<flatbuffers::Offset<reflection::KeyValue>> *attributes = nullptr,
-    const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr) {
-  auto name__ = name ? _fbb.CreateString(name) : 0;
-  auto values__ = values ? _fbb.CreateVectorOfSortedTables<reflection::EnumVal>(values) : 0;
-  auto attributes__ = attributes ? _fbb.CreateVectorOfSortedTables<reflection::KeyValue>(attributes) : 0;
-  auto documentation__ = documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
-  return reflection::CreateEnum(
-      _fbb,
-      name__,
-      values__,
-      is_union,
-      underlying_type,
-      attributes__,
-      documentation__);
-}
-
-struct Field FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef FieldBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_NAME = 4,
-    VT_TYPE = 6,
-    VT_ID = 8,
-    VT_OFFSET = 10,
-    VT_DEFAULT_INTEGER = 12,
-    VT_DEFAULT_REAL = 14,
-    VT_DEPRECATED = 16,
-    VT_REQUIRED = 18,
-    VT_KEY = 20,
-    VT_ATTRIBUTES = 22,
-    VT_DOCUMENTATION = 24
-  };
-  const flatbuffers::String *name() const {
-    return GetPointer<const flatbuffers::String *>(VT_NAME);
-  }
-  bool KeyCompareLessThan(const Field *o) const {
-    return *name() < *o->name();
-  }
-  int KeyCompareWithValue(const char *val) const {
-    return strcmp(name()->c_str(), val);
-  }
-  const reflection::Type *type() const {
-    return GetPointer<const reflection::Type *>(VT_TYPE);
-  }
-  uint16_t id() const {
-    return GetField<uint16_t>(VT_ID, 0);
-  }
-  uint16_t offset() const {
-    return GetField<uint16_t>(VT_OFFSET, 0);
-  }
-  int64_t default_integer() const {
-    return GetField<int64_t>(VT_DEFAULT_INTEGER, 0);
-  }
-  double default_real() const {
-    return GetField<double>(VT_DEFAULT_REAL, 0.0);
-  }
-  bool deprecated() const {
-    return GetField<uint8_t>(VT_DEPRECATED, 0) != 0;
-  }
-  bool required() const {
-    return GetField<uint8_t>(VT_REQUIRED, 0) != 0;
-  }
-  bool key() const {
-    return GetField<uint8_t>(VT_KEY, 0) != 0;
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *attributes() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *>(VT_ATTRIBUTES);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_DOCUMENTATION);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffsetRequired(verifier, VT_NAME) &&
-           verifier.VerifyString(name()) &&
-           VerifyOffsetRequired(verifier, VT_TYPE) &&
-           verifier.VerifyTable(type()) &&
-           VerifyField<uint16_t>(verifier, VT_ID) &&
-           VerifyField<uint16_t>(verifier, VT_OFFSET) &&
-           VerifyField<int64_t>(verifier, VT_DEFAULT_INTEGER) &&
-           VerifyField<double>(verifier, VT_DEFAULT_REAL) &&
-           VerifyField<uint8_t>(verifier, VT_DEPRECATED) &&
-           VerifyField<uint8_t>(verifier, VT_REQUIRED) &&
-           VerifyField<uint8_t>(verifier, VT_KEY) &&
-           VerifyOffset(verifier, VT_ATTRIBUTES) &&
-           verifier.VerifyVector(attributes()) &&
-           verifier.VerifyVectorOfTables(attributes()) &&
-           VerifyOffset(verifier, VT_DOCUMENTATION) &&
-           verifier.VerifyVector(documentation()) &&
-           verifier.VerifyVectorOfStrings(documentation()) &&
-           verifier.EndTable();
-  }
-};
-
-struct FieldBuilder {
-  typedef Field Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
-    fbb_.AddOffset(Field::VT_NAME, name);
-  }
-  void add_type(flatbuffers::Offset<reflection::Type> type) {
-    fbb_.AddOffset(Field::VT_TYPE, type);
-  }
-  void add_id(uint16_t id) {
-    fbb_.AddElement<uint16_t>(Field::VT_ID, id, 0);
-  }
-  void add_offset(uint16_t offset) {
-    fbb_.AddElement<uint16_t>(Field::VT_OFFSET, offset, 0);
-  }
-  void add_default_integer(int64_t default_integer) {
-    fbb_.AddElement<int64_t>(Field::VT_DEFAULT_INTEGER, default_integer, 0);
-  }
-  void add_default_real(double default_real) {
-    fbb_.AddElement<double>(Field::VT_DEFAULT_REAL, default_real, 0.0);
-  }
-  void add_deprecated(bool deprecated) {
-    fbb_.AddElement<uint8_t>(Field::VT_DEPRECATED, static_cast<uint8_t>(deprecated), 0);
-  }
-  void add_required(bool required) {
-    fbb_.AddElement<uint8_t>(Field::VT_REQUIRED, static_cast<uint8_t>(required), 0);
-  }
-  void add_key(bool key) {
-    fbb_.AddElement<uint8_t>(Field::VT_KEY, static_cast<uint8_t>(key), 0);
-  }
-  void add_attributes(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes) {
-    fbb_.AddOffset(Field::VT_ATTRIBUTES, attributes);
-  }
-  void add_documentation(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation) {
-    fbb_.AddOffset(Field::VT_DOCUMENTATION, documentation);
-  }
-  explicit FieldBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<Field> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Field>(end);
-    fbb_.Required(o, Field::VT_NAME);
-    fbb_.Required(o, Field::VT_TYPE);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Field> CreateField(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::String> name = 0,
-    flatbuffers::Offset<reflection::Type> type = 0,
-    uint16_t id = 0,
-    uint16_t offset = 0,
-    int64_t default_integer = 0,
-    double default_real = 0.0,
-    bool deprecated = false,
-    bool required = false,
-    bool key = false,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation = 0) {
-  FieldBuilder builder_(_fbb);
-  builder_.add_default_real(default_real);
-  builder_.add_default_integer(default_integer);
-  builder_.add_documentation(documentation);
-  builder_.add_attributes(attributes);
-  builder_.add_type(type);
-  builder_.add_name(name);
-  builder_.add_offset(offset);
-  builder_.add_id(id);
-  builder_.add_key(key);
-  builder_.add_required(required);
-  builder_.add_deprecated(deprecated);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Field> CreateFieldDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const char *name = nullptr,
-    flatbuffers::Offset<reflection::Type> type = 0,
-    uint16_t id = 0,
-    uint16_t offset = 0,
-    int64_t default_integer = 0,
-    double default_real = 0.0,
-    bool deprecated = false,
-    bool required = false,
-    bool key = false,
-    std::vector<flatbuffers::Offset<reflection::KeyValue>> *attributes = nullptr,
-    const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr) {
-  auto name__ = name ? _fbb.CreateString(name) : 0;
-  auto attributes__ = attributes ? _fbb.CreateVectorOfSortedTables<reflection::KeyValue>(attributes) : 0;
-  auto documentation__ = documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
-  return reflection::CreateField(
-      _fbb,
-      name__,
-      type,
-      id,
-      offset,
-      default_integer,
-      default_real,
-      deprecated,
-      required,
-      key,
-      attributes__,
-      documentation__);
-}
-
-struct Object FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef ObjectBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_NAME = 4,
-    VT_FIELDS = 6,
-    VT_IS_STRUCT = 8,
-    VT_MINALIGN = 10,
-    VT_BYTESIZE = 12,
-    VT_ATTRIBUTES = 14,
-    VT_DOCUMENTATION = 16
-  };
-  const flatbuffers::String *name() const {
-    return GetPointer<const flatbuffers::String *>(VT_NAME);
-  }
-  bool KeyCompareLessThan(const Object *o) const {
-    return *name() < *o->name();
-  }
-  int KeyCompareWithValue(const char *val) const {
-    return strcmp(name()->c_str(), val);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<reflection::Field>> *fields() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::Field>> *>(VT_FIELDS);
-  }
-  bool is_struct() const {
-    return GetField<uint8_t>(VT_IS_STRUCT, 0) != 0;
-  }
-  int32_t minalign() const {
-    return GetField<int32_t>(VT_MINALIGN, 0);
-  }
-  int32_t bytesize() const {
-    return GetField<int32_t>(VT_BYTESIZE, 0);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *attributes() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *>(VT_ATTRIBUTES);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_DOCUMENTATION);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffsetRequired(verifier, VT_NAME) &&
-           verifier.VerifyString(name()) &&
-           VerifyOffsetRequired(verifier, VT_FIELDS) &&
-           verifier.VerifyVector(fields()) &&
-           verifier.VerifyVectorOfTables(fields()) &&
-           VerifyField<uint8_t>(verifier, VT_IS_STRUCT) &&
-           VerifyField<int32_t>(verifier, VT_MINALIGN) &&
-           VerifyField<int32_t>(verifier, VT_BYTESIZE) &&
-           VerifyOffset(verifier, VT_ATTRIBUTES) &&
-           verifier.VerifyVector(attributes()) &&
-           verifier.VerifyVectorOfTables(attributes()) &&
-           VerifyOffset(verifier, VT_DOCUMENTATION) &&
-           verifier.VerifyVector(documentation()) &&
-           verifier.VerifyVectorOfStrings(documentation()) &&
-           verifier.EndTable();
-  }
-};
-
-struct ObjectBuilder {
-  typedef Object Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
-    fbb_.AddOffset(Object::VT_NAME, name);
-  }
-  void add_fields(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Field>>> fields) {
-    fbb_.AddOffset(Object::VT_FIELDS, fields);
-  }
-  void add_is_struct(bool is_struct) {
-    fbb_.AddElement<uint8_t>(Object::VT_IS_STRUCT, static_cast<uint8_t>(is_struct), 0);
-  }
-  void add_minalign(int32_t minalign) {
-    fbb_.AddElement<int32_t>(Object::VT_MINALIGN, minalign, 0);
-  }
-  void add_bytesize(int32_t bytesize) {
-    fbb_.AddElement<int32_t>(Object::VT_BYTESIZE, bytesize, 0);
-  }
-  void add_attributes(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes) {
-    fbb_.AddOffset(Object::VT_ATTRIBUTES, attributes);
-  }
-  void add_documentation(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation) {
-    fbb_.AddOffset(Object::VT_DOCUMENTATION, documentation);
-  }
-  explicit ObjectBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<Object> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Object>(end);
-    fbb_.Required(o, Object::VT_NAME);
-    fbb_.Required(o, Object::VT_FIELDS);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Object> CreateObject(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::String> name = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Field>>> fields = 0,
-    bool is_struct = false,
-    int32_t minalign = 0,
-    int32_t bytesize = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation = 0) {
-  ObjectBuilder builder_(_fbb);
-  builder_.add_documentation(documentation);
-  builder_.add_attributes(attributes);
-  builder_.add_bytesize(bytesize);
-  builder_.add_minalign(minalign);
-  builder_.add_fields(fields);
-  builder_.add_name(name);
-  builder_.add_is_struct(is_struct);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Object> CreateObjectDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const char *name = nullptr,
-    std::vector<flatbuffers::Offset<reflection::Field>> *fields = nullptr,
-    bool is_struct = false,
-    int32_t minalign = 0,
-    int32_t bytesize = 0,
-    std::vector<flatbuffers::Offset<reflection::KeyValue>> *attributes = nullptr,
-    const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr) {
-  auto name__ = name ? _fbb.CreateString(name) : 0;
-  auto fields__ = fields ? _fbb.CreateVectorOfSortedTables<reflection::Field>(fields) : 0;
-  auto attributes__ = attributes ? _fbb.CreateVectorOfSortedTables<reflection::KeyValue>(attributes) : 0;
-  auto documentation__ = documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
-  return reflection::CreateObject(
-      _fbb,
-      name__,
-      fields__,
-      is_struct,
-      minalign,
-      bytesize,
-      attributes__,
-      documentation__);
-}
-
-struct RPCCall FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef RPCCallBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_NAME = 4,
-    VT_REQUEST = 6,
-    VT_RESPONSE = 8,
-    VT_ATTRIBUTES = 10,
-    VT_DOCUMENTATION = 12
-  };
-  const flatbuffers::String *name() const {
-    return GetPointer<const flatbuffers::String *>(VT_NAME);
-  }
-  bool KeyCompareLessThan(const RPCCall *o) const {
-    return *name() < *o->name();
-  }
-  int KeyCompareWithValue(const char *val) const {
-    return strcmp(name()->c_str(), val);
-  }
-  const reflection::Object *request() const {
-    return GetPointer<const reflection::Object *>(VT_REQUEST);
-  }
-  const reflection::Object *response() const {
-    return GetPointer<const reflection::Object *>(VT_RESPONSE);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *attributes() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *>(VT_ATTRIBUTES);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_DOCUMENTATION);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffsetRequired(verifier, VT_NAME) &&
-           verifier.VerifyString(name()) &&
-           VerifyOffsetRequired(verifier, VT_REQUEST) &&
-           verifier.VerifyTable(request()) &&
-           VerifyOffsetRequired(verifier, VT_RESPONSE) &&
-           verifier.VerifyTable(response()) &&
-           VerifyOffset(verifier, VT_ATTRIBUTES) &&
-           verifier.VerifyVector(attributes()) &&
-           verifier.VerifyVectorOfTables(attributes()) &&
-           VerifyOffset(verifier, VT_DOCUMENTATION) &&
-           verifier.VerifyVector(documentation()) &&
-           verifier.VerifyVectorOfStrings(documentation()) &&
-           verifier.EndTable();
-  }
-};
-
-struct RPCCallBuilder {
-  typedef RPCCall Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
-    fbb_.AddOffset(RPCCall::VT_NAME, name);
-  }
-  void add_request(flatbuffers::Offset<reflection::Object> request) {
-    fbb_.AddOffset(RPCCall::VT_REQUEST, request);
-  }
-  void add_response(flatbuffers::Offset<reflection::Object> response) {
-    fbb_.AddOffset(RPCCall::VT_RESPONSE, response);
-  }
-  void add_attributes(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes) {
-    fbb_.AddOffset(RPCCall::VT_ATTRIBUTES, attributes);
-  }
-  void add_documentation(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation) {
-    fbb_.AddOffset(RPCCall::VT_DOCUMENTATION, documentation);
-  }
-  explicit RPCCallBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<RPCCall> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<RPCCall>(end);
-    fbb_.Required(o, RPCCall::VT_NAME);
-    fbb_.Required(o, RPCCall::VT_REQUEST);
-    fbb_.Required(o, RPCCall::VT_RESPONSE);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<RPCCall> CreateRPCCall(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::String> name = 0,
-    flatbuffers::Offset<reflection::Object> request = 0,
-    flatbuffers::Offset<reflection::Object> response = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation = 0) {
-  RPCCallBuilder builder_(_fbb);
-  builder_.add_documentation(documentation);
-  builder_.add_attributes(attributes);
-  builder_.add_response(response);
-  builder_.add_request(request);
-  builder_.add_name(name);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<RPCCall> CreateRPCCallDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const char *name = nullptr,
-    flatbuffers::Offset<reflection::Object> request = 0,
-    flatbuffers::Offset<reflection::Object> response = 0,
-    std::vector<flatbuffers::Offset<reflection::KeyValue>> *attributes = nullptr,
-    const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr) {
-  auto name__ = name ? _fbb.CreateString(name) : 0;
-  auto attributes__ = attributes ? _fbb.CreateVectorOfSortedTables<reflection::KeyValue>(attributes) : 0;
-  auto documentation__ = documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
-  return reflection::CreateRPCCall(
-      _fbb,
-      name__,
-      request,
-      response,
-      attributes__,
-      documentation__);
-}
-
-struct Service FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef ServiceBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_NAME = 4,
-    VT_CALLS = 6,
-    VT_ATTRIBUTES = 8,
-    VT_DOCUMENTATION = 10
-  };
-  const flatbuffers::String *name() const {
-    return GetPointer<const flatbuffers::String *>(VT_NAME);
-  }
-  bool KeyCompareLessThan(const Service *o) const {
-    return *name() < *o->name();
-  }
-  int KeyCompareWithValue(const char *val) const {
-    return strcmp(name()->c_str(), val);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<reflection::RPCCall>> *calls() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::RPCCall>> *>(VT_CALLS);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *attributes() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *>(VT_ATTRIBUTES);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_DOCUMENTATION);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffsetRequired(verifier, VT_NAME) &&
-           verifier.VerifyString(name()) &&
-           VerifyOffset(verifier, VT_CALLS) &&
-           verifier.VerifyVector(calls()) &&
-           verifier.VerifyVectorOfTables(calls()) &&
-           VerifyOffset(verifier, VT_ATTRIBUTES) &&
-           verifier.VerifyVector(attributes()) &&
-           verifier.VerifyVectorOfTables(attributes()) &&
-           VerifyOffset(verifier, VT_DOCUMENTATION) &&
-           verifier.VerifyVector(documentation()) &&
-           verifier.VerifyVectorOfStrings(documentation()) &&
-           verifier.EndTable();
-  }
-};
-
-struct ServiceBuilder {
-  typedef Service Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
-    fbb_.AddOffset(Service::VT_NAME, name);
-  }
-  void add_calls(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::RPCCall>>> calls) {
-    fbb_.AddOffset(Service::VT_CALLS, calls);
-  }
-  void add_attributes(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes) {
-    fbb_.AddOffset(Service::VT_ATTRIBUTES, attributes);
-  }
-  void add_documentation(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation) {
-    fbb_.AddOffset(Service::VT_DOCUMENTATION, documentation);
-  }
-  explicit ServiceBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<Service> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Service>(end);
-    fbb_.Required(o, Service::VT_NAME);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Service> CreateService(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::String> name = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::RPCCall>>> calls = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation = 0) {
-  ServiceBuilder builder_(_fbb);
-  builder_.add_documentation(documentation);
-  builder_.add_attributes(attributes);
-  builder_.add_calls(calls);
-  builder_.add_name(name);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Service> CreateServiceDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const char *name = nullptr,
-    std::vector<flatbuffers::Offset<reflection::RPCCall>> *calls = nullptr,
-    std::vector<flatbuffers::Offset<reflection::KeyValue>> *attributes = nullptr,
-    const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr) {
-  auto name__ = name ? _fbb.CreateString(name) : 0;
-  auto calls__ = calls ? _fbb.CreateVectorOfSortedTables<reflection::RPCCall>(calls) : 0;
-  auto attributes__ = attributes ? _fbb.CreateVectorOfSortedTables<reflection::KeyValue>(attributes) : 0;
-  auto documentation__ = documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
-  return reflection::CreateService(
-      _fbb,
-      name__,
-      calls__,
-      attributes__,
-      documentation__);
-}
-
-struct Schema FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef SchemaBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_OBJECTS = 4,
-    VT_ENUMS = 6,
-    VT_FILE_IDENT = 8,
-    VT_FILE_EXT = 10,
-    VT_ROOT_TABLE = 12,
-    VT_SERVICES = 14
-  };
-  const flatbuffers::Vector<flatbuffers::Offset<reflection::Object>> *objects() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::Object>> *>(VT_OBJECTS);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<reflection::Enum>> *enums() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::Enum>> *>(VT_ENUMS);
-  }
-  const flatbuffers::String *file_ident() const {
-    return GetPointer<const flatbuffers::String *>(VT_FILE_IDENT);
-  }
-  const flatbuffers::String *file_ext() const {
-    return GetPointer<const flatbuffers::String *>(VT_FILE_EXT);
-  }
-  const reflection::Object *root_table() const {
-    return GetPointer<const reflection::Object *>(VT_ROOT_TABLE);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<reflection::Service>> *services() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::Service>> *>(VT_SERVICES);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffsetRequired(verifier, VT_OBJECTS) &&
-           verifier.VerifyVector(objects()) &&
-           verifier.VerifyVectorOfTables(objects()) &&
-           VerifyOffsetRequired(verifier, VT_ENUMS) &&
-           verifier.VerifyVector(enums()) &&
-           verifier.VerifyVectorOfTables(enums()) &&
-           VerifyOffset(verifier, VT_FILE_IDENT) &&
-           verifier.VerifyString(file_ident()) &&
-           VerifyOffset(verifier, VT_FILE_EXT) &&
-           verifier.VerifyString(file_ext()) &&
-           VerifyOffset(verifier, VT_ROOT_TABLE) &&
-           verifier.VerifyTable(root_table()) &&
-           VerifyOffset(verifier, VT_SERVICES) &&
-           verifier.VerifyVector(services()) &&
-           verifier.VerifyVectorOfTables(services()) &&
-           verifier.EndTable();
-  }
-};
-
-struct SchemaBuilder {
-  typedef Schema Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_objects(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Object>>> objects) {
-    fbb_.AddOffset(Schema::VT_OBJECTS, objects);
-  }
-  void add_enums(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Enum>>> enums) {
-    fbb_.AddOffset(Schema::VT_ENUMS, enums);
-  }
-  void add_file_ident(flatbuffers::Offset<flatbuffers::String> file_ident) {
-    fbb_.AddOffset(Schema::VT_FILE_IDENT, file_ident);
-  }
-  void add_file_ext(flatbuffers::Offset<flatbuffers::String> file_ext) {
-    fbb_.AddOffset(Schema::VT_FILE_EXT, file_ext);
-  }
-  void add_root_table(flatbuffers::Offset<reflection::Object> root_table) {
-    fbb_.AddOffset(Schema::VT_ROOT_TABLE, root_table);
-  }
-  void add_services(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Service>>> services) {
-    fbb_.AddOffset(Schema::VT_SERVICES, services);
-  }
-  explicit SchemaBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<Schema> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Schema>(end);
-    fbb_.Required(o, Schema::VT_OBJECTS);
-    fbb_.Required(o, Schema::VT_ENUMS);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Schema> CreateSchema(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Object>>> objects = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Enum>>> enums = 0,
-    flatbuffers::Offset<flatbuffers::String> file_ident = 0,
-    flatbuffers::Offset<flatbuffers::String> file_ext = 0,
-    flatbuffers::Offset<reflection::Object> root_table = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Service>>> services = 0) {
-  SchemaBuilder builder_(_fbb);
-  builder_.add_services(services);
-  builder_.add_root_table(root_table);
-  builder_.add_file_ext(file_ext);
-  builder_.add_file_ident(file_ident);
-  builder_.add_enums(enums);
-  builder_.add_objects(objects);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Schema> CreateSchemaDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    std::vector<flatbuffers::Offset<reflection::Object>> *objects = nullptr,
-    std::vector<flatbuffers::Offset<reflection::Enum>> *enums = nullptr,
-    const char *file_ident = nullptr,
-    const char *file_ext = nullptr,
-    flatbuffers::Offset<reflection::Object> root_table = 0,
-    std::vector<flatbuffers::Offset<reflection::Service>> *services = nullptr) {
-  auto objects__ = objects ? _fbb.CreateVectorOfSortedTables<reflection::Object>(objects) : 0;
-  auto enums__ = enums ? _fbb.CreateVectorOfSortedTables<reflection::Enum>(enums) : 0;
-  auto file_ident__ = file_ident ? _fbb.CreateString(file_ident) : 0;
-  auto file_ext__ = file_ext ? _fbb.CreateString(file_ext) : 0;
-  auto services__ = services ? _fbb.CreateVectorOfSortedTables<reflection::Service>(services) : 0;
-  return reflection::CreateSchema(
-      _fbb,
-      objects__,
-      enums__,
-      file_ident__,
-      file_ext__,
-      root_table,
-      services__);
-}
-
-inline const reflection::Schema *GetSchema(const void *buf) {
-  return flatbuffers::GetRoot<reflection::Schema>(buf);
-}
-
-inline const reflection::Schema *GetSizePrefixedSchema(const void *buf) {
-  return flatbuffers::GetSizePrefixedRoot<reflection::Schema>(buf);
-}
-
-inline const char *SchemaIdentifier() {
-  return "BFBS";
-}
-
-inline bool SchemaBufferHasIdentifier(const void *buf) {
-  return flatbuffers::BufferHasIdentifier(
-      buf, SchemaIdentifier());
-}
-
-inline bool VerifySchemaBuffer(
-    flatbuffers::Verifier &verifier) {
-  return verifier.VerifyBuffer<reflection::Schema>(SchemaIdentifier());
-}
-
-inline bool VerifySizePrefixedSchemaBuffer(
-    flatbuffers::Verifier &verifier) {
-  return verifier.VerifySizePrefixedBuffer<reflection::Schema>(SchemaIdentifier());
-}
-
-inline const char *SchemaExtension() {
-  return "bfbs";
-}
-
-inline void FinishSchemaBuffer(
-    flatbuffers::FlatBufferBuilder &fbb,
-    flatbuffers::Offset<reflection::Schema> root) {
-  fbb.Finish(root, SchemaIdentifier());
-}
-
-inline void FinishSizePrefixedSchemaBuffer(
-    flatbuffers::FlatBufferBuilder &fbb,
-    flatbuffers::Offset<reflection::Schema> root) {
-  fbb.FinishSizePrefixed(root, SchemaIdentifier());
-}
-
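// A minimal sketch (hypothetical names): given the raw bytes of a compiled
// schema (.bfbs), check and verify it before handing it to reflection code.
inline const reflection::Schema *ExampleLoadSchema(const void *buf,
                                                   size_t len) {
  flatbuffers::Verifier verifier(static_cast<const uint8_t *>(buf), len);
  if (!reflection::VerifySchemaBuffer(verifier)) return nullptr;
  return reflection::GetSchema(buf);
}
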
-}  // namespace reflection
-
-#endif  // FLATBUFFERS_GENERATED_REFLECTION_REFLECTION_H_

+ 0 - 127
flatbuffers/registry.h

@@ -1,127 +0,0 @@
-/*
- * Copyright 2017 Google Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLATBUFFERS_REGISTRY_H_
-#define FLATBUFFERS_REGISTRY_H_
-
-#include "idl.h"
-
-namespace flatbuffers {
-
-// Convenience class to easily parse or generate text for arbitrary
-// FlatBuffers. Simply pre-populate it with all schema filenames that may be
-// in use, and this class will look them up using the file_identifier
-// declared in the schema.
-class Registry {
- public:
-  // Call this for all schemas that may be in use. The identifier has
-  // a function in the generated code, e.g. MonsterIdentifier().
-  void Register(const char *file_identifier, const char *schema_path) {
-    Schema schema;
-    schema.path_ = schema_path;
-    schemas_[file_identifier] = schema;
-  }
-
-  // Generate text from an arbitrary FlatBuffer by looking up its
-  // file_identifier in the registry.
-  bool FlatBufferToText(const uint8_t *flatbuf, size_t len, std::string *dest) {
-    // Get the identifier out of the buffer.
-    // If the buffer is truncated, exit.
-    if (len < sizeof(uoffset_t) + FlatBufferBuilder::kFileIdentifierLength) {
-      lasterror_ = "buffer truncated";
-      return false;
-    }
-    std::string ident(
-        reinterpret_cast<const char *>(flatbuf) + sizeof(uoffset_t),
-        FlatBufferBuilder::kFileIdentifierLength);
-    // Load and parse the schema.
-    Parser parser;
-    if (!LoadSchema(ident, &parser)) return false;
-    // Now we're ready to generate text.
-    if (!GenerateText(parser, flatbuf, dest)) {
-      lasterror_ = "unable to generate text for FlatBuffer binary";
-      return false;
-    }
-    return true;
-  }
-
-  // Parses text into a new FlatBuffer using one of the schemas in the
-  // registry; use the file_identifier to indicate which.
-  // If DetachedBuffer::data() is null then parsing failed.
-  DetachedBuffer TextToFlatBuffer(const char *text,
-                                  const char *file_identifier) {
-    // Load and parse the schema.
-    Parser parser;
-    if (!LoadSchema(file_identifier, &parser)) return DetachedBuffer();
-    // Parse the text.
-    if (!parser.Parse(text)) {
-      lasterror_ = parser.error_;
-      return DetachedBuffer();
-    }
-    // We have a valid FlatBuffer. Detach it from the builder and return.
-    return parser.builder_.Release();
-  }
-
-  // Modify any parsing / output options used by the other functions.
-  void SetOptions(const IDLOptions &opts) { opts_ = opts; }
-
-  // If schemas used contain include statements, call this function for every
-  // directory the parser should search them for.
-  void AddIncludeDirectory(const char *path) { include_paths_.push_back(path); }
-
-  // Returns a human readable error if any of the above functions fail.
-  const std::string &GetLastError() { return lasterror_; }
-
- private:
-  bool LoadSchema(const std::string &ident, Parser *parser) {
-    // Find the schema; if it isn't registered, exit.
-    auto it = schemas_.find(ident);
-    if (it == schemas_.end()) {
-      // Don't attach the identifier, since it may not be human readable.
-      lasterror_ = "identifier for this buffer not in the registry";
-      return false;
-    }
-    auto &schema = it->second;
-    // Load the schema from disk; if that fails, exit.
-    std::string schematext;
-    if (!LoadFile(schema.path_.c_str(), false, &schematext)) {
-      lasterror_ = "could not load schema: " + schema.path_;
-      return false;
-    }
-    // Parse schema.
-    parser->opts = opts_;
-    if (!parser->Parse(schematext.c_str(), vector_data(include_paths_),
-                       schema.path_.c_str())) {
-      lasterror_ = parser->error_;
-      return false;
-    }
-    return true;
-  }
-
-  struct Schema {
-    std::string path_;
-    // TODO(wvo) optionally cache schema file or parsed schema here.
-  };
-
-  std::string lasterror_;
-  IDLOptions opts_;
-  std::vector<const char *> include_paths_;
-  std::map<std::string, Schema> schemas_;
-};
-
-}  // namespace flatbuffers
-
-#endif  // FLATBUFFERS_REGISTRY_H_
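Since registry.h is dropped from this package, a minimal usage sketch of the removed Registry API may help reviewers judge what is lost. The "MONS" identifier and schema path are hypothetical; the calls match the declarations above.

    #include "flatbuffers/registry.h"

    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Dump an arbitrary FlatBuffer as JSON by looking up its file_identifier.
    bool DumpAsJson(const uint8_t *flatbuf, size_t len) {
      flatbuffers::Registry registry;
      // Identifier as produced by the generated MonsterIdentifier()-style helper.
      registry.Register("MONS", "schemas/monster.fbs");
      std::string json;
      if (!registry.FlatBufferToText(flatbuf, len, &json)) {
        fprintf(stderr, "%s\n", registry.GetLastError().c_str());
        return false;
      }
      printf("%s\n", json.c_str());
      return true;
    }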

+ 0 - 683
flatbuffers/util.h

@@ -1,683 +0,0 @@
-/*
- * Copyright 2014 Google Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLATBUFFERS_UTIL_H_
-#define FLATBUFFERS_UTIL_H_
-
-#include <errno.h>
-
-#include "base.h"
-
-#ifndef FLATBUFFERS_PREFER_PRINTF
-#  include <sstream>
-#else  // FLATBUFFERS_PREFER_PRINTF
-#  include <float.h>
-#  include <stdio.h>
-#endif  // FLATBUFFERS_PREFER_PRINTF
-
-#include <iomanip>
-#include <string>
-
-namespace flatbuffers {
-
-// @locale-independent functions for ASCII character set.
-
-// Fast check that a character lies in the closed range [a <= x <= b]
-// using a single compare (conditional branch).
-inline bool check_ascii_range(char x, char a, char b) {
-  FLATBUFFERS_ASSERT(a <= b);
-  // (Hacker's Delight): `a <= x <= b` <=> `(x-a) <={u} (b-a)`.
-  // The x, a, b will be promoted to int and subtracted without overflow.
-  return static_cast<unsigned int>(x - a) <= static_cast<unsigned int>(b - a);
-}
-
-// Case-insensitive isalpha
-inline bool is_alpha(char c) {
-  // ASCII only: alpha to upper case => reset bit 0x20 (~0x20 = 0xDF).
-  return check_ascii_range(c & 0xDF, 'a' & 0xDF, 'z' & 0xDF);
-}
-
-// Check (case-insensitive) that `c` is equal to alpha.
-inline bool is_alpha_char(char c, char alpha) {
-  FLATBUFFERS_ASSERT(is_alpha(alpha));
-  // ASCII only: alpha to upper case => reset bit 0x20 (~0x20 = 0xDF).
-  return ((c & 0xDF) == (alpha & 0xDF));
-}
-
-// https://en.cppreference.com/w/cpp/string/byte/isxdigit
-// isdigit and isxdigit are the only standard narrow character classification
-// functions that are not affected by the currently installed C locale. although
-// some implementations (e.g. Microsoft in 1252 codepage) may classify
-// additional single-byte characters as digits.
-inline bool is_digit(char c) { return check_ascii_range(c, '0', '9'); }
-
-inline bool is_xdigit(char c) {
-  // Replace by look-up table.
-  return is_digit(c) || check_ascii_range(c & 0xDF, 'a' & 0xDF, 'f' & 0xDF);
-}
-
-// Case-insensitive isalnum
-inline bool is_alnum(char c) { return is_alpha(c) || is_digit(c); }
-
-// @end-locale-independent functions for ASCII character set
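The single-compare trick above deserves one worked instance: in is_digit, x - '0' is computed in int and then compared as unsigned, so any x below '0' wraps to a huge unsigned value and fails the same <= 9 test as any x above '9'. A small self-check against the functions above:

    #include "flatbuffers/util.h"

    #include <cassert>

    int main() {
      assert(flatbuffers::is_digit('5'));   // '5' - '0' = 5, which is <=u 9
      assert(!flatbuffers::is_digit('/'));  // '/' - '0' = -1 wraps past 9
      // The 0xDF mask folds case, so both cases classify identically.
      assert(flatbuffers::is_alpha('a') && flatbuffers::is_alpha('A'));
      return 0;
    }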
-
-#ifdef FLATBUFFERS_PREFER_PRINTF
-template<typename T> size_t IntToDigitCount(T t) {
-  size_t digit_count = 0;
-  // Count the sign for negative numbers
-  if (t < 0) digit_count++;
-  // Count a single 0 left of the dot for fractional numbers
-  if (-1 < t && t < 1) digit_count++;
-  // Count digits until fractional part
-  T eps = std::numeric_limits<float>::epsilon();
-  while (t <= (-1 + eps) || (1 - eps) <= t) {
-    t /= 10;
-    digit_count++;
-  }
-  return digit_count;
-}
-
-template<typename T> size_t NumToStringWidth(T t, int precision = 0) {
-  size_t string_width = IntToDigitCount(t);
-  // Count the dot for floating point numbers
-  if (precision) string_width += (precision + 1);
-  return string_width;
-}
-
-template<typename T>
-std::string NumToStringImplWrapper(T t, const char *fmt, int precision = 0) {
-  size_t string_width = NumToStringWidth(t, precision);
-  std::string s(string_width, 0x00);
-  // Allow snprintf to use std::string trailing null to detect buffer overflow
-  snprintf(const_cast<char *>(s.data()), (s.size() + 1), fmt, string_width, t);
-  return s;
-}
-#endif  // FLATBUFFERS_PREFER_PRINTF
-
-// Convert an integer or floating point value to a string.
-// In contrast to std::stringstream, "char" values are
-// converted to a string of digits, and we don't use scientific notation.
-template<typename T> std::string NumToString(T t) {
-  // clang-format off
-
-  #ifndef FLATBUFFERS_PREFER_PRINTF
-    std::stringstream ss;
-    ss << t;
-    return ss.str();
-  #else // FLATBUFFERS_PREFER_PRINTF
-    auto v = static_cast<long long>(t);
-    return NumToStringImplWrapper(v, "%.*lld");
-  #endif // FLATBUFFERS_PREFER_PRINTF
-  // clang-format on
-}
-// Avoid char types used as character data.
-template<> inline std::string NumToString<signed char>(signed char t) {
-  return NumToString(static_cast<int>(t));
-}
-template<> inline std::string NumToString<unsigned char>(unsigned char t) {
-  return NumToString(static_cast<int>(t));
-}
-template<> inline std::string NumToString<char>(char t) {
-  return NumToString(static_cast<int>(t));
-}
-#if defined(FLATBUFFERS_CPP98_STL)
-template<> inline std::string NumToString<long long>(long long t) {
-  char buf[21];  // (log((1 << 63) - 1) / log(10)) + 2
-  snprintf(buf, sizeof(buf), "%lld", t);
-  return std::string(buf);
-}
-
-template<>
-inline std::string NumToString<unsigned long long>(unsigned long long t) {
-  char buf[22];  // (log((1 << 63) - 1) / log(10)) + 1
-  snprintf(buf, sizeof(buf), "%llu", t);
-  return std::string(buf);
-}
-#endif  // defined(FLATBUFFERS_CPP98_STL)
-
-// Special versions for floats/doubles.
-template<typename T> std::string FloatToString(T t, int precision) {
-  // clang-format off
-
-  #ifndef FLATBUFFERS_PREFER_PRINTF
-    // to_string() prints different numbers of digits for floats depending on
-    // platform and isn't available on Android, so we use stringstream
-    std::stringstream ss;
-    // Use std::fixed to suppress scientific notation.
-    ss << std::fixed;
-    // Default precision is 6, we want that to be higher for doubles.
-    ss << std::setprecision(precision);
-    ss << t;
-    auto s = ss.str();
-  #else // FLATBUFFERS_PREFER_PRINTF
-    auto v = static_cast<double>(t);
-    auto s = NumToStringImplWrapper(v, "%0.*f", precision);
-  #endif // FLATBUFFERS_PREFER_PRINTF
-  // clang-format on
-  // Sadly, std::fixed turns "1" into "1.00000", so here we undo that.
-  auto p = s.find_last_not_of('0');
-  if (p != std::string::npos) {
-    // Strip trailing zeroes. If it is a whole number, keep one zero.
-    s.resize(p + (s[p] == '.' ? 2 : 1));
-  }
-  return s;
-}
-
-template<> inline std::string NumToString<double>(double t) {
-  return FloatToString(t, 12);
-}
-template<> inline std::string NumToString<float>(float t) {
-  return FloatToString(t, 6);
-}
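The trailing-zero strip at the end of FloatToString is easiest to see on concrete values; both expectations below follow from tracing the code above:

    #include "flatbuffers/util.h"

    #include <cassert>

    int main() {
      // std::fixed yields "1.000000000000"; find_last_not_of('0') lands on the
      // '.', and resize(p + 2) keeps exactly one zero.
      assert(flatbuffers::NumToString(1.0) == "1.0");
      // For 0.5 the last non-zero is '5', and resize(p + 1) keeps "0.5".
      assert(flatbuffers::NumToString(0.5) == "0.5");
      return 0;
    }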
-
-// Convert an integer value to a hexadecimal string.
-// The returned string is always xdigits long, padded with leading zeros.
-// For example, IntToStringHex(0x23, 8) returns the string "00000023".
-inline std::string IntToStringHex(int i, int xdigits) {
-  FLATBUFFERS_ASSERT(i >= 0);
-  // clang-format off
-
-  #ifndef FLATBUFFERS_PREFER_PRINTF
-    std::stringstream ss;
-    ss << std::setw(xdigits) << std::setfill('0') << std::hex << std::uppercase
-       << i;
-    return ss.str();
-  #else // FLATBUFFERS_PREFER_PRINTF
-    return NumToStringImplWrapper(i, "%.*X", xdigits);
-  #endif // FLATBUFFERS_PREFER_PRINTF
-  // clang-format on
-}
-
-// clang-format off
-// Use locale independent functions {strtod_l, strtof_l, strtoll_l, strtoull_l}.
-#if defined(FLATBUFFERS_LOCALE_INDEPENDENT) && (FLATBUFFERS_LOCALE_INDEPENDENT > 0)
-  class ClassicLocale {
-    #ifdef _MSC_VER
-      typedef _locale_t locale_type;
-    #else
-      typedef locale_t locale_type;  // POSIX.1-2008 locale_t type
-    #endif
-    ClassicLocale();
-    ~ClassicLocale();
-    locale_type locale_;
-    static ClassicLocale instance_;
-  public:
-    static locale_type Get() { return instance_.locale_; }
-  };
-
-  #ifdef _MSC_VER
-    #define __strtoull_impl(s, pe, b) _strtoui64_l(s, pe, b, ClassicLocale::Get())
-    #define __strtoll_impl(s, pe, b) _strtoi64_l(s, pe, b, ClassicLocale::Get())
-    #define __strtod_impl(s, pe) _strtod_l(s, pe, ClassicLocale::Get())
-    #define __strtof_impl(s, pe) _strtof_l(s, pe, ClassicLocale::Get())
-  #else
-    #define __strtoull_impl(s, pe, b) strtoull_l(s, pe, b, ClassicLocale::Get())
-    #define __strtoll_impl(s, pe, b) strtoll_l(s, pe, b, ClassicLocale::Get())
-    #define __strtod_impl(s, pe) strtod_l(s, pe, ClassicLocale::Get())
-    #define __strtof_impl(s, pe) strtof_l(s, pe, ClassicLocale::Get())
-  #endif
-#else
-  #define __strtod_impl(s, pe) strtod(s, pe)
-  #define __strtof_impl(s, pe) static_cast<float>(strtod(s, pe))
-  #ifdef _MSC_VER
-    #define __strtoull_impl(s, pe, b) _strtoui64(s, pe, b)
-    #define __strtoll_impl(s, pe, b) _strtoi64(s, pe, b)
-  #else
-    #define __strtoull_impl(s, pe, b) strtoull(s, pe, b)
-    #define __strtoll_impl(s, pe, b) strtoll(s, pe, b)
-  #endif
-#endif
-
-inline void strtoval_impl(int64_t *val, const char *str, char **endptr,
-                                 int base) {
-    *val = __strtoll_impl(str, endptr, base);
-}
-
-inline void strtoval_impl(uint64_t *val, const char *str, char **endptr,
-                                 int base) {
-  *val = __strtoull_impl(str, endptr, base);
-}
-
-inline void strtoval_impl(double *val, const char *str, char **endptr) {
-  *val = __strtod_impl(str, endptr);
-}
-
-// UBSAN: double to float is safe if numeric_limits<float>::is_iec559 is true.
-__supress_ubsan__("float-cast-overflow")
-inline void strtoval_impl(float *val, const char *str, char **endptr) {
-  *val = __strtof_impl(str, endptr);
-}
-#undef __strtoull_impl
-#undef __strtoll_impl
-#undef __strtod_impl
-#undef __strtof_impl
-// clang-format on
-
-// Adaptor for strtoull()/strtoll().
-// Flatbuffers accepts numbers with any count of leading zeros (-009 is -9),
-// while strtoll with base=0 interprets the first leading zero as an octal prefix.
-// In the future, prefixed binary literals (0b0101) could be supported.
-// 1) Checks the errno code for an overflow condition (out of range).
-// 2) If base <= 0, the function tries to detect the base of the number from its prefix.
-//
-// Return value (like strtoull and strtoll, but reject partial result):
-// - If successful, an integer value corresponding to the str is returned.
-// - If full string conversion can't be performed, 0 is returned.
-// - If the converted value falls out of the range of the corresponding return
-// type, a range error occurs. In this case the value MAX(T)/MIN(T) is returned.
-template<typename T>
-inline bool StringToIntegerImpl(T *val, const char *const str,
-                                const int base = 0,
-                                const bool check_errno = true) {
-  // T is int64_t or uint64_t
-  FLATBUFFERS_ASSERT(str);
-  if (base <= 0) {
-    auto s = str;
-    while (*s && !is_digit(*s)) s++;
-    if (s[0] == '0' && is_alpha_char(s[1], 'X'))
-      return StringToIntegerImpl(val, str, 16, check_errno);
-    // if the prefix doesn't match, try base=10
-    return StringToIntegerImpl(val, str, 10, check_errno);
-  } else {
-    if (check_errno) errno = 0;  // clear thread-local errno
-    auto endptr = str;
-    strtoval_impl(val, str, const_cast<char **>(&endptr), base);
-    if ((*endptr != '\0') || (endptr == str)) {
-      *val = 0;      // erase partial result
-      return false;  // invalid string
-    }
-    // if errno signals out-of-range, fail; val already holds MAX(T)/MIN(T)
-    if (check_errno && errno) return false;
-    return true;
-  }
-}
-
-template<typename T>
-inline bool StringToFloatImpl(T *val, const char *const str) {
-  // Type T must be either float or double.
-  FLATBUFFERS_ASSERT(str && val);
-  auto end = str;
-  strtoval_impl(val, str, const_cast<char **>(&end));
-  auto done = (end != str) && (*end == '\0');
-  if (!done) *val = 0;  // erase partial result
-  return done;
-}
-
-// Convert a string to an instance of T.
-// Return value (matched with StringToInteger64Impl and strtod):
-// - If successful, a numeric value corresponding to the str is returned.
-// - If full string conversion can't be performed, 0 is returned.
-// - If the converted value falls out of the range of the corresponding return
-// type, a range error occurs. In this case the value MAX(T)/MIN(T) is returned.
-template<typename T> inline bool StringToNumber(const char *s, T *val) {
-  FLATBUFFERS_ASSERT(s && val);
-  int64_t i64;
-  // The errno check isn't needed, will return MAX/MIN on overflow.
-  if (StringToIntegerImpl(&i64, s, 0, false)) {
-    const int64_t max = (flatbuffers::numeric_limits<T>::max)();
-    const int64_t min = flatbuffers::numeric_limits<T>::lowest();
-    if (i64 > max) {
-      *val = static_cast<T>(max);
-      return false;
-    }
-    if (i64 < min) {
-      // For unsigned types return max to distinguish from
-      // "no conversion can be performed" when 0 is returned.
-      *val = static_cast<T>(flatbuffers::is_unsigned<T>::value ? max : min);
-      return false;
-    }
-    *val = static_cast<T>(i64);
-    return true;
-  }
-  *val = 0;
-  return false;
-}
-
-template<> inline bool StringToNumber<int64_t>(const char *str, int64_t *val) {
-  return StringToIntegerImpl(val, str);
-}
-
-template<>
-inline bool StringToNumber<uint64_t>(const char *str, uint64_t *val) {
-  if (!StringToIntegerImpl(val, str)) return false;
-  // strtoull accepts negative numbers:
-  // If the minus sign was part of the input sequence, the numeric value
-  // calculated from the sequence of digits is negated as if by unary minus
-  // in the result type, which applies unsigned integer wraparound rules.
-  // Fix this behaviour (except -0).
-  if (*val) {
-    auto s = str;
-    while (*s && !is_digit(*s)) s++;
-    s = (s > str) ? (s - 1) : s;  // step back to one symbol
-    if (*s == '-') {
-      // For unsigned types return the max to distinguish from
-      // "no conversion can be performed".
-      *val = (flatbuffers::numeric_limits<uint64_t>::max)();
-      return false;
-    }
-  }
-  return true;
-}
-
-template<> inline bool StringToNumber(const char *s, float *val) {
-  return StringToFloatImpl(val, s);
-}
-
-template<> inline bool StringToNumber(const char *s, double *val) {
-  return StringToFloatImpl(val, s);
-}
-
-inline int64_t StringToInt(const char *s, int base = 10) {
-  int64_t val;
-  return StringToIntegerImpl(&val, s, base) ? val : 0;
-}
-
-inline uint64_t StringToUInt(const char *s, int base = 10) {
-  uint64_t val;
-  return StringToIntegerImpl(&val, s, base) ? val : 0;
-}
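The clamp-and-report-false contract can be pinned down with a few concrete calls (a sketch, using only the functions declared above):

    #include "flatbuffers/util.h"

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t u8;
      // 300 overflows uint8_t: the value is clamped to 255 and false returned.
      assert(!flatbuffers::StringToNumber("300", &u8) && u8 == 255);
      uint64_t u64;
      // "-1" is rejected for unsigned types: the value is set to UINT64_MAX.
      assert(!flatbuffers::StringToNumber("-1", &u64) && u64 == UINT64_MAX);
      int32_t i32;
      // Partial strings fail the full-conversion requirement and yield 0.
      assert(!flatbuffers::StringToNumber("12abc", &i32) && i32 == 0);
      return 0;
    }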
-
-typedef bool (*LoadFileFunction)(const char *filename, bool binary,
-                                 std::string *dest);
-typedef bool (*FileExistsFunction)(const char *filename);
-
-LoadFileFunction SetLoadFileFunction(LoadFileFunction load_file_function);
-
-FileExistsFunction SetFileExistsFunction(
-    FileExistsFunction file_exists_function);
-
-// Check if file "name" exists.
-bool FileExists(const char *name);
-
-// Check if "name" exists and it is also a directory.
-bool DirExists(const char *name);
-
-// Load file "name" into "buf" returning true if successful
-// false otherwise.  If "binary" is false data is read
-// using ifstream's text mode, otherwise data is read with
-// no transcoding.
-bool LoadFile(const char *name, bool binary, std::string *buf);
-
-// Save data "buf" of length "len" bytes into a file
-// "name" returning true if successful, false otherwise.
-// If "binary" is false data is written using ifstream's
-// text mode, otherwise data is written with no
-// transcoding.
-bool SaveFile(const char *name, const char *buf, size_t len, bool binary);
-
-// Save data "buf" into file "name" returning true if
-// successful, false otherwise.  If "binary" is false
-// data is written using ofstream's text mode, otherwise
-// data is written with no transcoding.
-inline bool SaveFile(const char *name, const std::string &buf, bool binary) {
-  return SaveFile(name, buf.c_str(), buf.size(), binary);
-}
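A quick round trip through the file helpers, assuming a writable working directory (the file name is hypothetical):

    #include "flatbuffers/util.h"

    #include <cassert>
    #include <string>

    int main() {
      std::string out = "hello";
      // binary = true: no text-mode transcoding on either side.
      assert(flatbuffers::SaveFile("demo.bin", out, true));
      std::string in;
      assert(flatbuffers::LoadFile("demo.bin", true, &in));
      assert(in == out);
      return 0;
    }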
-
-// Functionality for minimalistic portable path handling.
-
-// The functions below behave correctly regardless of whether posix ('/') or
-// Windows ('/' or '\\') separators are used.
-
-// Any new separators inserted are always posix.
-FLATBUFFERS_CONSTEXPR char kPathSeparator = '/';
-
-// Returns the path with the extension, if any, removed.
-std::string StripExtension(const std::string &filepath);
-
-// Returns the extension, if any.
-std::string GetExtension(const std::string &filepath);
-
-// Return the last component of the path, after the last separator.
-std::string StripPath(const std::string &filepath);
-
-// Strip the last component of the path + separator.
-std::string StripFileName(const std::string &filepath);
-
-// Concatenates a path with a filename, regardless of whether the path
-// ends in a separator or not.
-std::string ConCatPathFileName(const std::string &path,
-                               const std::string &filename);
-
-// Replaces any '\\' separators with '/'
-std::string PosixPath(const char *path);
-
-// This function ensures a directory exists by recursively
-// creating dirs for any parts of the path that don't exist yet.
-void EnsureDirExists(const std::string &filepath);
-
-// Obtains the absolute path from any other path.
-// Returns the input path if the absolute path couldn't be resolved.
-std::string AbsolutePath(const std::string &filepath);
-
-// To and from UTF-8 unicode conversion functions
-
-// Convert a unicode code point into a UTF-8 representation by appending it
-// to a string. Returns the number of bytes generated.
-inline int ToUTF8(uint32_t ucc, std::string *out) {
-  FLATBUFFERS_ASSERT(!(ucc & 0x80000000));  // Top bit can't be set.
-  // 6 possible encodings: http://en.wikipedia.org/wiki/UTF-8
-  for (int i = 0; i < 6; i++) {
-    // Max bits this encoding can represent.
-    uint32_t max_bits = 6 + i * 5 + static_cast<int>(!i);
-    if (ucc < (1u << max_bits)) {  // does it fit?
-      // Remaining bits not encoded in the first byte, store 6 bits each
-      uint32_t remain_bits = i * 6;
-      // Store first byte:
-      (*out) += static_cast<char>((0xFE << (max_bits - remain_bits)) |
-                                  (ucc >> remain_bits));
-      // Store remaining bytes:
-      for (int j = i - 1; j >= 0; j--) {
-        (*out) += static_cast<char>(((ucc >> (j * 6)) & 0x3F) | 0x80);
-      }
-      return i + 1;  // Return the number of bytes added.
-    }
-  }
-  FLATBUFFERS_ASSERT(0);  // Impossible to arrive here.
-  return -1;
-}
-
-// Converts whatever prefix of the incoming string corresponds to a valid
-// UTF-8 sequence into a unicode code. The incoming pointer will have been
-// advanced past all bytes parsed.
-// Returns -1 upon corrupt UTF-8 encoding (ignore the incoming pointer in
-// this case).
-inline int FromUTF8(const char **in) {
-  int len = 0;
-  // Count leading 1 bits.
-  for (int mask = 0x80; mask >= 0x04; mask >>= 1) {
-    if (**in & mask) {
-      len++;
-    } else {
-      break;
-    }
-  }
-  if ((static_cast<unsigned char>(**in) << len) & 0x80)
-    return -1;  // Bit after leading 1's must be 0.
-  if (!len) return *(*in)++;
-  // UTF-8 encoded values with a length are between 2 and 4 bytes.
-  if (len < 2 || len > 4) { return -1; }
-  // Grab initial bits of the code.
-  int ucc = *(*in)++ & ((1 << (7 - len)) - 1);
-  for (int i = 0; i < len - 1; i++) {
-    if ((**in & 0xC0) != 0x80) return -1;  // Upper two bits must be 10.
-    ucc <<= 6;
-    ucc |= *(*in)++ & 0x3F;  // Grab 6 more bits of the code.
-  }
-  // UTF-8 cannot encode values between 0xD800 and 0xDFFF (reserved for
-  // UTF-16 surrogate pairs).
-  if (ucc >= 0xD800 && ucc <= 0xDFFF) { return -1; }
-  // UTF-8 must represent code points in their shortest possible encoding.
-  switch (len) {
-    case 2:
-      // Two bytes of UTF-8 can represent code points from U+0080 to U+07FF.
-      if (ucc < 0x0080 || ucc > 0x07FF) { return -1; }
-      break;
-    case 3:
-      // Three bytes of UTF-8 can represent code points from U+0800 to U+FFFF.
-      if (ucc < 0x0800 || ucc > 0xFFFF) { return -1; }
-      break;
-    case 4:
-      // Four bytes of UTF-8 can represent code points from U+10000 to U+10FFFF.
-      if (ucc < 0x10000 || ucc > 0x10FFFF) { return -1; }
-      break;
-  }
-  return ucc;
-}
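The encoder/decoder pair round-trips cleanly. U+20AC (the euro sign) is a handy three-byte case: 0x20AC fits under 1 << 16, so i = 2 and the emitted bytes are E2 82 AC.

    #include "flatbuffers/util.h"

    #include <cassert>
    #include <string>

    int main() {
      std::string s;
      assert(flatbuffers::ToUTF8(0x20AC, &s) == 3);  // appends E2 82 AC
      const char *p = s.c_str();
      // Decoding recovers the code point and advances p past the sequence.
      assert(flatbuffers::FromUTF8(&p) == 0x20AC);
      assert(p == s.c_str() + 3);
      return 0;
    }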
-
-#ifndef FLATBUFFERS_PREFER_PRINTF
-// Wraps a string to a maximum length, inserting new lines where necessary. Any
-// existing whitespace will be collapsed down to a single space. A prefix or
-// suffix can be provided, which will be inserted before or after a wrapped
-// line, respectively.
-inline std::string WordWrap(const std::string in, size_t max_length,
-                            const std::string wrapped_line_prefix,
-                            const std::string wrapped_line_suffix) {
-  std::istringstream in_stream(in);
-  std::string wrapped, line, word;
-
-  in_stream >> word;
-  line = word;
-
-  while (in_stream >> word) {
-    if ((line.length() + 1 + word.length() + wrapped_line_suffix.length()) <
-        max_length) {
-      line += " " + word;
-    } else {
-      wrapped += line + wrapped_line_suffix + "\n";
-      line = wrapped_line_prefix + word;
-    }
-  }
-  wrapped += line;
-
-  return wrapped;
-}
-#endif  // !FLATBUFFERS_PREFER_PRINTF
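A self-check of the wrapping rule in the stringstream (non-FLATBUFFERS_PREFER_PRINTF) build; the expected literal follows from tracing the loop with max_length = 10, prefix "  ", suffix " \":

    #include "flatbuffers/util.h"

    #include <cassert>
    #include <string>

    int main() {
      // "three" no longer fits, so the line is flushed with the suffix and the
      // next line starts with the prefix.
      std::string w = flatbuffers::WordWrap("one two three", 10, "  ", " \\");
      assert(w == "one two \\\n  three");
      return 0;
    }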
-
-inline bool EscapeString(const char *s, size_t length, std::string *_text,
-                         bool allow_non_utf8, bool natural_utf8) {
-  std::string &text = *_text;
-  text += "\"";
-  for (uoffset_t i = 0; i < length; i++) {
-    char c = s[i];
-    switch (c) {
-      case '\n': text += "\\n"; break;
-      case '\t': text += "\\t"; break;
-      case '\r': text += "\\r"; break;
-      case '\b': text += "\\b"; break;
-      case '\f': text += "\\f"; break;
-      case '\"': text += "\\\""; break;
-      case '\\': text += "\\\\"; break;
-      default:
-        if (c >= ' ' && c <= '~') {
-          text += c;
-        } else {
-          // Not printable ASCII data. Let's see if it's valid UTF-8 first:
-          const char *utf8 = s + i;
-          int ucc = FromUTF8(&utf8);
-          if (ucc < 0) {
-            if (allow_non_utf8) {
-              text += "\\x";
-              text += IntToStringHex(static_cast<uint8_t>(c), 2);
-            } else {
-              // There are two cases here:
-              //
-              // 1) We reached here by parsing an IDL file. In that case,
-              // we previously checked for non-UTF-8, so we shouldn't reach
-              // here.
-              //
-              // 2) We reached here by someone calling GenerateText()
-              // on a previously-serialized flatbuffer. The data might have
-              // non-UTF-8 Strings, or might be corrupt.
-              //
-              // In both cases, we have to give up and inform the caller
-              // they have no JSON.
-              return false;
-            }
-          } else {
-            if (natural_utf8) {
-              // utf8 now points past all UTF-8 bytes parsed
-              text.append(s + i, static_cast<size_t>(utf8 - s - i));
-            } else if (ucc <= 0xFFFF) {
-              // Parses as Unicode within JSON's \uXXXX range, so use that.
-              text += "\\u";
-              text += IntToStringHex(ucc, 4);
-            } else if (ucc <= 0x10FFFF) {
-              // Encode Unicode SMP values to a surrogate pair using two \u
-              // escapes.
-              uint32_t base = ucc - 0x10000;
-              auto high_surrogate = (base >> 10) + 0xD800;
-              auto low_surrogate = (base & 0x03FF) + 0xDC00;
-              text += "\\u";
-              text += IntToStringHex(high_surrogate, 4);
-              text += "\\u";
-              text += IntToStringHex(low_surrogate, 4);
-            }
-            // Skip past characters recognized.
-            i = static_cast<uoffset_t>(utf8 - s - 1);
-          }
-        }
-        break;
-    }
-  }
-  text += "\"";
-  return true;
-}
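The surrogate-pair branch is plain arithmetic, and it checks out on a concrete SMP code point such as U+1F600:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t ucc = 0x1F600;         // an SMP code point
      uint32_t base = ucc - 0x10000;  // 0xF600
      assert((base >> 10) + 0xD800 == 0xD83D);     // high surrogate
      assert((base & 0x03FF) + 0xDC00 == 0xDE00);  // low surrogate
      return 0;  // so U+1F600 escapes to "\uD83D\uDE00"
    }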
-
-inline std::string BufferToHexText(const void *buffer, size_t buffer_size,
-                                   size_t max_length,
-                                   const std::string &wrapped_line_prefix,
-                                   const std::string &wrapped_line_suffix) {
-  std::string text = wrapped_line_prefix;
-  size_t start_offset = 0;
-  const char *s = reinterpret_cast<const char *>(buffer);
-  for (size_t i = 0; s && i < buffer_size; i++) {
-    // Last iteration or do we have more?
-    bool have_more = i + 1 < buffer_size;
-    text += "0x";
-    text += IntToStringHex(static_cast<uint8_t>(s[i]), 2);
-    if (have_more) { text += ','; }
-    // If we have more to process and we reached max_length
-    if (have_more &&
-        text.size() + wrapped_line_suffix.size() >= start_offset + max_length) {
-      text += wrapped_line_suffix;
-      text += '\n';
-      start_offset = text.size();
-      text += wrapped_line_prefix;
-    }
-  }
-  text += wrapped_line_suffix;
-  return text;
-}
-
-// Remove paired quotes in a string: "text"|'text' -> text.
-std::string RemoveStringQuotes(const std::string &s);
-
-// Change the global C locale to the locale named <locale_name>.
-// Returns the actual locale name in <_value>, useful if locale_name is "" or
-// null.
-bool SetGlobalTestLocale(const char *locale_name,
-                         std::string *_value = nullptr);
-
-// Read (or test) a value of environment variable.
-bool ReadEnvironmentVariable(const char *var_name,
-                             std::string *_value = nullptr);
-
-// MSVC specific: Send all assert reports to STDOUT to prevent CI hangs.
-void SetupDefaultCRTReportMode();
-
-}  // namespace flatbuffers
-
-#endif  // FLATBUFFERS_UTIL_H_

+ 0 - 15
ruy/SConscript

@@ -1,15 +0,0 @@
-# for module compiling
-import os
-from building import *
-
-objs = []
-cwd  = GetCurrentDir()
-list = os.listdir(cwd)
-
-if GetDepend('PKG_USING_TENSORFLOWLITEMICRO'):
-    for d in list:
-        path = os.path.join(cwd, d)
-        if os.path.isfile(os.path.join(path, 'SConscript')):
-            objs = objs + SConscript(os.path.join(d, 'SConscript'))
-
-Return('objs')

+ 0 - 9
ruy/profiler/SConscript

@@ -1,9 +0,0 @@
-from building import *
-
-cwd     = GetCurrentDir()
-src     = Glob('*.c') + Glob('*.cc')
-CPPPATH = [cwd]
-
-group = DefineGroup('ruy', src, depend = ['PKG_USING_TENSORFLOWLITEMICRO'], CPPPATH = CPPPATH)
-
-Return('group')

+ 0 - 132
ruy/profiler/instrumentation.cc

@@ -1,132 +0,0 @@
-/* Copyright 2020 Google LLC. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "instrumentation.h"
-
-#ifdef RUY_PROFILER
-
-#include <cstring>
-
-namespace ruy {
-namespace profiler {
-
-void Label::operator=(const Label& other) {
-  format_ = other.format_;
-  args_count_ = other.args_count_;
-  for (int i = 0; i < args_count_; i++) {
-    args_[i] = other.args_[i];
-  }
-}
-
-bool Label::operator==(const Label& other) const {
-  if (std::string(format_) != std::string(other.format_)) {
-    return false;
-  }
-  if (args_count_ != other.args_count_) {
-    return false;
-  }
-  for (int i = 0; i < args_count_; i++) {
-    if (args_[i] != other.args_[i]) {
-      return false;
-    }
-  }
-  return true;
-}
-
-std::string Label::Formatted() const {
-  static constexpr int kBufSize = 256;
-  char buf[kBufSize];
-  if (args_count_ == 0) {
-    return format_;
-  }
-  if (args_count_ == 1) {
-    snprintf(buf, kBufSize, format_, args_[0]);
-  } else if (args_count_ == 2) {
-    snprintf(buf, kBufSize, format_, args_[0], args_[1]);
-  } else if (args_count_ == 3) {
-    snprintf(buf, kBufSize, format_, args_[0], args_[1], args_[2]);
-  } else if (args_count_ == 4) {
-    snprintf(buf, kBufSize, format_, args_[0], args_[1], args_[2], args_[3]);
-  } else {
-    abort();
-  }
-  return buf;
-}
-
-namespace detail {
-
-std::mutex* GlobalsMutex() {
-  static std::mutex mutex;
-  return &mutex;
-}
-
-bool& GlobalIsProfilerRunning() {
-  static bool b;
-  return b;
-}
-
-std::vector<ThreadStack*>* GlobalAllThreadStacks() {
-  static std::vector<ThreadStack*> all_stacks;
-  return &all_stacks;
-}
-
-ThreadStack* ThreadLocalThreadStack() {
-  thread_local static ThreadStack thread_stack;
-  return &thread_stack;
-}
-
-ThreadStack::ThreadStack() {
-  std::lock_guard<std::mutex> lock(*GlobalsMutex());
-  static std::uint32_t global_next_thread_stack_id = 0;
-  stack_.id = global_next_thread_stack_id++;
-  GlobalAllThreadStacks()->push_back(this);
-}
-
-ThreadStack::~ThreadStack() {
-  std::lock_guard<std::mutex> lock(*GlobalsMutex());
-  std::vector<ThreadStack*>* all_stacks = GlobalAllThreadStacks();
-  for (auto it = all_stacks->begin(); it != all_stacks->end(); ++it) {
-    if (*it == this) {
-      all_stacks->erase(it);
-      return;
-    }
-  }
-}
-int GetBufferSize(const Stack& stack) {
-  return sizeof(stack.id) + sizeof(stack.size) +
-         stack.size * sizeof(stack.labels[0]);
-}
-
-void CopyToBuffer(const Stack& stack, char* dst) {
-  memcpy(dst, &stack.id, sizeof(stack.id));
-  dst += sizeof(stack.id);
-  memcpy(dst, &stack.size, sizeof(stack.size));
-  dst += sizeof(stack.size);
-  memcpy(dst, stack.labels, stack.size * sizeof(stack.labels[0]));
-}
-
-void ReadFromBuffer(const char* src, Stack* stack) {
-  memcpy(&stack->id, src, sizeof(stack->id));
-  src += sizeof(stack->id);
-  memcpy(&stack->size, src, sizeof(stack->size));
-  src += sizeof(stack->size);
-  memcpy(stack->labels, src, stack->size * sizeof(stack->labels[0]));
-}
-
-}  // namespace detail
-}  // namespace profiler
-}  // namespace ruy
-
-#endif
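GetBufferSize/CopyToBuffer/ReadFromBuffer form a simple flat serialization of a profiling stack; a round-trip sketch, assuming the Stack type from instrumentation.h and a build with RUY_PROFILER defined:

    #include <vector>

    #include "ruy/profiler/instrumentation.h"

    // Serialize one thread's stack snapshot and read it back.
    void RoundTrip(const ruy::profiler::detail::Stack &stack,
                   ruy::profiler::detail::Stack *copy) {
      std::vector<char> buf(ruy::profiler::detail::GetBufferSize(stack));
      ruy::profiler::detail::CopyToBuffer(stack, buf.data());
      // copy->labels must already point at storage large enough for stack.size.
      ruy::profiler::detail::ReadFromBuffer(buf.data(), copy);
    }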

+ 0 - 0
tflite/SConscript → tensorflow/SConscript


+ 2 - 2
tflite/lite/tensorflow_version.h → tensorflow/core/public/version.h

@@ -21,7 +21,7 @@ limitations under the License.
 // Also update tensorflow/tensorflow.bzl and
 // tensorflow/tools/pip_package/setup.py
 #define TF_MAJOR_VERSION 2
-#define TF_MINOR_VERSION 2
+#define TF_MINOR_VERSION 4
 #define TF_PATCH_VERSION 0
 
 // TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1",
@@ -108,7 +108,7 @@ limitations under the License.
 
 #define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0
 #define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0
-#define TF_GRAPH_DEF_VERSION 427  // Updated: 2020/6/9
+#define TF_GRAPH_DEF_VERSION 485  // Updated: 2020/8/6
 
 // Checkpoint compatibility versions (the versions field in SavedSliceMeta).
 //

+ 0 - 0
tflite/kernels/SConscript → tensorflow/lite/SConscript


+ 29 - 0
tensorflow/lite/c/SConscript

@@ -0,0 +1,29 @@
+from building import *
+import os
+
+cwd     = GetCurrentDir()
+src     = Glob('*.c') + Glob('*.cc')
+
+#.
+root =  str(Dir('#'))
+packages = os.path.join(root, 'packages')
+file_list = os.listdir(packages)
+for f in file_list:
+    if(f.split('-')[0] == 'TensorflowLiteMicro'):
+        tflm_pkg = os.path.join(packages, f)
+        break
+#./third_party/flatbuffers/include
+flatbuffer = os.path.join(tflm_pkg, "third_party/flatbuffers/include")
+#./third_party/gemmlowp
+gemmlowp = os.path.join(tflm_pkg, "third_party/gemmlowp")
+#./third_party/kissfft
+kissfft = os.path.join(tflm_pkg, "third_party/kissfft")
+#./third_party/ruy
+ruy = os.path.join(tflm_pkg, "third_party/ruy")
+
+
+CPPPATH = [tflm_pkg, flatbuffer, gemmlowp, kissfft, ruy]
+
+group = DefineGroup('lite', src, depend = ['PKG_USING_TENSORFLOWLITEMICRO'], CPPPATH = CPPPATH)
+
+Return('group')

+ 8 - 3
tflite/c/builtin_op_data.h → tensorflow/lite/c/builtin_op_data.h

@@ -17,7 +17,7 @@ limitations under the License.
 
 #include <stdint.h>
 
-#include "common.h"
+#include "tensorflow/lite/c/common.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -67,8 +67,9 @@ typedef struct {
 typedef enum {
   kTfLiteActNone = 0,
   kTfLiteActRelu,
-  kTfLiteActRelu1,  // min(max(-1, x), 1)
-  kTfLiteActRelu6,  // min(max(0, x), 6)
+  kTfLiteActReluN1To1,                    // min(max(-1, x), 1)
+  kTfLiteActRelu1 = kTfLiteActReluN1To1,  // kTfLiteActRelu1 will be deprecated.
+  kTfLiteActRelu6,                        // min(max(0, x), 6)
   kTfLiteActTanh,
   kTfLiteActSignBit,
   kTfLiteActSigmoid,
@@ -198,6 +199,8 @@ typedef struct {
 
 typedef struct {
   TfLiteFusedActivation activation;
+  // Parameter added in version 4.
+  bool pot_scale_int16;
 } TfLiteAddParams;
 
 typedef struct {
@@ -219,6 +222,8 @@ typedef struct {
 
 typedef struct {
   TfLiteFusedActivation activation;
+  // Parameter added in version 5.
+  bool pot_scale_int16;
 } TfLiteSubParams;
 
 typedef struct {

+ 3 - 1
tflite/c/common.c → tensorflow/lite/c/common.c

@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
-#include "common.h"
+#include "tensorflow/lite/c/common.h"
 #ifndef TF_LITE_STATIC_MEMORY
 #include <stdlib.h>
 #include <string.h>
@@ -207,6 +207,8 @@ const char* TfLiteTypeGetName(TfLiteType type) {
       return "BOOL";
     case kTfLiteComplex64:
       return "COMPLEX64";
+    case kTfLiteComplex128:
+      return "COMPLEX128";
     case kTfLiteString:
       return "STRING";
     case kTfLiteFloat16:

+ 185 - 30
tflite/c/common.h → tensorflow/lite/c/common.h

@@ -205,6 +205,7 @@ void TfLiteFloatArrayFree(TfLiteFloatArray* a);
 // the current function, while also reporting the location of the error.
 // `a` and `b` may be evaluated more than once, so no side effects or
 // extremely expensive computations should be done.
+// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes.
 #define TF_LITE_ENSURE_EQ(context, a, b)                                   \
   do {                                                                     \
     if ((a) != (b)) {                                                      \
@@ -232,11 +233,32 @@ void TfLiteFloatArrayFree(TfLiteFloatArray* a);
     }                                      \
   } while (0)
 
+// Define TFL_CAPI_EXPORT macro to export a function properly with a shared
+// library.
+#ifdef SWIG
+#define TFL_CAPI_EXPORT
+#else
+#if defined(_WIN32)
+#ifdef TFL_COMPILE_LIBRARY
+#define TFL_CAPI_EXPORT __declspec(dllexport)
+#else
+#define TFL_CAPI_EXPORT __declspec(dllimport)
+#endif  // TFL_COMPILE_LIBRARY
+#else
+#define TFL_CAPI_EXPORT __attribute__((visibility("default")))
+#endif  // _WIN32
+#endif  // SWIG
+
 // Single-precision complex data type compatible with the C99 definition.
 typedef struct TfLiteComplex64 {
   float re, im;  // real and imaginary parts, respectively.
 } TfLiteComplex64;
 
+// Double-precision complex data type compatible with the C99 definition.
+typedef struct TfLiteComplex128 {
+  double re, im;  // real and imaginary parts, respectively.
+} TfLiteComplex128;
+
 // Half precision data type compatible with the C99 definition.
 typedef struct TfLiteFloat16 {
   uint16_t data;
@@ -256,6 +278,7 @@ typedef enum {
   kTfLiteInt8 = 9,
   kTfLiteFloat16 = 10,
   kTfLiteFloat64 = 11,
+  kTfLiteComplex128 = 12,
 } TfLiteType;
 
 // Return the name of a given type, for error reporting purposes.
@@ -312,12 +335,14 @@ typedef union TfLitePtrUnion {
   int64_t* i64;
   float* f;
   TfLiteFloat16* f16;
+  double* f64;
   char* raw;
   const char* raw_const;
   uint8_t* uint8;
   bool* b;
   int16_t* i16;
   TfLiteComplex64* c64;
+  TfLiteComplex128* c128;
   int8_t* int8;
   /* Only use this member. */
   void* data;
@@ -374,6 +399,7 @@ typedef struct TfLiteSparsity {
 
 // A tensor in the interpreter system which is a wrapper around a buffer of
 // data including a dimensionality (or NULL if not currently defined).
+#ifndef TF_LITE_STATIC_MEMORY
 typedef struct TfLiteTensor {
   // The data type specification for data stored in `data`. This affects
   // what member of `data` union should be used.
@@ -439,31 +465,6 @@ typedef struct TfLiteTensor {
   const TfLiteIntArray* dims_signature;
 } TfLiteTensor;
 
-#ifndef TF_LITE_STATIC_MEMORY
-// Free data memory of tensor `t`.
-void TfLiteTensorDataFree(TfLiteTensor* t);
-
-// Free quantization data.
-void TfLiteQuantizationFree(TfLiteQuantization* quantization);
-
-// Free sparsity parameters.
-void TfLiteSparsityFree(TfLiteSparsity* sparsity);
-
-// Free memory of tensor `t`.
-void TfLiteTensorFree(TfLiteTensor* t);
-
-// Set all of a tensor's fields (and free any previously allocated data).
-void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
-                       TfLiteQuantizationParams quantization, char* buffer,
-                       size_t size, TfLiteAllocationType allocation_type,
-                       const void* allocation, bool is_variable,
-                       TfLiteTensor* tensor);
-
-// Resize the allocated data of a (dynamic) tensor. Tensors with allocation
-// types other than kTfLiteDynamic will be ignored.
-void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
-#endif  // TF_LITE_STATIC_MEMORY
-
 // A structure representing an instance of a node.
 // This structure only exhibits the inputs, outputs and user defined data, not
 // other features like the type.
@@ -500,6 +501,130 @@ typedef struct TfLiteNode {
   // WARNING: This is an experimental interface that is subject to change.
   struct TfLiteDelegate* delegate;
 } TfLiteNode;
+#else  // defined(TF_LITE_STATIC_MEMORY)?
+// NOTE: This flag is opt-in only at compile time.
+//
+// Specific reduced TfLiteTensor struct for TF Micro runtime. This struct
+// contains only the minimum fields required to initialize and prepare a micro
+// inference graph. The fields in this struct have been ordered from
+// largest-to-smallest for optimal struct sizeof.
+//
+// This struct does not use:
+// - allocation
+// - buffer_handle
+// - data_is_stale
+// - delegate
+// - dims_signature
+// - name
+// - sparsity
+typedef struct TfLiteTensor {
+  // TODO(b/155784997): Consider consolidating these quantization fields:
+  // Quantization information. Replaces params field above.
+  TfLiteQuantization quantization;
+
+  // Quantization information.
+  TfLiteQuantizationParams params;
+
+  // A union of data pointers. The appropriate type should be used for a typed
+  // tensor based on `type`.
+  TfLitePtrUnion data;
+
+  // A pointer to a structure representing the dimensionality interpretation
+  // that the buffer should have. NOTE: the product of elements of `dims`
+  // and the element datatype size should be equal to `bytes` below.
+  TfLiteIntArray* dims;
+
+  // The number of bytes required to store the data of this Tensor. I.e.
+  // (bytes of each element) * dims[0] * ... * dims[n-1].  For example, if
+  // type is kTfLiteFloat32 and dims = {3, 2} then
+  // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
+  size_t bytes;
+
+  // The data type specification for data stored in `data`. This affects
+  // what member of `data` union should be used.
+  TfLiteType type;
+
+  // How memory is mapped
+  //  kTfLiteMmapRo: Memory mapped read only.
+  //  i.e. weights
+  //  kTfLiteArenaRw: Arena allocated read write memory
+  //  (i.e. temporaries, outputs).
+  TfLiteAllocationType allocation_type;
+
+  // True if the tensor is a variable.
+  bool is_variable;
+} TfLiteTensor;
+
+// Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains
+// only the minimum fields required to represent a node.
+//
+// This struct does not use:
+// - delegate
+// - intermediates
+// - temporaries
+typedef struct TfLiteNode {
+  // Inputs to this node expressed as indices into the simulator's tensors.
+  TfLiteIntArray* inputs;
+
+  // Outputs to this node expressed as indices into the simulator's tensors.
+  TfLiteIntArray* outputs;
+
+  // Opaque data provided by the node implementer through `Registration.init`.
+  void* user_data;
+
+  // Opaque data provided to the node if the node is a builtin. This is usually
+  // a structure defined in builtin_op_data.h
+  void* builtin_data;
+
+  // Custom initial data. This is the opaque data provided in the flatbuffer.
+  // WARNING: This is an experimental interface that is subject to change.
+  const void* custom_initial_data;
+  int custom_initial_data_size;
+} TfLiteNode;
+#endif  // TF_LITE_STATIC_MEMORY
+
+// Light-weight tensor struct for TF Micro runtime. Provides the minimal amount
+// of information required for a kernel to run during TfLiteRegistration::Eval.
+// TODO(b/160955687): Move this field into TF_LITE_STATIC_MEMORY when TFLM
+// builds with this flag by default internally.
+typedef struct TfLiteEvalTensor {
+  // A union of data pointers. The appropriate type should be used for a typed
+  // tensor based on `type`.
+  TfLitePtrUnion data;
+
+  // A pointer to a structure representing the dimensionality interpretation
+  // that the buffer should have.
+  TfLiteIntArray* dims;
+
+  // The data type specification for data stored in `data`. This affects
+  // what member of `data` union should be used.
+  TfLiteType type;
+} TfLiteEvalTensor;
+
+#ifndef TF_LITE_STATIC_MEMORY
+// Free data memory of tensor `t`.
+void TfLiteTensorDataFree(TfLiteTensor* t);
+
+// Free quantization data.
+void TfLiteQuantizationFree(TfLiteQuantization* quantization);
+
+// Free sparsity parameters.
+void TfLiteSparsityFree(TfLiteSparsity* sparsity);
+
+// Free memory of tensor `t`.
+void TfLiteTensorFree(TfLiteTensor* t);
+
+// Set all of a tensor's fields (and free any previously allocated data).
+void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
+                       TfLiteQuantizationParams quantization, char* buffer,
+                       size_t size, TfLiteAllocationType allocation_type,
+                       const void* allocation, bool is_variable,
+                       TfLiteTensor* tensor);
+
+// Resize the allocated data of a (dynamic) tensor. Tensors with allocation
+// types other than kTfLiteDynamic will be ignored.
+void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
+#endif  // TF_LITE_STATIC_MEMORY
 
 // WARNING: This is an experimental interface that is subject to change.
 //
@@ -591,12 +716,11 @@ typedef struct TfLiteContext {
   void* profiler;
 
   // Allocate persistent buffer which has the same life time as the interpreter.
+  // Returns nullptr on failure.
   // The memory is allocated from heap for TFL, and from tail in TFLM.
-  // If *ptr is not nullptr, the pointer will be reallocated.
-  // This method is only available in Prepare stage.
+  // This method is only available in Init or Prepare stage.
   // WARNING: This is an experimental interface that is subject to change.
-  TfLiteStatus (*AllocatePersistentBuffer)(struct TfLiteContext* ctx,
-                                           size_t bytes, void** ptr);
+  void* (*AllocatePersistentBuffer)(struct TfLiteContext* ctx, size_t bytes);
 
   // Allocate a buffer which will be deallocated right after invoke phase.
   // The memory is allocated from heap in TFL, and from volatile arena in TFLM.
@@ -651,6 +775,18 @@ typedef struct TfLiteContext {
   TfLiteStatus (*PreviewDelegatePartitioning)(
       struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
       TfLiteDelegateParams** partition_params_array, int* num_partitions);
+
+  // Returns a TfLiteTensor struct for a given index.
+  // WARNING: This is an experimental interface that is subject to change.
+  // WARNING: This method may not be available on all platforms.
+  TfLiteTensor* (*GetTensor)(const struct TfLiteContext* context,
+                             int tensor_idx);
+
+  // Returns a TfLiteEvalTensor struct for a given index.
+  // WARNING: This is an experimental interface that is subject to change.
+  // WARNING: This method may not be available on all platforms.
+  TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context,
+                                     int tensor_idx);
 } TfLiteContext;
 
 typedef struct TfLiteRegistration {
@@ -725,7 +861,26 @@ typedef enum TfLiteDelegateFlags {
   //
  // If the delegate isn't capable of handling dynamic tensors, this flag needs
  // to be set to false.
-  kTfLiteDelegateFlagsAllowDynamicTensors = 1
+  kTfLiteDelegateFlagsAllowDynamicTensors = 1,
+
+  // This flag can be used by delegates (that allow dynamic tensors) to ensure
+  // applicable tensor shapes are automatically propagated in the case of tensor
+  // resizing.
+  // This means that non-dynamic (allocation_type != kTfLiteDynamic) I/O tensors
+  // of a delegate kernel will have correct shapes before its Prepare() method
+  // is called. The runtime leverages TFLite builtin ops in the original
+  // execution plan to propagate shapes.
+  //
+  // A few points to note:
+  // 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is
+  // false, this one is redundant since the delegate kernels are re-initialized
+  // every time tensors are resized.
+  // 2. Enabling this flag adds some overhead to AllocateTensors(), since extra
+  // work is required to prepare the original execution plan.
+  // 3. This flag requires that the original execution plan only have ops with
+  // valid registrations (and not 'dummy' custom ops like with Flex).
+  // WARNING: This feature is experimental and subject to change.
+  kTfLiteDelegateFlagsRequirePropagatedShapes = 2
 } TfLiteDelegateFlags;
 
 // WARNING: This is an experimental interface that is subject to change.
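The AllocatePersistentBuffer change (status-plus-out-parameter to a plain void* return) is what kernel Init functions see; a sketch with a hypothetical OpData struct:

    // Hypothetical per-op state; real kernels define their own.
    struct OpData {
      int32_t output_multiplier;
      int output_shift;
    };

    void *Init(TfLiteContext *context, const char *buffer, size_t length) {
      // New API: returns the buffer directly, or nullptr on failure.
      return context->AllocatePersistentBuffer(context, sizeof(OpData));
    }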

+ 0 - 0
tflite/micro/SConscript → tensorflow/lite/core/SConscript


+ 29 - 0
tensorflow/lite/core/api/SConscript

@@ -0,0 +1,29 @@
+from building import *
+import os
+
+cwd     = GetCurrentDir()
+src     = Glob('*.c') + Glob('*.cc')
+
+#.
+root =  str(Dir('#'))
+packages = os.path.join(root, 'packages')
+file_list = os.listdir(packages)
+for f in file_list:
+    if(f.split('-')[0] == 'TensorflowLiteMicro'):
+        tflm_pkg = os.path.join(packages, f)
+        break
+#./third_party/flatbuffers/include
+flatbuffer = os.path.join(tflm_pkg, "third_party/flatbuffers/include")
+#./third_party/gemmlowp
+gemmlowp = os.path.join(tflm_pkg, "third_party/gemmlowp")
+#./third_party/kissfft
+kissfft = os.path.join(tflm_pkg, "third_party/kissfft")
+#./third_party/ruy
+ruy = os.path.join(tflm_pkg, "third_party/ruy")
+
+
+CPPPATH = [tflm_pkg, flatbuffer, gemmlowp, kissfft, ruy]
+
+group = DefineGroup('lite', src, depend = ['PKG_USING_TENSORFLOWLITEMICRO'], CPPPATH = CPPPATH)
+
+Return('group')

+ 1 - 1
tflite/core/error_reporter.cc → tensorflow/lite/core/api/error_reporter.cc

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "error_reporter.h"
+#include "tensorflow/lite/core/api/error_reporter.h"
 #include <cstdarg>
 
 namespace tflite {

+ 0 - 0
tflite/core/error_reporter.h → tensorflow/lite/core/api/error_reporter.h


+ 1115 - 471
tflite/core/flatbuffer_conversions.cc → tensorflow/lite/core/api/flatbuffer_conversions.cc

@@ -13,18 +13,18 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
-#include "flatbuffer_conversions.h"
+#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
 
 #include <cstddef>
 #include <cstdint>
 #include <memory>
 
 #include "flatbuffers/flatbuffers.h"  // from @flatbuffers
-#include "tflite/c/builtin_op_data.h"
-#include "tflite/c/common.h"
-#include "error_reporter.h"
-#include "compatibility.h"
-#include "tflite/schema/schema_generated.h"
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/core/api/error_reporter.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/schema/schema_generated.h"
 
 namespace tflite {
 
@@ -109,7 +109,7 @@ TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
     case ActivationFunctionType_RELU:
       return kTfLiteActRelu;
     case ActivationFunctionType_RELU_N1_TO_1:
-      return kTfLiteActRelu1;
+      return kTfLiteActReluN1To1;
     case ActivationFunctionType_RELU6:
       return kTfLiteActRelu6;
     case ActivationFunctionType_TANH:
@@ -131,339 +131,263 @@ TfLitePadding ConvertPadding(Padding padding) {
   return kTfLitePaddingUnknown;
 }
 
-}  // namespace
+#ifndef TF_LITE_STATIC_MEMORY
+TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
+                               ErrorReporter* error_reporter,
+                               BuiltinDataAllocator* allocator,
+                               void** builtin_data) {
+  auto parseLSHProjectionType = [](LSHProjectionType type) {
+    switch (type) {
+      case LSHProjectionType_SPARSE:
+        return kTfLiteLshProjectionSparse;
+      case LSHProjectionType_DENSE:
+        return kTfLiteLshProjectionDense;
+      default:
+        return kTfLiteLshProjectionUnknown;
+    }
+  };
+  auto parseCombinerType = [](CombinerType type) {
+    switch (type) {
+      case CombinerType_MEAN:
+        return kTfLiteCombinerTypeMean;
+      case CombinerType_SQRTN:
+        return kTfLiteCombinerTypeSqrtn;
+      case CombinerType_SUM:
+      default:
+        return kTfLiteCombinerTypeSum;
+    }
+  };
 
-TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
-                               ErrorReporter* error_reporter) {
-  switch (tensor_type) {
-    case TensorType_FLOAT16:
-      *type = kTfLiteFloat16;
-      return kTfLiteOk;
-    case TensorType_FLOAT32:
-      *type = kTfLiteFloat32;
-      return kTfLiteOk;
-    case TensorType_FLOAT64:
-      *type = kTfLiteFloat64;
-      return kTfLiteOk;
-    case TensorType_INT16:
-      *type = kTfLiteInt16;
-      return kTfLiteOk;
-    case TensorType_INT32:
-      *type = kTfLiteInt32;
-      return kTfLiteOk;
-    case TensorType_UINT8:
-      *type = kTfLiteUInt8;
-      return kTfLiteOk;
-    case TensorType_INT8:
-      *type = kTfLiteInt8;
-      return kTfLiteOk;
-    case TensorType_INT64:
-      *type = kTfLiteInt64;
-      return kTfLiteOk;
-    case TensorType_STRING:
-      *type = kTfLiteString;
-      return kTfLiteOk;
-    case TensorType_BOOL:
-      *type = kTfLiteBool;
-      return kTfLiteOk;
-    case TensorType_COMPLEX64:
-      *type = kTfLiteComplex64;
-      return kTfLiteOk;
-    default:
-      *type = kTfLiteNoType;
-      TF_LITE_REPORT_ERROR(error_reporter,
-                           "Unsupported data type %d in tensor\n", tensor_type);
-      return kTfLiteError;
-  }
-}
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  *builtin_data = nullptr;
+  switch (op_type) {
+    case BuiltinOperator_ABS: {
+      return ParseAbs(op, error_reporter, allocator, builtin_data);
+    }
 
-TfLiteStatus ParseConv2D(const Operator* op, BuiltinOperator,
-                         ErrorReporter* error_reporter,
-                         BuiltinDataAllocator* allocator, void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+    case BuiltinOperator_ADD: {
+      return ParseAdd(op, error_reporter, allocator, builtin_data);
+    }
 
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteConvParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteConvParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
+    case BuiltinOperator_ARG_MAX: {
+      return ParseArgMax(op, error_reporter, allocator, builtin_data);
+    }
 
-  const Conv2DOptions* schema_params = op->builtin_options_as_Conv2DOptions();
+    case BuiltinOperator_ARG_MIN: {
+      return ParseArgMin(op, error_reporter, allocator, builtin_data);
+    }
 
-  if (schema_params != nullptr) {
-    params->padding = ConvertPadding(schema_params->padding());
-    params->stride_width = schema_params->stride_w();
-    params->stride_height = schema_params->stride_h();
-    params->activation =
-        ConvertActivation(schema_params->fused_activation_function());
+    case BuiltinOperator_AVERAGE_POOL_2D: {
+      return ParsePool(op, error_reporter, allocator, builtin_data);
+    }
 
-    params->dilation_width_factor = schema_params->dilation_w_factor();
-    params->dilation_height_factor = schema_params->dilation_h_factor();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
+    case BuiltinOperator_CEIL: {
+      return ParseCeil(op, error_reporter, allocator, builtin_data);
+    }
 
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
+    case BuiltinOperator_CONCATENATION: {
+      return ParseConcatenation(op, error_reporter, allocator, builtin_data);
+    }
 
-TfLiteStatus ParseDepthwiseConv2D(const Operator* op, BuiltinOperator,
-                                  ErrorReporter* error_reporter,
-                                  BuiltinDataAllocator* allocator,
-                                  void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+    case BuiltinOperator_CONV_2D: {
+      return ParseConv2D(op, error_reporter, allocator, builtin_data);
+    }
 
-  SafeBuiltinDataAllocator safe_allocator(allocator);
+    case BuiltinOperator_DEPTHWISE_CONV_2D: {
+      return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data);
+    }
 
-  std::unique_ptr<TfLiteDepthwiseConvParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteDepthwiseConvParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
+    case BuiltinOperator_DEQUANTIZE: {
+      return ParseDequantize(op, error_reporter, allocator, builtin_data);
+    }
 
-  const DepthwiseConv2DOptions* schema_params =
-      op->builtin_options_as_DepthwiseConv2DOptions();
+    case BuiltinOperator_FLOOR: {
+      return ParseFloor(op, error_reporter, allocator, builtin_data);
+    }
 
-  if (schema_params != nullptr) {
-    params->padding = ConvertPadding(schema_params->padding());
-    params->stride_width = schema_params->stride_w();
-    params->stride_height = schema_params->stride_h();
-    params->depth_multiplier = schema_params->depth_multiplier();
-    params->activation =
-        ConvertActivation(schema_params->fused_activation_function());
+    case BuiltinOperator_FULLY_CONNECTED: {
+      return ParseFullyConnected(op, error_reporter, allocator, builtin_data);
+    }
 
-    params->dilation_width_factor = schema_params->dilation_w_factor();
-    params->dilation_height_factor = schema_params->dilation_h_factor();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
-  }
+    case BuiltinOperator_GREATER: {
+      return ParseGreater(op, error_reporter, allocator, builtin_data);
+    }
 
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
+    case BuiltinOperator_GREATER_EQUAL: {
+      return ParseGreaterEqual(op, error_reporter, allocator, builtin_data);
+    }
 
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseDequantize(const Operator*, BuiltinOperator, ErrorReporter*,
-                             BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
+    case BuiltinOperator_HARD_SWISH: {
+      return ParseHardSwish(op, error_reporter, allocator, builtin_data);
+    }
 
-TfLiteStatus ParseFullyConnected(const Operator* op, BuiltinOperator,
-                                 ErrorReporter* error_reporter,
-                                 BuiltinDataAllocator* allocator,
-                                 void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+    case BuiltinOperator_L2_NORMALIZATION: {
+      return ParseL2Normalization(op, error_reporter, allocator, builtin_data);
+    }
 
-  SafeBuiltinDataAllocator safe_allocator(allocator);
+    case BuiltinOperator_L2_POOL_2D: {
+      return ParsePool(op, error_reporter, allocator, builtin_data);
+    }
 
-  std::unique_ptr<TfLiteFullyConnectedParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
+    case BuiltinOperator_LESS: {
+      return ParseLess(op, error_reporter, allocator, builtin_data);
+    }
 
-  const FullyConnectedOptions* schema_params =
-      op->builtin_options_as_FullyConnectedOptions();
+    case BuiltinOperator_LESS_EQUAL: {
+      return ParseLessEqual(op, error_reporter, allocator, builtin_data);
+    }
 
-  if (schema_params != nullptr) {
-    params->activation =
-        ConvertActivation(schema_params->fused_activation_function());
-    params->keep_num_dims = schema_params->keep_num_dims();
-    params->asymmetric_quantize_inputs =
-        schema_params->asymmetric_quantize_inputs();
+    case BuiltinOperator_LOG: {
+      return ParseLog(op, error_reporter, allocator, builtin_data);
+    }
 
-    switch (schema_params->weights_format()) {
-      case FullyConnectedOptionsWeightsFormat_DEFAULT:
-        params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
-        break;
-      case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
-        params->weights_format =
-            kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
-        break;
-      default:
-        TF_LITE_REPORT_ERROR(error_reporter,
-                             "Unhandled fully-connected weights format.");
-        return kTfLiteError;
+    case BuiltinOperator_LOGICAL_AND: {
+      return ParseLogicalAnd(op, error_reporter, allocator, builtin_data);
     }
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
-  }
 
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
+    case BuiltinOperator_LOGICAL_NOT: {
+      return ParseLogicalNot(op, error_reporter, allocator, builtin_data);
+    }
 
-TfLiteStatus ParseReshape(const Operator* op, BuiltinOperator,
-                          ErrorReporter* error_reporter,
-                          BuiltinDataAllocator* allocator,
-                          void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+    case BuiltinOperator_LOGICAL_OR: {
+      return ParseLogicalOr(op, error_reporter, allocator, builtin_data);
+    }
 
-  SafeBuiltinDataAllocator safe_allocator(allocator);
+    case BuiltinOperator_LOGISTIC: {
+      return ParseLogistic(op, error_reporter, allocator, builtin_data);
+    }
 
-  std::unique_ptr<TfLiteReshapeParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteReshapeParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
+    case BuiltinOperator_MAXIMUM: {
+      return ParseMaximum(op, error_reporter, allocator, builtin_data);
+    }
 
-  const ReshapeOptions* schema_params = op->builtin_options_as_ReshapeOptions();
+    case BuiltinOperator_MAX_POOL_2D: {
+      return ParsePool(op, error_reporter, allocator, builtin_data);
+    }
 
-  if (schema_params != nullptr) {
-    const flatbuffers::Vector<int32_t>* new_shape = schema_params->new_shape();
-    // TODO(b/147203660): We need to figure out when dynamic reshape
-    // (new_shape is a tensor) happens, why the option is not a nullptr.
-    // But nonethless, we should only copy when new_shape is not a nullptr.
-    if (new_shape != nullptr) {
-      TF_LITE_ENSURE_STATUS(
-          FlatBufferIntVectorToArray(sizeof(params->shape), new_shape,
-                                     params->shape, error_reporter, "reshape"));
-      params->num_dimensions = new_shape->size();
-    } else {
-      // TODO(b/157480169) TODO(b/147203660): We should either return
-      // kTfLiteError or fill in some reasonable defaults in the params struct.
-      // We are not doing so until we better undertand the ramifications of
-      // changing the legacy behavior.
+    case BuiltinOperator_MEAN: {
+      return ParseReducer(op, error_reporter, allocator, builtin_data);
     }
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
-  }
 
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
+    case BuiltinOperator_MINIMUM: {
+      return ParseMinimum(op, error_reporter, allocator, builtin_data);
+    }
 
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseQuantize(const Operator*, BuiltinOperator, ErrorReporter*,
-                           BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
+    case BuiltinOperator_MUL: {
+      return ParseMul(op, error_reporter, allocator, builtin_data);
+    }
 
-TfLiteStatus ParseSoftmax(const Operator* op, BuiltinOperator,
-                          ErrorReporter* error_reporter,
-                          BuiltinDataAllocator* allocator,
-                          void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+    case BuiltinOperator_NEG: {
+      return ParseNeg(op, error_reporter, allocator, builtin_data);
+    }
 
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteSoftmaxParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteSoftmaxParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
+    case BuiltinOperator_NOT_EQUAL: {
+      return ParseNotEqual(op, error_reporter, allocator, builtin_data);
+    }
 
-  const SoftmaxOptions* schema_params = op->builtin_options_as_SoftmaxOptions();
+    case BuiltinOperator_PACK: {
+      return ParsePack(op, error_reporter, allocator, builtin_data);
+    }
 
-  if (schema_params != nullptr) {
-    params->beta = schema_params->beta();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
-  }
+    case BuiltinOperator_PAD: {
+      return ParsePad(op, error_reporter, allocator, builtin_data);
+    }
 
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
+    case BuiltinOperator_PADV2: {
+      return ParsePadV2(op, error_reporter, allocator, builtin_data);
+    }
 
-TfLiteStatus ParseSvdf(const Operator* op, BuiltinOperator,
-                       ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+    case BuiltinOperator_PRELU: {
+      return ParsePrelu(op, error_reporter, allocator, builtin_data);
+    }
 
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteSVDFParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteSVDFParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
+    case BuiltinOperator_QUANTIZE: {
+      return ParseQuantize(op, error_reporter, allocator, builtin_data);
+    }
 
-  const SVDFOptions* schema_params = op->builtin_options_as_SVDFOptions();
-  if (schema_params != nullptr) {
-    params->rank = schema_params->rank();
-    params->activation =
-        ConvertActivation(schema_params->fused_activation_function());
-    params->asymmetric_quantize_inputs =
-        schema_params->asymmetric_quantize_inputs();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better undertand the ramifications of changing the legacy behavior.
-  }
+    case BuiltinOperator_REDUCE_ANY: {
+      return ParseReducer(op, error_reporter, allocator, builtin_data);
+    }
 
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
+    case BuiltinOperator_REDUCE_MAX: {
+      return ParseReducer(op, error_reporter, allocator, builtin_data);
+    }
 
-TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
-                         ErrorReporter* error_reporter,
-                         BuiltinDataAllocator* allocator, void** builtin_data) {
-  auto parseLSHProjectionType = [](LSHProjectionType type) {
-    switch (type) {
-      case LSHProjectionType_SPARSE:
-        return kTfLiteLshProjectionSparse;
-      case LSHProjectionType_DENSE:
-        return kTfLiteLshProjectionDense;
-      default:
-        return kTfLiteLshProjectionUnknown;
+    case BuiltinOperator_REDUCE_MIN: {
+      return ParseReducer(op, error_reporter, allocator, builtin_data);
     }
-  };
-  auto parseCombinerType = [](CombinerType type) {
-    switch (type) {
-      case CombinerType_MEAN:
-        return kTfLiteCombinerTypeMean;
-      case CombinerType_SQRTN:
-        return kTfLiteCombinerTypeSqrtn;
-      case CombinerType_SUM:
-      default:
-        return kTfLiteCombinerTypeSum;
+
+    case BuiltinOperator_REDUCE_PROD: {
+      return ParseReducer(op, error_reporter, allocator, builtin_data);
     }
-  };
 
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  *builtin_data = nullptr;
-  switch (op_type) {
-    case BuiltinOperator_CONV_2D: {
-      return ParseConv2D(op, op_type, error_reporter, allocator, builtin_data);
+    case BuiltinOperator_RELU: {
+      return ParseRelu(op, error_reporter, allocator, builtin_data);
     }
 
-    case BuiltinOperator_DEPTHWISE_CONV_2D: {
-      return ParseDepthwiseConv2D(op, op_type, error_reporter, allocator,
-                                  builtin_data);
+    case BuiltinOperator_RELU6: {
+      return ParseRelu6(op, error_reporter, allocator, builtin_data);
     }
 
-    case BuiltinOperator_DEQUANTIZE: {
-      return ParseDequantize(op, op_type, error_reporter, allocator,
-                             builtin_data);
+    case BuiltinOperator_RESHAPE: {
+      return ParseReshape(op, error_reporter, allocator, builtin_data);
     }
 
-    case BuiltinOperator_FULLY_CONNECTED: {
-      return ParseFullyConnected(op, op_type, error_reporter, allocator,
-                                 builtin_data);
+    case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
+      return ParseResizeNearestNeighbor(op, error_reporter, allocator,
+                                        builtin_data);
     }
 
-    case BuiltinOperator_QUANTIZE: {
-      return ParseQuantize(op, op_type, error_reporter, allocator,
-                           builtin_data);
+    case BuiltinOperator_ROUND: {
+      return ParseRound(op, error_reporter, allocator, builtin_data);
     }
 
-    case BuiltinOperator_RESHAPE: {
-      return ParseReshape(op, op_type, error_reporter, allocator, builtin_data);
+    case BuiltinOperator_RSQRT: {
+      return ParseRsqrt(op, error_reporter, allocator, builtin_data);
+    }
+
+    case BuiltinOperator_SIN: {
+      return ParseSin(op, error_reporter, allocator, builtin_data);
     }
 
     case BuiltinOperator_SOFTMAX: {
-      return ParseSoftmax(op, op_type, error_reporter, allocator, builtin_data);
+      return ParseSoftmax(op, error_reporter, allocator, builtin_data);
+    }
+
+    case BuiltinOperator_SPLIT: {
+      return ParseSplit(op, error_reporter, allocator, builtin_data);
+    }
+
+    case BuiltinOperator_SQRT: {
+      return ParseSqrt(op, error_reporter, allocator, builtin_data);
+    }
+
+    case BuiltinOperator_SQUARE: {
+      return ParseSquare(op, error_reporter, allocator, builtin_data);
+    }
+
+    case BuiltinOperator_STRIDED_SLICE: {
+      return ParseStridedSlice(op, error_reporter, allocator, builtin_data);
+    }
+
+    case BuiltinOperator_SUB: {
+      return ParseSub(op, error_reporter, allocator, builtin_data);
+    }
+
+    case BuiltinOperator_SUM: {
+      return ParseReducer(op, error_reporter, allocator, builtin_data);
     }
 
     case BuiltinOperator_SVDF: {
-      return ParseSvdf(op, op_type, error_reporter, allocator, builtin_data);
+      return ParseSvdf(op, error_reporter, allocator, builtin_data);
+    }
+
+    case BuiltinOperator_TANH: {
+      return ParseTanh(op, error_reporter, allocator, builtin_data);
+    }
+
+    case BuiltinOperator_UNPACK: {
+      return ParseUnpack(op, error_reporter, allocator, builtin_data);
     }
 
     case BuiltinOperator_CAST: {
@@ -490,23 +414,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
       *builtin_data = params.release();
       return kTfLiteOk;
     }
-    case BuiltinOperator_AVERAGE_POOL_2D:
-    case BuiltinOperator_MAX_POOL_2D:
-    case BuiltinOperator_L2_POOL_2D: {
-      auto params = safe_allocator.Allocate<TfLitePoolParams>();
-      TF_LITE_ENSURE(error_reporter, params != nullptr);
-      if (const auto* pool_params = op->builtin_options_as_Pool2DOptions()) {
-        params->padding = ConvertPadding(pool_params->padding());
-        params->stride_width = pool_params->stride_w();
-        params->stride_height = pool_params->stride_h();
-        params->filter_width = pool_params->filter_width();
-        params->filter_height = pool_params->filter_height();
-        params->activation =
-            ConvertActivation(pool_params->fused_activation_function());
-      }
-      *builtin_data = params.release();
-      return kTfLiteOk;
-    }
     case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
       auto params = safe_allocator.Allocate<TfLiteSequenceRNNParams>();
       TF_LITE_ENSURE(error_reporter, params != nullptr);
@@ -564,38 +471,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
     case BuiltinOperator_HASHTABLE_LOOKUP:
       // no-op.
       return kTfLiteOk;
-    case BuiltinOperator_CONCATENATION: {
-      auto params = safe_allocator.Allocate<TfLiteConcatenationParams>();
-      TF_LITE_ENSURE(error_reporter, params != nullptr);
-      if (const auto* concatenation_params =
-              op->builtin_options_as_ConcatenationOptions()) {
-        params->activation = ConvertActivation(
-            concatenation_params->fused_activation_function());
-        params->axis = concatenation_params->axis();
-      }
-      *builtin_data = params.release();
-      return kTfLiteOk;
-    }
-    case BuiltinOperator_MUL: {
-      auto params = safe_allocator.Allocate<TfLiteMulParams>();
-      TF_LITE_ENSURE(error_reporter, params != nullptr);
-      if (const auto* schema_params = op->builtin_options_as_MulOptions()) {
-        params->activation =
-            ConvertActivation(schema_params->fused_activation_function());
-      }
-      *builtin_data = params.release();
-      return kTfLiteOk;
-    }
-    case BuiltinOperator_ADD: {
-      auto params = safe_allocator.Allocate<TfLiteAddParams>();
-      TF_LITE_ENSURE(error_reporter, params != nullptr);
-      if (const auto* schema_params = op->builtin_options_as_AddOptions()) {
-        params->activation =
-            ConvertActivation(schema_params->fused_activation_function());
-      }
-      *builtin_data = params.release();
-      return kTfLiteOk;
-    }
     case BuiltinOperator_DIV: {
       auto params = safe_allocator.Allocate<TfLiteDivParams>();
       TF_LITE_ENSURE(error_reporter, params != nullptr);
@@ -606,26 +481,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
       *builtin_data = params.release();
       return kTfLiteOk;
     }
-    case BuiltinOperator_SUB: {
-      auto params = safe_allocator.Allocate<TfLiteSubParams>();
-      TF_LITE_ENSURE(error_reporter, params != nullptr);
-      if (const auto* schema_params = op->builtin_options_as_SubOptions()) {
-        params->activation =
-            ConvertActivation(schema_params->fused_activation_function());
-      }
-      *builtin_data = params.release();
-      return kTfLiteOk;
-    }
-    case BuiltinOperator_L2_NORMALIZATION: {
-      auto params = safe_allocator.Allocate<TfLiteL2NormParams>();
-      TF_LITE_ENSURE(error_reporter, params != nullptr);
-      if (const auto* schema_params = op->builtin_options_as_L2NormOptions()) {
-        params->activation =
-            ConvertActivation(schema_params->fused_activation_function());
-      }
-      *builtin_data = params.release();
-      return kTfLiteOk;
-    }
     case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: {
       auto params = safe_allocator.Allocate<TfLiteLocalResponseNormParams>();
       TF_LITE_ENSURE(error_reporter, params != nullptr);
@@ -721,21 +576,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
       *builtin_data = params.release();
       return kTfLiteOk;
     }
-    case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
-      auto params =
-          safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();
-      TF_LITE_ENSURE(error_reporter, params != nullptr);
-      if (const auto* schema_params =
-              op->builtin_options_as_ResizeNearestNeighborOptions()) {
-        params->align_corners = schema_params->align_corners();
-        params->half_pixel_centers = schema_params->half_pixel_centers();
-      } else {
-        params->align_corners = false;
-        params->half_pixel_centers = false;
-      }
-      *builtin_data = params.release();
-      return kTfLiteOk;
-    }
     case BuiltinOperator_SKIP_GRAM: {
       auto params = safe_allocator.Allocate<TfLiteSkipGramParams>();
       TF_LITE_ENSURE(error_reporter, params != nullptr);
@@ -779,29 +619,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
       *builtin_data = params.release();
       return kTfLiteOk;
     }
-    case BuiltinOperator_MEAN:
-    case BuiltinOperator_REDUCE_MAX:
-    case BuiltinOperator_REDUCE_MIN:
-    case BuiltinOperator_REDUCE_PROD:
-    case BuiltinOperator_REDUCE_ANY:
-    case BuiltinOperator_SUM: {
-      auto params = safe_allocator.Allocate<TfLiteReducerParams>();
-      TF_LITE_ENSURE(error_reporter, params != nullptr);
-      if (const auto* schema_params = op->builtin_options_as_ReducerOptions()) {
-        params->keep_dims = schema_params->keep_dims();
-      }
-      *builtin_data = params.release();
-      return kTfLiteOk;
-    }
-    case BuiltinOperator_SPLIT: {
-      auto params = safe_allocator.Allocate<TfLiteSplitParams>();
-      TF_LITE_ENSURE(error_reporter, params != nullptr);
-      if (const auto* schema_params = op->builtin_options_as_SplitOptions()) {
-        params->num_splits = schema_params->num_splits();
-      }
-      *builtin_data = params.release();
-      return kTfLiteOk;
-    }
     case BuiltinOperator_SPLIT_V: {
       auto params = safe_allocator.Allocate<TfLiteSplitParams>();
       TF_LITE_ENSURE(error_reporter, params != nullptr);
@@ -824,42 +641,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
       *builtin_data = params.release();
       return kTfLiteOk;
     }
-    case BuiltinOperator_STRIDED_SLICE: {
-      auto params = safe_allocator.Allocate<TfLiteStridedSliceParams>();
-      TF_LITE_ENSURE(error_reporter, params != nullptr);
-      if (const auto* schema_params =
-              op->builtin_options_as_StridedSliceOptions()) {
-        params->begin_mask = schema_params->begin_mask();
-        params->end_mask = schema_params->end_mask();
-        params->ellipsis_mask = schema_params->ellipsis_mask();
-        params->new_axis_mask = schema_params->new_axis_mask();
-        params->shrink_axis_mask = schema_params->shrink_axis_mask();
-      }
-      *builtin_data = params.release();
-      return kTfLiteOk;
-    }
-    case BuiltinOperator_ARG_MAX: {
-      auto params = safe_allocator.Allocate<TfLiteArgMaxParams>();
-      TF_LITE_ENSURE(error_reporter, params != nullptr);
-      if (const auto* schema_params = op->builtin_options_as_ArgMaxOptions()) {
-        TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->output_type(),
-                                                &params->output_type,
-                                                error_reporter));
-      }
-      *builtin_data = params.release();
-      return kTfLiteOk;
-    }
-    case BuiltinOperator_ARG_MIN: {
-      auto params = safe_allocator.Allocate<TfLiteArgMinParams>();
-      TF_LITE_ENSURE(error_reporter, params != nullptr);
-      if (const auto* schema_params = op->builtin_options_as_ArgMinOptions()) {
-        TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->output_type(),
-                                                &params->output_type,
-                                                error_reporter));
-      }
-      *builtin_data = params.release();
-      return kTfLiteOk;
-    }
     case BuiltinOperator_TRANSPOSE_CONV: {
       auto params = safe_allocator.Allocate<TfLiteTransposeConvParams>();
       TF_LITE_ENSURE(error_reporter, params != nullptr);
@@ -892,16 +673,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
       *builtin_data = params.release();
       return kTfLiteOk;
     }
-    case BuiltinOperator_PACK: {
-      auto params = safe_allocator.Allocate<TfLitePackParams>();
-      TF_LITE_ENSURE(error_reporter, params != nullptr);
-      if (const auto* pack_params = op->builtin_options_as_PackOptions()) {
-        params->values_count = pack_params->values_count();
-        params->axis = pack_params->axis();
-      }
-      *builtin_data = params.release();
-      return kTfLiteOk;
-    }
     case BuiltinOperator_DELEGATE: {
       // TODO(ycling): Revisit when supporting saving delegated models.
       TF_LITE_REPORT_ERROR(error_reporter,
@@ -930,16 +701,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
       *builtin_data = params.release();
       return kTfLiteOk;
     }
-    case BuiltinOperator_UNPACK: {
-      auto params = safe_allocator.Allocate<TfLiteUnpackParams>();
-      TF_LITE_ENSURE(error_reporter, params != nullptr);
-      if (const auto* unpack_params = op->builtin_options_as_UnpackOptions()) {
-        params->num = unpack_params->num();
-        params->axis = unpack_params->axis();
-      }
-      *builtin_data = params.release();
-      return kTfLiteOk;
-    }
     case BuiltinOperator_LEAKY_RELU: {
       auto params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
       TF_LITE_ENSURE(error_reporter, params != nullptr);
@@ -1019,7 +780,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
       return kTfLiteOk;
     }
     // Below are the ops with no builtin_data structure.
-    case BuiltinOperator_ABS:
     case BuiltinOperator_BATCH_TO_SPACE_ND:
     // TODO(aselle): Implement call in BuiltinOptions, but nullptrs are
     // ok for now, since there is no call implementation either.
@@ -1032,46 +792,19 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
     case BuiltinOperator_EQUAL:
     case BuiltinOperator_EXP:
     case BuiltinOperator_EXPAND_DIMS:
-    case BuiltinOperator_CEIL:
-    case BuiltinOperator_FLOOR:
-    case BuiltinOperator_GREATER:
-    case BuiltinOperator_GREATER_EQUAL:
-    case BuiltinOperator_HARD_SWISH:
-    case BuiltinOperator_LESS:
-    case BuiltinOperator_LESS_EQUAL:
-    case BuiltinOperator_LOG:
-    case BuiltinOperator_LOGISTIC:
     case BuiltinOperator_LOG_SOFTMAX:
     case BuiltinOperator_MATRIX_DIAG:
     case BuiltinOperator_MATRIX_SET_DIAG:
-    case BuiltinOperator_MAXIMUM:
-    case BuiltinOperator_MINIMUM:
-    case BuiltinOperator_NEG:
-    case BuiltinOperator_NOT_EQUAL:
-    case BuiltinOperator_PAD:
-    case BuiltinOperator_PADV2:
-    case BuiltinOperator_PRELU:
-    case BuiltinOperator_RELU:
-    case BuiltinOperator_RELU6:
     case BuiltinOperator_RELU_N1_TO_1:
-    case BuiltinOperator_ROUND:
-    case BuiltinOperator_RSQRT:
     case BuiltinOperator_SELECT:
     case BuiltinOperator_SELECT_V2:
-    case BuiltinOperator_SIN:
     case BuiltinOperator_SLICE:
     case BuiltinOperator_SPACE_TO_BATCH_ND:
-    case BuiltinOperator_SQRT:
-    case BuiltinOperator_TANH:
     case BuiltinOperator_TILE:
     case BuiltinOperator_TOPK_V2:
     case BuiltinOperator_TRANSPOSE:
     case BuiltinOperator_POW:
-    case BuiltinOperator_LOGICAL_OR:
-    case BuiltinOperator_LOGICAL_AND:
-    case BuiltinOperator_LOGICAL_NOT:
     case BuiltinOperator_FLOOR_DIV:
-    case BuiltinOperator_SQUARE:
     case BuiltinOperator_ZEROS_LIKE:
     case BuiltinOperator_FILL:
     case BuiltinOperator_FLOOR_MOD:
@@ -1091,5 +824,916 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
   }
   return kTfLiteError;
 }  // NOLINT[readability/fn_size]
+#endif  // !defined(TF_LITE_STATIC_MEMORY)
+}  // namespace
+
+TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
+                               ErrorReporter* error_reporter) {
+  switch (tensor_type) {
+    case TensorType_FLOAT16:
+      *type = kTfLiteFloat16;
+      return kTfLiteOk;
+    case TensorType_FLOAT32:
+      *type = kTfLiteFloat32;
+      return kTfLiteOk;
+    case TensorType_FLOAT64:
+      *type = kTfLiteFloat64;
+      return kTfLiteOk;
+    case TensorType_INT16:
+      *type = kTfLiteInt16;
+      return kTfLiteOk;
+    case TensorType_INT32:
+      *type = kTfLiteInt32;
+      return kTfLiteOk;
+    case TensorType_UINT8:
+      *type = kTfLiteUInt8;
+      return kTfLiteOk;
+    case TensorType_INT8:
+      *type = kTfLiteInt8;
+      return kTfLiteOk;
+    case TensorType_INT64:
+      *type = kTfLiteInt64;
+      return kTfLiteOk;
+    case TensorType_STRING:
+      *type = kTfLiteString;
+      return kTfLiteOk;
+    case TensorType_BOOL:
+      *type = kTfLiteBool;
+      return kTfLiteOk;
+    case TensorType_COMPLEX64:
+      *type = kTfLiteComplex64;
+      return kTfLiteOk;
+    case TensorType_COMPLEX128:
+      *type = kTfLiteComplex128;
+      return kTfLiteOk;
+    default:
+      *type = kTfLiteNoType;
+      TF_LITE_REPORT_ERROR(error_reporter,
+                           "Unsupported data type %d in tensor\n", tensor_type);
+      return kTfLiteError;
+  }
+}
+
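// Illustrative sketch, not part of this diff: a minimal ConvertTensorType
// call site. Only the include below is assumed about this tree's layout.
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"

TfLiteStatus ExampleConvertTensorType(tflite::ErrorReporter* error_reporter) {
  TfLiteType type = kTfLiteNoType;
  // On success `type` becomes kTfLiteInt8; on an unknown schema value the
  // reporter logs "Unsupported data type" and kTfLiteError comes back.
  return tflite::ConvertTensorType(tflite::TensorType_INT8, &type,
                                   error_reporter);
}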
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseAbs(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                      void**) {
+  return kTfLiteOk;
+}
+
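// Illustrative sketch, an assumption rather than this commit's resolver code:
// with one parse function per op, a resolver can hold a function pointer per
// registered op, and the linker can discard every parser never referenced.
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"

using BuiltinParser = TfLiteStatus (*)(const tflite::Operator*,
                                       tflite::ErrorReporter*,
                                       tflite::BuiltinDataAllocator*, void**);

struct ExampleRegistration {
  tflite::BuiltinOperator builtin_code;
  BuiltinParser parser;
};

// Only ABS is registered here, so only ParseAbs is pulled into the binary.
constexpr ExampleRegistration kExampleOps[] = {
    {tflite::BuiltinOperator_ABS, tflite::ParseAbs},
};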
+TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
+                      BuiltinDataAllocator* allocator, void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteAddParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteAddParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const AddOptions* schema_params = op->builtin_options_as_AddOptions();
+
+  if (schema_params != nullptr) {
+    params->activation =
+        ConvertActivation(schema_params->fused_activation_function());
+    params->pot_scale_int16 = schema_params->pot_scale_int16();
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
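// Illustrative caller-side sketch, not from this commit: on success,
// *builtin_data points at a TfLiteAddParams owned by the allocator.
TfLiteStatus ExampleConsumeAdd(const tflite::Operator* op,
                               tflite::ErrorReporter* reporter,
                               tflite::BuiltinDataAllocator* allocator) {
  void* builtin_data = nullptr;
  TF_LITE_ENSURE_STATUS(
      tflite::ParseAdd(op, reporter, allocator, &builtin_data));
  const auto* params = static_cast<const TfLiteAddParams*>(builtin_data);
  (void)params->activation;  // e.g. kTfLiteActRelu, parsed from AddOptions.
  return kTfLiteOk;
}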
+TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
+                         BuiltinDataAllocator* allocator, void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteArgMaxParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteArgMaxParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const ArgMaxOptions* schema_params = op->builtin_options_as_ArgMaxOptions();
+
+  if (schema_params != nullptr) {
+    TF_LITE_ENSURE_STATUS(ConvertTensorType(
+        schema_params->output_type(), &params->output_type, error_reporter));
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
+                         BuiltinDataAllocator* allocator, void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteArgMinParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteArgMinParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const ArgMinOptions* schema_params = op->builtin_options_as_ArgMinOptions();
+
+  if (schema_params != nullptr) {
+    TF_LITE_ENSURE_STATUS(ConvertTensorType(
+        schema_params->output_type(), &params->output_type, error_reporter));
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseCeil(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                       void**) {
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseConcatenation(const Operator* op,
+                                ErrorReporter* error_reporter,
+                                BuiltinDataAllocator* allocator,
+                                void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteConcatenationParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteConcatenationParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const ConcatenationOptions* schema_params =
+      op->builtin_options_as_ConcatenationOptions();
+
+  if (schema_params != nullptr) {
+    params->activation =
+        ConvertActivation(schema_params->fused_activation_function());
+    params->axis = schema_params->axis();
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
+                         BuiltinDataAllocator* allocator, void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteConvParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteConvParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const Conv2DOptions* schema_params = op->builtin_options_as_Conv2DOptions();
+
+  if (schema_params != nullptr) {
+    params->padding = ConvertPadding(schema_params->padding());
+    params->stride_width = schema_params->stride_w();
+    params->stride_height = schema_params->stride_h();
+    params->activation =
+        ConvertActivation(schema_params->fused_activation_function());
+
+    params->dilation_width_factor = schema_params->dilation_w_factor();
+    params->dilation_height_factor = schema_params->dilation_h_factor();
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
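// Worked sketch of the standard TFLite padding arithmetic that the parsed
// stride/dilation/padding fields feed into; this helper is an assumption for
// illustration, not a function in this file.
int ExampleConvOutputDim(int in_size, int filter_size, int stride,
                         int dilation, TfLitePadding padding) {
  const int effective_filter = (filter_size - 1) * dilation + 1;
  if (padding == kTfLitePaddingSame) {
    return (in_size + stride - 1) / stride;  // ceil(in_size / stride)
  }
  // kTfLitePaddingValid: last window must fit entirely inside the input.
  return (in_size - effective_filter + stride) / stride;
}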
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                      void**) {
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
+                                  ErrorReporter* error_reporter,
+                                  BuiltinDataAllocator* allocator,
+                                  void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+
+  std::unique_ptr<TfLiteDepthwiseConvParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteDepthwiseConvParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const DepthwiseConv2DOptions* schema_params =
+      op->builtin_options_as_DepthwiseConv2DOptions();
+
+  if (schema_params != nullptr) {
+    params->padding = ConvertPadding(schema_params->padding());
+    params->stride_width = schema_params->stride_w();
+    params->stride_height = schema_params->stride_h();
+    params->depth_multiplier = schema_params->depth_multiplier();
+    params->activation =
+        ConvertActivation(schema_params->fused_activation_function());
+
+    params->dilation_width_factor = schema_params->dilation_w_factor();
+    params->dilation_height_factor = schema_params->dilation_h_factor();
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*,
+                             BuiltinDataAllocator*, void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                        void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                        void**) {
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseFullyConnected(const Operator* op,
+                                 ErrorReporter* error_reporter,
+                                 BuiltinDataAllocator* allocator,
+                                 void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+
+  std::unique_ptr<TfLiteFullyConnectedParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const FullyConnectedOptions* schema_params =
+      op->builtin_options_as_FullyConnectedOptions();
+
+  if (schema_params != nullptr) {
+    params->activation =
+        ConvertActivation(schema_params->fused_activation_function());
+    params->keep_num_dims = schema_params->keep_num_dims();
+    params->asymmetric_quantize_inputs =
+        schema_params->asymmetric_quantize_inputs();
+
+    switch (schema_params->weights_format()) {
+      case FullyConnectedOptionsWeightsFormat_DEFAULT:
+        params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
+        break;
+      case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
+        params->weights_format =
+            kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
+        break;
+      default:
+        TF_LITE_REPORT_ERROR(error_reporter,
+                             "Unhandled fully-connected weights format.");
+        return kTfLiteError;
+    }
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
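// Sketch of a kernel-side check, stated as an assumption about how a micro
// kernel might consume the parsed value: only the default weights format is
// handled there, so prepare can reject the shuffled variant early.
TfLiteStatus ExampleCheckWeightsFormat(
    const TfLiteFullyConnectedParams* params,
    tflite::ErrorReporter* reporter) {
  if (params->weights_format != kTfLiteFullyConnectedWeightsFormatDefault) {
    TF_LITE_REPORT_ERROR(reporter, "Shuffled weights are not supported.");
    return kTfLiteError;
  }
  return kTfLiteOk;
}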
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseGreater(const Operator*, ErrorReporter*,
+                          BuiltinDataAllocator*, void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseGreaterEqual(const Operator*, ErrorReporter*,
+                               BuiltinDataAllocator*, void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*,
+                            BuiltinDataAllocator*, void**) {
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseL2Normalization(const Operator* op,
+                                  ErrorReporter* error_reporter,
+                                  BuiltinDataAllocator* allocator,
+                                  void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteL2NormParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteL2NormParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const L2NormOptions* schema_params = op->builtin_options_as_L2NormOptions();
+
+  if (schema_params != nullptr) {
+    params->activation =
+        ConvertActivation(schema_params->fused_activation_function());
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLess(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                       void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLessEqual(const Operator*, ErrorReporter*,
+                            BuiltinDataAllocator*, void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLog(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                      void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLogicalAnd(const Operator*, ErrorReporter*,
+                             BuiltinDataAllocator*, void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLogicalNot(const Operator*, ErrorReporter*,
+                             BuiltinDataAllocator*, void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLogicalOr(const Operator*, ErrorReporter*,
+                            BuiltinDataAllocator*, void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*,
+                           BuiltinDataAllocator*, void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseMaximum(const Operator*, ErrorReporter*,
+                          BuiltinDataAllocator*, void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*,
+                          BuiltinDataAllocator*, void**) {
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
+                      BuiltinDataAllocator* allocator, void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteMulParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteMulParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const MulOptions* schema_params = op->builtin_options_as_MulOptions();
+
+  if (schema_params != nullptr) {
+    params->activation =
+        ConvertActivation(schema_params->fused_activation_function());
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseNeg(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                      void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseNotEqual(const Operator*, ErrorReporter*,
+                           BuiltinDataAllocator*, void**) {
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
+                       BuiltinDataAllocator* allocator, void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLitePackParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLitePackParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const PackOptions* schema_params = op->builtin_options_as_PackOptions();
+
+  if (schema_params != nullptr) {
+    params->values_count = schema_params->values_count();
+    params->axis = schema_params->axis();
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParsePad(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                      void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParsePadV2(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                        void**) {
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
+                       BuiltinDataAllocator* allocator, void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLitePoolParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLitePoolParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const Pool2DOptions* schema_params = op->builtin_options_as_Pool2DOptions();
+
+  if (schema_params != nullptr) {
+    params->padding = ConvertPadding(schema_params->padding());
+    params->stride_width = schema_params->stride_w();
+    params->stride_height = schema_params->stride_h();
+    params->filter_width = schema_params->filter_width();
+    params->filter_height = schema_params->filter_height();
+    params->activation =
+        ConvertActivation(schema_params->fused_activation_function());
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParsePrelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                        void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*,
+                           BuiltinDataAllocator*, void**) {
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
+                          BuiltinDataAllocator* allocator,
+                          void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+
+  std::unique_ptr<TfLiteReducerParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteReducerParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const ReducerOptions* schema_params = op->builtin_options_as_ReducerOptions();
+
+  if (schema_params != nullptr) {
+    params->keep_dims = schema_params->keep_dims();
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
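// Worked sketch of what the parsed keep_dims flag means for a reducer's
// output shape (illustrative helper, not part of this file): reducing
// {2, 3} over axis 1 yields {2} without keep_dims and {2, 1} with it.
#include <vector>

std::vector<int> ExampleReducedShape(std::vector<int> dims, int axis,
                                     bool keep_dims) {
  if (keep_dims) {
    dims[axis] = 1;  // the reduced axis is kept with extent 1
  } else {
    dims.erase(dims.begin() + axis);  // the reduced axis disappears
  }
  return dims;
}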
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseRelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                       void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseRelu6(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                        void**) {
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
+                          BuiltinDataAllocator* allocator,
+                          void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+
+  std::unique_ptr<TfLiteReshapeParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteReshapeParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const ReshapeOptions* schema_params = op->builtin_options_as_ReshapeOptions();
+
+  if (schema_params != nullptr) {
+    const flatbuffers::Vector<int32_t>* new_shape = schema_params->new_shape();
+    // TODO(b/147203660): We need to figure out why the option is not a
+    // nullptr when dynamic reshape (new_shape is a tensor) happens.
+    // Nonetheless, we should only copy when new_shape is not a nullptr.
+    if (new_shape != nullptr) {
+      TF_LITE_ENSURE_STATUS(
+          FlatBufferIntVectorToArray(sizeof(params->shape), new_shape,
+                                     params->shape, error_reporter, "reshape"));
+      params->num_dimensions = new_shape->size();
+    } else {
+      // TODO(b/157480169) TODO(b/147203660): We should either return
+      // kTfLiteError or fill in some reasonable defaults in the params struct.
+      // We are not doing so until we better understand the ramifications of
+      // changing the legacy behavior.
+    }
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
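// Sketch of the bound that the sizeof(params->shape) argument above implies
// (an illustration, not this file's code): new_shape must fit in the fixed
// shape[] array of TfLiteReshapeParams or the copy fails.
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/c/builtin_op_data.h"

bool ExampleShapeFits(const flatbuffers::Vector<int32_t>* new_shape) {
  TfLiteReshapeParams params;
  return new_shape != nullptr &&
         new_shape->size() * sizeof(int32_t) <= sizeof(params.shape);
}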
+TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
+                                        ErrorReporter* error_reporter,
+                                        BuiltinDataAllocator* allocator,
+                                        void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteResizeNearestNeighborParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const ResizeNearestNeighborOptions* schema_params =
+      op->builtin_options_as_ResizeNearestNeighborOptions();
+
+  if (schema_params != nullptr) {
+    params->align_corners = schema_params->align_corners();
+    params->half_pixel_centers = schema_params->half_pixel_centers();
+  } else {
+    params->align_corners = false;
+    params->half_pixel_centers = false;
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseRound(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                        void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                        void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                      void**) {
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
+                          BuiltinDataAllocator* allocator,
+                          void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteSoftmaxParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteSoftmaxParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const SoftmaxOptions* schema_params = op->builtin_options_as_SoftmaxOptions();
+
+  if (schema_params != nullptr) {
+    params->beta = schema_params->beta();
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
+                        BuiltinDataAllocator* allocator, void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteSplitParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteSplitParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const SplitOptions* schema_params = op->builtin_options_as_SplitOptions();
+
+  if (schema_params != nullptr) {
+    params->num_splits = schema_params->num_splits();
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseSqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                       void**) {
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseSquare(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                         void**) {
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseStridedSlice(const Operator* op,
+                               ErrorReporter* error_reporter,
+                               BuiltinDataAllocator* allocator,
+                               void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteStridedSliceParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteStridedSliceParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const StridedSliceOptions* schema_params =
+      op->builtin_options_as_StridedSliceOptions();
+
+  if (schema_params != nullptr) {
+    params->begin_mask = schema_params->begin_mask();
+    params->end_mask = schema_params->end_mask();
+    params->ellipsis_mask = schema_params->ellipsis_mask();
+    params->new_axis_mask = schema_params->new_axis_mask();
+    params->shrink_axis_mask = schema_params->shrink_axis_mask();
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
+                      BuiltinDataAllocator* allocator, void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteSubParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteSubParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const SubOptions* schema_params = op->builtin_options_as_SubOptions();
+
+  if (schema_params != nullptr) {
+    params->activation =
+        ConvertActivation(schema_params->fused_activation_function());
+    params->pot_scale_int16 = schema_params->pot_scale_int16();
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
+                       BuiltinDataAllocator* allocator, void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteSVDFParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteSVDFParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const SVDFOptions* schema_params = op->builtin_options_as_SVDFOptions();
+  if (schema_params != nullptr) {
+    params->rank = schema_params->rank();
+    params->activation =
+        ConvertActivation(schema_params->fused_activation_function());
+    params->asymmetric_quantize_inputs =
+        schema_params->asymmetric_quantize_inputs();
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+                       void**) {
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
+                         BuiltinDataAllocator* allocator, void** builtin_data) {
+  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+  SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteUnpackParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteUnpackParams>();
+  TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+  const UnpackOptions* schema_params = op->builtin_options_as_UnpackOptions();
+
+  if (schema_params != nullptr) {
+    params->num = schema_params->num();
+    params->axis = schema_params->axis();
+  } else {
+    // TODO(b/157480169): We should either return kTfLiteError or fill in some
+    // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+  }
+
+  *builtin_data = params.release();
+  return kTfLiteOk;
+}
+
+TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
+                         ErrorReporter* error_reporter,
+                         BuiltinDataAllocator* allocator, void** builtin_data) {
+// TODO(b/145762662): It would be preferable to have the build graph for TF Lite
+// Micro not have the ParseOpData function at all. This would require splitting
+// the current file into two separate files, one of which defines the
+// ParseOpData function and the other that defines the operator specific parse
+// functions (e.g. ParseAdd).
+//
+// Such a split was attempted but was not worth the effort at the time because
+// of the following reasons:
+//  * We could either duplicate the functions and the SafeBuiltinDataAllocator
+//    class in the anonymous namespace of this file, or attempt to make a common
+//    library with these helper functions and class.
+//  * Making a common library with a separate build target was not feasible as
+//    it introduced circular dependencies due to the ErrorReporter, and a
+//    common .cc and .h within the same api build target also caused circular
+//    dependencies due to the BuiltinDataAllocator class.
+//  * If all the builtin operators were to have their own parse functions, or we
+//    were ok with some amount of code duplication, then this split of the .cc
+//    files would be a lot more feasible.
+#ifdef TF_LITE_STATIC_MEMORY
+  TF_LITE_REPORT_ERROR(
+      error_reporter,
+      "ParseOpData is unsupported on TfLiteMicro, please use the operator "
+      "specific parse functions (e.g. ParseAdd etc.).\n");
+  return kTfLiteError;
+#else
+  return ParseOpDataTfLite(op, op_type, error_reporter, allocator,
+                           builtin_data);
+#endif
+}
 
 }  // namespace tflite
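
Note: the per-operator parse functions above exist so that a micro op resolver
can register only the parsers for the ops a model actually uses, letting the
linker drop the rest. A minimal sketch of how they are consumed (the
MicroMutableOpResolver calls follow the micro runtime's API but are shown here
only as an illustration, not as part of this commit):

    // Each Add* call pairs a kernel with its operator-specific parser,
    // e.g. ParseSoftmax for SOFTMAX; ParseRelu simply returns kTfLiteOk
    // because RELU carries no builtin params.
    tflite::MicroMutableOpResolver<2> resolver;
    resolver.AddSoftmax();
    resolver.AddRelu();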

+ 253 - 0
tensorflow/lite/core/api/flatbuffer_conversions.h

@@ -0,0 +1,253 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
+#define TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
+
+// These functions transform codes and data structures that are defined in the
+// flatbuffer serialization format into in-memory values that are used by the
+// runtime API and interpreter.
+
+#include <cstddef>
+#include <new>
+#include <type_traits>
+
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/core/api/error_reporter.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+
+namespace tflite {
+
+// Interface class for builtin data allocations.
+class BuiltinDataAllocator {
+ public:
+  virtual void* Allocate(size_t size, size_t alignment_hint) = 0;
+  virtual void Deallocate(void* data) = 0;
+
+  // Allocate a structure, but make sure it is a POD structure that doesn't
+  // require constructors to run. The reason we do this is that the
+  // Interpreter's C extension part will take ownership, so destructors will
+  // not be run during deallocation.
+  template <typename T>
+  T* AllocatePOD() {
+    // TODO(b/154346074): Change this to is_trivially_destructible when all
+    // platform targets support that properly.
+    static_assert(std::is_pod<T>::value, "Builtin data structure must be POD.");
+    void* allocated_memory = this->Allocate(sizeof(T), alignof(T));
+    return new (allocated_memory) T;
+  }
+
+  virtual ~BuiltinDataAllocator() {}
+};
+
+// Parse the appropriate data out of the op.
+//
+// This handles builtin data explicitly, as it has dedicated flatbuffer schemas.
+// If it returns kTfLiteOk, it passes the data out with `builtin_data`. The
+// calling function has to pass in an allocator object, and this allocator
+// will be called to reserve space for the output data. If the calling
+// function's allocator reserves memory on the heap, then it's the calling
+// function's responsibility to free it.
+// If it returns kTfLiteError, `builtin_data` will be `nullptr`.
+TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
+                         ErrorReporter* error_reporter,
+                         BuiltinDataAllocator* allocator, void** builtin_data);
+
+// Converts the tensor data type used in the flat buffer to the representation
+// used by the runtime.
+TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
+                               ErrorReporter* error_reporter);
+
+TfLiteStatus ParseAbs(const Operator* op, ErrorReporter* error_reporter,
+                      BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
+                      BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
+                         BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
+                         BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseCeil(const Operator* op, ErrorReporter* error_reporter,
+                       BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseConcatenation(const Operator* op,
+                                ErrorReporter* error_reporter,
+                                BuiltinDataAllocator* allocator,
+                                void** builtin_data);
+
+TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
+                         BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseCos(const Operator* op, ErrorReporter* error_reporter,
+                      BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
+                                  ErrorReporter* error_reporter,
+                                  BuiltinDataAllocator* allocator,
+                                  void** builtin_data);
+
+TfLiteStatus ParseDequantize(const Operator* op, ErrorReporter* error_reporter,
+                             BuiltinDataAllocator* allocator,
+                             void** builtin_data);
+
+TfLiteStatus ParseEqual(const Operator* op, ErrorReporter* error_reporter,
+                        BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseFloor(const Operator* op, ErrorReporter* error_reporter,
+                        BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseFullyConnected(const Operator* op,
+                                 ErrorReporter* error_reporter,
+                                 BuiltinDataAllocator* allocator,
+                                 void** builtin_data);
+
+TfLiteStatus ParseGreater(const Operator* op, ErrorReporter* error_reporter,
+                          BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseGreaterEqual(const Operator* op,
+                               ErrorReporter* error_reporter,
+                               BuiltinDataAllocator* allocator,
+                               void** builtin_data);
+
+TfLiteStatus ParseHardSwish(const Operator* op, ErrorReporter* error_reporter,
+                            BuiltinDataAllocator* allocator,
+                            void** builtin_data);
+
+TfLiteStatus ParseL2Normalization(const Operator* op,
+                                  ErrorReporter* error_reporter,
+                                  BuiltinDataAllocator* allocator,
+                                  void** builtin_data);
+
+TfLiteStatus ParseLess(const Operator* op, ErrorReporter* error_reporter,
+                       BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseLessEqual(const Operator* op, ErrorReporter* error_reporter,
+                            BuiltinDataAllocator* allocator,
+                            void** builtin_data);
+
+TfLiteStatus ParseLog(const Operator* op, ErrorReporter* error_reporter,
+                      BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseLogicalAnd(const Operator* op, ErrorReporter* error_reporter,
+                             BuiltinDataAllocator* allocator,
+                             void** builtin_data);
+
+TfLiteStatus ParseLogicalNot(const Operator* op, ErrorReporter* error_reporter,
+                             BuiltinDataAllocator* allocator,
+                             void** builtin_data);
+
+TfLiteStatus ParseLogicalOr(const Operator* op, ErrorReporter* error_reporter,
+                            BuiltinDataAllocator* allocator,
+                            void** builtin_data);
+
+TfLiteStatus ParseLogistic(const Operator* op, ErrorReporter* error_reporter,
+                           BuiltinDataAllocator* allocator,
+                           void** builtin_data);
+
+TfLiteStatus ParseMaximum(const Operator* op, ErrorReporter* error_reporter,
+                          BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseMinimum(const Operator* op, ErrorReporter* error_reporter,
+                          BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
+                      BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseNeg(const Operator* op, ErrorReporter* error_reporter,
+                      BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseNotEqual(const Operator* op, ErrorReporter* error_reporter,
+                           BuiltinDataAllocator* allocator,
+                           void** builtin_data);
+
+TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
+                       BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParsePad(const Operator* op, ErrorReporter* error_reporter,
+                      BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParsePadV2(const Operator* op, ErrorReporter* error_reporter,
+                        BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
+                       BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParsePrelu(const Operator* op, ErrorReporter* error_reporter,
+                        BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseQuantize(const Operator* op, ErrorReporter* error_reporter,
+                           BuiltinDataAllocator* allocator,
+                           void** builtin_data);
+
+TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
+                          BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseRelu(const Operator* op, ErrorReporter* error_reporter,
+                       BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseRelu6(const Operator* op, ErrorReporter* error_reporter,
+                        BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
+                          BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
+                                        ErrorReporter* error_reporter,
+                                        BuiltinDataAllocator* allocator,
+                                        void** builtin_data);
+
+TfLiteStatus ParseRound(const Operator* op, ErrorReporter* error_reporter,
+                        BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseRsqrt(const Operator* op, ErrorReporter* error_reporter,
+                        BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSin(const Operator* op, ErrorReporter* error_reporter,
+                      BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
+                          BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
+                        BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSqrt(const Operator* op, ErrorReporter* error_reporter,
+                       BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSquare(const Operator* op, ErrorReporter* error_reporter,
+                         BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseStridedSlice(const Operator* op,
+                               ErrorReporter* error_reporter,
+                               BuiltinDataAllocator* allocator,
+                               void** builtin_data);
+
+TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
+                      BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
+                       BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseTanh(const Operator* op, ErrorReporter* error_reporter,
+                       BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
+                         BuiltinDataAllocator* allocator, void** builtin_data);
+
+}  // namespace tflite
+
+#endif  // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
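
Note: a minimal sketch of a concrete allocator satisfying the
BuiltinDataAllocator interface above, plus a call to one of the parse
functions. The MallocDataAllocator name is illustrative, and `op` and
`error_reporter` are assumed to come from the surrounding model-loading code:

    #include <cstdlib>

    class MallocDataAllocator : public tflite::BuiltinDataAllocator {
     public:
      void* Allocate(size_t size, size_t alignment_hint) override {
        return malloc(size);  // alignment_hint ignored in this sketch
      }
      void Deallocate(void* data) override { free(data); }
    };

    MallocDataAllocator allocator;
    void* builtin_data = nullptr;
    TfLiteStatus status =
        tflite::ParseSoftmax(op, error_reporter, &allocator, &builtin_data);
    // On kTfLiteOk, builtin_data points to a TfLiteSoftmaxParams that the
    // caller owns and must release via allocator.Deallocate().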

+ 3 - 3
tflite/core/op_resolver.cc → tensorflow/lite/core/api/op_resolver.cc

@@ -13,11 +13,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
-#include "op_resolver.h"
+#include "tensorflow/lite/core/api/op_resolver.h"
 
 #include "flatbuffers/flatbuffers.h"  // from @flatbuffers
-#include "tflite/c/common.h"
-#include "error_reporter.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/core/api/error_reporter.h"
 
 namespace tflite {
 

+ 3 - 3
tflite/core/op_resolver.h → tensorflow/lite/core/api/op_resolver.h

@@ -15,9 +15,9 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
 #define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
 
-#include "tflite/c/common.h"
-#include "error_reporter.h"
-#include "tflite/schema/schema_generated.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/core/api/error_reporter.h"
+#include "tensorflow/lite/schema/schema_generated.h"
 
 namespace tflite {
 

+ 0 - 0
tflite/core/profiler.h → tensorflow/lite/core/api/profiler.h


+ 2 - 2
tflite/core/tensor_utils.cc → tensorflow/lite/core/api/tensor_utils.cc

@@ -13,11 +13,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
-#include "tensor_utils.h"
+#include "tensorflow/lite/core/api/tensor_utils.h"
 
 #include <string.h>
 
-#include "tflite/c/common.h"
+#include "tensorflow/lite/c/common.h"
 
 namespace tflite {
 

+ 1 - 1
tflite/core/tensor_utils.h → tensorflow/lite/core/api/tensor_utils.h

@@ -16,7 +16,7 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_
 #define TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_
 
-#include "tflite/c/common.h"
+#include "tensorflow/lite/c/common.h"
 
 namespace tflite {
 

+ 0 - 0
tflite/micro/examples/SConscript → tensorflow/lite/experimental/SConscript


+ 0 - 0
tflite/micro/tools/cmsis/CMSIS/NN/SConscript → tensorflow/lite/experimental/microfrontend/SConscript


+ 28 - 0
tensorflow/lite/experimental/microfrontend/lib/SConscript

@@ -0,0 +1,28 @@
+from building import *
+import os
+
+cwd = GetCurrentDir()
+src = Glob('*.c') + Glob('*.cc')
+
+# . (the package root): locate the TensorflowLiteMicro-* package under packages/
+root = str(Dir('#'))
+packages = os.path.join(root, 'packages')
+file_list = os.listdir(packages)
+for f in file_list:
+    if(f.split('-')[0] == 'TensorflowLiteMicro'):
+        tflm_pkg = os.path.join(packages, f)
+        break
+#./third_party/flatbuffers/include
+flatbuffer = os.path.join(tflm_pkg, "third_party/flatbuffers/include")
+#./third_party/gemmlowp
+gemmlowp = os.path.join(tflm_pkg, "third_party/gemmlowp")
+#./third_party/kissfft
+kissfft = os.path.join(tflm_pkg, "third_party/kissfft")
+#./third_party/ruy
+ruy = os.path.join(tflm_pkg, "third_party/ruy")
+
+CPPPATH = [tflm_pkg, flatbuffer, gemmlowp, kissfft, ruy]
+
+group = DefineGroup('lite', src, depend = ['PKG_USING_TENSORFLOWLITEMICRO'], CPPPATH = CPPPATH)
+
+Return('group')
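
Note: because the package root (tflm_pkg) itself is put on CPPPATH, sources
built by this script can include headers by their repository-relative path,
which is exactly what the include rewrites throughout this commit rely on,
e.g.:

    #include "tensorflow/lite/experimental/microfrontend/lib/fft.h"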

+ 0 - 0
tflite/experimental/bits.h → tensorflow/lite/experimental/microfrontend/lib/bits.h


+ 3 - 2
tflite/experimental/fft.cc → tensorflow/lite/experimental/microfrontend/lib/fft.cc

@@ -12,12 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "fft.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
 
 #include <string.h>
 
+#define FIXED_POINT 16
 #include "kiss_fft.h"
-#include "kiss_fftr.h"
+#include "tools/kiss_fftr.h"
 
 void FftCompute(struct FftState* state, const int16_t* input,
                 int input_scale_shift) {
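
Note: kissfft selects its scalar type from the FIXED_POINT macro at include
time; defining FIXED_POINT as 16 before kiss_fft.h (as added above, and as
fft_util.cc below already does) makes kiss_fft_scalar an int16_t, matching the
int16_t samples that FftCompute receives.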

+ 0 - 0
tflite/experimental/fft.h → tensorflow/lite/experimental/microfrontend/lib/fft.h


+ 2 - 2
tflite/experimental/fft_util.cc → tensorflow/lite/experimental/microfrontend/lib/fft_util.cc

@@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "fft_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"
 
 #include <stdio.h>
 
 #define FIXED_POINT 16
 #include "kiss_fft.h"
-#include "kiss_fftr.h"
+#include "tools/kiss_fftr.h"
 
 int FftPopulateState(struct FftState* state, size_t input_size) {
   state->input_size = input_size;

+ 1 - 1
tflite/experimental/fft_util.h → tensorflow/lite/experimental/microfrontend/lib/fft_util.h

@@ -15,7 +15,7 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
 #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
 
-#include "fft.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
 
 #ifdef __cplusplus
 extern "C" {

+ 2 - 2
tflite/experimental/filterbank.c → tensorflow/lite/experimental/microfrontend/lib/filterbank.c

@@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "filterbank.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
 
 #include <string.h>
 
-#include "bits.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
 
 void FilterbankConvertFftComplexToEnergy(struct FilterbankState* state,
                                          struct complex_int16_t* fft_output,

+ 1 - 1
tflite/experimental/filterbank.h → tensorflow/lite/experimental/microfrontend/lib/filterbank.h

@@ -18,7 +18,7 @@ limitations under the License.
 #include <stdint.h>
 #include <stdlib.h>
 
-#include "fft.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
 
 #define kFilterbankBits 12
 

+ 1 - 1
tflite/experimental/filterbank_util.c → tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "filterbank_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h"
 
 #include <assert.h>
 #include <math.h>

+ 1 - 1
tflite/experimental/filterbank_util.h → tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h

@@ -15,7 +15,7 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
 #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
 
-#include "filterbank.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
 
 #ifdef __cplusplus
 extern "C" {

+ 2 - 2
tflite/experimental/frontend.c → tensorflow/lite/experimental/microfrontend/lib/frontend.c

@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "frontend.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
 
-#include "bits.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
 
 struct FrontendOutput FrontendProcessSamples(struct FrontendState* state,
                                              const int16_t* samples,

+ 6 - 6
tflite/experimental/frontend.h → tensorflow/lite/experimental/microfrontend/lib/frontend.h

@@ -18,12 +18,12 @@ limitations under the License.
 #include <stdint.h>
 #include <stdlib.h>
 
-#include "fft.h"
-#include "filterbank.h"
-#include "log_scale.h"
-#include "noise_reduction.h"
-#include "pcan_gain_control.h"
-#include "window.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/window.h"
 
 #ifdef __cplusplus
 extern "C" {

+ 2 - 2
tflite/experimental/frontend_util.c → tensorflow/lite/experimental/microfrontend/lib/frontend_util.c

@@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "frontend_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
 
 #include <stdio.h>
 #include <string.h>
 
-#include "bits.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
 
 void FrontendFillConfigWithDefaults(struct FrontendConfig* config) {
   WindowFillConfigWithDefaults(&config->window);

+ 7 - 7
tflite/experimental/frontend_util.h → tensorflow/lite/experimental/microfrontend/lib/frontend_util.h

@@ -15,13 +15,13 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
 #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
 
-#include "fft_util.h"
-#include "filterbank_util.h"
-#include "frontend.h"
-#include "log_scale_util.h"
-#include "noise_reduction_util.h"
-#include "pcan_gain_control_util.h"
-#include "window_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"
 
 #ifdef __cplusplus
 extern "C" {

+ 1 - 1
tflite/experimental/log_lut.c → tensorflow/lite/experimental/microfrontend/lib/log_lut.c

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "log_lut.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h"
 const uint16_t kLogLut[]
 #ifndef _MSC_VER
     __attribute__((aligned(4)))

+ 0 - 0
tflite/experimental/log_lut.h → tensorflow/lite/experimental/microfrontend/lib/log_lut.h


+ 3 - 3
tflite/experimental/log_scale.c → tensorflow/lite/experimental/microfrontend/lib/log_scale.c

@@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "log_scale.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
 
-#include "bits.h"
-#include "log_lut.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h"
 
 #define kuint16max 0x0000FFFF
 

+ 0 - 0
tflite/experimental/log_scale.h → tensorflow/lite/experimental/microfrontend/lib/log_scale.h


+ 1 - 1
tflite/experimental/log_scale_util.c → tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "log_scale_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"
 
 void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config) {
   config->enable_log = 1;

+ 1 - 1
tflite/experimental/log_scale_util.h → tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h

@@ -18,7 +18,7 @@ limitations under the License.
 #include <stdint.h>
 #include <stdlib.h>
 
-#include "log_scale.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
 
 #ifdef __cplusplus
 extern "C" {

+ 1 - 1
tflite/experimental/noise_reduction.c → tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "noise_reduction.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
 
 #include <string.h>
 

+ 0 - 0
tflite/experimental/noise_reduction.h → tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h


+ 1 - 1
tflite/experimental/noise_reduction_util.c → tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "noise_reduction_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"
 
 #include <stdio.h>
 

+ 1 - 1
tflite/experimental/noise_reduction_util.h → tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h

@@ -15,7 +15,7 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
 #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
 
-#include "noise_reduction.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
 
 #ifdef __cplusplus
 extern "C" {

+ 2 - 2
tflite/experimental/pcan_gain_control.c → tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c

@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "pcan_gain_control.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
 
-#include "bits.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
 
 int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut) {
   if (x <= 2) {

+ 0 - 0
tflite/experimental/pcan_gain_control.h → tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h


+ 1 - 1
tflite/experimental/pcan_gain_control_util.c → tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "pcan_gain_control_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h"
 
 #include <math.h>
 #include <stdio.h>

+ 1 - 1
tflite/experimental/pcan_gain_control_util.h → tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h

@@ -15,7 +15,7 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
 #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
 
-#include "pcan_gain_control.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
 
 #define kWideDynamicFunctionBits 32
 #define kWideDynamicFunctionLUTSize (4 * kWideDynamicFunctionBits - 3)

+ 1 - 1
tflite/experimental/window.c → tensorflow/lite/experimental/microfrontend/lib/window.c

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "window.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/window.h"
 
 #include <string.h>
 

+ 0 - 0
tflite/experimental/window.h → tensorflow/lite/experimental/microfrontend/lib/window.h


+ 1 - 1
tflite/experimental/window_util.c → tensorflow/lite/experimental/microfrontend/lib/window_util.c

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "window_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"
 
 #include <math.h>
 #include <stdio.h>

+ 1 - 1
tflite/experimental/window_util.h → tensorflow/lite/experimental/microfrontend/lib/window_util.h

@@ -15,7 +15,7 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
 #define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
 
-#include "window.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/window.h"
 
 #ifdef __cplusplus
 extern "C" {

+ 29 - 0
tensorflow/lite/kernels/SConscript

@@ -0,0 +1,29 @@
+from building import *
+import os
+
+cwd = GetCurrentDir()
+src = Glob('*.c') + Glob('*.cc') + Glob('internal/*.cc')
+
+# . (the package root): locate the TensorflowLiteMicro-* package under packages/
+root = str(Dir('#'))
+packages = os.path.join(root, 'packages')
+file_list = os.listdir(packages)
+for f in file_list:
+    if(f.split('-')[0] == 'TensorflowLiteMicro'):
+        tflm_pkg = os.path.join(packages, f)
+        break
+#./third_party/flatbuffers/include
+flatbuffer = os.path.join(tflm_pkg, "third_party/flatbuffers/include")
+#./third_party/gemmlowp
+gemmlowp = os.path.join(tflm_pkg, "third_party/gemmlowp")
+#./third_party/kissfft
+kissfft = os.path.join(tflm_pkg, "third_party/kissfft")
+#./third_party/ruy
+ruy = os.path.join(tflm_pkg, "third_party/ruy")
+
+CPPPATH = [tflm_pkg, flatbuffer, gemmlowp, kissfft, ruy]
+
+group = DefineGroup('lite', src, depend = ['PKG_USING_TENSORFLOWLITEMICRO'], CPPPATH = CPPPATH)
+
+Return('group')

+ 73 - 54
tflite/kernels/internal/common.h → tensorflow/lite/kernels/internal/common.h

@@ -24,9 +24,9 @@ limitations under the License.
 #include <functional>
 
 #include "fixedpoint/fixedpoint.h"
-#include "cppmath.h"
-#include "neon_check.h"
-#include "types.h"
+#include "tensorflow/lite/kernels/internal/cppmath.h"
+#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
+#include "tensorflow/lite/kernels/internal/types.h"
 
 namespace tflite {
 
@@ -55,9 +55,12 @@ inline void GetActivationMinMax(FusedActivationFunctionType ac,
   }
 }
 
-inline float ActivationFunctionWithMinMax(float x, float output_activation_min,
-                                          float output_activation_max) {
-  return std::min(std::max(x, output_activation_min), output_activation_max);
+template <typename T>
+inline T ActivationFunctionWithMinMax(T x, T output_activation_min,
+                                      T output_activation_max) {
+  using std::max;
+  using std::min;
+  return min(max(x, output_activation_min), output_activation_max);
 }
 
 // Legacy function, left for compatibility only.
@@ -135,23 +138,24 @@ inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
 #endif
 }
 
-inline int32 MultiplyByQuantizedMultiplierSmallerThanOneExp(
-    int32 x, int32 quantized_multiplier, int left_shift) {
+inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp(
+    int32_t x, int32_t quantized_multiplier, int left_shift) {
   using gemmlowp::RoundingDivideByPOT;
   using gemmlowp::SaturatingRoundingDoublingHighMul;
   return RoundingDivideByPOT(
       SaturatingRoundingDoublingHighMul(x, quantized_multiplier), -left_shift);
 }
 
-inline int32 MultiplyByQuantizedMultiplierGreaterThanOne(
-    int32 x, int32 quantized_multiplier, int left_shift) {
+inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne(
+    int32_t x, int32_t quantized_multiplier, int left_shift) {
   using gemmlowp::SaturatingRoundingDoublingHighMul;
   return SaturatingRoundingDoublingHighMul(x * (1 << left_shift),
                                            quantized_multiplier);
 }
 
-inline int32 MultiplyByQuantizedMultiplier(int32 x, int32 quantized_multiplier,
-                                           int shift) {
+inline int32_t MultiplyByQuantizedMultiplier(int32_t x,
+                                             int32_t quantized_multiplier,
+                                             int shift) {
   using gemmlowp::RoundingDivideByPOT;
   using gemmlowp::SaturatingRoundingDoublingHighMul;
   int left_shift = shift > 0 ? shift : 0;
@@ -161,16 +165,16 @@ inline int32 MultiplyByQuantizedMultiplier(int32 x, int32 quantized_multiplier,
                              right_shift);
 }
 
-inline int32 MultiplyByQuantizedMultiplier(int64_t x,
-                                           int32 quantized_multiplier,
-                                           int shift) {
+inline int32_t MultiplyByQuantizedMultiplier(int64_t x,
+                                             int32_t quantized_multiplier,
+                                             int shift) {
   // Inputs:
   // - quantized_multiplier has fixed point at bit 31
   // - shift is -31 to +7 (negative for right shift)
   //
   // Assumptions: The following input ranges are assumed
   // - quantize_scale>=0  (the usual range is (1<<30) to (1>>31)-1)
-  // - scaling is chosen so final scaled result fits in int32
+  // - scaling is chosen so final scaled result fits in int32_t
   // - input x is in the range -(1<<47) <= x < (1<<47)
   assert(quantized_multiplier >= 0);
   assert(shift >= -31 && shift < 8);
@@ -215,9 +219,9 @@ inline int CountLeadingSignBits(T integer_input) {
   using U = typename std::make_unsigned<T>::type;
   return integer_input >= 0
              ? CountLeadingZeros(static_cast<U>(integer_input)) - 1
-             : integer_input != std::numeric_limits<T>::min()
-                   ? CountLeadingZeros(2 * static_cast<U>(-integer_input) - 1)
-                   : 0;
+         : integer_input != std::numeric_limits<T>::min()
+             ? CountLeadingZeros(2 * static_cast<U>(-integer_input) - 1)
+             : 0;
 #endif
 }
 
@@ -259,7 +263,7 @@ inline void gen_lut(const std::function<double(double)>& func, double min,
       std::min(std::max(TfLiteRound(func(max) * 32768.0), -32768.0), 32767.0);
 }
 
-// int16 func table lookup, e.g., lookup exp() and 1/(1+x) used in softmax
+// int16_t func table lookup, e.g., lookup exp() and 1/(1+x) used in softmax
 inline int16_t generic_int16_table_lookup(int16_t value, const int16_t* lut) {
   // 512 base value, lut[513] only for calculate slope
   uint16_t index = static_cast<uint16_t>(256 + (value >> 7));
@@ -410,6 +414,23 @@ SaturatingRoundingMultiplyByPOTParam(
       SaturatingRoundingMultiplyByPOTParam(a.raw(), exponent));
 }
 
+// Convert int32_t multiplier to int16_t with rounding.
+inline void DownScaleInt32ToInt16Multiplier(int32_t multiplier_int32_t,
+                                            int16_t* multiplier_int16_t) {
+  TFLITE_DCHECK_GE(multiplier_int32_t, 0);
+  static constexpr int32_t kRoundingOffset = 1 << 15;
+  if (multiplier_int32_t >=
+      std::numeric_limits<int32_t>::max() - kRoundingOffset) {
+    *multiplier_int16_t = std::numeric_limits<int16_t>::max();
+    return;
+  }
+  const int32_t result = (multiplier_int32_t + kRoundingOffset) >> 16;
+  TFLITE_DCHECK_LE(result << 16, multiplier_int32_t + kRoundingOffset);
+  TFLITE_DCHECK_GT(result << 16, multiplier_int32_t - kRoundingOffset);
+  *multiplier_int16_t = result;
+  TFLITE_DCHECK_EQ(*multiplier_int16_t, result);
+}
+
 // Minimum output bits to accommodate log of maximum input range.  It actually
 // does not matter if one considers, say, [-64,64] or [-64,64).
 //
@@ -418,15 +439,13 @@ SaturatingRoundingMultiplyByPOTParam(
 //  ceil(log(abs( log(2.^(0:127))+1 ))/log(2)); ...
 //  ceil(log(abs( log(2.^(0:127))+1 ))/log(2))]
 constexpr int min_log_x_output_bits(int input_bits) {
-  return input_bits > 90
-             ? 7
-             : input_bits > 44
-                   ? 6
-                   : input_bits > 21
-                         ? 5
-                         : input_bits > 10
-                               ? 4
-                               : input_bits > 4 ? 3 : input_bits > 1 ? 2 : 1;
+  return input_bits > 90   ? 7
+         : input_bits > 44 ? 6
+         : input_bits > 21 ? 5
+         : input_bits > 10 ? 4
+         : input_bits > 4  ? 3
+         : input_bits > 1  ? 2
+                           : 1;
 }
 
 // Although currently the name of this function says that it cannot handle
@@ -434,17 +453,17 @@ constexpr int min_log_x_output_bits(int input_bits) {
 // x_max is the largest representable input.  In other words, the output range
 // is symmetric.
 template <int OutputIntegerBits, int InputIntegerBits>
-inline gemmlowp::FixedPoint<int32, OutputIntegerBits>
+inline gemmlowp::FixedPoint<int32_t, OutputIntegerBits>
 log_x_for_x_greater_than_or_equal_to_1_impl(
-    gemmlowp::FixedPoint<int32, InputIntegerBits> input_val) {
-  // assert(__builtin_clz(0u) >= std::numeric_limits<uint32>::digits - 1);
-  // assert(__builtin_clz(0u) <= std::numeric_limits<uint32>::digits);
-  using FixedPoint0 = gemmlowp::FixedPoint<int32, 0>;
+    gemmlowp::FixedPoint<int32_t, InputIntegerBits> input_val) {
+  // assert(__builtin_clz(0u) >= std::numeric_limits<uint32_t>::digits - 1);
+  // assert(__builtin_clz(0u) <= std::numeric_limits<uint32_t>::digits);
+  using FixedPoint0 = gemmlowp::FixedPoint<int32_t, 0>;
   // The reason for accumulating the result with an extra bit of headroom is
   // that z_pow_2_adj * log_2 might be saturated, and adding num_scaled *
   // recip_denom will otherwise introduce an error.
   static constexpr int kAccumIntegerBits = OutputIntegerBits + 1;
-  using FixedPointAccum = gemmlowp::FixedPoint<int32, kAccumIntegerBits>;
+  using FixedPointAccum = gemmlowp::FixedPoint<int32_t, kAccumIntegerBits>;
 
   const FixedPoint0 log_2 = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
       FixedPoint0, 1488522236, std::log(2.0));
@@ -472,10 +491,10 @@ log_x_for_x_greater_than_or_equal_to_1_impl(
   // required shift "ourselves" instead of using, say, Rescale.
   FixedPoint0 z_a = FixedPoint0::FromRaw(input_val.raw());
   // z_a_pow_2 = input_integer_bits - z_a_headroom;
-  int z_a_headroom_plus_1 = CountLeadingZeros(static_cast<uint32>(z_a.raw()));
+  int z_a_headroom_plus_1 = CountLeadingZeros(static_cast<uint32_t>(z_a.raw()));
   FixedPoint0 r_a_tmp =
       SaturatingRoundingMultiplyByPOTParam(z_a, (z_a_headroom_plus_1 - 1));
-  const int32 r_a_raw =
+  const int32_t r_a_raw =
       SaturatingRoundingMultiplyByPOTParam((r_a_tmp * sqrt_half).raw(), 1);
   // z_pow_2_adj = max(z_pow_2_a - 0.75, z_pow_2_b - 0.25);
   // z_pow_2_adj = max(InputIntegerBits - z_a_headroom_plus_1 + 0.25,
@@ -487,8 +506,8 @@ log_x_for_x_greater_than_or_equal_to_1_impl(
 
   // z_b is treated like z_a, but premultiplying by sqrt(0.5).
   FixedPoint0 z_b = z_a * sqrt_half;
-  int z_b_headroom = CountLeadingZeros(static_cast<uint32>(z_b.raw())) - 1;
-  const int32 r_b_raw =
+  int z_b_headroom = CountLeadingZeros(static_cast<uint32_t>(z_b.raw())) - 1;
+  const int32_t r_b_raw =
       SaturatingRoundingMultiplyByPOTParam(z_a.raw(), z_b_headroom);
   const FixedPointAccum z_b_pow_2_adj = SaturatingSub(
       FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
@@ -516,9 +535,9 @@ log_x_for_x_greater_than_or_equal_to_1_impl(
 }
 
 template <int OutputIntegerBits, int InputIntegerBits>
-inline gemmlowp::FixedPoint<int32, OutputIntegerBits>
+inline gemmlowp::FixedPoint<int32_t, OutputIntegerBits>
 log_x_for_x_greater_than_or_equal_to_1(
-    gemmlowp::FixedPoint<int32, InputIntegerBits> input_val) {
+    gemmlowp::FixedPoint<int32_t, InputIntegerBits> input_val) {
   static_assert(
       OutputIntegerBits >= min_log_x_output_bits(InputIntegerBits),
       "Output integer bits must be sufficient to accommodate logs of inputs.");
@@ -527,25 +546,25 @@ log_x_for_x_greater_than_or_equal_to_1(
       input_val);
 }
 
-inline int32 GetReciprocal(int32 x, int x_integer_digits,
-                           int* num_bits_over_unit) {
-  int headroom_plus_one = CountLeadingZeros(static_cast<uint32>(x));
+inline int32_t GetReciprocal(int32_t x, int x_integer_digits,
+                             int* num_bits_over_unit) {
+  int headroom_plus_one = CountLeadingZeros(static_cast<uint32_t>(x));
   // This is the number of bits to the left of the binary point above 1.0.
   // Consider x=1.25.  In that case shifted_scale=0.8 and
   // no later adjustment will be needed.
   *num_bits_over_unit = x_integer_digits - headroom_plus_one;
-  const int32 shifted_sum_minus_one =
-      static_cast<int32>((static_cast<uint32>(x) << headroom_plus_one) -
-                         (static_cast<uint32>(1) << 31));
+  const int32_t shifted_sum_minus_one =
+      static_cast<int32_t>((static_cast<uint32_t>(x) << headroom_plus_one) -
+                           (static_cast<uint32_t>(1) << 31));
 
-  gemmlowp::FixedPoint<int32, 0> shifted_scale =
+  gemmlowp::FixedPoint<int32_t, 0> shifted_scale =
       gemmlowp::one_over_one_plus_x_for_x_in_0_1(
-          gemmlowp::FixedPoint<int32, 0>::FromRaw(shifted_sum_minus_one));
+          gemmlowp::FixedPoint<int32_t, 0>::FromRaw(shifted_sum_minus_one));
   return shifted_scale.raw();
 }
 
-inline void GetInvSqrtQuantizedMultiplierExp(int32 input, int reverse_shift,
-                                             int32* output_inv_sqrt,
+inline void GetInvSqrtQuantizedMultiplierExp(int32_t input, int reverse_shift,
+                                             int32_t* output_inv_sqrt,
                                              int* output_shift) {
   TFLITE_DCHECK_GE(input, 0);
   if (input <= 1) {
@@ -565,7 +584,7 @@ inline void GetInvSqrtQuantizedMultiplierExp(int32 input, int reverse_shift,
     ++*output_shift;
   }
   const unsigned max_left_shift_bits =
-      CountLeadingZeros(static_cast<uint32>(input)) - 1;
+      CountLeadingZeros(static_cast<uint32_t>(input)) - 1;
   const unsigned max_left_shift_bit_pairs = max_left_shift_bits / 2;
   const unsigned left_shift_bit_pairs = max_left_shift_bit_pairs - 1;
   *output_shift -= left_shift_bit_pairs;
@@ -577,8 +596,8 @@ inline void GetInvSqrtQuantizedMultiplierExp(int32 input, int reverse_shift,
   using gemmlowp::SaturatingRoundingMultiplyByPOT;
   // Using 3 integer bits gives us enough room for the internal arithmetic in
   // this Newton-Raphson iteration.
-  using F3 = FixedPoint<int32, 3>;
-  using F0 = FixedPoint<int32, 0>;
+  using F3 = FixedPoint<int32_t, 3>;
+  using F0 = FixedPoint<int32_t, 0>;
   const F3 fixedpoint_input = F3::FromRaw(input >> 1);
   const F3 fixedpoint_half_input =
       SaturatingRoundingMultiplyByPOT<-1>(fixedpoint_input);
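
Note: a quick hand-check of the new DownScaleInt32ToInt16Multiplier helper
added above (the calls are illustrative; the expected results were computed by
hand from the rounding logic):

    #include <limits>

    int16_t m16;
    // Q31 -> Q15 with rounding: (2^30 + 2^15) >> 16 == 1 << 14.
    tflite::DownScaleInt32ToInt16Multiplier(1 << 30, &m16);  // m16 == 16384
    // Inputs within 2^15 of INT32_MAX saturate to INT16_MAX.
    tflite::DownScaleInt32ToInt16Multiplier(
        std::numeric_limits<int32_t>::max(), &m16);          // m16 == 32767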

+ 4 - 2
tflite/kernels/internal/compatibility.h → tensorflow/lite/kernels/internal/compatibility.h

@@ -17,7 +17,7 @@ limitations under the License.
 
 #include <cstdint>
 
-#include "tflite/kernels/main/op_macros.h"
+#include "tensorflow/lite/kernels/op_macros.h"
 
 #ifndef TFLITE_DCHECK
 #define TFLITE_DCHECK(condition) (condition) ? (void)0 : TFLITE_ASSERT_FALSE
@@ -76,13 +76,15 @@ limitations under the License.
 #define TFLITE_CHECK_LT(x, y) ((x) < (y)) ? (void)0 : TFLITE_ABORT
 #endif
 
-// TODO(ahentz): Clean up.
+#ifndef TF_LITE_STATIC_MEMORY
+// TODO(b/162019032): Consider removing these type-aliases.
 using int8 = std::int8_t;
 using uint8 = std::uint8_t;
 using int16 = std::int16_t;
 using uint16 = std::uint16_t;
 using int32 = std::int32_t;
 using uint32 = std::uint32_t;
+#endif  // !defined(TF_LITE_STATIC_MEMORY)
 
 // TFLITE_DEPRECATED()
 //
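
Note: with TF_LITE_STATIC_MEMORY defined (the micro build), the unqualified
int8/uint8/.../int32 aliases above are no longer declared, so kernel code must
spell out the fixed-width <cstdint> names directly; that is what the
int32 -> int32_t (and similar) renames in common.h and quantization_util.cc in
this commit accomplish.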

+ 1 - 1
tflite/kernels/internal/cppmath.h → tensorflow/lite/kernels/internal/cppmath.h

@@ -24,7 +24,7 @@ namespace tflite {
     defined(__ZEPHYR__)
 #define TF_LITE_GLOBAL_STD_PREFIX
 #else
-#define TF_LITE_GLOBAL_STD_PREFIX
+#define TF_LITE_GLOBAL_STD_PREFIX  // intentionally empty: resolve std_name in the global namespace
 #endif
 
 #define DECLARE_STD_GLOBAL_SWITCH1(tf_name, std_name) \
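
Note (an inference, since the macro body is cut off in this hunk): in the
upstream header, DECLARE_STD_GLOBAL_SWITCH1 expands to
TF_LITE_GLOBAL_STD_PREFIX::std_name(x), so keeping the prefix empty on this
branch makes TfLiteRound and friends resolve to ::round in the global
namespace rather than std::round. The max.h and min.h changes below
(std::max -> ::max, std::min -> ::min) follow the same global-namespace
pattern, presumably for the Win/MSVC build this commit targets.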

+ 1 - 1
tflite/kernels/internal/max.h → tensorflow/lite/kernels/internal/max.h

@@ -21,7 +21,7 @@ namespace tflite {
 
 #if defined(TF_LITE_USE_GLOBAL_MAX) || defined(__ZEPHYR__)
 inline float TfLiteMax(const float& x, const float& y) {
-  return std::max(x, y);
+  return ::max(x, y);
 }
 #else
 template <class T>

+ 1 - 1
tflite/kernels/internal/min.h → tensorflow/lite/kernels/internal/min.h

@@ -21,7 +21,7 @@ namespace tflite {
 
 #if defined(TF_LITE_USE_GLOBAL_MIN) || defined(__ZEPHYR__)
 inline float TfLiteMin(const float& x, const float& y) {
-  return std::min(x, y);
+  return ::min(x, y);
 }
 #else
 template <class T>

+ 0 - 0
tflite/kernels/internal/neon_check.h → tensorflow/lite/kernels/internal/optimized/neon_check.h


+ 7 - 7
tflite/kernels/internal/quantization_util.cc → tensorflow/lite/kernels/internal/quantization_util.cc

@@ -13,14 +13,14 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
-#include "quantization_util.h"
+#include "tensorflow/lite/kernels/internal/quantization_util.h"
 
 #include <algorithm>
 #include <cmath>
 #include <limits>
 
-#include "compatibility.h"
-#include "cppmath.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/kernels/internal/cppmath.h"
 
 namespace tflite {
 
@@ -342,13 +342,13 @@ void NudgeQuantizationRange(const float min, const float max,
   const float quant_max_float = static_cast<float>(quant_max);
   *nudged_scale = (max - min) / (quant_max_float - quant_min_float);
   const float zero_point_from_min = quant_min_float - min / *nudged_scale;
-  uint16 nudged_zero_point;
+  uint16_t nudged_zero_point;
   if (zero_point_from_min < quant_min_float) {
-    nudged_zero_point = static_cast<uint16>(quant_min);
+    nudged_zero_point = static_cast<uint16_t>(quant_min);
   } else if (zero_point_from_min > quant_max_float) {
-    nudged_zero_point = static_cast<uint16>(quant_max);
+    nudged_zero_point = static_cast<uint16_t>(quant_max);
   } else {
-    nudged_zero_point = static_cast<uint16>(TfLiteRound(zero_point_from_min));
+    nudged_zero_point = static_cast<uint16_t>(TfLiteRound(zero_point_from_min));
   }
   *nudged_min = (quant_min_float - nudged_zero_point) * (*nudged_scale);
   *nudged_max = (quant_max_float - nudged_zero_point) * (*nudged_scale);

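Concretely: for min = -1.0f, max = 1.0f on a uint8_t range [0, 255], the scale is 2/255 ~ 0.00784 and zero_point_from_min is 127.5, which TfLiteRound nudges to 128, so the representable range becomes about [-1.0039, 0.9961] with real 0.0 exactly on the grid. A standalone float check of that arithmetic (sketch, not the library routine):

    #include <cmath>
    #include <cstdio>

    int main() {
      const float min = -1.0f, max = 1.0f;
      const float scale = (max - min) / 255.0f;                  // ~0.0078431
      const float zp_from_min = 0.0f - min / scale;              // 127.5
      const int zp = static_cast<int>(std::round(zp_from_min));  // 128
      std::printf("nudged range: [%f, %f]\n",
                  (0.0f - zp) * scale, (255.0f - zp) * scale);   // ~[-1.0039, 0.9961]
    }
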
+ 3 - 3
tflite/kernels/internal/quantization_util.h → tensorflow/lite/kernels/internal/quantization_util.h

@@ -19,9 +19,9 @@ limitations under the License.
 #include <cstdint>
 #include <limits>
 
-#include "compatibility.h"
-#include "cppmath.h"
-#include "types.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/kernels/internal/cppmath.h"
+#include "tensorflow/lite/kernels/internal/types.h"
 
 namespace tflite {
 

+ 106 - 71
tflite/kernels/internal/reference/add.h → tensorflow/lite/kernels/internal/reference/add.h

@@ -16,7 +16,7 @@ limitations under the License.
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_
 
 #include "fixedpoint/fixedpoint.h"
-#include "tflite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/common.h"
 
 namespace tflite {
 
@@ -51,34 +51,39 @@ inline void Add(const ArithmeticParams& params,
 
 // Element-wise add that can often be used for inner loop of broadcast add as
 // well as the non-broadcast add.
+
+// This function is used for 8-bit as well as for 16-bit, but the accumulator
+// is 32-bit for both cases. The overflow does not happen due to the
+// choice of the shift (20 or 15, accordingly - see add.cc for more comments).
+template <typename T>
 inline void AddElementwise(int size, const ArithmeticParams& params,
-                           const uint8* input1_data, const uint8* input2_data,
-                           uint8* output_data) {
-  TFLITE_DCHECK_GT(params.input1_offset, -256);
-  TFLITE_DCHECK_GT(params.input2_offset, -256);
-  TFLITE_DCHECK_LT(params.input1_offset, 256);
-  TFLITE_DCHECK_LT(params.input2_offset, 256);
+                           const T* input1_data, const T* input2_data,
+                           T* output_data) {
+  TFLITE_DCHECK_GT(params.input1_offset, -std::numeric_limits<T>::max());
+  TFLITE_DCHECK_GT(params.input2_offset, -std::numeric_limits<T>::max());
+  TFLITE_DCHECK_LT(params.input1_offset, std::numeric_limits<T>::max());
+  TFLITE_DCHECK_LT(params.input2_offset, std::numeric_limits<T>::max());
 
   for (int i = 0; i < size; ++i) {
-    const int32 input1_val = params.input1_offset + input1_data[i];
-    const int32 input2_val = params.input2_offset + input2_data[i];
-    const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
-    const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
-    const int32 scaled_input1_val =
+    const int32_t input1_val = params.input1_offset + input1_data[i];
+    const int32_t input2_val = params.input2_offset + input2_data[i];
+    const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
+    const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
+    const int32_t scaled_input1_val =
         MultiplyByQuantizedMultiplierSmallerThanOneExp(
             shifted_input1_val, params.input1_multiplier, params.input1_shift);
-    const int32 scaled_input2_val =
+    const int32_t scaled_input2_val =
         MultiplyByQuantizedMultiplierSmallerThanOneExp(
             shifted_input2_val, params.input2_multiplier, params.input2_shift);
-    const int32 raw_sum = scaled_input1_val + scaled_input2_val;
-    const int32 raw_output =
+    const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
+    const int32_t raw_output =
         MultiplyByQuantizedMultiplierSmallerThanOneExp(
             raw_sum, params.output_multiplier, params.output_shift) +
         params.output_offset;
-    const int32 clamped_output =
+    const int32_t clamped_output =
         std::min(params.quantized_activation_max,
                  std::max(params.quantized_activation_min, raw_output));
-    output_data[i] = static_cast<uint8>(clamped_output);
+    output_data[i] = static_cast<T>(clamped_output);
   }
 }
 
@@ -86,40 +91,40 @@ inline void AddElementwise(int size, const ArithmeticParams& params,
 // broadcast add, so that, for example, scalar-broadcast with batch will still
 // be fast.
 inline void AddScalarBroadcast(int size, const ArithmeticParams& params,
-                               uint8 input1_data, const uint8* input2_data,
-                               uint8* output_data) {
+                               uint8_t input1_data, const uint8_t* input2_data,
+                               uint8_t* output_data) {
   TFLITE_DCHECK_GT(params.input1_offset, -256);
   TFLITE_DCHECK_GT(params.input2_offset, -256);
   TFLITE_DCHECK_LT(params.input1_offset, 256);
   TFLITE_DCHECK_LT(params.input2_offset, 256);
 
-  const int32 input1_val = params.input1_offset + input1_data;
-  const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
-  const int32 scaled_input1_val =
+  const int32_t input1_val = params.input1_offset + input1_data;
+  const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
+  const int32_t scaled_input1_val =
       MultiplyByQuantizedMultiplierSmallerThanOneExp(
           shifted_input1_val, params.input1_multiplier, params.input1_shift);
   for (int i = 0; i < size; ++i) {
-    const int32 input2_val = params.input2_offset + input2_data[i];
-    const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
-    const int32 scaled_input2_val =
+    const int32_t input2_val = params.input2_offset + input2_data[i];
+    const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
+    const int32_t scaled_input2_val =
         MultiplyByQuantizedMultiplierSmallerThanOneExp(
             shifted_input2_val, params.input2_multiplier, params.input2_shift);
-    const int32 raw_sum = scaled_input1_val + scaled_input2_val;
-    const int32 raw_output =
+    const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
+    const int32_t raw_output =
         MultiplyByQuantizedMultiplierSmallerThanOneExp(
             raw_sum, params.output_multiplier, params.output_shift) +
         params.output_offset;
-    const int32 clamped_output =
+    const int32_t clamped_output =
         std::min(params.quantized_activation_max,
                  std::max(params.quantized_activation_min, raw_output));
-    output_data[i] = static_cast<uint8>(clamped_output);
+    output_data[i] = static_cast<uint8_t>(clamped_output);
   }
 }
 
 inline void Add(const ArithmeticParams& params,
-                const RuntimeShape& input1_shape, const uint8* input1_data,
-                const RuntimeShape& input2_shape, const uint8* input2_data,
-                const RuntimeShape& output_shape, uint8* output_data) {
+                const RuntimeShape& input1_shape, const uint8_t* input1_data,
+                const RuntimeShape& input2_shape, const uint8_t* input2_data,
+                const RuntimeShape& output_shape, uint8_t* output_data) {
   TFLITE_DCHECK_LE(params.quantized_activation_min,
                    params.quantized_activation_max);
   const int flat_size =
@@ -132,24 +137,53 @@ inline void Add(const ArithmeticParams& params,
   AddElementwise(flat_size, params, input1_data, input2_data, output_data);
 }
 
+inline void AddGeneralParamScale(const ArithmeticParams& params,
+                                 const RuntimeShape& input1_shape,
+                                 const int16_t* input1_data,
+                                 const RuntimeShape& input2_shape,
+                                 const int16_t* input2_data,
+                                 const RuntimeShape& output_shape,
+                                 int16_t* output_data) {
+  TFLITE_DCHECK_LE(params.quantized_activation_min,
+                   params.quantized_activation_max);
+  const int flat_size =
+      MatchingElementsSize(input1_shape, input2_shape, output_shape);
+
+  int max_value = std::numeric_limits<int16_t>::max();
+
+  TFLITE_DCHECK_GT(params.input1_offset, -max_value);
+  TFLITE_DCHECK_GT(params.input2_offset, -max_value);
+  TFLITE_DCHECK_LT(params.input1_offset, max_value);
+  TFLITE_DCHECK_LT(params.input2_offset, max_value);
+  AddElementwise(flat_size, params, input1_data, input2_data, output_data);
+}
+
 inline void Add(const ArithmeticParams& params,
-                const RuntimeShape& input1_shape, const int16* input1_data,
-                const RuntimeShape& input2_shape, const int16* input2_data,
-                const RuntimeShape& output_shape, int16* output_data) {
+                const RuntimeShape& input1_shape, const int16_t* input1_data,
+                const RuntimeShape& input2_shape, const int16_t* input2_data,
+                const RuntimeShape& output_shape, int16_t* output_data,
+                bool pot_scale = true) {
+  if (!pot_scale) {
+    AddGeneralParamScale(params, input1_shape, input1_data, input2_shape,
+                         input2_data, output_shape, output_data);
+    return;
+  }
+
   TFLITE_DCHECK_LE(params.quantized_activation_min,
                    params.quantized_activation_max);
 
   const int input1_shift = params.input1_shift;
   const int flat_size =
       MatchingElementsSize(input1_shape, input2_shape, output_shape);
-  const int16 output_activation_min = params.quantized_activation_min;
-  const int16 output_activation_max = params.quantized_activation_max;
+  const int16_t output_activation_min = params.quantized_activation_min;
+  const int16_t output_activation_max = params.quantized_activation_max;
 
   TFLITE_DCHECK(input1_shift == 0 || params.input2_shift == 0);
   TFLITE_DCHECK_LE(input1_shift, 0);
   TFLITE_DCHECK_LE(params.input2_shift, 0);
-  const int16* not_shift_input = input1_shift == 0 ? input1_data : input2_data;
-  const int16* shift_input = input1_shift == 0 ? input2_data : input1_data;
+  const int16_t* not_shift_input =
+      input1_shift == 0 ? input1_data : input2_data;
+  const int16_t* shift_input = input1_shift == 0 ? input2_data : input1_data;
   const int input_right_shift =
       input1_shift == 0 ? -params.input2_shift : -input1_shift;
 
@@ -161,8 +195,8 @@ inline void Add(const ArithmeticParams& params,
     F0 scaled_input = F0::FromRaw(
         gemmlowp::RoundingDivideByPOT(shift_input[i], input_right_shift));
     F0 result = gemmlowp::SaturatingAdd(scaled_input, input_ready_scaled);
-    const int16 raw_output = result.raw();
-    const int16 clamped_output = std::min(
+    const int16_t raw_output = result.raw();
+    const int16_t clamped_output = std::min(
         output_activation_max, std::max(output_activation_min, raw_output));
     output_data[i] = clamped_output;
   }
@@ -218,11 +252,11 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
 
 inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
                                const RuntimeShape& input1_shape,
-                               const int32* input1_data,
+                               const int32_t* input1_data,
                                const RuntimeShape& input2_shape,
-                               const int32* input2_data,
+                               const int32_t* input2_data,
                                const RuntimeShape& output_shape,
-                               int32* output_data) {
+                               int32_t* output_data) {
   NdArrayDesc<4> desc1;
   NdArrayDesc<4> desc2;
   NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
@@ -257,13 +291,14 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
   }
 }
 
-inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
-                               const RuntimeShape& input1_shape,
-                               const uint8* input1_data,
-                               const RuntimeShape& input2_shape,
-                               const uint8* input2_data,
-                               const RuntimeShape& output_shape,
-                               uint8* output_data) {
+// This function is used for 8-bit as well as for 16-bit, but the accumulator
+// is 32-bit for both cases. The overflow does not happen due to the
+// choice of the shift (20 or 15, accordingly - see add.cc for more comments).
+template <typename T>
+inline void BroadcastAdd4DSlow(
+    const ArithmeticParams& params, const RuntimeShape& input1_shape,
+    const T* input1_data, const RuntimeShape& input2_shape,
+    const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
   NdArrayDesc<4> desc1;
   NdArrayDesc<4> desc2;
   NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
@@ -286,34 +321,34 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
     for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
       for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
         for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
-          const int32 input1_val =
+          const int32_t input1_val =
               params.input1_offset +
               input1_data[SubscriptToIndex(desc1, b, y, x, c)];
-          const int32 input2_val =
+          const int32_t input2_val =
               params.input2_offset +
               input2_data[SubscriptToIndex(desc2, b, y, x, c)];
-          const int32 shifted_input1_val =
+          const int32_t shifted_input1_val =
               input1_val * (1 << params.left_shift);
-          const int32 shifted_input2_val =
+          const int32_t shifted_input2_val =
               input2_val * (1 << params.left_shift);
-          const int32 scaled_input1_val =
+          const int32_t scaled_input1_val =
               MultiplyByQuantizedMultiplierSmallerThanOneExp(
                   shifted_input1_val, params.input1_multiplier,
                   params.input1_shift);
-          const int32 scaled_input2_val =
+          const int32_t scaled_input2_val =
               MultiplyByQuantizedMultiplierSmallerThanOneExp(
                   shifted_input2_val, params.input2_multiplier,
                   params.input2_shift);
-          const int32 raw_sum = scaled_input1_val + scaled_input2_val;
-          const int32 raw_output =
+          const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
+          const int32_t raw_output =
               MultiplyByQuantizedMultiplierSmallerThanOneExp(
                   raw_sum, params.output_multiplier, params.output_shift) +
               params.output_offset;
-          const int32 clamped_output =
+          const int32_t clamped_output =
               std::min(params.quantized_activation_max,
                        std::max(params.quantized_activation_min, raw_output));
           output_data[Offset(extended_output_shape, b, y, x, c)] =
-              static_cast<uint8>(clamped_output);
+              static_cast<T>(clamped_output);
         }
       }
     }
@@ -322,11 +357,11 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
 
 inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params,
                                  const RuntimeShape& unswitched_input1_shape,
-                                 const uint8* unswitched_input1_data,
+                                 const uint8_t* unswitched_input1_data,
                                  const RuntimeShape& unswitched_input2_shape,
-                                 const uint8* unswitched_input2_data,
+                                 const uint8_t* unswitched_input2_data,
                                  const RuntimeShape& output_shape,
-                                 uint8* output_data) {
+                                 uint8_t* output_data) {
   ArithmeticParams switched_params = unswitched_params;
   switched_params.input1_offset = unswitched_params.input2_offset;
   switched_params.input1_multiplier = unswitched_params.input2_multiplier;
@@ -341,18 +376,18 @@ inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params,
 
   const ArithmeticParams& params =
       use_unswitched ? unswitched_params : switched_params;
-  const uint8* input1_data =
+  const uint8_t* input1_data =
       use_unswitched ? unswitched_input1_data : unswitched_input2_data;
-  const uint8* input2_data =
+  const uint8_t* input2_data =
       use_unswitched ? unswitched_input2_data : unswitched_input1_data;
 
   // Fivefold nested loops. The second input resets its position for each
   // iteration of the second loop. The first input resets its position at the
   // beginning of the fourth loop. The innermost loop is an elementwise add of
   // sections of the arrays.
-  uint8* output_data_ptr = output_data;
-  const uint8* input1_data_ptr = input1_data;
-  const uint8* input2_data_reset = input2_data;
+  uint8_t* output_data_ptr = output_data;
+  const uint8_t* input1_data_ptr = input1_data;
+  const uint8_t* input2_data_reset = input2_data;
   // In the fivefold pattern, y0, y2 and y4 are not broadcast, and so shared
   // between input shapes. y3 for input 1 is always broadcast, and so the
   // dimension there is 1, whereas optionally y1 might be broadcast for input 2.
@@ -368,7 +403,7 @@ inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params,
     // General fivefold pattern, with y4 > 1 so there is a non-broadcast inner
     // dimension.
     for (int i0 = 0; i0 < y0; ++i0) {
-      const uint8* input2_data_ptr;
+      const uint8_t* input2_data_ptr;
       for (int i1 = 0; i1 < y1; ++i1) {
         input2_data_ptr = input2_data_reset;
         for (int i2 = 0; i2 < y2; ++i2) {
@@ -397,7 +432,7 @@ inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params,
     // for y4 == 1 and the loop over y3 is contained within the
     // AddScalarBroadcast function.
     for (int i0 = 0; i0 < y0; ++i0) {
-      const uint8* input2_data_ptr;
+      const uint8_t* input2_data_ptr;
       for (int i1 = 0; i1 < y1; ++i1) {
         input2_data_ptr = input2_data_reset;
         for (int i2 = 0; i2 < y2; ++i2) {

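The shifts mentioned in the new comment (20 for the 8-bit path, 15 for 16-bit) are exactly what keeps the 32-bit accumulator from overflowing; the worst-case magnitudes can be checked at compile time (standalone sketch of the bound, not library code):

    #include <cstdint>

    // 8-bit path: |offset + data| <= 255 + 255 = 510, then << 20.
    static_assert((510LL << 20) < INT32_MAX, "uint8_t add path fits in int32_t");
    // 16-bit path: |offset + data| <= 32766 + 32768 = 65534, then << 15.
    static_assert((65534LL << 15) < INT32_MAX, "int16_t add path fits in int32_t");
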
+ 1 - 1
tflite/kernels/internal/reference/arg_min_max.h → tensorflow/lite/kernels/internal/reference/arg_min_max.h

@@ -15,7 +15,7 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
 
-#include "tflite/kernels/internal/types.h"
+#include "tensorflow/lite/kernels/internal/types.h"
 
 namespace tflite {
 

+ 3 - 3
tflite/kernels/internal/reference/binary_function.h → tensorflow/lite/kernels/internal/reference/binary_function.h

@@ -15,9 +15,9 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_
 
-#include "tflite/kernels/internal/common.h"
-#include "tflite/kernels/internal/compatibility.h"
-#include "tflite/kernels/internal/types.h"
+#include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/kernels/internal/types.h"
 
 namespace tflite {
 

+ 1 - 1
tflite/kernels/internal/reference/ceil.h → tensorflow/lite/kernels/internal/reference/ceil.h

@@ -17,7 +17,7 @@ limitations under the License.
 
 #include <cmath>
 
-#include "tflite/kernels/internal/types.h"
+#include "tensorflow/lite/kernels/internal/types.h"
 
 namespace tflite {
 

+ 26 - 26
tflite/kernels/internal/reference/comparisons.h → tensorflow/lite/kernels/internal/reference/comparisons.h

@@ -15,10 +15,10 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_
 
-#include "tflite/c/common.h"
-#include "tflite/kernels/internal/common.h"
-#include "tflite/kernels/internal/types.h"
-#include "string_util.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/types.h"
+#include "tensorflow/lite/string_util.h"
 
 namespace tflite {
 
@@ -105,30 +105,30 @@ inline void Comparison(const ComparisonParams& op_params,
                            input2_data, output_shape, output_data);
 }
 
-template <typename T, ComparisonFn<int32> F>
+template <typename T, ComparisonFn<int32_t> F>
 inline void ComparisonWithScaling(
     const ComparisonParams& op_params, const RuntimeShape& input1_shape,
     const T* input1_data, const RuntimeShape& input2_shape,
     const T* input2_data, const RuntimeShape& output_shape, bool* output_data) {
   int left_shift = op_params.left_shift;
-  int32 input1_offset = op_params.input1_offset;
-  int32 input1_multiplier = op_params.input1_multiplier;
+  int32_t input1_offset = op_params.input1_offset;
+  int32_t input1_multiplier = op_params.input1_multiplier;
   int input1_shift = op_params.input1_shift;
-  int32 input2_offset = op_params.input2_offset;
-  int32 input2_multiplier = op_params.input2_multiplier;
+  int32_t input2_offset = op_params.input2_offset;
+  int32_t input2_multiplier = op_params.input2_multiplier;
   int input2_shift = op_params.input2_shift;
 
   const int64_t flatsize =
       MatchingFlatSize(input1_shape, input2_shape, output_shape);
   for (int64_t i = 0; i < flatsize; ++i) {
-    const int32 input1_val = input1_offset + input1_data[i];
-    const int32 input2_val = input2_offset + input2_data[i];
-    const int32 shifted_input1_val = input1_val * (1 << left_shift);
-    const int32 shifted_input2_val = input2_val * (1 << left_shift);
-    const int32 scaled_input1_val =
+    const int32_t input1_val = input1_offset + input1_data[i];
+    const int32_t input2_val = input2_offset + input2_data[i];
+    const int32_t shifted_input1_val = input1_val * (1 << left_shift);
+    const int32_t shifted_input2_val = input2_val * (1 << left_shift);
+    const int32_t scaled_input1_val =
         MultiplyByQuantizedMultiplierSmallerThanOneExp(
             shifted_input1_val, input1_multiplier, input1_shift);
-    const int32 scaled_input2_val =
+    const int32_t scaled_input2_val =
         MultiplyByQuantizedMultiplierSmallerThanOneExp(
             shifted_input2_val, input2_multiplier, input2_shift);
     output_data[i] = F(scaled_input1_val, scaled_input2_val);
@@ -218,7 +218,7 @@ inline void BroadcastComparison4DSlow(const ComparisonParams& op_params,
                                           output_shape, output_data);
 }
 
-template <typename T, ComparisonFn<int32> F>
+template <typename T, ComparisonFn<int32_t> F>
 inline void BroadcastComparison4DSlowWithScaling(
     const ComparisonParams& op_params,
     const RuntimeShape& unextended_input1_shape, const T* input1_data,
@@ -230,29 +230,29 @@ inline void BroadcastComparison4DSlowWithScaling(
                                           unextended_output_shape);
 
   int left_shift = op_params.left_shift;
-  int32 input1_offset = op_params.input1_offset;
-  int32 input1_multiplier = op_params.input1_multiplier;
+  int32_t input1_offset = op_params.input1_offset;
+  int32_t input1_multiplier = op_params.input1_multiplier;
   int input1_shift = op_params.input1_shift;
-  int32 input2_offset = op_params.input2_offset;
-  int32 input2_multiplier = op_params.input2_multiplier;
+  int32_t input2_offset = op_params.input2_offset;
+  int32_t input2_multiplier = op_params.input2_multiplier;
   int input2_shift = op_params.input2_shift;
 
   for (int b = 0; b < dims.output_shape.Dims(0); ++b) {
     for (int y = 0; y < dims.output_shape.Dims(1); ++y) {
       for (int x = 0; x < dims.output_shape.Dims(2); ++x) {
         for (int c = 0; c < dims.output_shape.Dims(3); ++c) {
-          const int32 input1_val =
+          const int32_t input1_val =
               input1_offset +
               input1_data[SubscriptToIndex(dims.desc1, b, y, x, c)];
-          const int32 input2_val =
+          const int32_t input2_val =
               input2_offset +
               input2_data[SubscriptToIndex(dims.desc2, b, y, x, c)];
-          const int32 shifted_input1_val = input1_val * (1 << left_shift);
-          const int32 shifted_input2_val = input2_val * (1 << left_shift);
-          const int32 scaled_input1_val =
+          const int32_t shifted_input1_val = input1_val * (1 << left_shift);
+          const int32_t shifted_input2_val = input2_val * (1 << left_shift);
+          const int32_t scaled_input1_val =
               MultiplyByQuantizedMultiplierSmallerThanOneExp(
                   shifted_input1_val, input1_multiplier, input1_shift);
-          const int32 scaled_input2_val =
+          const int32_t scaled_input2_val =
               MultiplyByQuantizedMultiplierSmallerThanOneExp(
                   shifted_input2_val, input2_multiplier, input2_shift);
           output_data[Offset(dims.output_shape, b, y, x, c)] =

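The rescaling exists so that two tensors quantized with different (scale, zero_point) pairs can be compared: both are mapped onto a common fixed-point scale first, making the integer comparison agree, up to multiplier rounding, with the real-valued one. The float equivalent of what the scaled comparison computes (illustrative names):

    #include <cstdint>

    inline bool GreaterEqualReal(uint8_t a, float a_scale, int32_t a_zero_point,
                                 uint8_t b, float b_scale, int32_t b_zero_point) {
      return a_scale * (a - a_zero_point) >= b_scale * (b - b_zero_point);
    }
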
+ 10 - 10
tflite/kernels/internal/reference/concatenation.h → tensorflow/lite/kernels/internal/reference/concatenation.h

@@ -16,10 +16,10 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_
 
-#include "tflite/kernels/internal/common.h"
-#include "tflite/kernels/internal/compatibility.h"
-#include "tflite/kernels/internal/cppmath.h"
-#include "tflite/kernels/internal/types.h"
+#include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/kernels/internal/cppmath.h"
+#include "tensorflow/lite/kernels/internal/types.h"
 
 namespace tflite {
 namespace reference_ops {
@@ -74,14 +74,14 @@ inline void Concatenation(const ConcatenationParams& params,
 // when optimizing this routine further.
 inline void ConcatenationWithScaling(const ConcatenationParams& params,
                                      const RuntimeShape* const* input_shapes,
-                                     const uint8* const* input_data,
+                                     const uint8_t* const* input_data,
                                      const RuntimeShape& output_shape,
-                                     uint8* output_data) {
+                                     uint8_t* output_data) {
   int axis = params.axis;
-  const int32* input_zeropoint = params.input_zeropoint;
+  const int32_t* input_zeropoint = params.input_zeropoint;
   const float* input_scale = params.input_scale;
   int inputs_count = params.inputs_count;
-  const int32 output_zeropoint = params.output_zeropoint;
+  const int32_t output_zeropoint = params.output_zeropoint;
   const float output_scale = params.output_scale;
 
   const int concat_dimensions = output_shape.DimensionsCount();
@@ -110,11 +110,11 @@ inline void ConcatenationWithScaling(const ConcatenationParams& params,
   }
 
   const float inverse_output_scale = 1.f / output_scale;
-  uint8* output_ptr = output_data;
+  uint8_t* output_ptr = output_data;
   for (int k = 0; k < outer_size; k++) {
     for (int i = 0; i < inputs_count; ++i) {
       const int copy_size = input_shapes[i]->Dims(axis) * base_inner_size;
-      const uint8* input_ptr = input_data[i] + k * copy_size;
+      const uint8_t* input_ptr = input_data[i] + k * copy_size;
       if (input_zeropoint[i] == output_zeropoint &&
           input_scale[i] == output_scale) {
         memcpy(output_ptr, input_ptr, copy_size);

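The memcpy above is the fast path for inputs that already share the output's quantization; any other input has to be requantized element by element. A float sketch of that per-element step (hypothetical helper; the kernel hoists inverse_output_scale out of the loop as shown):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    inline uint8_t Requantize(uint8_t in, int32_t in_zp, float in_scale,
                              int32_t out_zp, float inverse_output_scale) {
      const float v = out_zp + (in - in_zp) * in_scale * inverse_output_scale;
      return static_cast<uint8_t>(
          std::max(0.0f, std::min(255.0f, std::round(v))));
    }
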
+ 24 - 22
tflite/kernels/internal/reference/conv.h → tensorflow/lite/kernels/internal/reference/conv.h

@@ -15,8 +15,10 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_
 
-#include "tflite/kernels/internal/types.h"
-#include "tflite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/types.h"
+#include "tensorflow/lite/kernels/internal/common.h"
+
+
 
 namespace tflite {
 
@@ -97,11 +99,11 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
 }
 
 inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
-                 const uint8* input_data, const RuntimeShape& filter_shape,
-                 const uint8* filter_data, const RuntimeShape& bias_shape,
-                 const int32* bias_data, const RuntimeShape& output_shape,
-                 uint8* output_data, const RuntimeShape& im2col_shape,
-                 uint8* im2col_data, void* cpu_backend_context) {
+                 const uint8_t* input_data, const RuntimeShape& filter_shape,
+                 const uint8_t* filter_data, const RuntimeShape& bias_shape,
+                 const int32_t* bias_data, const RuntimeShape& output_shape,
+                 uint8_t* output_data, const RuntimeShape& im2col_shape,
+                 uint8_t* im2col_data, void* cpu_backend_context) {
   (void)cpu_backend_context;  // only used in optimized code.
   (void)im2col_data;   // only used in optimized code.
   (void)im2col_shape;  // only used in optimized code.
@@ -111,13 +113,13 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
   const int dilation_height_factor = params.dilation_height_factor;
   const int pad_width = params.padding_values.width;
   const int pad_height = params.padding_values.height;
-  const int32 input_offset = params.input_offset;
-  const int32 filter_offset = params.weights_offset;
-  const int32 output_offset = params.output_offset;
-  const int32 output_multiplier = params.output_multiplier;
+  const int32_t input_offset = params.input_offset;
+  const int32_t filter_offset = params.weights_offset;
+  const int32_t output_offset = params.output_offset;
+  const int32_t output_multiplier = params.output_multiplier;
   const int output_shift = params.output_shift;
-  const int32 output_activation_min = params.quantized_activation_min;
-  const int32 output_activation_max = params.quantized_activation_max;
+  const int32_t output_activation_min = params.quantized_activation_min;
+  const int32_t output_activation_max = params.quantized_activation_max;
   TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
 
   TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
@@ -141,7 +143,7 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
         for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
           const int in_x_origin = (out_x * stride_width) - pad_width;
           const int in_y_origin = (out_y * stride_height) - pad_height;
-          int32 acc = 0;
+          int32_t acc = 0;
           for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
             for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
               for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
@@ -152,9 +154,9 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
                 // use zero as a default value.
                 if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                     (in_y < input_height)) {
-                  int32 input_val = input_data[Offset(input_shape, batch, in_y,
-                                                      in_x, in_channel)];
-                  int32 filter_val =
+                  int32_t input_val = input_data[Offset(
+                      input_shape, batch, in_y, in_x, in_channel)];
+                  int32_t filter_val =
                       filter_data[Offset(filter_shape, out_channel, filter_y,
                                          filter_x, in_channel)];
                   acc +=
@@ -172,7 +174,7 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
           acc = std::max(acc, output_activation_min);
           acc = std::min(acc, output_activation_max);
           output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] =
-              static_cast<uint8>(acc);
+              static_cast<uint8_t>(acc);
         }
       }
     }
@@ -218,7 +220,7 @@ inline void HybridConvPerChannel(
         for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
           const int in_x_origin = (out_x * stride_width) - pad_width;
           const int in_y_origin = (out_y * stride_height) - pad_height;
-          int32 acc = 0;
+          int32_t acc = 0;
           for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
             for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
               for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
@@ -229,9 +231,9 @@ inline void HybridConvPerChannel(
                 // use zero as a default value.
                 if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                     (in_y < input_height)) {
-                  int32 input_val = input_data[Offset(input_shape, batch, in_y,
-                                                      in_x, in_channel)];
-                  int32 filter_val =
+                  int32_t input_val = input_data[Offset(
+                      input_shape, batch, in_y, in_x, in_channel)];
+                  int32_t filter_val =
                       filter_data[Offset(filter_shape, out_channel, filter_y,
                                          filter_x, in_channel)];
                   acc += filter_val * (input_val - input_offset[batch]);

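The offsets in these kernels are conventionally the negated zero points, so acc holds the real-valued convolution scaled by input_scale * weight_scale; MultiplyByQuantizedMultiplier then applies input_scale * weight_scale / output_scale as a fixed-point multiplier. A float stand-in for that final requantization (sketch only, with an assumed clamp to [0, 255]):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    inline uint8_t RequantizeAcc(int32_t acc,
                                 float real_multiplier,  // ~ Si * Sw / So
                                 int32_t output_offset) {
      const int32_t out =
          static_cast<int32_t>(std::lround(acc * real_multiplier)) + output_offset;
      return static_cast<uint8_t>(std::max<int32_t>(0, std::min<int32_t>(255, out)));
    }
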
+ 3 - 3
tflite/kernels/internal/reference/depthwiseconv_float.h → tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h

@@ -15,9 +15,9 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_FLOAT_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_FLOAT_H_
 
-#include "tflite/kernels/internal/common.h"
-#include "tflite/kernels/internal/compatibility.h"
-#include "tflite/kernels/internal/types.h"
+#include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/kernels/internal/types.h"
 
 namespace tflite {
 namespace reference_ops {

+ 47 - 47
tflite/kernels/internal/reference/depthwiseconv_uint8.h → tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h

@@ -18,9 +18,9 @@ limitations under the License.
 #include <algorithm>
 
 #include "fixedpoint/fixedpoint.h"
-#include "tflite/kernels/internal/common.h"
-#include "tflite/kernels/internal/compatibility.h"
-#include "tflite/kernels/internal/types.h"
+#include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/kernels/internal/types.h"
 
 namespace tflite {
 
@@ -62,21 +62,21 @@ namespace reference_ops {
 namespace depthwise_conv {
 
 template <DepthwiseConvOutputRounding output_rounding>
-inline int32 DepthwiseConvRound(int32 x, int32 quantized_multiplier,
-                                int shift) {
+inline int32_t DepthwiseConvRound(int32_t x, int32_t quantized_multiplier,
+                                  int shift) {
   TFLITE_DCHECK_NE(output_rounding, DepthwiseConvOutputRounding::kNone);
   return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
 }
 
 template <>
-inline int32 DepthwiseConvRound<DepthwiseConvOutputRounding::kAwayFromZero>(
-    int32 x, int32 quantized_multiplier, int shift) {
+inline int32_t DepthwiseConvRound<DepthwiseConvOutputRounding::kAwayFromZero>(
+    int32_t x, int32_t quantized_multiplier, int shift) {
   return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
 }
 
 template <>
-inline int32 DepthwiseConvRound<DepthwiseConvOutputRounding::kUpward>(
-    int32 x, int32 quantized_multiplier, int shift) {
+inline int32_t DepthwiseConvRound<DepthwiseConvOutputRounding::kUpward>(
+    int32_t x, int32_t quantized_multiplier, int shift) {
   using gemmlowp::SaturatingRoundingDoublingHighMul;
   const int left_shift = shift > 0 ? shift : 0;
   const int right_shift = shift > 0 ? 0 : -shift;
@@ -89,13 +89,12 @@ inline int32 DepthwiseConvRound<DepthwiseConvOutputRounding::kUpward>(
 
 template <DepthwiseConvOutputRounding output_rounding>
 struct DepthwiseConvBasicKernel {
-  static inline void Run(const DepthwiseParams& params,
-                         const RuntimeShape& input_shape,
-                         const uint8* input_data,
-                         const RuntimeShape& filter_shape,
-                         const uint8* filter_data,
-                         const RuntimeShape& bias_shape, const int32* bias_data,
-                         const RuntimeShape& output_shape, uint8* output_data) {
+  static inline void Run(
+      const DepthwiseParams& params, const RuntimeShape& input_shape,
+      const uint8_t* input_data, const RuntimeShape& filter_shape,
+      const uint8_t* filter_data, const RuntimeShape& bias_shape,
+      const int32_t* bias_data, const RuntimeShape& output_shape,
+      uint8_t* output_data) {
     const int stride_width = params.stride_width;
     const int stride_height = params.stride_height;
     const int dilation_width_factor = params.dilation_width_factor;
@@ -103,12 +102,12 @@ struct DepthwiseConvBasicKernel {
     const int pad_width = params.padding_values.width;
     const int pad_height = params.padding_values.height;
     const int depth_multiplier = params.depth_multiplier;
-    const int32 output_activation_min = params.quantized_activation_min;
-    const int32 output_activation_max = params.quantized_activation_max;
-    const int32 input_offset = params.input_offset;
-    const int32 filter_offset = params.weights_offset;
-    const int32 output_offset = params.output_offset;
-    const int32 output_multiplier = params.output_multiplier;
+    const int32_t output_activation_min = params.quantized_activation_min;
+    const int32_t output_activation_max = params.quantized_activation_max;
+    const int32_t input_offset = params.input_offset;
+    const int32_t filter_offset = params.weights_offset;
+    const int32_t output_offset = params.output_offset;
+    const int32_t output_multiplier = params.output_multiplier;
     const int output_shift = params.output_shift;
     TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
     TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
@@ -135,7 +134,7 @@ struct DepthwiseConvBasicKernel {
               const int oc = m + ic * depth_multiplier;
               const int in_x_origin = (out_x * stride_width) - pad_width;
               const int in_y_origin = (out_y * stride_height) - pad_height;
-              int32 acc = 0;
+              int32_t acc = 0;
               for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
                 for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
                   const int in_x =
@@ -146,9 +145,9 @@ struct DepthwiseConvBasicKernel {
                   // use zero as a default value.
                   if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                       (in_y < input_height)) {
-                    int32 input_val =
+                    int32_t input_val =
                         input_data[Offset(input_shape, b, in_y, in_x, ic)];
-                    int32 filter_val = filter_data[Offset(
+                    int32_t filter_val = filter_data[Offset(
                         filter_shape, 0, filter_y, filter_x, oc)];
                     acc += (filter_val + filter_offset) *
                            (input_val + input_offset);
@@ -164,7 +163,7 @@ struct DepthwiseConvBasicKernel {
               acc = std::max(acc, output_activation_min);
               acc = std::min(acc, output_activation_max);
               output_data[Offset(output_shape, b, out_y, out_x, oc)] =
-                  static_cast<uint8>(acc);
+                  static_cast<uint8_t>(acc);
             }
           }
         }
@@ -176,10 +175,10 @@ struct DepthwiseConvBasicKernel {
   // MultiplyByQuantizedMultiplier or DepthwiseConvRound function.
   static inline void RunPerChannel(
       const DepthwiseParams& params, const RuntimeShape& input_shape,
-      const int8* input_data, const RuntimeShape& filter_shape,
-      const int8* filter_data, const RuntimeShape& bias_shape,
-      const int32* bias_data, const RuntimeShape& output_shape,
-      int8* output_data) {
+      const int8_t* input_data, const RuntimeShape& filter_shape,
+      const int8_t* filter_data, const RuntimeShape& bias_shape,
+      const int32_t* bias_data, const RuntimeShape& output_shape,
+      int8_t* output_data) {
     // Get parameters.
     // TODO(b/141565753): Re-introduce ScopedProfilingLabel on Micro.
     const int stride_width = params.stride_width;
@@ -189,12 +188,12 @@ struct DepthwiseConvBasicKernel {
     const int pad_width = params.padding_values.width;
     const int pad_height = params.padding_values.height;
     const int depth_multiplier = params.depth_multiplier;
-    const int32 input_offset = params.input_offset;
-    const int32 output_offset = params.output_offset;
-    const int32 output_activation_min = params.quantized_activation_min;
-    const int32 output_activation_max = params.quantized_activation_max;
-    const int32* output_multiplier = params.output_multiplier_per_channel;
-    const int32* output_shift = params.output_shift_per_channel;
+    const int32_t input_offset = params.input_offset;
+    const int32_t output_offset = params.output_offset;
+    const int32_t output_activation_min = params.quantized_activation_min;
+    const int32_t output_activation_max = params.quantized_activation_max;
+    const int32_t* output_multiplier = params.output_multiplier_per_channel;
+    const int32_t* output_shift = params.output_shift_per_channel;
 
     // Check dimensions of the tensors.
     TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
@@ -222,7 +221,7 @@ struct DepthwiseConvBasicKernel {
               const int output_channel = m + in_channel * depth_multiplier;
               const int in_x_origin = (out_x * stride_width) - pad_width;
               const int in_y_origin = (out_y * stride_height) - pad_height;
-              int32 acc = 0;
+              int32_t acc = 0;
               for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
                 for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
                   const int in_x =
@@ -234,17 +233,18 @@ struct DepthwiseConvBasicKernel {
                       (in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                       (in_y < input_height);
                   if (is_point_inside_image) {
-                    int32 input_val = input_data[Offset(
+                    int32_t input_val = input_data[Offset(
                         input_shape, batch, in_y, in_x, in_channel)];
-                    int32 filter_val = filter_data[Offset(
+                    int32_t filter_val = filter_data[Offset(
                         filter_shape, 0, filter_y, filter_x, output_channel)];
                     // Accumulate with 32 bits accumulator.
                     // In the nudging process during model quantization, we
                     // force real value of 0.0 be represented by a quantized
-                    // value. This guarantees that the input_offset is a int8,
-                    // even though it is represented using int32. int32 += int8
-                    // * (int8 - int8) so the highest value we can get from each
-                    // accumulation is [-127, 127] * ([-128, 127] -
+                    // value. This guarantees that the input_offset is an int8_t,
+                    // even though it is represented using int32_t. int32_t +=
+                    // int8_t
+                    // * (int8_t - int8_t) so the highest value we can get from
+                    // each accumulation is [-127, 127] * ([-128, 127] -
                     // [-128, 127]), which is [-32512, 32512]. log2(32512)
                     // = 14.98, which means we can accumulate at least 2^16
                     // multiplications without overflow. The accumulator is
@@ -279,10 +279,10 @@ struct DepthwiseConvBasicKernel {
 
 inline void DepthwiseConv(
     const DepthwiseParams& params, const RuntimeShape& input_shape,
-    const uint8* input_data, const RuntimeShape& filter_shape,
-    const uint8* filter_data, const RuntimeShape& bias_shape,
-    const int32* bias_data, const RuntimeShape& output_shape,
-    uint8* output_data) {
+    const uint8_t* input_data, const RuntimeShape& filter_shape,
+    const uint8_t* filter_data, const RuntimeShape& bias_shape,
+    const int32_t* bias_data, const RuntimeShape& output_shape,
+    uint8_t* output_data) {
   return depthwise_conv::DepthwiseConvBasicKernel<
       DepthwiseConvOutputRounding::kAwayFromZero>::Run(params, input_shape,
                                                        input_data, filter_shape,

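The long accumulation-range comment in RunPerChannel can be checked mechanically: with each product bounded by +/-32512 (the figure stated there), even 2^16 accumulations stay inside int32_t (compile-time sketch):

    #include <cstdint>

    // 32512 * 2^16 = 2,130,706,432 < INT32_MAX = 2,147,483,647.
    static_assert(32512LL * (1LL << 16) < INT32_MAX,
                  "2^16 depthwise products fit in a 32-bit accumulator");
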
+ 9 - 9
tflite/kernels/internal/reference/dequantize.h → tensorflow/lite/kernels/internal/reference/dequantize.h

@@ -19,8 +19,8 @@ limitations under the License.
 
 #include <vector>
 
-#include "tflite/kernels/internal/common.h"
-#include "tflite/kernels/internal/types.h"
+#include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/types.h"
 
 namespace tflite {
 
@@ -32,12 +32,12 @@ inline void Dequantize(const tflite::DequantizationParams& op_params,
                        const RuntimeShape& input_shape,
                        const InputT* input_data,
                        const RuntimeShape& output_shape, OutputT* output_data) {
-  int32 zero_point = op_params.zero_point;
+  int32_t zero_point = op_params.zero_point;
   const double scale = op_params.scale;
   const int flat_size = MatchingFlatSize(input_shape, output_shape);
 
   for (int i = 0; i < flat_size; i++) {
-    const int32 val = input_data[i];
+    const int32_t val = input_data[i];
     const OutputT result = static_cast<OutputT>(scale * (val - zero_point));
     output_data[i] = result;
   }
@@ -52,11 +52,11 @@ inline void PerChannelDequantize(
   // Ensure flat size is same.
   MatchingFlatSize(input_shape, output_shape);
 
-  const int32* zero_point = op_params.zero_point;
+  const int32_t* zero_point = op_params.zero_point;
   const float* scale = op_params.scale;
-  const int32 quantized_dimension = op_params.quantized_dimension;
-  const int32 num_dims = input_shape.DimensionsCount();
-  const int32* dims_data = input_shape.DimsData();
+  const int32_t quantized_dimension = op_params.quantized_dimension;
+  const int32_t num_dims = input_shape.DimensionsCount();
+  const int32_t* dims_data = input_shape.DimsData();
   std::vector<int> current_dim(num_dims, 0);
 
   do {
@@ -64,7 +64,7 @@ inline void PerChannelDequantize(
         ReducedOutputOffset(num_dims, reinterpret_cast<const int*>(dims_data),
                             current_dim.data(), 0, nullptr);
     const int channel = current_dim[quantized_dimension];
-    const int32 val = input_data[offset];
+    const int32_t val = input_data[offset];
     const float result =
         static_cast<float>(scale[channel] * (val - zero_point[channel]));
     output_data[offset] = result;

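Both routines above apply the plain affine mapping real = scale * (q - zero_point); with scale = 0.5 and zero_point = 128, for instance, a stored 130 decodes to 1.0f. As a one-function sketch:

    #include <cstdint>

    inline float DequantizeOne(uint8_t q, float scale, int32_t zero_point) {
      return scale * (static_cast<int32_t>(q) - zero_point);
    }
    // DequantizeOne(130, 0.5f, 128) == 1.0f
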
+ 1 - 1
tflite/kernels/internal/reference/floor.h → tensorflow/lite/kernels/internal/reference/floor.h

@@ -17,7 +17,7 @@ limitations under the License.
 
 #include <cmath>
 
-#include "tflite/kernels/internal/types.h"
+#include "tensorflow/lite/kernels/internal/types.h"
 
 namespace tflite {
 

+ 71 - 70
tflite/kernels/internal/reference/fully_connected.h → tensorflow/lite/kernels/internal/reference/fully_connected.h

@@ -15,10 +15,10 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_
 
-#include "tflite/kernels/internal/common.h"
-#include "tflite/kernels/internal/cppmath.h"
-#include "tflite/kernels/internal/quantization_util.h"
-#include "tflite/kernels/internal/types.h"
+#include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/cppmath.h"
+#include "tensorflow/lite/kernels/internal/quantization_util.h"
+#include "tensorflow/lite/kernels/internal/types.h"
 
 namespace tflite {
 namespace reference_ops {
@@ -61,17 +61,17 @@ inline void FullyConnected(
 
 inline void FullyConnected(
     const FullyConnectedParams& params, const RuntimeShape& input_shape,
-    const uint8* input_data, const RuntimeShape& filter_shape,
-    const uint8* filter_data, const RuntimeShape& bias_shape,
-    const int32* bias_data, const RuntimeShape& output_shape,
-    uint8* output_data) {
-  const int32 input_offset = params.input_offset;
-  const int32 filter_offset = params.weights_offset;
-  const int32 output_offset = params.output_offset;
-  const int32 output_multiplier = params.output_multiplier;
+    const uint8_t* input_data, const RuntimeShape& filter_shape,
+    const uint8_t* filter_data, const RuntimeShape& bias_shape,
+    const int32_t* bias_data, const RuntimeShape& output_shape,
+    uint8_t* output_data) {
+  const int32_t input_offset = params.input_offset;
+  const int32_t filter_offset = params.weights_offset;
+  const int32_t output_offset = params.output_offset;
+  const int32_t output_multiplier = params.output_multiplier;
   const int output_shift = params.output_shift;
-  const int32 output_activation_min = params.quantized_activation_min;
-  const int32 output_activation_max = params.quantized_activation_max;
+  const int32_t output_activation_min = params.quantized_activation_min;
+  const int32_t output_activation_max = params.quantized_activation_max;
   TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
   TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
 
@@ -89,10 +89,10 @@ inline void FullyConnected(
   const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
   for (int b = 0; b < batches; ++b) {
     for (int out_c = 0; out_c < output_depth; ++out_c) {
-      int32 acc = 0;
+      int32_t acc = 0;
       for (int d = 0; d < accum_depth; ++d) {
-        int32 input_val = input_data[b * accum_depth + d];
-        int32 filter_val = filter_data[out_c * accum_depth + d];
+        int32_t input_val = input_data[b * accum_depth + d];
+        int32_t filter_val = filter_data[out_c * accum_depth + d];
         acc += (filter_val + filter_offset) * (input_val + input_offset);
       }
       if (bias_data) {
@@ -102,24 +102,24 @@ inline void FullyConnected(
       acc += output_offset;
       acc = std::max(acc, output_activation_min);
       acc = std::min(acc, output_activation_max);
-      output_data[out_c + output_depth * b] = static_cast<uint8>(acc);
+      output_data[out_c + output_depth * b] = static_cast<uint8_t>(acc);
     }
   }
 }
 
 inline void FullyConnected(
     const FullyConnectedParams& params, const RuntimeShape& input_shape,
-    const uint8* input_data, const RuntimeShape& filter_shape,
-    const uint8* filter_data, const RuntimeShape& bias_shape,
-    const int32* bias_data, const RuntimeShape& output_shape,
-    int16* output_data) {
-  const int32 input_offset = params.input_offset;
-  const int32 filter_offset = params.weights_offset;
-  const int32 output_offset = params.output_offset;
-  const int32 output_multiplier = params.output_multiplier;
+    const uint8_t* input_data, const RuntimeShape& filter_shape,
+    const uint8_t* filter_data, const RuntimeShape& bias_shape,
+    const int32_t* bias_data, const RuntimeShape& output_shape,
+    int16_t* output_data) {
+  const int32_t input_offset = params.input_offset;
+  const int32_t filter_offset = params.weights_offset;
+  const int32_t output_offset = params.output_offset;
+  const int32_t output_multiplier = params.output_multiplier;
   const int output_shift = params.output_shift;
-  const int32 output_activation_min = params.quantized_activation_min;
-  const int32 output_activation_max = params.quantized_activation_max;
+  const int32_t output_activation_min = params.quantized_activation_min;
+  const int32_t output_activation_max = params.quantized_activation_max;
 
   TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
   TFLITE_DCHECK_EQ(output_offset, 0);
@@ -138,20 +138,21 @@ inline void FullyConnected(
     for (int out_c = 0; out_c < output_depth; ++out_c) {
       // Internal accumulation.
       // Initialize accumulator with the bias-value.
-      int32 accum = bias_data[out_c];
+      int32_t accum = bias_data[out_c];
       // Accumulation loop.
       for (int d = 0; d < accum_depth; ++d) {
-        int16 input_val = input_data[b * accum_depth + d] + input_offset;
-        int16 filter_val = filter_data[out_c * accum_depth + d] + filter_offset;
+        int16_t input_val = input_data[b * accum_depth + d] + input_offset;
+        int16_t filter_val =
+            filter_data[out_c * accum_depth + d] + filter_offset;
         accum += filter_val * input_val;
       }
-      // Down-scale the final int32 accumulator to the scale used by our
+      // Down-scale the final int32_t accumulator to the scale used by our
       // (16-bit, typically 3 integer bits) fixed-point format. The quantized
       // multiplier and shift here have been pre-computed offline
       // (e.g. by toco).
       accum =
           MultiplyByQuantizedMultiplier(accum, output_multiplier, output_shift);
-      // Saturate, cast to int16, and store to output array.
+      // Saturate, cast to int16_t, and store to output array.
       accum = std::max(accum, output_activation_min - output_offset);
       accum = std::min(accum, output_activation_max - output_offset);
       accum += output_offset;
@@ -162,14 +163,14 @@ inline void FullyConnected(
 
 inline void ShuffledFullyConnected(
     const FullyConnectedParams& params, const RuntimeShape& input_shape,
-    const uint8* input_data, const RuntimeShape& weights_shape,
-    const uint8* shuffled_weights_data, const RuntimeShape& bias_shape,
-    const int32* bias_data, const RuntimeShape& output_shape,
-    int16* output_data, uint8* shuffled_input_workspace_data) {
-  const int32 output_multiplier = params.output_multiplier;
+    const uint8_t* input_data, const RuntimeShape& weights_shape,
+    const uint8_t* shuffled_weights_data, const RuntimeShape& bias_shape,
+    const int32_t* bias_data, const RuntimeShape& output_shape,
+    int16_t* output_data, uint8_t* shuffled_input_workspace_data) {
+  const int32_t output_multiplier = params.output_multiplier;
   const int output_shift = params.output_shift;
-  const int32 output_activation_min = params.quantized_activation_min;
-  const int32 output_activation_max = params.quantized_activation_max;
+  const int32_t output_activation_min = params.quantized_activation_min;
+  const int32_t output_activation_max = params.quantized_activation_max;
   TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
 
   TFLITE_DCHECK_GE(input_shape.DimensionsCount(), 1);
@@ -190,7 +191,7 @@ inline void ShuffledFullyConnected(
   TFLITE_DCHECK((output_depth % 4) == 0);
 
   // Shuffling and xoring of input activations into the workspace buffer
-  uint8* shuffled_input_workspace_ptr = shuffled_input_workspace_data;
+  uint8_t* shuffled_input_workspace_ptr = shuffled_input_workspace_data;
   if (batches == 1) {
     for (int i = 0; i < accum_depth; i++) {
       shuffled_input_workspace_data[i] = input_data[i] ^ 0x80;
@@ -198,13 +199,13 @@ inline void ShuffledFullyConnected(
   } else if (batches == 4) {
     for (int c = 0; c < accum_depth; c += 16) {
       for (int b = 0; b < 4; b++) {
-        const uint8* src_data_ptr = input_data + b * accum_depth + c;
+        const uint8_t* src_data_ptr = input_data + b * accum_depth + c;
         for (int j = 0; j < 16; j++) {
-          uint8 src_val = *src_data_ptr++;
+          uint8_t src_val = *src_data_ptr++;
           // Flip the sign bit, so that the kernel will only need to
-          // reinterpret these uint8 values as int8, getting for free the
+          // reinterpret these uint8_t values as int8_t, getting for free the
           // subtraction of the zero_point value 128.
-          uint8 dst_val = src_val ^ 0x80;
+          uint8_t dst_val = src_val ^ 0x80;
           *shuffled_input_workspace_ptr++ = dst_val;
         }
       }
@@ -216,62 +217,62 @@ inline void ShuffledFullyConnected(
 
   // Actual computation
   if (batches == 1) {
-    int16* output_ptr = output_data;
+    int16_t* output_ptr = output_data;
     // Shuffled weights have had their sign bit (0x80) pre-flipped (xor'd)
-    // so that just reinterpreting them as int8 values is equivalent to
+    // so that just reinterpreting them as int8_t values is equivalent to
     // subtracting 128 from them, thus implementing for free the subtraction of
     // the zero_point value 128.
-    const int8* shuffled_weights_ptr =
-        reinterpret_cast<const int8*>(shuffled_weights_data);
+    const int8_t* shuffled_weights_ptr =
+        reinterpret_cast<const int8_t*>(shuffled_weights_data);
     // Likewise, we preshuffled and pre-xored the input data above.
-    const int8* shuffled_input_data =
-        reinterpret_cast<const int8*>(shuffled_input_workspace_data);
+    const int8_t* shuffled_input_data =
+        reinterpret_cast<const int8_t*>(shuffled_input_workspace_data);
     for (int c = 0; c < output_depth; c += 4) {
       // Internal accumulation.
       // Initialize accumulator with the bias-value.
-      int32 accum[4] = {0};
+      int32_t accum[4] = {0};
       // Accumulation loop.
       for (int d = 0; d < accum_depth; d += 16) {
         for (int i = 0; i < 4; i++) {
           for (int j = 0; j < 16; j++) {
-            int8 input_val = shuffled_input_data[d + j];
-            int8 weights_val = *shuffled_weights_ptr++;
+            int8_t input_val = shuffled_input_data[d + j];
+            int8_t weights_val = *shuffled_weights_ptr++;
             accum[i] += weights_val * input_val;
           }
         }
       }
       for (int i = 0; i < 4; i++) {
         // Add bias value
-        int32 acc = accum[i] + bias_data[c + i];
-        // Down-scale the final int32 accumulator to the scale used by our
+        int32_t acc = accum[i] + bias_data[c + i];
+        // Down-scale the final int32_t accumulator to the scale used by our
         // (16-bit, typically 3 integer bits) fixed-point format. The quantized
         // multiplier and shift here have been pre-computed offline
         // (e.g. by toco).
         acc =
             MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift);
-        // Saturate, cast to int16, and store to output array.
+        // Saturate, cast to int16_t, and store to output array.
         acc = std::max(acc, output_activation_min);
         acc = std::min(acc, output_activation_max);
         output_ptr[c + i] = acc;
       }
     }
   } else if (batches == 4) {
-    int16* output_ptr = output_data;
+    int16_t* output_ptr = output_data;
     // Shuffled weights have had their sign bit (0x80) pre-flipped (xor'd)
-    // so that just reinterpreting them as int8 values is equivalent to
+    // so that just reinterpreting them as int8_t values is equivalent to
     // subtracting 128 from them, thus implementing for free the subtraction of
     // the zero_point value 128.
-    const int8* shuffled_weights_ptr =
-        reinterpret_cast<const int8*>(shuffled_weights_data);
+    const int8_t* shuffled_weights_ptr =
+        reinterpret_cast<const int8_t*>(shuffled_weights_data);
     // Likewise, we preshuffled and pre-xored the input data above.
-    const int8* shuffled_input_data =
-        reinterpret_cast<const int8*>(shuffled_input_workspace_data);
+    const int8_t* shuffled_input_data =
+        reinterpret_cast<const int8_t*>(shuffled_input_workspace_data);
     for (int c = 0; c < output_depth; c += 4) {
-      const int8* shuffled_input_ptr = shuffled_input_data;
+      const int8_t* shuffled_input_ptr = shuffled_input_data;
       // Accumulation loop.
       // Internal accumulation.
       // Initialize accumulator with the bias-value.
-      int32 accum[4][4];
+      int32_t accum[4][4];
       for (int i = 0; i < 4; i++) {
         for (int b = 0; b < 4; b++) {
           accum[i][b] = 0;
@@ -281,8 +282,8 @@ inline void ShuffledFullyConnected(
         for (int i = 0; i < 4; i++) {
           for (int b = 0; b < 4; b++) {
             for (int j = 0; j < 16; j++) {
-              int8 input_val = shuffled_input_ptr[16 * b + j];
-              int8 weights_val = shuffled_weights_ptr[16 * i + j];
+              int8_t input_val = shuffled_input_ptr[16 * b + j];
+              int8_t weights_val = shuffled_weights_ptr[16 * i + j];
               accum[i][b] += weights_val * input_val;
             }
           }
@@ -293,14 +294,14 @@ inline void ShuffledFullyConnected(
       for (int i = 0; i < 4; i++) {
         for (int b = 0; b < 4; b++) {
           // Add bias value
-          int32 acc = accum[i][b] + bias_data[c + i];
-          // Down-scale the final int32 accumulator to the scale used by our
+          int32_t acc = accum[i][b] + bias_data[c + i];
+          // Down-scale the final int32_t accumulator to the scale used by our
           // (16-bit, typically 3 integer bits) fixed-point format. The
           // quantized multiplier and shift here have been pre-computed offline
           // (e.g. by toco).
           acc = MultiplyByQuantizedMultiplier(acc, output_multiplier,
                                               output_shift);
-          // Saturate, cast to int16, and store to output array.
+          // Saturate, cast to int16_t, and store to output array.
           acc = std::max(acc, output_activation_min);
           acc = std::min(acc, output_activation_max);
           output_ptr[b * output_depth + c + i] = acc;

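Both hunks above funnel their raw int32_t accumulators through MultiplyByQuantizedMultiplier. Below is a minimal stand-alone sketch of that down-scale step, assuming gemmlowp-style rounding semantics (the real helpers are gemmlowp::SaturatingRoundingDoublingHighMul and gemmlowp::RoundingDivideByPOT, pulled in via tensorflow/lite/kernels/internal/common.h; the *Sketch names are illustrative stand-ins, not library symbols):

#include <cstdint>
#include <limits>

// Sketch of gemmlowp::SaturatingRoundingDoublingHighMul: (a * b * 2) >> 31,
// rounded to nearest, saturating only on the single overflow case
// a == b == INT32_MIN.
inline int32_t SatRoundingDoublingHighMulSketch(int32_t a, int32_t b) {
  const bool overflow = a == b && a == std::numeric_limits<int32_t>::min();
  const int64_t ab_64 = static_cast<int64_t>(a) * static_cast<int64_t>(b);
  const int32_t nudge = ab_64 >= 0 ? (1 << 30) : (1 - (1 << 30));
  const int32_t ab_x2_high32 =
      static_cast<int32_t>((ab_64 + nudge) / (1ll << 31));
  return overflow ? std::numeric_limits<int32_t>::max() : ab_x2_high32;
}

// Sketch of gemmlowp::RoundingDivideByPOT: arithmetic right shift with
// round-to-nearest, ties rounded away from zero.
inline int32_t RoundingDivideByPOTSketch(int32_t x, int exponent) {
  const int32_t mask = static_cast<int32_t>((1ll << exponent) - 1);
  const int32_t remainder = x & mask;
  const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
  return (x >> exponent) + (remainder > threshold ? 1 : 0);
}

// x * real_multiplier, where real_multiplier was factored offline into a Q31
// mantissa (quantized_multiplier) and a power-of-two exponent (shift).
inline int32_t MultiplyByQuantizedMultiplierSketch(
    int32_t x, int32_t quantized_multiplier, int shift) {
  const int left_shift = shift > 0 ? shift : 0;
  const int right_shift = shift > 0 ? 0 : -shift;
  return RoundingDivideByPOTSketch(
      SatRoundingDoublingHighMulSketch(x * (1 << left_shift),
                                       quantized_multiplier),
      right_shift);
}

For a quantized fully-connected layer, the pre-computed (multiplier, shift) pair typically encodes the real-valued factor input_scale * filter_scale / output_scale.
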
+ 166 - 0
tensorflow/lite/kernels/internal/reference/hard_swish.h

@@ -0,0 +1,166 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_
+#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_
+
+#include "ruy/profiler/instrumentation.h"  // from @ruy
+#include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/types.h"
+
+namespace tflite {
+namespace reference_ops {
+
+inline int16_t SaturatingLeftShift(int16_t value, int amount) {
+  int32_t result = static_cast<int32_t>(value) * (1 << amount);
+  result = std::min<int32_t>(result, std::numeric_limits<int16_t>::max());
+  result = std::max<int32_t>(result, std::numeric_limits<int16_t>::min());
+  return result;
+}
+
+// Similar to ARM instruction SQDMULH.
+// Similar to gemmlowp::SaturatingRoundingDoublingHighMul except
+// rounding to zero instead of to nearest (SQRDMULH).
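+// Worked example in Q15: a = b = 0x4000 (0.5). Then ab_32 = 0x10000000 and
+// ab_x2_high16 = 0x10000000 / (1 << 15) = 0x2000 (0.25); the doubling
+// cancels one of the two Q15 scale factors.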
+inline std::int16_t SaturatingDoublingHighMul(std::int16_t a, std::int16_t b) {
+  bool overflow = a == b && a == std::numeric_limits<std::int16_t>::min();
+  std::int32_t a_32(a);
+  std::int32_t b_32(b);
+  std::int32_t ab_32 = a_32 * b_32;
+  std::int16_t ab_x2_high16 = static_cast<std::int16_t>((ab_32) / (1 << 15));
+  return overflow ? std::numeric_limits<std::int16_t>::max() : ab_x2_high16;
+}
+
+template <typename T>
+inline void HardSwish(const RuntimeShape& input_shape, const T* input_data,
+                      const RuntimeShape& output_shape, T* output_data) {
+  ruy::profiler::ScopeLabel label("ReferenceHardSwish/Float");
+  auto matching_size = MatchingFlatSize(input_shape, output_shape);
+  const T* in_end = input_data + matching_size;
+  for (; input_data < in_end; input_data++, output_data++) {
+    const float in = *input_data;
+    *output_data =
+        in * std::min(static_cast<T>(6), std::max(static_cast<T>(0), in + 3)) /
+        6;
+  }
+}
+
+template <typename T>
+inline void HardSwish(const HardSwishParams& params,
+                      const RuntimeShape& input_shape, const T* input_data,
+                      const RuntimeShape& output_shape, T* output_data) {
+  ruy::profiler::ScopeLabel label("ReferenceHardSwish/Quantized");
+
+  const int flat_size = MatchingFlatSize(input_shape, output_shape);
+
+  for (int i = 0; i < flat_size; i++) {
+    const int16_t input_value = input_data[i] - params.input_zero_point;
+    // Left-shift as much as we can without overflow/saturation to put
+    // significant bits in the high bits of our 16-bit fixedpoint values, so
+    // that fixed-point approximate computations below are as accurate as
+    // possible.
+    const int16_t input_value_on_hires_input_scale = input_value * (1 << 7);
+    // Compute the input value on essentially the output scale, just not
+    // right-shifted yet. This is the value that we'll use in the (x >= +3)
+    // case, and that in the general case we'll multiply against the "relu-ish"
+    // fixed-point multiplier in [0, 1].
+    const int16_t input_value_on_preshift_output_scale =
+        gemmlowp::SaturatingRoundingDoublingHighMul(
+            input_value_on_hires_input_scale,
+            params.output_multiplier_fixedpoint_int16);
+    // Now compute the "relu-ish multiplier". In the (-3 <= x <= +3) case, that
+    // is just an affine rescaling of x from [-3, 3] to [0, 1]. In the general
+    // case, it is just that plus saturation at the boundaries of [-3, 3].
+    // First, we rescale from [-3, 3] to [-1, 1], saturating.
+    // That is done by rescaling the input value with a fixed-point multiplier
+    // (reluish_multiplier_fixedpoint) and bit-shift such that we represent
+    // that input value on the scale where the real value 3.0f is represented
+    // by the quantized value 32768.  (+32768 is actually not representable as
+    // int16_t, so this saturates at +32767, and that is seen empirically to be
+    // a negligible contribution to numerical error/bias).
+    //
+    // This code is careful to correctly implement any magnitude of multiplier,
+    // involving either a right shift or a left shift, with correct saturation
+    // behavior in the left-shift case. This forces this code to be more
+    // complicated, but is necessary for real applications: a partially
+    // trained quantized MobileNet v3-small model that motivated this code
+    // exhibits some large [min, max] range boundaries, of the order of
+    // magnitude of 10 or 100 depending on layers.
+    //
+    // The next few lines are basically just an ordinary
+    // MultiplyByQuantizedMultiplier, except that we are more careful here
+    // about the fine details of saturation when left-shifting, because here
+    // overflow in left-shift is a common case, not an anomaly as
+    // MultiplyByQuantizedMultiplier assumes.
+    int16_t reluish_value = input_value_on_hires_input_scale;
+    // Shift left, saturating, as much as we can while ensuring that this
+    // saturation will not contribute to the result. That is, left shift amount
+    // reduced by 1.
+    if (params.reluish_multiplier_exponent > 0) {
+      reluish_value = SaturatingLeftShift(
+          reluish_value, params.reluish_multiplier_exponent - 1);
+    }
+    // Apply the fixed-point multiplier, dividing the value by a divisor
+    // ranging in [1, 2].
+    reluish_value = gemmlowp::SaturatingRoundingDoublingHighMul(
+        reluish_value, params.reluish_multiplier_fixedpoint_int16);
+    // Apply the last bit of left-shift. Thus, in the left-shifting case, if
+    // any saturation affects the result, it is happening here --- any
+    // saturation having occurred above is overwritten here, not affecting the
+    // result.
+    if (params.reluish_multiplier_exponent > 0) {
+      reluish_value = SaturatingLeftShift(reluish_value, 1);
+    }
+    // Shift right, in the right-shifting case.
+    if (params.reluish_multiplier_exponent < 0) {
+      reluish_value = gemmlowp::RoundingDivideByPOT(
+          reluish_value, -params.reluish_multiplier_exponent);
+    }
+    // At this point we have rescaled the value into a 16bit fixedpoint
+    // reluish_value in [-1, 1].
+    // We now convert that to a 16bit fixedpoint value in [0, 1].
+    reluish_value = (reluish_value + (1 << 15)) >> 1;
+    // Use of SaturatingDoublingHighMul here is important to cancel the biases
+    // from the above SaturatingRoundingDoublingHighMul.
+    //
+    // On a partially trained MobileNet-v3-small,
+    //
+    //                                       | bias on    |  ImageNet
+    //                                       | quantized  |  Top-1
+    // Operation used here                   | values     |  accuracy (50k)
+    // --------------------------------------+------------+-----------
+    // SaturatingDoublingHighMul             | -0.0024    |  58.920
+    // SaturatingRoundingDoublingHighMul     | -0.0067    |  58.064
+    //
+    // In activations_test, this is covered by this testcase:
+    //     QuantizedActivationsOpTest.HardSwishBias
+    //
+    const int16_t preshift_output_value = SaturatingDoublingHighMul(
+        reluish_value, input_value_on_preshift_output_scale);
+    // We were so far operating on the pre-shift output scale. Now we finally
+    // apply that output shift, arriving at the final output scale.
+    int16_t output_value = gemmlowp::RoundingDivideByPOT(
+        preshift_output_value, -params.output_multiplier_exponent);
+    output_value += params.output_zero_point;
+    output_value =
+        std::min<int16_t>(output_value, std::numeric_limits<T>::max());
+    output_value =
+        std::max<int16_t>(output_value, std::numeric_limits<T>::min());
+    output_data[i] = output_value;
+  }
+}
+
+}  // namespace reference_ops
+}  // namespace tflite
+
+#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_

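As a reference point for the fixed-point arithmetic above, the templated float overload earlier in this file computes hard_swish(x) = x * relu6(x + 3) / 6 directly. A tiny stand-alone sketch of that definition and its endpoint behavior (HardSwishFloat and the sample inputs are illustrative, not part of the library):

#include <algorithm>
#include <cstdio>

// hard_swish(x) = x * relu6(x + 3) / 6: identically 0 for x <= -3,
// the identity for x >= +3, and a smooth ramp in between.
inline float HardSwishFloat(float x) {
  return x * std::min(6.0f, std::max(0.0f, x + 3.0f)) / 6.0f;
}

int main() {
  for (float x : {-4.0f, -3.0f, -1.5f, 0.0f, 1.5f, 3.0f, 4.0f}) {
    std::printf("hard_swish(%+.1f) = %+.4f\n", x, HardSwishFloat(x));
  }
  return 0;
}

The exact zero below -3 and exact identity above +3 are what the saturating shifts and the [0, 1] "relu-ish multiplier" in the quantized path work to preserve.
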
+ 25 - 23
tflite/kernels/internal/reference/integer_ops/add.h → tensorflow/lite/kernels/internal/reference/integer_ops/add.h

@@ -17,40 +17,47 @@ limitations under the License.
 
 #include <limits>
 
-#include "tflite/kernels/internal/common.h"
-#include "tflite/kernels/internal/types.h"
+#include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/types.h"
 
 namespace tflite {
 namespace reference_integer_ops {
 
+inline void CheckArithmeticParams(const ArithmeticParams& params) {
+  TFLITE_DCHECK_LE(params.quantized_activation_min,
+                   params.quantized_activation_max);
+  // The input offset is the negated input zero point. Activation tensors are
+  // asymmetrically quantized, so they span the full int8 range.
+  TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits<int8_t>::min());
+  TFLITE_DCHECK_GE(-params.input2_offset, std::numeric_limits<int8_t>::min());
+  TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits<int8_t>::max());
+  TFLITE_DCHECK_LE(-params.input2_offset, std::numeric_limits<int8_t>::max());
+}
+
 // Element-wise add that can often be used for inner loop of broadcast add as
 // well as the non-broadcast add.
 inline void AddElementwise(int size, const ArithmeticParams& params,
                            const int8_t* input1_data, const int8_t* input2_data,
                            int8_t* output_data) {
-  const int32_t int8_max_value = std::numeric_limits<int8_t>::max();
-  TFLITE_DCHECK_GE(params.input1_offset, -1 * int8_max_value);
-  TFLITE_DCHECK_GE(params.input2_offset, -1 * int8_max_value);
-  TFLITE_DCHECK_LE(params.input1_offset, int8_max_value);
-  TFLITE_DCHECK_LE(params.input2_offset, int8_max_value);
+  CheckArithmeticParams(params);
 
   for (int i = 0; i < size; ++i) {
-    const int32 input1_val = params.input1_offset + input1_data[i];
-    const int32 input2_val = params.input2_offset + input2_data[i];
-    const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
-    const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
-    const int32 scaled_input1_val =
+    const int32_t input1_val = params.input1_offset + input1_data[i];
+    const int32_t input2_val = params.input2_offset + input2_data[i];
+    const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
+    const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
+    const int32_t scaled_input1_val =
         MultiplyByQuantizedMultiplierSmallerThanOneExp(
             shifted_input1_val, params.input1_multiplier, params.input1_shift);
-    const int32 scaled_input2_val =
+    const int32_t scaled_input2_val =
         MultiplyByQuantizedMultiplierSmallerThanOneExp(
             shifted_input2_val, params.input2_multiplier, params.input2_shift);
-    const int32 raw_sum = scaled_input1_val + scaled_input2_val;
-    const int32 raw_output =
+    const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
+    const int32_t raw_output =
         MultiplyByQuantizedMultiplierSmallerThanOneExp(
             raw_sum, params.output_multiplier, params.output_shift) +
         params.output_offset;
-    const int32 clamped_output =
+    const int32_t clamped_output =
         std::min(params.quantized_activation_max,
                  std::max(params.quantized_activation_min, raw_output));
     output_data[i] = static_cast<int8_t>(clamped_output);
@@ -61,16 +68,11 @@ inline void Add(const ArithmeticParams& params,
                 const RuntimeShape& input1_shape, const int8_t* input1_data,
                 const RuntimeShape& input2_shape, const int8_t* input2_data,
                 const RuntimeShape& output_shape, int8_t* output_data) {
-  TFLITE_DCHECK_LE(params.quantized_activation_min,
-                   params.quantized_activation_max);
+  CheckArithmeticParams(params);
+
   const int flat_size =
       MatchingElementsSize(input1_shape, input2_shape, output_shape);
 
-  const int32_t int8_max_value = std::numeric_limits<int8_t>::max();
-  TFLITE_DCHECK_GE(params.input1_offset, -1 * int8_max_value);
-  TFLITE_DCHECK_GE(params.input2_offset, -1 * int8_max_value);
-  TFLITE_DCHECK_LE(params.input1_offset, int8_max_value);
-  TFLITE_DCHECK_LE(params.input2_offset, int8_max_value);
   AddElementwise(flat_size, params, input1_data, input2_data, output_data);
 }
 

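The multipliers and shifts consumed by AddElementwise are produced at prepare time: both inputs are brought to a common scale (twice the larger input scale, with left_shift bits of headroom), summed, then rescaled to the output scale. A sketch of that offline factoring under hypothetical tensor scales (the real helper is QuantizeMultiplierSmallerThanOneExp in tensorflow/lite/kernels/internal/quantization_util.h; QuantizeMultiplierSketch and the scale values here are illustrative):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Factor a real multiplier in (0, 1) into a Q31 mantissa plus a (negative)
// power-of-two exponent, so that real ~= mantissa * 2^(shift - 31).
void QuantizeMultiplierSketch(double real, int32_t* mantissa, int* shift) {
  const double q = std::frexp(real, shift);  // real = q * 2^shift, q in [0.5, 1)
  int64_t q_fixed = std::llround(q * (1ll << 31));
  if (q_fixed == (1ll << 31)) {  // rounding can push q to exactly 1.0
    q_fixed /= 2;
    ++*shift;
  }
  *mantissa = static_cast<int32_t>(q_fixed);
}

int main() {
  // Hypothetical tensor scales for an int8 add.
  const double input1_scale = 0.02, input2_scale = 0.03, output_scale = 0.05;
  const int left_shift = 20;  // headroom applied before the per-input rescale
  const double twice_max = 2.0 * std::max(input1_scale, input2_scale);

  int32_t m1, m2, mo;
  int s1, s2, so;
  QuantizeMultiplierSketch(input1_scale / twice_max, &m1, &s1);
  QuantizeMultiplierSketch(input2_scale / twice_max, &m2, &s2);
  QuantizeMultiplierSketch(twice_max / ((1 << left_shift) * output_scale),
                           &mo, &so);
  std::printf("input1: mantissa=%d shift=%d\n", static_cast<int>(m1), s1);
  std::printf("input2: mantissa=%d shift=%d\n", static_cast<int>(m2), s2);
  std::printf("output: mantissa=%d shift=%d\n", static_cast<int>(mo), so);
  return 0;
}

The resulting shifts are <= 0, matching the convention of MultiplyByQuantizedMultiplierSmallerThanOneExp, which applies them as a rounding right shift by -shift.
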
Some files were not shown because too many files changed in this diff