generate_test_data.py

#!/usr/bin/env python3
#
# Copyright (C) 2010-2022 Arm Limited or its affiliates.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import json
import math
import argparse
import subprocess
import numpy as np
from packaging import version
from abc import ABC, abstractmethod
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.lite.python.interpreter import OpResolverType

try:
    import tensorflow as tf
except Exception as e:
    print(e)
    sys.exit(1)

REQUIRED_MINIMUM_TENSORFLOW_VERSION = version.parse("2.5")

ALL_TESTDATA_SETS = {}

CLANG_FORMAT = 'clang-format-9 -i'

INT32_MAX = 2147483647
INT32_MIN = -2147483648
INT16_MAX = 32767
INT16_MIN = -32768
INT8_MAX = 127
INT8_MIN = -128
def parse_args():
    parser = argparse.ArgumentParser(description="Generate input and reference output data for unittests."
                                     " It can regenerate all data, load all stored data or a combination of it.")
    parser.add_argument('--dataset', type=str, default=None, help="Name of generated test set.")
    parser.add_argument('--regenerate-weights', action='store_true', help="Regenerate and store new weights.")
    parser.add_argument('--regenerate-input', action='store_true', help="Regenerate and store new input.")
    parser.add_argument('--regenerate-biases', action='store_true', help="Regenerate and store new biases.")
    parser.add_argument('-a', '--regenerate-all', action='store_true', help="Regenerate and store all data.")
    parser.add_argument('-t', '--testtype', type=str, default=None, choices=['conv', 'depthwise_conv', 'avgpool',
                                                                             'maxpool', 'fully_connected', 'softmax',
                                                                             'svdf', 'add', 'mul'],
                        help='Type of test. These are the operators that have unit tests.')
    parser.add_argument('--run-all-testsets', action='store_true', help="Run the script for all existing test "
                        "sets. Regenerate all, partially all or no input data (output may still change, depending on"
                        " changes in script) depending on regenerate flags. If used together with the -t flag, only"
                        " tests of that type will be run.")
    parser.add_argument('--schema-file', type=str, help="Path to schema file. This may be needed for some tests.")
    args = parser.parse_args()
    return args
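
# Example invocation (illustrative): regenerate all data for every conv test set.
#
#   python3 generate_test_data.py -t conv --run-all-testsets -a
#
# The flags map to the argparse options defined above; --schema-file is only
# needed by tests that build models from a json template via flatc (the
# softmax s8->s16 and svdf test sets).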
class TestSettings(ABC):

    # This is the generated test data used by the test cases.
    OUTDIR = 'TestCases/TestData/'

    # This is input to the data generation. If everything or something is regenerated then it is overwritten,
    # so it always holds the same data as OUTDIR.
    # The purpose of the pregenerated data is primarily debugging, as it makes it possible to change a single
    # parameter and see how the output changes (or does not change), without regenerating all input data.
    # It is also convenient when testing changes in the script, to be able to run all test sets again.
    PREGEN = 'PregeneratedData/'
    def __init__(self, dataset, testtype, args, in_ch, out_ch, x_in, y_in, w_x, w_y, stride_x=1, stride_y=1, pad=False,
                 randmin=INT8_MIN, randmax=INT8_MAX, batches=1, generate_bias=True, relu6=False,
                 out_activation_min=None, out_activation_max=None, int16xint8=False, bias_min=None, bias_max=None,
                 dilation_x=1, dilation_y=1):
        self.tensor_flow_reference_version = ("// Generated by {} using TFL version {} as reference.\n".
                                              format(os.path.basename(__file__), tf.__version__))

        # Randomization interval
        self.mins = randmin
        self.maxs = randmax
        self.bias_mins = bias_min
        self.bias_maxs = bias_max

        self.input_ch = in_ch
        self.output_ch = out_ch
        self.x_input = x_in
        self.y_input = y_in
        self.filter_x = w_x
        self.filter_y = w_y
        self.stride_x = stride_x
        self.stride_y = stride_y
        self.dilation_x = dilation_x
        self.dilation_y = dilation_y
        self.batches = batches
        self.test_type = testtype
        self.has_padding = pad

        self.is_int16xint8 = int16xint8

        if relu6:
            self.out_activation_max = 6
            self.out_activation_min = 0
        else:
            if out_activation_min is not None:
                self.out_activation_min = out_activation_min
            else:
                self.out_activation_min = INT16_MIN if self.is_int16xint8 else INT8_MIN
            if out_activation_max is not None:
                self.out_activation_max = out_activation_max
            else:
                self.out_activation_max = INT16_MAX if self.is_int16xint8 else INT8_MAX

        # Bias is optional.
        self.generate_bias = generate_bias

        self.generated_header_files = []
        self.pregenerated_data_dir = self.PREGEN

        self.config_data = "config_data.h"

        self.testdataset = dataset

        self.kernel_table_file = self.pregenerated_data_dir + self.testdataset + '/' + 'kernel.txt'
        self.inputs_table_file = self.pregenerated_data_dir + self.testdataset + '/' + 'input.txt'
        self.bias_table_file = self.pregenerated_data_dir + self.testdataset + '/' + 'bias.txt'

        if self.has_padding:
            self.padding = 'SAME'
        else:
            self.padding = 'VALID'

        self.regenerate_new_weights = args.regenerate_weights
        self.regenerate_new_input = args.regenerate_input
        self.regenerate_new_bias = args.regenerate_biases
        if args.regenerate_all:
            self.regenerate_new_bias = True
            self.regenerate_new_weights = True
            self.regenerate_new_input = True

        self.headers_dir = self.OUTDIR + self.testdataset + '/'

        self.model_path = "{}model_{}".format(self.headers_dir, self.testdataset)
        self.model_path_tflite = self.model_path + '.tflite'
    def save_multiple_dim_array_in_txt(self, file, data):
        header = ','.join(map(str, data.shape))
        np.savetxt(file, data.reshape(-1, data.shape[-1]), header=header,
                   delimiter=',')

    def load_multiple_dim_array_from_txt(self, file):
        with open(file) as f:
            shape = list(map(int, next(f)[1:].split(',')))
            data = np.genfromtxt(f, delimiter=',').reshape(shape)
        return data.astype(np.float32)
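
    # Illustrative file layout produced by save_multiple_dim_array_in_txt for a
    # tensor of shape (2, 3): np.savetxt prefixes the header with its comment
    # marker and writes one comma-separated row per innermost vector (values in
    # scientific notation by default):
    #
    #   # 2,3
    #   1.000000000000000000e+00,2.000000000000000000e+00,3.000000000000000000e+00
    #   4.000000000000000000e+00,5.000000000000000000e+00,6.000000000000000000e+00
    #
    # load_multiple_dim_array_from_txt strips the comment marker from the first
    # line to recover the shape, then reshapes the remaining rows.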
    def convert_tensor_np(self, tensor_in, converter, *qminmax):
        w = tensor_in.numpy()
        shape = w.shape
        w = w.ravel()
        if len(qminmax) == 2:
            fw = converter(w, qminmax[0], qminmax[1])
        else:
            fw = converter(w)
        fw.shape = shape
        return tf.convert_to_tensor(fw)

    def convert_tensor(self, tensor_in, converter, *qminmax):
        w = tensor_in.numpy()
        shape = w.shape
        w = w.ravel()
        normal = np.array(w)
        float_normal = []
        for i in normal:
            if len(qminmax) == 2:
                float_normal.append(converter(i, qminmax[0], qminmax[1]))
            else:
                float_normal.append(converter(i))
        np_float_array = np.asarray(float_normal)
        np_float_array.shape = shape
        return tf.convert_to_tensor(np_float_array)
    def get_randomized_data(self, dims, npfile, regenerate, decimals=0, minrange=None, maxrange=None):
        if not minrange:
            minrange = self.mins
        if not maxrange:
            maxrange = self.maxs
        if not os.path.exists(npfile) or regenerate:
            regendir = os.path.dirname(npfile)
            os.makedirs(regendir, exist_ok=True)
            if decimals == 0:
                data = tf.Variable(tf.random.uniform(dims, minval=minrange, maxval=maxrange, dtype=tf.dtypes.int64))
                data = tf.cast(data, dtype=tf.float32)
            else:
                data = tf.Variable(tf.random.uniform(dims, minval=minrange, maxval=maxrange, dtype=tf.dtypes.float32))
                data = np.around(data.numpy(), decimals)
                data = tf.convert_to_tensor(data)
            print("Saving data to {}".format(npfile))
            self.save_multiple_dim_array_in_txt(npfile, data.numpy())
        else:
            print("Loading data from {}".format(npfile))
            data = tf.convert_to_tensor(self.load_multiple_dim_array_from_txt(npfile))
        return data

    def get_randomized_input_data(self, input_data, input_shape=None):
        # Generate or load saved input data unless hardcoded data is provided.
        if input_shape is None:
            input_shape = [self.batches, self.y_input, self.x_input, self.input_ch]
        if input_data is not None:
            input_data = tf.reshape(input_data, input_shape)
        else:
            input_data = self.get_randomized_data(input_shape,
                                                  self.inputs_table_file,
                                                  regenerate=self.regenerate_new_input)
        return input_data

    def get_randomized_bias_data(self, biases):
        # Generate or load saved bias data unless hardcoded data is provided.
        if not self.generate_bias:
            biases = tf.reshape(np.full([self.output_ch], 0), [self.output_ch])
        elif biases is not None:
            biases = tf.reshape(biases, [self.output_ch])
        else:
            biases = self.get_randomized_data([self.output_ch],
                                              self.bias_table_file,
                                              regenerate=self.regenerate_new_bias,
                                              minrange=self.bias_mins,
                                              maxrange=self.bias_maxs)
        return biases
    def format_output_file(self, file):
        command_list = CLANG_FORMAT.split(' ')
        command_list.append(file)
        process = subprocess.run(command_list)
        if process.returncode != 0:
            print("ERROR: {} failed".format(command_list))
            sys.exit(1)

    def write_c_header_wrapper(self):
        filename = "test_data.h"
        filepath = self.headers_dir + filename

        print("Generating C header wrapper {}...".format(filepath))
        with open(filepath, 'w+') as f:
            f.write(self.tensor_flow_reference_version)
            while len(self.generated_header_files) > 0:
                f.write('#include "{}"\n'.format(self.generated_header_files.pop()))
        self.format_output_file(filepath)
    def write_common_config(self, f, prefix):
        """
        Shared by conv/depthwise_conv and pooling.
        """
        f.write("#define {}_FILTER_X {}\n".format(prefix, self.filter_x))
        f.write("#define {}_FILTER_Y {}\n".format(prefix, self.filter_y))
        f.write("#define {}_STRIDE_X {}\n".format(prefix, self.stride_x))
        f.write("#define {}_STRIDE_Y {}\n".format(prefix, self.stride_y))
        f.write("#define {}_PAD_X {}\n".format(prefix, self.pad_x))
        f.write("#define {}_PAD_Y {}\n".format(prefix, self.pad_y))
        f.write("#define {}_OUTPUT_W {}\n".format(prefix, self.x_output))
        f.write("#define {}_OUTPUT_H {}\n".format(prefix, self.y_output))

    def write_c_common_header(self, f):
        f.write(self.tensor_flow_reference_version)
        f.write("#pragma once\n")

    def write_c_config_header(self, write_common_parameters=True):
        filename = self.config_data

        self.generated_header_files.append(filename)
        filepath = self.headers_dir + filename

        prefix = self.testdataset.upper()

        print("Writing C header with config data {}...".format(filepath))
        with open(filepath, "w+") as f:
            self.write_c_common_header(f)
            if write_common_parameters:
                f.write("#define {}_OUT_CH {}\n".format(prefix, self.output_ch))
                f.write("#define {}_IN_CH {}\n".format(prefix, self.input_ch))
                f.write("#define {}_INPUT_W {}\n".format(prefix, self.x_input))
                f.write("#define {}_INPUT_H {}\n".format(prefix, self.y_input))
                f.write("#define {}_DST_SIZE {}\n".format(prefix, self.x_output * self.y_output * self.output_ch
                                                          * self.batches))
                f.write("#define {}_INPUT_SIZE {}\n".format(prefix, self.x_input * self.y_input * self.input_ch))
                f.write("#define {}_OUT_ACTIVATION_MIN {}\n".format(prefix, self.out_activation_min))
                f.write("#define {}_OUT_ACTIVATION_MAX {}\n".format(prefix, self.out_activation_max))
                f.write("#define {}_INPUT_BATCHES {}\n".format(prefix, self.batches))
        self.format_output_file(filepath)
    def generate_c_array(self, name, array, datatype="q7_t", const="const "):
        os.makedirs(self.headers_dir, exist_ok=True)

        w = None
        if type(array) is list:
            w = array
            size = len(array)
        elif type(array) is np.ndarray:
            w = array
            w = w.ravel()
            size = w.size
        else:
            w = array.numpy()
            w = w.ravel()
            size = tf.size(array)

        filename = name + "_data.h"
        filepath = self.headers_dir + filename

        self.generated_header_files.append(filename)

        print("Generating C header {}...".format(filepath))
        with open(filepath, "w+") as f:
            self.write_c_common_header(f)
            f.write("#include <stdint.h>\n\n")
            f.write(const + datatype + " " + self.testdataset + '_' + name + "[%d] =\n{\n" % size)
            for i in range(size - 1):
                f.write("  %d,\n" % w[i])
            f.write("  %d\n" % w[size - 1])
            f.write("};\n")
        self.format_output_file(filepath)
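
    # For a dataset named 'basic', generate_c_array("input", ...) emits a header
    # along these lines (illustrative; values and size depend on the test set):
    #
    #   // Generated by generate_test_data.py using TFL version 2.x as reference.
    #   #pragma once
    #   #include <stdint.h>
    #
    #   const q7_t basic_input[40] =
    #   {
    #     12,
    #     -3,
    #     ...
    #   };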
    def set_output_dims_and_padding(self, output_x, output_y):
        self.x_output = output_x
        self.y_output = output_y
        if self.has_padding:
            # Take dilation into account.
            filter_x = (self.filter_x - 1) * self.dilation_x + 1
            filter_y = (self.filter_y - 1) * self.dilation_y + 1

            pad_along_width = max((self.x_output - 1) * self.stride_x + filter_x - self.x_input, 0)
            pad_along_height = max((self.y_output - 1) * self.stride_y + filter_y - self.y_input, 0)
            pad_top = pad_along_height // 2
            pad_left = pad_along_width // 2
            self.pad_x = pad_left
            self.pad_y = pad_top
        else:
            self.pad_x = 0
            self.pad_y = 0
    @abstractmethod
    def generate_data(self, input_data=None, weights=None, biases=None):
        ''' Must be overridden '''

    def quantize_scale(self, scale):
        significand, shift = math.frexp(scale)
        significand_q31 = round(significand * (1 << 31))
        return significand_q31, shift
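
    # Worked example (illustrative): quantize_scale(0.003). math.frexp(0.003)
    # returns (0.768, -8), since 0.003 == 0.768 * 2**-8 and the mantissa is
    # normalized to [0.5, 1). The method therefore yields approximately
    # (round(0.768 * 2**31), -8) == (1649267442, -8), i.e. the fixed-point
    # identity 0.003 ~= 1649267442 / 2**31 * 2**-8 that the kernels rely on.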
    def get_convolving_calib_data_func(self, n_inputs):
        def representative_data_gen():
            representative_testsets = []
            if n_inputs > 0:
                for i in range(n_inputs):
                    representative_testsets.append(np.ones((self.batches, self.y_input, self.x_input, self.input_ch),
                                                           dtype=np.float32))
                yield representative_testsets
            else:
                raise RuntimeError("Invalid number of representative test sets: {}. Must be more than 0".
                                   format(n_inputs))
        return representative_data_gen
    def convert_and_interpret(self, model, inttype, input_data=None):
        """
        Compile and convert a model to TFLite format, then instantiate an interpreter and allocate tensors.
        """
        model.compile(loss=tf.keras.losses.categorical_crossentropy,
                      optimizer=tf.keras.optimizers.Adam(),
                      metrics=['accuracy'])
        n_inputs = len(model.inputs)

        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = self.get_convolving_calib_data_func(n_inputs)
        if self.is_int16xint8:
            converter.target_spec.supported_ops = [
                tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8]
        else:
            converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = inttype
        converter.inference_output_type = inttype
        tflite_model = converter.convert()

        os.makedirs(os.path.dirname(self.model_path_tflite), exist_ok=True)
        with open(self.model_path_tflite, "wb") as model_file:
            model_file.write(tflite_model)

        interpreter = Interpreter(
            model_path=str(self.model_path_tflite), experimental_op_resolver_type=OpResolverType.BUILTIN_REF)
        interpreter.allocate_tensors()

        output_details = interpreter.get_output_details()
        (self.output_scale, self.output_zero_point) = output_details[0]['quantization']

        if input_data is not None:
            input_details = interpreter.get_input_details()
            (self.input_scale, self.input_zero_point) = input_details[0]['quantization']

            # Set input tensors.
            interpreter.set_tensor(input_details[0]["index"], tf.cast(input_data, inttype))

        return interpreter
    def generate_json_from_template(self, weights_feature_data=None, weights_time_data=None, bias_data=None):
        """
        Takes a json template and parameters as input and creates a new json file.
        """
        generated_json_file = self.model_path + '.json'
        with open(self.json_template, 'r') as in_file, open(generated_json_file, 'w') as out_file:
            # Update shapes, scales and zero points.
            data = in_file.read()
            for item, to_replace in self.json_replacements.items():
                data = data.replace(item, str(to_replace))
            data = json.loads(data)

            # Update weights and bias data.
            if weights_feature_data is not None:
                w_1_buffer_index = 1
                data["buffers"][w_1_buffer_index]["data"] = self.to_bytes(weights_feature_data.numpy().ravel(), 1)
            if weights_time_data is not None:
                w_2_buffer_index = 2
                data["buffers"][w_2_buffer_index]["data"] = self.to_bytes(weights_time_data.numpy().ravel(), 2)
            if bias_data is not None:
                bias_buffer_index = 3
                data["buffers"][bias_buffer_index]["data"] = self.to_bytes(bias_data.numpy().ravel(), 4)
            json.dump(data, out_file, indent=2)
        return generated_json_file
    def flatc_generate_tflite(self, json_input, schema):
        flatc = 'flatc'
        if schema is None:
            raise RuntimeError("A schema file is required.")
        command = "{} -o {} -c -b {} {}".format(flatc, self.headers_dir, schema, json_input)
        command_list = command.split(' ')
        process = subprocess.run(command_list)
        if process.returncode != 0:
            raise RuntimeError("The following command failed: {}. Did you install flatc?".format(command))
    def to_bytes(self, tensor_data, type_size):
        result_bytes = []

        if type_size == 1:
            tensor_type = np.uint8
        elif type_size == 2:
            tensor_type = np.uint16
        elif type_size == 4:
            tensor_type = np.uint32
        else:
            raise RuntimeError("Size not supported: {}".format(type_size))

        for val in tensor_data:
            for byte in int(tensor_type(val)).to_bytes(type_size, 'little'):
                result_bytes.append(byte)

        return result_bytes
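
    # Illustrative example: to_bytes([1, -2], 4) reinterprets each value through
    # np.uint32 and serializes it little-endian, so 1 -> [1, 0, 0, 0] and
    # -2 -> 4294967294 -> [254, 255, 255, 255]; the result is the concatenation
    # [1, 0, 0, 0, 254, 255, 255, 255], the byte layout the flatbuffer "data"
    # arrays in the json template expect.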
class ConvSettings(TestSettings):

    def __init__(self, dataset, testtype, args, in_ch=1, out_ch=1, x_in=7, y_in=7, w_x=3, w_y=3, stride_x=2, stride_y=2,
                 pad=True, randmin=INT8_MIN, randmax=INT8_MAX, batches=1, generate_bias=True, relu6=False,
                 out_activation_min=None, out_activation_max=None, int16xint8=False, bias_min=None,
                 bias_max=None, dilation_x=1, dilation_y=1):
        super().__init__(dataset, testtype, args, in_ch, out_ch, x_in, y_in, w_x, w_y, stride_x, stride_y, pad,
                         randmin, randmax, batches, generate_bias=generate_bias, relu6=relu6,
                         out_activation_min=out_activation_min, out_activation_max=out_activation_max,
                         int16xint8=int16xint8, bias_min=bias_min, bias_max=bias_max, dilation_x=dilation_x,
                         dilation_y=dilation_y)

        self.scaling_factors = []

        if self.test_type == 'depthwise_conv':
            self.channel_multiplier = self.output_ch // self.input_ch
            if self.output_ch % self.input_ch != 0:
                raise RuntimeError("out channel ({}) is not a multiple of in channel ({})".format(out_ch, in_ch))
        elif self.test_type != 'conv':
            raise RuntimeError("Invalid test type {}".format(self.test_type))

    def write_c_config_header(self):
        super().write_c_config_header()

        filename = self.config_data
        filepath = self.headers_dir + filename
        prefix = self.testdataset.upper()

        with open(filepath, "a") as f:
            self.write_common_config(f, prefix)
            if self.test_type == 'depthwise_conv':
                f.write("#define {}_CH_MULT {}\n".format(prefix, self.channel_multiplier))
            f.write("#define {}_INPUT_OFFSET {}\n".format(prefix, -self.input_zero_point))
            f.write("#define {}_OUTPUT_OFFSET {}\n".format(prefix, self.output_zero_point))
            f.write("#define {}_DILATION_X {}\n".format(prefix, self.dilation_x))
            f.write("#define {}_DILATION_Y {}\n".format(prefix, self.dilation_y))

    def generate_quantize_per_channel_multiplier(self):
        num_channels = self.output_ch
        per_channel_multiplier = []
        per_channel_shift = []

        if len(self.scaling_factors) != num_channels:
            raise RuntimeError("Missing scaling factors")

        for i in range(num_channels):
            effective_output_scale = self.input_scale * self.scaling_factors[i] / self.output_scale
            (quantized_multiplier, shift) = self.quantize_scale(effective_output_scale)

            per_channel_multiplier.append(quantized_multiplier)
            per_channel_shift.append(shift)

        self.generate_c_array("output_mult", per_channel_multiplier, datatype='int32_t')
        self.generate_c_array("output_shift", per_channel_shift, datatype='int32_t')
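
    # The (multiplier, shift) pairs emitted above feed the per-channel
    # requantization in the consuming CMSIS-NN kernels, which (roughly, as an
    # assumption about the kernel side) compute a doubling high multiply of the
    # accumulator by multiplier / 2**31 followed by a shift, one pair per
    # output channel.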
    def generate_data(self, input_data=None, weights=None, biases=None):
        if self.is_int16xint8:
            inttype = tf.int16
            datatype = "q15_t"
            bias_datatype = "int64_t"
        else:
            inttype = tf.int8
            datatype = "q7_t"
            bias_datatype = "int32_t"

        input_data = self.get_randomized_input_data(input_data)

        if self.test_type == 'conv':
            out_channel = self.output_ch
        elif self.test_type == 'depthwise_conv':
            out_channel = self.channel_multiplier

        if weights is not None:
            weights = tf.reshape(weights, [self.filter_y, self.filter_x, self.input_ch, out_channel])
        else:
            weights = self.get_randomized_data([self.filter_y, self.filter_x, self.input_ch, out_channel],
                                               self.kernel_table_file,
                                               regenerate=self.regenerate_new_weights)

        biases = self.get_randomized_bias_data(biases)

        # Create a one-layer Keras model.
        model = tf.keras.models.Sequential()
        input_shape = (self.batches, self.y_input, self.x_input, self.input_ch)
        model.add(tf.keras.layers.InputLayer(
            input_shape=input_shape[1:], batch_size=self.batches))
        if self.test_type == 'conv':
            conv_layer = tf.keras.layers.Conv2D(self.output_ch, kernel_size=(self.filter_y, self.filter_x),
                                                strides=(self.stride_y, self.stride_x),
                                                padding=self.padding, input_shape=input_shape[1:],
                                                dilation_rate=(self.dilation_y, self.dilation_x))
            model.add(conv_layer)
            conv_layer.set_weights([weights, biases])
        elif self.test_type == 'depthwise_conv':
            depthwise_layer = tf.keras.layers.DepthwiseConv2D(
                kernel_size=(self.filter_y, self.filter_x),
                strides=(self.stride_y, self.stride_x),
                padding=self.padding, depth_multiplier=self.channel_multiplier,
                input_shape=input_shape[1:], dilation_rate=(self.dilation_y, self.dilation_x))
            model.add(depthwise_layer)
            depthwise_layer.set_weights([weights, biases])

        interpreter = self.convert_and_interpret(model, inttype, input_data)

        all_layers_details = interpreter.get_tensor_details()
        filter_layer = all_layers_details[1]
        bias_layer = all_layers_details[2]
        if weights.numpy().size != interpreter.get_tensor(filter_layer['index']).size or \
           (self.generate_bias and biases.numpy().size != interpreter.get_tensor(bias_layer['index']).size):
            raise RuntimeError("Dimension mismatch")

        output_details = interpreter.get_output_details()
        self.set_output_dims_and_padding(output_details[0]['shape'][2], output_details[0]['shape'][1])

        self.generate_c_array("input", input_data, datatype=datatype)
        self.generate_c_array("weights", interpreter.get_tensor(filter_layer['index']))

        self.scaling_factors = filter_layer['quantization_parameters']['scales']
        self.generate_quantize_per_channel_multiplier()

        self.generate_c_array("biases", interpreter.get_tensor(bias_layer['index']), bias_datatype)

        # Generate reference.
        interpreter.invoke()
        output_data = interpreter.get_tensor(output_details[0]["index"])
        self.generate_c_array("output_ref", np.clip(output_data, self.out_activation_min, self.out_activation_max),
                              datatype=datatype)

        self.write_c_config_header()
        self.write_c_header_wrapper()
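
# Illustrative end-to-end flow for one test set, assuming the script's argparse
# args are in scope (this mirrors how load_all_testdatasets() and the main
# entry point drive these classes):
#
#   settings = ConvSettings('basic', 'conv', args, in_ch=1, out_ch=1, x_in=5,
#                           y_in=8, w_x=2, w_y=4, stride_x=1, stride_y=1, pad=False)
#   settings.generate_data()  # writes TestCases/TestData/basic/*.h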
class PoolingSettings(TestSettings):

    def __init__(self, dataset, testtype, args, channels=8, x_in=4, y_in=4, w_x=4, w_y=4, stride_x=1, stride_y=1,
                 randmin=INT8_MIN, randmax=INT8_MAX, batches=1, pad=False, relu6=False, out_activation_min=None,
                 out_activation_max=None, int16xint8=False):
        super().__init__(dataset, testtype, args, channels, channels, x_in, y_in, w_x, w_y, stride_x, stride_y, pad,
                         randmin=randmin, randmax=randmax, relu6=relu6, out_activation_min=out_activation_min,
                         out_activation_max=out_activation_max, int16xint8=int16xint8)

    def generate_data(self, input_data=None):
        if self.is_int16xint8:
            datatype = "int16_t"
            inttype = tf.int16
        else:
            datatype = "int8_t"
            inttype = tf.int8

        input_data = self.get_randomized_input_data(input_data)
        self.generate_c_array("input", input_data, datatype=datatype)
        input_data = tf.cast(input_data, tf.float32)

        # Create a one-layer Keras model.
        model = tf.keras.models.Sequential()
        input_shape = (self.batches, self.y_input, self.x_input, self.input_ch)
        model.add(tf.keras.layers.InputLayer(
            input_shape=input_shape[1:], batch_size=self.batches))
        if self.test_type == 'avgpool':
            model.add(tf.keras.layers.AveragePooling2D(pool_size=(self.filter_y, self.filter_x),
                                                       strides=(self.stride_y, self.stride_x),
                                                       padding=self.padding, input_shape=input_shape[1:]))
        elif self.test_type == 'maxpool':
            model.add(tf.keras.layers.MaxPooling2D(pool_size=(self.filter_y, self.filter_x),
                                                   strides=(self.stride_y, self.stride_x),
                                                   padding=self.padding, input_shape=input_shape[1:]))
        else:
            raise RuntimeError("Wrong test type")

        interpreter = self.convert_and_interpret(model, inttype, input_data)

        output_details = interpreter.get_output_details()
        self.set_output_dims_and_padding(output_details[0]['shape'][2], output_details[0]['shape'][1])

        # Generate reference.
        interpreter.invoke()
        output_data = interpreter.get_tensor(output_details[0]["index"])
        self.generate_c_array("output_ref", np.clip(output_data, self.out_activation_min, self.out_activation_max),
                              datatype=datatype)

        self.write_c_config_header()
        self.write_c_header_wrapper()

    def write_c_config_header(self):
        super().write_c_config_header()

        filename = self.config_data
        filepath = self.headers_dir + filename
        prefix = self.testdataset.upper()

        with open(filepath, "a") as f:
            self.write_common_config(f, prefix)
class FullyConnectedSettings(TestSettings):

    def __init__(self, dataset, testtype, args, in_ch=1, out_ch=1, x_in=1, y_in=1, w_x=1, w_y=1, stride_x=1, stride_y=1,
                 pad=False, randmin=INT8_MIN, randmax=INT8_MAX, batches=1, generate_bias=True, out_activation_min=None,
                 out_activation_max=None, int16xint8=False, bias_min=None, bias_max=None):
        super().__init__(dataset, testtype, args, in_ch, out_ch, x_in, y_in, x_in, y_in, stride_x, stride_y, pad,
                         randmin, randmax, batches, generate_bias=generate_bias, out_activation_min=out_activation_min,
                         out_activation_max=out_activation_max, int16xint8=int16xint8, bias_min=bias_min,
                         bias_max=bias_max)

        if not self.test_type == 'fully_connected':
            raise RuntimeError("Invalid test type {}".format(self.test_type))
    def write_c_config_header(self):
        super().write_c_config_header()

        filename = self.config_data
        filepath = self.headers_dir + filename
        prefix = self.testdataset.upper()

        with open(filepath, "a") as f:
            f.write("#define {}_OUTPUT_MULTIPLIER {}\n".format(prefix, self.quantized_multiplier))
            f.write("#define {}_OUTPUT_SHIFT {}\n".format(prefix, self.quantized_shift))
            f.write("#define {}_ACCUMULATION_DEPTH {}\n".format(prefix, self.input_ch * self.x_input * self.y_input))
            f.write("#define {}_INPUT_OFFSET {}\n".format(prefix, -self.input_zero_point))
            f.write("#define {}_OUTPUT_OFFSET {}\n".format(prefix, self.output_zero_point))

    def quantize_multiplier(self):
        input_product_scale = self.input_scale * self.weights_scale
        if input_product_scale < 0:
            raise RuntimeError("Negative input product scale")
        real_multiplier = input_product_scale / self.output_scale
        (self.quantized_multiplier, self.quantized_shift) = self.quantize_scale(real_multiplier)
    def generate_data(self, input_data=None, weights=None, biases=None):
        input_data = self.get_randomized_input_data(input_data,
                                                    [self.batches, self.input_ch * self.x_input * self.y_input])

        if self.is_int16xint8:
            inttype = tf.int16
            datatype = "q15_t"
            bias_datatype = "int64_t"
        else:
            inttype = tf.int8
            datatype = "q7_t"
            bias_datatype = "int32_t"

        fc_weights_format = [self.input_ch * self.y_input * self.x_input, self.output_ch]

        if weights is not None:
            weights = tf.reshape(weights, fc_weights_format)
        else:
            weights = self.get_randomized_data(fc_weights_format,
                                               self.kernel_table_file,
                                               regenerate=self.regenerate_new_weights)

        biases = self.get_randomized_bias_data(biases)

        # Create a model with one fully_connected layer.
        model = tf.keras.models.Sequential()
        model.add(tf.keras.layers.InputLayer(
            input_shape=(self.y_input * self.x_input * self.input_ch,), batch_size=self.batches))
        fully_connected_layer = tf.keras.layers.Dense(self.output_ch, activation=None)
        model.add(fully_connected_layer)
        fully_connected_layer.set_weights([weights, biases])

        interpreter = self.convert_and_interpret(model, inttype, input_data)

        all_layers_details = interpreter.get_tensor_details()
        if self.is_int16xint8:
            filter_layer = all_layers_details[2]
            bias_layer = all_layers_details[1]
        else:
            filter_layer = all_layers_details[1]
            bias_layer = all_layers_details[2]
        if weights.numpy().size != interpreter.get_tensor(filter_layer['index']).size or \
           (self.generate_bias and biases.numpy().size != interpreter.get_tensor(bias_layer['index']).size):
            raise RuntimeError("Dimension mismatch")

        # The generic destination size calculation for these tests is:
        # self.x_output * self.y_output * self.output_ch * self.batches.
        self.x_output = 1
        self.y_output = 1
        output_details = interpreter.get_output_details()
        if self.output_ch != output_details[0]['shape'][1] or self.batches != output_details[0]['shape'][0]:
            raise RuntimeError("Fully connected out dimension mismatch")

        self.weights_scale = filter_layer['quantization_parameters']['scales'][0]
        self.quantize_multiplier()

        self.generate_c_array("input", input_data, datatype=datatype)
        self.generate_c_array("weights", interpreter.get_tensor(filter_layer['index']))
        if self.generate_bias:
            self.generate_c_array("biases", interpreter.get_tensor(bias_layer['index']), bias_datatype)
        else:
            self.generate_c_array("biases", biases, bias_datatype)

        # Generate reference.
        interpreter.invoke()
        output_data = interpreter.get_tensor(output_details[0]["index"])
        self.generate_c_array("output_ref", np.clip(output_data, self.out_activation_min, self.out_activation_max),
                              datatype=datatype)

        self.write_c_config_header()
        self.write_c_header_wrapper()
class SoftmaxSettings(TestSettings):
    softmax_input_integer_bits = 5

    def __init__(self, dataset, testtype, args, x_in=5, y_in=1, randmin=INT8_MIN, randmax=INT8_MAX, int16xint8=False,
                 inInt8outInt16=False, input_scale=0.003922, input_zp=-128):
        super().__init__(dataset, testtype, args, 1, 1, x_in, y_in, 1, 1, 1, 1, False, randmin,
                         randmax, int16xint8=int16xint8)
        self.x_input = self.x_output = x_in
        self.y_input = self.y_output = y_in
        self.inInt8outInt16 = inInt8outInt16

        if self.inInt8outInt16 and self.is_int16xint8:
            raise RuntimeError("Specify input as either s8 or s16")

        if self.inInt8outInt16:
            self.input_scale = input_scale
            self.json_template = "TestCases/Common/Softmax/softmax_int8_to_int16_template.json"
            self.json_replacements = {"num_rows": self.y_input,
                                      "row_size": self.x_input,
                                      "input_scale": input_scale,
                                      "input_zp": input_zp}

    def calc_softmax_params(self):
        if self.is_int16xint8:
            input_scale_beta_rescale = self.input_scale / (10.0 / 65535.0)
            (self.input_multiplier, self.input_left_shift) = self.quantize_scale(input_scale_beta_rescale)
        else:
            input_real_multiplier = min(self.input_scale * (1 << (31 - self.softmax_input_integer_bits)),
                                        (1 << 31) - 1)
            (self.input_multiplier, self.input_left_shift) = self.quantize_scale(input_real_multiplier)

            self.diff_min = ((1 << self.softmax_input_integer_bits) - 1) * \
                            (1 << (31 - self.softmax_input_integer_bits)) / \
                            (1 << self.input_left_shift)
            self.diff_min = math.floor(self.diff_min)
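
    # Sketch of the s8 arithmetic above (an assumption based on TFLite's
    # reference softmax scaling, which this mirrors): with
    # softmax_input_integer_bits = 5, the input is rescaled into a Q5.26
    # fixed-point domain, so input_real_multiplier = input_scale * 2**26
    # (saturated to INT32_MAX), and diff_min bounds the largest x_max - x
    # difference that still contributes to the softmax sum.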
    def write_c_config_header(self):
        super().write_c_config_header(write_common_parameters=False)

        filename = self.config_data
        filepath = self.headers_dir + filename
        prefix = self.testdataset.upper()

        with open(filepath, "a") as f:
            f.write("#define {}_NUM_ROWS {}\n".format(prefix, self.y_input))
            f.write("#define {}_ROW_SIZE {}\n".format(prefix, self.x_input))
            f.write("#define {}_INPUT_MULT {}\n".format(prefix, self.input_multiplier))
            f.write("#define {}_INPUT_LEFT_SHIFT {}\n".format(prefix, self.input_left_shift))
            if not self.is_int16xint8:
                f.write("#define {}_DIFF_MIN {}\n".format(prefix, -self.diff_min))
            f.write("#define {}_DST_SIZE {}\n".format(prefix, self.x_output * self.y_output))

    def get_softmax_randomized_input_data(self, input_data, input_shape):
        # Generate or load saved input data unless hardcoded data is provided.
        if input_data is not None:
            input_data = tf.reshape(input_data, input_shape)
        else:
            input_data = self.get_randomized_data(input_shape,
                                                  self.inputs_table_file,
                                                  regenerate=self.regenerate_new_input)
        return input_data
    def generate_data(self, input_data=None, weights=None, biases=None):
        input_data = self.get_softmax_randomized_input_data(input_data, [self.y_input, self.x_input])

        if self.is_int16xint8:
            inttype = tf.int16
            datatype = "q15_t"
        else:
            inttype = tf.int8
            datatype = "q7_t"

        self.generate_c_array("input", input_data, datatype=datatype)

        # Generate reference.
        if self.inInt8outInt16:
            # Output is int16.
            datatype = "q15_t"

            # Keras does not support int8 input and int16 output for Softmax.
            # Using a template json instead.
            generated_json = self.generate_json_from_template()
            self.flatc_generate_tflite(generated_json, args.schema_file)

            interpreter = Interpreter(
                model_path=str(self.model_path_tflite), experimental_op_resolver_type=OpResolverType.BUILTIN_REF)
            interpreter.allocate_tensors()
            all_layers_details = interpreter.get_tensor_details()
            input_layer = all_layers_details[0]
            output_layer = all_layers_details[1]

            interpreter.set_tensor(input_layer["index"], tf.cast(input_data, tf.int8))
            interpreter.invoke()
            output_data = interpreter.get_tensor(output_layer["index"])
        else:
            # Create a one-layer Keras model.
            model = tf.keras.models.Sequential()
            input_shape = (self.y_input, self.x_input)
            model.add(tf.keras.layers.Softmax(input_shape=input_shape))

            interpreter = self.convert_and_interpret(model, inttype, tf.expand_dims(input_data, axis=0))

            output_details = interpreter.get_output_details()
            interpreter.invoke()
            output_data = interpreter.get_tensor(output_details[0]["index"])

        self.calc_softmax_params()
        self.generate_c_array("output_ref", output_data, datatype=datatype)

        self.write_c_config_header()
        self.write_c_header_wrapper()
class SVDFSettings(TestSettings):

    def __init__(self, dataset, testtype, args, batches=2, number_inputs=2, rank=8, memory_size=10, randmin=INT8_MIN,
                 randmax=INT8_MAX, input_size=3, number_units=4, generate_bias=True, input_scale=0.1, input_zp=0,
                 w_1_scale=0.005, w_1_zp=0, w_2_scale=0.005, w_2_zp=0, bias_scale=0.000001, bias_zp=0,
                 state_scale=0.005, state_zp=0, output_scale=0.1, output_zp=0):
        super().__init__(dataset, testtype, args, 1, 1, 1, 1, 1, 1, 1, 1, False, randmin,
                         randmax, generate_bias=generate_bias)
        self.batches = batches
        self.number_units = number_units
        self.input_size = input_size
        self.memory_size = memory_size
        self.rank = rank
        self.number_filters = self.number_units * self.rank
        self.time_table_file = self.pregenerated_data_dir + self.testdataset + '/' + 'time_data.txt'

        self.number_inputs = number_inputs
        self.input_sequence_length = self.number_inputs * self.input_size * self.batches

        self.in_activation_max = INT16_MAX
        self.in_activation_min = INT16_MIN

        self.json_template = "TestCases/Common/svdf_template.json"
        self.json_replacements = {"memory_sizeXnumber_filters": self.memory_size * self.number_filters,
                                  "batches": self.batches,
                                  "input_size": self.input_size,
                                  "number_filters": self.number_filters,
                                  "memory_size": self.memory_size,
                                  "number_units": self.number_units,
                                  "rank_value": self.rank,
                                  "input_scale": input_scale,
                                  "input_zp": input_zp,
                                  "w_1_scale": w_1_scale,
                                  "w_1_zp": w_1_zp,
                                  "w_2_scale": w_2_scale,
                                  "w_2_zp": w_2_zp,
                                  "bias_scale": bias_scale,
                                  "bias_zp": bias_zp,
                                  "state_scale": state_scale,
                                  "state_zp": state_zp,
                                  "output_scale": output_scale,
                                  "output_zp": output_zp}

    def calc_multipliers_and_shifts(self, input_scale, weights_1_scale, weights_2_scale, state_scale, output_scale):
        effective_scale_1 = weights_1_scale * input_scale / state_scale
        effective_scale_2 = state_scale * weights_2_scale / output_scale
        (self.multiplier_in, self.shift_1) = self.quantize_scale(effective_scale_1)
        (self.multiplier_out, self.shift_2) = self.quantize_scale(effective_scale_2)
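
    # Interpretation of the two effective scales above (a sketch): scale 1
    # requantizes the input x feature-weights product into the state domain,
    # and scale 2 requantizes the state x time-weights product into the output
    # domain. They surface in config_data.h as the MULTIPLIER_IN/SHIFT_1 and
    # MULTIPLIER_OUT/SHIFT_2 macros written below.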
    def write_c_config_header(self):
        super().write_c_config_header(write_common_parameters=False)

        filename = self.config_data
        filepath = self.headers_dir + filename
        prefix = self.testdataset.upper()

        with open(filepath, "a") as f:
            f.write("#define {}_MULTIPLIER_IN {}\n".format(prefix, self.multiplier_in))
            f.write("#define {}_MULTIPLIER_OUT {}\n".format(prefix, self.multiplier_out))
            f.write("#define {}_SHIFT_1 {}\n".format(prefix, self.shift_1))
            f.write("#define {}_SHIFT_2 {}\n".format(prefix, self.shift_2))
            f.write("#define {}_IN_ACTIVATION_MIN {}\n".format(prefix, self.in_activation_min))
            f.write("#define {}_IN_ACTIVATION_MAX {}\n".format(prefix, self.in_activation_max))
            f.write("#define {}_RANK {}\n".format(prefix, self.rank))
            f.write("#define {}_FEATURE_BATCHES {}\n".format(prefix, self.number_filters))
            f.write("#define {}_TIME_BATCHES {}\n".format(prefix, self.memory_size))
            f.write("#define {}_INPUT_SIZE {}\n".format(prefix, self.input_size))
            f.write("#define {}_DST_SIZE {}\n".format(prefix, self.number_units * self.batches))
            f.write("#define {}_OUT_ACTIVATION_MIN {}\n".format(prefix, self.out_activation_min))
            f.write("#define {}_OUT_ACTIVATION_MAX {}\n".format(prefix, self.out_activation_max))
            f.write("#define {}_INPUT_BATCHES {}\n".format(prefix, self.batches))
            f.write("#define {}_INPUT_OFFSET {}\n".format(prefix, self.input_zero_point))
            f.write("#define {}_OUTPUT_OFFSET {}\n".format(prefix, self.output_zero_point))
    def generate_data(self, input_data=None, weights=None, biases=None, time_data=None, state_data=None):
        if input_data is not None:
            input_data = tf.reshape(input_data, [self.input_sequence_length])
        else:
            input_data = self.get_randomized_data([self.input_sequence_length],
                                                  self.inputs_table_file,
                                                  regenerate=self.regenerate_new_input)
        self.generate_c_array("input_sequence", input_data)

        if weights is not None:
            weights_feature_data = tf.reshape(weights, [self.number_filters, self.input_size])
        else:
            weights_feature_data = self.get_randomized_data([self.number_filters, self.input_size],
                                                            self.kernel_table_file,
                                                            regenerate=self.regenerate_new_weights)

        if time_data is not None:
            weights_time_data = tf.reshape(time_data, [self.number_filters, self.memory_size])
        else:
            weights_time_data = self.get_randomized_data([self.number_filters, self.memory_size],
                                                         self.time_table_file,
                                                         regenerate=self.regenerate_new_weights)

        if not self.generate_bias:
            biases = [0] * self.number_units
        if biases is not None:
            biases = tf.reshape(biases, [self.number_units])
        else:
            biases = self.get_randomized_data([self.number_units],
                                              self.bias_table_file,
                                              regenerate=self.regenerate_new_weights)

        # Generate tflite model.
        generated_json = self.generate_json_from_template(weights_feature_data, weights_time_data, biases)
        self.flatc_generate_tflite(generated_json, args.schema_file)

        # Run TFL interpreter.
        interpreter = Interpreter(
            model_path=str(self.model_path_tflite), experimental_op_resolver_type=OpResolverType.BUILTIN_REF)
        interpreter.allocate_tensors()

        # Read back scales and zero points from tflite model.
        all_layers_details = interpreter.get_tensor_details()
        input_layer = all_layers_details[0]
        weights_1_layer = all_layers_details[1]
        weights_2_layer = all_layers_details[2]
        bias_layer = all_layers_details[3]
        state_layer = all_layers_details[4]
        output_layer = all_layers_details[5]
        (input_scale, self.input_zero_point) = self.get_scale_and_zp(input_layer)
        (weights_1_scale, zero_point) = self.get_scale_and_zp(weights_1_layer)
        (weights_2_scale, zero_point) = self.get_scale_and_zp(weights_2_layer)
        (bias_scale, zero_point) = self.get_scale_and_zp(bias_layer)
        (state_scale, zero_point) = self.get_scale_and_zp(state_layer)
        (output_scale, self.output_zero_point) = self.get_scale_and_zp(output_layer)

        self.calc_multipliers_and_shifts(input_scale, weights_1_scale, weights_2_scale, state_scale, output_scale)

        # Generate unit test C headers.
        self.generate_c_array("weights_feature", interpreter.get_tensor(weights_1_layer['index']))
        self.generate_c_array("weights_time", interpreter.get_tensor(weights_2_layer['index']), datatype='q15_t')
        self.generate_c_array("biases", interpreter.get_tensor(bias_layer['index']), "int32_t")
        self.generate_c_array("state", interpreter.get_tensor(state_layer['index']), "q15_t")

        # Generate reference output.
        svdf_ref = None
        for i in range(self.number_inputs):
            start = i * self.input_size * self.batches
            end = i * self.input_size * self.batches + self.input_size * self.batches
            input_sequence = input_data[start:end]
            input_sequence = tf.reshape(input_sequence, [self.batches, self.input_size])
            interpreter.set_tensor(input_layer["index"], tf.cast(input_sequence, tf.int8))
            interpreter.invoke()
            svdf_ref = interpreter.get_tensor(output_layer["index"])
        self.generate_c_array("output_ref", svdf_ref)

        self.write_c_config_header()
        self.write_c_header_wrapper()

    def get_scale_and_zp(self, layer):
        return (layer['quantization_parameters']['scales'][0], layer['quantization_parameters']['zero_points'][0])
class AddMulSettings(TestSettings):

    def __init__(self, dataset, testtype, args, channels=1, x_in=4, y_in=4, decimal_input=6, randmin=INT8_MIN,
                 randmax=INT8_MAX, out_activation_min=INT8_MIN, out_activation_max=INT8_MAX, int16xint8=False):
        super().__init__(dataset, testtype, args, in_ch=channels, out_ch=channels, x_in=x_in, y_in=y_in, w_x=1, w_y=1,
                         stride_x=1, stride_y=1, pad=False, randmin=randmin, randmax=randmax, batches=1,
                         generate_bias=False, relu6=False, out_activation_min=out_activation_min,
                         out_activation_max=out_activation_max, int16xint8=int16xint8)
        self.x_input = self.x_output = x_in
        self.y_input = self.y_output = y_in
        self.decimal_input = decimal_input

        self.left_shift = 15 if self.is_int16xint8 else 20
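        # Note on left_shift (an assumption based on TFLite's quantized add
        # kernels, which these values match): before the per-input rescale,
        # both operands are shifted up into a wider intermediate so that the
        # multiplier/shift pairs computed in generate_data() below keep
        # precision; 20 bits are used for the s8 path and 15 for s16.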
    def generate_data(self, input_data1=None, input_data2=None):
        input_shape = (1, self.y_input, self.x_input, self.input_ch)

        input_data1 = self.get_randomized_data(list(input_shape),
                                               self.inputs_table_file,
                                               regenerate=self.regenerate_new_input,
                                               decimals=self.decimal_input)
        input_data2 = self.get_randomized_data(list(input_shape),
                                               self.kernel_table_file,
                                               regenerate=self.regenerate_new_weights,
                                               decimals=self.decimal_input)

        if self.is_int16xint8:
            inttype = "int16_t"
            inttype_tf = tf.int16
        else:
            inttype = "int8_t"
            inttype_tf = tf.int8

        # Create a one-layer functional Keras model, as add/mul cannot use a sequential Keras model.
        input1 = tf.keras.layers.Input(shape=input_shape[1:])
        input2 = tf.keras.layers.Input(shape=input_shape[1:])
        if self.test_type == 'add':
            layer = tf.keras.layers.Add()([input1, input2])
        elif self.test_type == 'mul':
            layer = tf.keras.layers.Multiply()([input1, input2])
        else:
            raise RuntimeError("Wrong test type")
        out = tf.keras.layers.Lambda(function=lambda x: x)(layer)
        model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)

        interpreter = self.convert_and_interpret(model, inttype_tf)

        input_details = interpreter.get_input_details()
        interpreter.set_tensor(input_details[0]["index"], tf.cast(input_data1, inttype_tf))
        interpreter.set_tensor(input_details[1]["index"], tf.cast(input_data2, inttype_tf))

        # Calculate multipliers, shifts and offsets.
        (input1_scale, self.input1_zero_point) = input_details[0]['quantization']
        (input2_scale, self.input2_zero_point) = input_details[1]['quantization']
        self.input1_zero_point = -self.input1_zero_point
        self.input2_zero_point = -self.input2_zero_point
        double_max_input_scale = max(input1_scale, input2_scale) * 2
        (self.input1_mult, self.input1_shift) = self.quantize_scale(input1_scale / double_max_input_scale)
        (self.input2_mult, self.input2_shift) = self.quantize_scale(input2_scale / double_max_input_scale)
        if self.test_type == 'add':
            actual_output_scale = double_max_input_scale / ((1 << self.left_shift) * self.output_scale)
        elif self.test_type == 'mul':
            actual_output_scale = input1_scale * input2_scale / self.output_scale
        (self.output_mult, self.output_shift) = self.quantize_scale(actual_output_scale)

        # Generate reference.
        interpreter.invoke()
        output_details = interpreter.get_output_details()
        output_data = interpreter.get_tensor(output_details[0]["index"])
        self.generate_c_array("input1", input_data1, datatype=inttype)
        self.generate_c_array("input2", input_data2, datatype=inttype)
        self.generate_c_array("output_ref", np.clip(output_data, self.out_activation_min, self.out_activation_max),
                              datatype=inttype)

        self.write_c_config_header()
        self.write_c_header_wrapper()
    def write_c_config_header(self):
        super().write_c_config_header(write_common_parameters=False)

        filename = self.config_data
        filepath = self.headers_dir + filename
        prefix = self.testdataset.upper()

        with open(filepath, "a") as f:
            f.write("#define {}_DST_SIZE {}\n".format(prefix,
                                                      self.batches * self.y_input * self.x_input * self.input_ch))
            f.write("#define {}_OUT_ACTIVATION_MIN {}\n".format(prefix, self.out_activation_min))
            f.write("#define {}_OUT_ACTIVATION_MAX {}\n".format(prefix, self.out_activation_max))
            f.write("#define {}_INPUT1_OFFSET {}\n".format(prefix, self.input1_zero_point))
            f.write("#define {}_INPUT2_OFFSET {}\n".format(prefix, self.input2_zero_point))
            f.write("#define {}_OUTPUT_MULT {}\n".format(prefix, self.output_mult))
            f.write("#define {}_OUTPUT_SHIFT {}\n".format(prefix, self.output_shift))
            f.write("#define {}_OUTPUT_OFFSET {}\n".format(prefix, self.output_zero_point))
            if self.test_type == 'add':
                f.write("#define {}_LEFT_SHIFT {}\n".format(prefix, self.left_shift))
                f.write("#define {}_INPUT1_SHIFT {}\n".format(prefix, self.input1_shift))
                f.write("#define {}_INPUT2_SHIFT {}\n".format(prefix, self.input2_shift))
                f.write("#define {}_INPUT1_MULT {}\n".format(prefix, self.input1_mult))
                f.write("#define {}_INPUT2_MULT {}\n".format(prefix, self.input2_mult))
def load_all_testdatasets():
    """
    Add all new testdata sets here
    """
    type_of_test = 'conv'
    dataset = 'basic'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=1, out_ch=1, x_in=5,
                                              y_in=8, w_x=2, w_y=4, stride_x=1, stride_y=1, pad=False)
    dataset = 'stride2pad1'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=1, out_ch=1, x_in=7,
                                              y_in=7, w_x=3, w_y=3, stride_x=2, stride_y=2, pad=True)
    dataset = 'kernel1x1'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=4, out_ch=17, x_in=15,
                                              y_in=15, w_x=1, w_y=1, stride_x=1, stride_y=1, pad=False,
                                              out_activation_min=-126, out_activation_max=127)
    dataset = 'conv_3'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=1, x_in=10, y_in=49, w_x=4,
                                              w_y=10, stride_x=1, stride_y=2, pad=True,
                                              out_activation_min=-127, out_activation_max=127)
    dataset = 'conv_1_x_n_1'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=3, x_in=5, y_in=5, w_x=2,
                                              w_y=1, stride_x=2, stride_y=1, pad=False, out_activation_min=-127,
                                              out_activation_max=127, batches=2)
    dataset = 'conv_1_x_n_2'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=1, x_in=11, y_in=11, w_x=11,
                                              w_y=1, stride_x=1, stride_y=1, pad=True,
                                              out_activation_min=-111, out_activation_max=127)
    dataset = 'conv_1_x_n_3'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=1, out_ch=3, x_in=11, y_in=11, w_x=1,
                                              w_y=11, stride_x=1, stride_y=1, pad=True,
                                              out_activation_min=-88, out_activation_max=127)
    dataset = 'conv_2'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=4, x_in=6, y_in=3, w_x=3,
                                              w_y=3, stride_x=1, stride_y=1, pad=True, out_activation_min=-101,
                                              out_activation_max=127)
    dataset = 'conv_4'  # batches > 2
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=3, x_in=5, y_in=5, w_x=2,
                                              w_y=3, stride_x=2, stride_y=2, pad=False,
                                              out_activation_min=-109, out_activation_max=127, batches=3)
    dataset = 'conv_out_activation'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=2, x_in=3, y_in=3, w_x=3,
                                              w_y=3, stride_x=1, stride_y=1, pad=True, out_activation_min=-61,
                                              out_activation_max=107)
    dataset = 'conv_dilation_golden'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=1, batches=2, out_ch=3, x_in=6, y_in=4,
                                              w_x=2, w_y=2, stride_x=1, stride_y=1, pad=True, out_activation_min=-128,
                                              out_activation_max=127, dilation_x=3, dilation_y=2)
    dataset = 'conv_2x2_dilation'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=2, x_in=10, y_in=10, w_x=3,
                                              w_y=3, stride_x=1, stride_y=1, pad=False, out_activation_min=-61,
                                              out_activation_max=107, dilation_x=2, dilation_y=2)
    dataset = 'conv_2x3_dilation'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=2, x_in=3, y_in=3, w_x=3,
                                              w_y=3, stride_x=1, stride_y=1, pad=True, out_activation_min=-61,
                                              out_activation_max=107, dilation_x=2, dilation_y=2)
    dataset = 'conv_3x2_dilation'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=2, x_in=3, y_in=3, w_x=3,
                                              w_y=3, stride_x=1, stride_y=1, pad=True, out_activation_min=-61,
                                              out_activation_max=107, dilation_x=3, dilation_y=2)
    dataset = 'conv_2x2_dilation_5x5_input'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=2, x_in=5, y_in=5, w_x=3,
                                              w_y=3, stride_x=1, stride_y=1, pad=True, out_activation_min=-61,
                                              out_activation_max=107, dilation_x=2, dilation_y=2)
    dataset = 'conv_3x3_dilation_5x5_input'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=2, x_in=9, y_in=11, w_x=3,
                                              w_y=3, stride_x=1, stride_y=1, pad=True, out_activation_min=-61,
                                              out_activation_max=107, dilation_x=2, dilation_y=2)
    dataset = 'int16xint8'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=4, x_in=7,
                                              y_in=8, w_x=2, w_y=4, stride_x=2, stride_y=3, pad=True,
                                              randmin=INT16_MIN, randmax=INT16_MAX, out_activation_min=-13335,
                                              out_activation_max=32767, int16xint8=True)
    dataset = 'requantize_s64'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=2, x_in=3,
                                              y_in=2, w_x=2, w_y=2, stride_x=1, stride_y=1, pad=False,
                                              out_activation_min=INT16_MIN, out_activation_max=INT16_MAX,
                                              int16xint8=True, bias_min=-0x300, bias_max=0x9fff)
    dataset = 'int16xint8_dilation_1'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=2, x_in=32,
                                              y_in=32, w_x=2, w_y=2, stride_x=1, stride_y=1, pad=False,
                                              out_activation_min=INT16_MIN, out_activation_max=INT16_MAX,
                                              int16xint8=True, bias_min=-0x300, dilation_x=2, dilation_y=2)
    dataset = 'int16xint8_dilation_2'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=4, x_in=7,
                                              y_in=8, w_x=2, w_y=4, stride_x=1, stride_y=1, pad=True,
                                              randmin=INT16_MIN, randmax=INT16_MAX, out_activation_min=-13335,
                                              out_activation_max=32767, int16xint8=True, dilation_x=2, dilation_y=2)
    dataset = 'int16xint8_dilation_3'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=4, x_in=7,
                                              y_in=8, w_x=2, w_y=4, stride_x=1, stride_y=1, pad=True,
                                              randmin=INT16_MIN, randmax=INT16_MAX, out_activation_min=-13335,
                                              out_activation_max=32767, int16xint8=True, dilation_x=2)
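
    # Depthwise convolution test sets (reusing ConvSettings), including
    # int16x8, null-bias and dilation variants.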
    type_of_test = 'depthwise_conv'
    dataset = 'depthwise_2'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=9, x_in=6, y_in=5, w_x=3,
                                              w_y=4, stride_x=2, stride_y=2, pad=True,
                                              out_activation_min=-73, out_activation_max=127)
    dataset = 'depthwise_kernel_3x3'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=5, out_ch=5, x_in=4, y_in=5, w_x=3,
                                              w_y=3, stride_x=2, stride_y=2, pad=True,
                                              out_activation_min=-104, out_activation_max=127)
    dataset = 'depthwise_eq_in_out_ch'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=6, out_ch=6, x_in=4, y_in=5, w_x=2,
                                              w_y=3, stride_x=1, stride_y=1, pad=True,
                                              out_activation_min=-86, out_activation_max=127)
    dataset = 'depthwise_out_activation'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=3, x_in=6, y_in=5, w_x=3,
                                              w_y=4, pad=False, out_activation_min=-45,
                                              out_activation_max=103)
    dataset = 'depthwise_mult_batches'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=3, x_in=3, y_in=5, w_x=2,
                                              w_y=4, stride_x=2, stride_y=2, pad=True,
                                              batches=2)
    dataset = 'depthwise_null_bias_0'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=2, x_in=4, y_in=5, w_x=2,
                                              w_y=2, stride_x=1, stride_y=1, pad=True, generate_bias=False,
                                              batches=1)
    dataset = 'depthwise_null_bias_1'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=8, x_in=4, y_in=5, w_x=2,
                                              w_y=2, stride_x=1, stride_y=1, pad=True, generate_bias=False,
                                              batches=1)
    dataset = 'depthwise_dilation'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=9, x_in=6, y_in=5, w_x=3,
                                              w_y=4, stride_x=2, stride_y=2, pad=True,
                                              out_activation_min=-70, out_activation_max=127, dilation_x=2,
                                              dilation_y=3)
    dataset = 'dw_int16xint8'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=4, out_ch=8, x_in=9, y_in=5, w_x=3,
                                              w_y=4, stride_x=3, stride_y=2, pad=True, randmin=INT16_MIN,
                                              randmax=INT16_MAX, out_activation_min=-21111,
                                              out_activation_max=32767, int16xint8=True)
    dataset = 'dw_int16xint8_dilation'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=4, out_ch=8, x_in=9, y_in=5, w_x=4,
                                              w_y=4, stride_x=1, stride_y=1, pad=True, randmin=INT16_MIN,
                                              randmax=INT16_MAX, out_activation_min=-32700, dilation_x=3, dilation_y=2,
                                              out_activation_max=32767, int16xint8=True)
    dataset = 'dw_int16xint8_mult4'
    ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=8, x_in=4, y_in=5, w_x=3,
                                              w_y=4, stride_x=3, stride_y=2, pad=False, randmin=INT16_MIN,
                                              randmax=INT16_MAX, out_activation_min=-32767,
                                              out_activation_max=32767, int16xint8=True)
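
    # Fully connected test sets, including int16x8 and null-bias variants.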
    type_of_test = 'fully_connected'
    dataset = 'fully_connected'
    ALL_TESTDATA_SETS[dataset] = FullyConnectedSettings(dataset, type_of_test, args, in_ch=10, out_ch=6, x_in=2, y_in=1,
                                                        batches=3)
    dataset = 'fully_connected_mve_0'
    ALL_TESTDATA_SETS[dataset] = FullyConnectedSettings(dataset, type_of_test, args, in_ch=16, out_ch=9, x_in=1, y_in=1,
                                                        batches=1)
    dataset = 'fully_connected_mve_1'
    ALL_TESTDATA_SETS[dataset] = FullyConnectedSettings(dataset, type_of_test, args, in_ch=20, out_ch=4, x_in=1, y_in=1,
                                                        batches=1)
    dataset = 'fully_connected_null_bias_0'
    ALL_TESTDATA_SETS[dataset] = FullyConnectedSettings(dataset, type_of_test, args, in_ch=33, out_ch=5,
                                                        batches=2, generate_bias=False)
    dataset = 'fully_connected_out_activation'
    ALL_TESTDATA_SETS[dataset] = FullyConnectedSettings(dataset, type_of_test, args, in_ch=10, out_ch=4,
                                                        out_activation_min=-70, out_activation_max=100)
    dataset = 'fully_connected_int16'
    ALL_TESTDATA_SETS[dataset] = FullyConnectedSettings(dataset, type_of_test, args, in_ch=7, out_ch=11, x_in=3, y_in=3,
                                                        batches=2, randmin=INT16_MIN, randmax=INT16_MAX,
                                                        out_activation_min=-9999, out_activation_max=32767,
                                                        int16xint8=True)
    dataset = 'fully_connected_int16_big'
    ALL_TESTDATA_SETS[dataset] = FullyConnectedSettings(dataset, type_of_test, args, in_ch=7, out_ch=11, x_in=10,
                                                        y_in=10, batches=3, out_activation_min=-1444,
                                                        out_activation_max=32767, int16xint8=True)
    dataset = 'fc_int16_slow'
    ALL_TESTDATA_SETS[dataset] = FullyConnectedSettings(dataset, type_of_test, args, in_ch=7, out_ch=11, x_in=10,
                                                        y_in=8, batches=3, randmin=(INT16_MAX - 100),
                                                        randmax=INT16_MAX, int16xint8=True)
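
    # Average pooling test sets, including relu6 and int16 variants.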
    type_of_test = 'avgpool'
    dataset = 'avgpooling'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=8, x_in=22, y_in=12, stride_x=9,
                                                 stride_y=5, w_x=6, w_y=5, pad=True)
    dataset = 'avgpooling_1'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=3, x_in=9, y_in=5, stride_x=1,
                                                 stride_y=2, w_x=9, w_y=5, pad=False)
    dataset = 'avgpooling_2'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=5, x_in=12, y_in=1, stride_x=1,
                                                 stride_y=2, w_x=3, w_y=1, pad=True)
    dataset = 'avgpooling_3'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=2, x_in=9, y_in=1, stride_x=2,
                                                 stride_y=1, w_x=1, w_y=1, pad=False)
    dataset = 'avgpooling_4'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=2, x_in=1, y_in=20, stride_x=1,
                                                 stride_y=3, w_x=1, w_y=3, pad=True)
    dataset = 'avgpooling_5'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=1, x_in=3, y_in=3,
                                                 stride_x=1, stride_y=1, w_x=1, w_y=3, pad=True, relu6=True)
    dataset = 'avgpooling_int16'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=2, x_in=6, y_in=4,
                                                 stride_x=2, stride_y=1, w_x=2, w_y=3, pad=True,
                                                 randmin=INT16_MIN, randmax=INT16_MAX, int16xint8=True)
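
    # Max pooling test sets, including relu6 and int16 variants.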
    type_of_test = 'maxpool'
    dataset = 'maxpooling'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=8, x_in=22, y_in=12, stride_x=9,
                                                 stride_y=5, w_x=6, w_y=5, pad=True)
    dataset = 'maxpooling_1'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=3, x_in=9, y_in=5, stride_x=1,
                                                 stride_y=2, w_x=9, w_y=5, pad=False)
    dataset = 'maxpooling_2'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=5, x_in=12, y_in=1, stride_x=1,
                                                 stride_y=2, w_x=3, w_y=1, pad=True)
    dataset = 'maxpooling_3'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=2, x_in=9, y_in=1, stride_x=2,
                                                 stride_y=1, w_x=1, w_y=1, pad=False)
    dataset = 'maxpooling_4'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=2, x_in=1, y_in=20, stride_x=1,
                                                 stride_y=3, w_x=1, w_y=3, pad=True)
    dataset = 'maxpooling_5'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=20, x_in=1, y_in=1, stride_x=1,
                                                 stride_y=1, w_x=1, w_y=1, pad=True)
    dataset = 'maxpooling_6'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=17, x_in=1, y_in=5, stride_x=1,
                                                 stride_y=3, w_x=3, w_y=4, pad=True)
    dataset = 'maxpooling_7'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=1, x_in=4, y_in=2, stride_x=2,
                                                 stride_y=2, w_x=2, w_y=2, pad=False, relu6=True)
    dataset = 'maxpool_int16'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=2, x_in=4, y_in=3, stride_x=2,
                                                 stride_y=2, w_x=2, w_y=2, pad=False, randmin=INT16_MIN,
                                                 randmax=INT16_MAX, int16xint8=True)
    dataset = 'maxpool_int16_1'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=2, x_in=4, y_in=5, stride_x=2,
                                                 stride_y=1, w_x=3, w_y=3, pad=True, randmin=INT16_MIN,
                                                 randmax=INT16_MAX, out_activation_min=-30000,
                                                 out_activation_max=30000, int16xint8=True)
    dataset = 'maxpool_int16_2'
    ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=3, x_in=7, y_in=7, stride_x=1,
                                                 stride_y=1, w_x=3, w_y=3, pad=False, randmin=INT16_MIN,
                                                 randmax=INT16_MAX, out_activation_min=-30000,
                                                 out_activation_max=30000, int16xint8=True)
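
    # Softmax test sets: s8, s16 and s8-input/s16-output.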
    type_of_test = 'softmax'
    dataset = 'softmax'
    ALL_TESTDATA_SETS[dataset] = SoftmaxSettings(dataset, type_of_test, args, x_in=5, y_in=2)
    dataset = 'softmax_s16'
    ALL_TESTDATA_SETS[dataset] = SoftmaxSettings(dataset, type_of_test, args, x_in=10, y_in=3, int16xint8=True,
                                                 randmin=INT16_MIN, randmax=INT16_MAX)
    dataset = 'softmax_s8_s16'
    ALL_TESTDATA_SETS[dataset] = SoftmaxSettings(dataset, type_of_test, args, x_in=12, y_in=2, inInt8outInt16=True)
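
    # SVDF test sets with varying rank, memory size and bias generation.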
    type_of_test = 'svdf'
    dataset = 'svdf'
    ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args, batches=2, number_inputs=2, rank=8,
                                              memory_size=8, input_size=3, number_units=3)
    dataset = 'svdf_1'
    ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args, batches=3, number_inputs=2, rank=1,
                                              memory_size=2, input_size=7, number_units=5)
    dataset = 'svdf_2'
    ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args, batches=3, number_inputs=2, rank=2,
                                              memory_size=2, input_size=7, number_units=5, generate_bias=False)
    dataset = 'svdf_3'
    ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args, batches=1, number_inputs=2, rank=1,
                                              memory_size=2, input_size=20, number_units=12, generate_bias=False)
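
    # Elementwise add and mul test sets (s8 and s16).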
    type_of_test = 'add'
    dataset = 'add'
    ALL_TESTDATA_SETS[dataset] = AddMulSettings(dataset, type_of_test, args, channels=8, x_in=4, y_in=4,
                                                randmin=INT8_MIN, randmax=INT8_MAX)
    dataset = 'add_s16'
    ALL_TESTDATA_SETS[dataset] = AddMulSettings(dataset, type_of_test, args, channels=8, x_in=4, y_in=4,
                                                randmin=INT16_MIN, randmax=INT16_MAX, out_activation_min=INT16_MIN,
                                                out_activation_max=INT16_MAX, int16xint8=True)

    type_of_test = 'mul'
    dataset = 'mul'
    ALL_TESTDATA_SETS[dataset] = AddMulSettings(dataset, type_of_test, args, channels=8, x_in=4, y_in=5,
                                                randmin=INT8_MIN, randmax=INT8_MAX)
    dataset = 'mul_s16'
    ALL_TESTDATA_SETS[dataset] = AddMulSettings(dataset, type_of_test, args, channels=8, x_in=5, y_in=4,
                                                randmin=INT16_MIN, randmax=INT16_MAX, out_activation_min=INT16_MIN,
                                                out_activation_max=INT16_MAX, int16xint8=True)
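

# Command-line entry point: generate every registered test set (optionally
# filtered by test type with -t) or a single named test set.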
if __name__ == '__main__':
    if version.parse(tf.__version__) < REQUIRED_MINIMUM_TENSORFLOW_VERSION:
        print("Unsupported TensorFlow version:", version.parse(tf.__version__))
        sys.exit(0)

    args = parse_args()

    testdataset = args.dataset
    test_type = args.testtype

    load_all_testdatasets()

    if args.run_all_testsets:
        for testset_name, testset_generator in ALL_TESTDATA_SETS.items():
            if test_type and testset_generator.test_type != test_type:
                continue
            print("Generating testset {}..".format(testset_name))
            testset_generator.generate_data()
            print()

        # Check that all testsets have been loaded.
        found_test_data_sets = []
        directory = 'TestCases/TestData'
        for dir_name in next(os.walk(directory))[1]:
            found_test_data_sets.append(dir_name)
        for testset_name in found_test_data_sets:
            if testset_name not in ALL_TESTDATA_SETS:
                print("WARNING: Testset {} in {} was not loaded".format(testset_name, directory))
    else:
        try:
            if not testdataset:
                raise RuntimeError("Please select testdataset or use --run_all_testsets")
            generator = ALL_TESTDATA_SETS[testdataset]
        except KeyError:
            print("WARNING: testset {} not in testset list".format(testdataset))
            if args.testtype == 'conv' or args.testtype == 'depthwise_conv':
                generator = ConvSettings(testdataset, test_type, args)
            elif args.testtype == 'fully_connected':
                generator = FullyConnectedSettings(testdataset, test_type, args)
            elif args.testtype == 'avgpool' or args.testtype == 'maxpool':
                generator = PoolingSettings(testdataset, test_type, args)
            elif args.testtype == 'softmax':
                generator = SoftmaxSettings(testdataset, test_type, args)
            elif args.testtype == 'svdf':
                generator = SVDFSettings(testdataset, test_type, args)
            elif args.testtype == 'add' or args.testtype == 'mul':
                generator = AddMulSettings(testdataset, test_type, args)
            else:
                raise RuntimeError("Please specify type of test with -t")

        generator.generate_data()