// k210_kernels.h
  1. /* Copyright 2019-2020 Canaan Inc.
  2. *
  3. * Licensed under the Apache License, Version 2.0 (the "License");
  4. * you may not use this file except in compliance with the License.
  5. * You may obtain a copy of the License at
  6. *
  7. * http://www.apache.org/licenses/LICENSE-2.0
  8. *
  9. * Unless required by applicable law or agreed to in writing, software
  10. * distributed under the License is distributed on an "AS IS" BASIS,
  11. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. * See the License for the specific language governing permissions and
  13. * limitations under the License.
  14. */
  15. #pragma once
  16. #include "../kernel_utils.h"
  17. #include <runtime/k210/k210_runtime_op_utility.h>
  18. #include <runtime/runtime_op_utility.h>
  19. #include <xtl/xspan.hpp>
  20. namespace nncase
  21. {
  22. namespace kernels
  23. {
  24. namespace k210
  25. {
  26. namespace details
  27. {
  28. template <class T>
  29. struct pool_partial_type;
  30. template <>
  31. struct pool_partial_type<uint8_t>
  32. {
  33. using type = uint32_t;
  34. };
  35. template <>
  36. struct pool_partial_type<float>
  37. {
  38. using type = float;
  39. };
  40. template <class T>
  41. using pool_partial_type_t = typename pool_partial_type<T>::type;
  42. }
  43. inline void kpu_upload(const uint8_t *src, uint8_t *dest, const runtime_shape_t &in_shape)
  44. {
  45. using namespace runtime::k210;
  46. if (in_shape[3] % 64 == 0)
  47. {
  48. std::copy(src, src + kernels::details::compute_size(in_shape), dest);
  49. }
  50. else
  51. {
  52. auto layout = get_kpu_row_layout(in_shape[3]);
  53. auto fmap_size = get_kpu_bytes(in_shape[3], in_shape[2], in_shape[1]);
  54. for (int32_t batch = 0; batch < in_shape[0]; batch++)
  55. {
  56. auto batch_origin = dest + (size_t)batch * fmap_size;
  57. for (int32_t oc = 0; oc < in_shape[1]; oc++)
  58. {
  59. auto channel_origin = batch_origin + (size_t)oc / layout.groups * layout.row_len * in_shape[2] * 64 + (size_t)oc % layout.groups * layout.row_pitch;
  60. for (int32_t y = 0; y < in_shape[2]; y++)
  61. {
  62. auto y_origin = channel_origin + (size_t)y * layout.row_len * 64;
  63. std::copy(src, src + in_shape[3], y_origin);
  64. src += in_shape[3];
  65. }
  66. }
  67. }
  68. }
  69. }
  70. inline void kpu_download(const uint8_t *src, uint8_t *dest, const runtime_shape_t &in_shape)
  71. {
  72. using namespace runtime::k210;
  73. if (in_shape[3] % 64 == 0)
  74. {
  75. std::copy(src, src + kernels::details::compute_size(in_shape), dest);
  76. }
  77. else
  78. {
  79. auto layout = get_kpu_row_layout(in_shape[3]);
  80. auto fmap_size = get_kpu_bytes(in_shape[3], in_shape[2], in_shape[1]);
  81. for (int32_t batch = 0; batch < in_shape[0]; batch++)
  82. {
  83. auto batch_origin = src + (size_t)batch * fmap_size;
  84. for (int32_t oc = 0; oc < in_shape[1]; oc++)
  85. {
  86. auto channel_origin = batch_origin + (size_t)oc / layout.groups * layout.row_len * in_shape[2] * 64 + (size_t)oc % layout.groups * layout.row_pitch;
  87. for (int32_t y = 0; y < in_shape[2]; y++)
  88. {
  89. auto y_origin = channel_origin + (size_t)y * layout.row_len * 64;
  90. for (int32_t x = 0; x < in_shape[3]; x++)
  91. *dest++ = y_origin[x];
  92. }
  93. }
  94. }
  95. }
  96. }
/// Software emulation of the K210 KPU quantized conv2d pipeline:
/// a uint8 convolution accumulated into int64, followed by a per-channel
/// batchnorm (mul/shift/add) and a piecewise-linear activation table,
/// clamped back to uint8.
///
/// `workspace` must hold out_channels * in_h * in_w int64 values (output
/// spatial size equals input size: stride 1 with `same` padding for
/// FilterSize 3, no padding for FilterSize 1).
/// arg_x/shift_x, arg_w/shift_w and arg_add are quantization-compensation
/// terms folded into the accumulator; their exact derivation lives in the
/// KPU quantizer, not here.
template <bool IsDepthwise, int32_t FilterSize>
void kpu_conv2d(const uint8_t *input, int64_t *workspace, uint8_t *output, const uint8_t *weights, int32_t in_h, int32_t in_w, int32_t in_channels, int32_t out_channels, uint8_t pad_value, int32_t arg_x,
    int32_t shift_x, int32_t arg_w, int32_t shift_w, int64_t arg_add, const runtime::k210::kpu_batchnorm_segment *batchnorm, const runtime::k210::kpu_activation_table_t &activation)
{
    const auto channel_size = size_t(in_h) * in_w;
    // conv: raw uint8 x uint8 MACs into the int64 workspace
    {
        auto out_it = workspace;
        // FilterSize is 1 or 3 on this hardware; 3x3 uses 1-pixel `same` padding.
        const auto pad = FilterSize == 1 ? 0 : 1;
        // Depthwise: one group per channel, 1 in / 1 out channel per group.
        const auto groups = IsDepthwise ? out_channels : 1;
        const auto g_ic = IsDepthwise ? 1 : in_channels / groups;
        const auto g_oc = IsDepthwise ? 1 : out_channels;
        for (int32_t og = 0; og < groups; og++)
        {
            const uint8_t *w_group_p = weights + (size_t)og * g_oc * g_ic * FilterSize * FilterSize;
            for (int32_t oc = 0; oc < g_oc; oc++)
            {
                const uint8_t *w_oc_p = w_group_p + (size_t)oc * g_ic * FilterSize * FilterSize;
                for (int32_t oy = 0; oy < in_h; oy++)
                {
                    for (int32_t ox = 0; ox < in_w; ox++)
                    {
                        const int32_t in_y_origin = oy - pad;
                        const int32_t in_x_origin = ox - pad;
                        int64_t value = 0;
                        // Running sums of activations and weights, consumed by the
                        // zero-point compensation terms below.
                        int64_t sum_x = 0, sum_w = 0;
                        for (int32_t ic = 0; ic < g_ic; ic++)
                        {
                            const uint8_t *in_c_p = input + ((size_t)og * g_ic + ic) * in_h * in_w;
                            const uint8_t *w_ic_p = w_oc_p + (size_t)ic * FilterSize * FilterSize;
                            for (int32_t ky = 0; ky < FilterSize; ky++)
                            {
                                for (int32_t kx = 0; kx < FilterSize; kx++)
                                {
                                    const int32_t in_y = in_y_origin + ky;
                                    const int32_t in_x = in_x_origin + kx;
                                    uint8_t x;
                                    // Out-of-bounds taps read the configured pad value
                                    // (the KPU pads with a register value, not zero).
                                    if (in_x < 0 || in_x >= in_w
                                        || in_y < 0 || in_y >= in_h)
                                        x = pad_value;
                                    else
                                        x = in_c_p[in_y * in_w + in_x];
                                    uint8_t w = w_ic_p[ky * FilterSize + kx];
                                    sum_x += x;
                                    sum_w += w;
                                    value += (int32_t)x * w;
                                }
                            }
                        }
                        // Accumulator plus the three compensation terms. Note the
                        // arithmetic shifts apply to possibly negative products —
                        // this deliberately mirrors the hardware's behavior.
                        *out_it++ = value + (arg_x * sum_x >> shift_x) + (arg_w * sum_w >> shift_w) + arg_add * g_ic;
                    }
                }
            }
        }
    }
    // bn act: per-channel scale/shift/bias, then piecewise-linear activation
    {
        auto src_it = workspace;
        auto out_it = output;
        for (int32_t oc = 0; oc < out_channels; oc++)
        {
            const auto &bn = batchnorm[oc];
            for (size_t i = 0; i < channel_size; i++)
            {
                auto value = (*src_it++ * bn.mul >> bn.shift) + bn.add;
                // Pick the last segment whose start_x the value exceeds.
                // NOTE(review): assumes the table always contains a sentinel
                // segment with start_x below any reachable value; otherwise
                // this dereferences rend() (UB) — confirm against the table
                // builder.
                auto &seg = *std::find_if(activation.rbegin(), activation.rend(), [value](const runtime::k210::kpu_activation_segment &seg) {
                    return value > seg.start_x;
                });
                // Linear segment with rounding-aware shift, then saturate to u8.
                value = runtime::carry_shift<int64_t, true>((value - seg.start_x) * seg.mul, seg.shift) + seg.add;
                *out_it++ = (uint8_t)std::clamp(value, int64_t(0), int64_t(255));
            }
        }
    }
}
  171. template <class T>
  172. inline void kpu_pool2d(const T *input, T *output, int32_t in_h, int32_t in_w, int32_t in_channels, runtime::k210::kpu_pool_type_t pool_type)
  173. {
  174. using namespace runtime::k210;
  175. using partial_t = details::pool_partial_type_t<T>;
  176. const auto filter = get_kpu_filter_size(pool_type);
  177. const auto stride = get_kpu_filter_stride(pool_type);
  178. const auto out_h = get_kpu_pool_output_size(in_h, pool_type);
  179. const auto out_w = get_kpu_pool_output_size(in_w, pool_type);
  180. for (int32_t oc = 0; oc < in_channels; oc++)
  181. {
  182. auto in_c_p = input + (size_t)oc * in_h * in_w;
  183. for (int32_t oy = 0; oy < out_h; oy++)
  184. {
  185. for (int32_t ox = 0; ox < out_w; ox++)
  186. {
  187. const int32_t in_y_origin = oy * stride;
  188. const int32_t in_x_origin = ox * stride;
  189. partial_t value = 0;
  190. switch (pool_type)
  191. {
  192. case kpu_pool_bypass:
  193. {
  194. const int32_t in_y = in_y_origin;
  195. const int32_t in_x = in_x_origin;
  196. value = in_c_p[in_y * in_w + in_x];
  197. break;
  198. }
  199. case kpu_pool_max_2_s2:
  200. case kpu_pool_max_2_s1:
  201. case kpu_pool_max_4_s4:
  202. {
  203. value = std::numeric_limits<T>::lowest();
  204. for (int32_t ky = 0; ky < filter; ky++)
  205. {
  206. for (int32_t kx = 0; kx < filter; kx++)
  207. {
  208. const int32_t in_y = in_y_origin + ky;
  209. const int32_t in_x = in_x_origin + kx;
  210. partial_t in_v;
  211. if (in_y < 0 || in_y >= in_h || in_x < 0 || in_x >= in_w)
  212. in_v = std::numeric_limits<T>::lowest();
  213. else
  214. in_v = in_c_p[in_y * in_w + in_x];
  215. value = std::max(value, in_v);
  216. }
  217. }
  218. break;
  219. }
  220. case kpu_pool_mean_2_s2:
  221. case kpu_pool_mean_2_s1:
  222. case kpu_pool_mean_4_s4:
  223. {
  224. for (int32_t ky = 0; ky < filter; ky++)
  225. {
  226. for (int32_t kx = 0; kx < filter; kx++)
  227. {
  228. const int32_t in_y = std::clamp(in_y_origin + ky, 0, in_h - 1);
  229. const int32_t in_x = std::clamp(in_x_origin + kx, 0, in_w - 1);
  230. const T in_v = in_c_p[in_y * in_w + in_x];
  231. value += in_v;
  232. }
  233. }
  234. value /= filter * filter;
  235. break;
  236. }
  237. case kpu_pool_left_top_2_s2:
  238. case kpu_pool_left_top_4_s4:
  239. case kpu_pool_right_top_2_s2:
  240. {
  241. auto k_off = get_kpu_select_pool_offset(pool_type);
  242. const int32_t in_y = in_y_origin + k_off[0];
  243. const int32_t in_x = in_x_origin + k_off[1];
  244. partial_t in_v;
  245. if (in_y < 0 || in_y >= in_h || in_x < 0 || in_x >= in_w)
  246. in_v = 0;
  247. else
  248. in_v = in_c_p[in_y * in_w + in_x];
  249. value = in_v;
  250. break;
  251. }
  252. }
  253. *output++ = (T)value;
  254. }
  255. }
  256. }
  257. }
  258. template <bool IsDepthwise, int32_t FilterSize>
  259. void fake_kpu_conv2d(const float *input, float *output, const float *weights, const float *bias, int32_t in_h, int32_t in_w, int32_t in_channels, int32_t out_channels, value_range<float> fused_activation)
  260. {
  261. const auto channel_size = size_t(in_h) * in_w;
  262. const auto pad = FilterSize == 1 ? 0 : 1;
  263. const auto groups = IsDepthwise ? out_channels : 1;
  264. const auto g_ic = IsDepthwise ? 1 : in_channels / groups;
  265. const auto g_oc = IsDepthwise ? 1 : out_channels;
  266. for (int32_t og = 0; og < groups; og++)
  267. {
  268. const auto *w_group_p = weights + (size_t)og * g_oc * g_ic * FilterSize * FilterSize;
  269. for (int32_t oc = 0; oc < g_oc; oc++)
  270. {
  271. const auto *w_oc_p = w_group_p + (size_t)oc * g_ic * FilterSize * FilterSize;
  272. for (int32_t oy = 0; oy < in_h; oy++)
  273. {
  274. for (int32_t ox = 0; ox < in_w; ox++)
  275. {
  276. const int32_t in_y_origin = oy - pad;
  277. const int32_t in_x_origin = ox - pad;
  278. const int32_t filter_y_start = std::max(0, -in_y_origin);
  279. const int32_t filter_y_end = std::min(FilterSize, in_h - in_y_origin);
  280. const int32_t filter_x_start = std::max(0, -in_x_origin);
  281. const int32_t filter_x_end = std::min(FilterSize, in_w - in_x_origin);
  282. float value = bias[og * g_oc + oc];
  283. for (int32_t ic = 0; ic < g_ic; ic++)
  284. {
  285. const auto *in_c_p = input + ((size_t)og * g_ic + ic) * in_h * in_w;
  286. const auto *w_ic_p = w_oc_p + (size_t)ic * FilterSize * FilterSize;
  287. for (int32_t ky = filter_y_start; ky < filter_y_end; ky++)
  288. {
  289. for (int32_t kx = filter_x_start; kx < filter_x_end; kx++)
  290. {
  291. const int32_t in_y = in_y_origin + ky;
  292. const int32_t in_x = in_x_origin + kx;
  293. const auto in_v = in_c_p[in_y * in_w + in_x];
  294. const auto w = w_ic_p[ky * FilterSize + kx];
  295. value += in_v * w;
  296. }
  297. }
  298. }
  299. *output++ = kernels::details::apply_activation(value, fused_activation);
  300. }
  301. }
  302. }
  303. }
  304. }
  305. }
  306. }
  307. }