ce_common.c

/* Copyright (c) 2019-2025 Allwinner Technology Co., Ltd. ALL rights reserved.
 * Allwinner is a trademark of Allwinner Technology Co., Ltd., registered in
 * the People's Republic of China and other countries.
 * All Allwinner Technology Co., Ltd. trademarks are used with permission.
 * DISCLAIMER
 * THIRD PARTY LICENCES MAY BE REQUIRED TO IMPLEMENT THE SOLUTION/PRODUCT.
 * IF YOU NEED TO INTEGRATE THIRD PARTY'S TECHNOLOGY (SONY, DTS, DOLBY, AVS OR MPEGLA, ETC.)
 * IN ALLWINNER'S SDK OR PRODUCTS, YOU SHALL BE SOLELY RESPONSIBLE TO OBTAIN
 * ALL APPROPRIATELY REQUIRED THIRD PARTY LICENCES.
 * ALLWINNER SHALL HAVE NO WARRANTY, INDEMNITY OR OTHER OBLIGATIONS WITH RESPECT TO MATTERS
 * COVERED UNDER ANY REQUIRED THIRD PARTY LICENSE.
 * YOU ARE SOLELY RESPONSIBLE FOR YOUR USAGE OF THIRD PARTY'S TECHNOLOGY.
 * THIS SOFTWARE IS PROVIDED BY ALLWINNER "AS IS" AND TO THE MAXIMUM EXTENT
 * PERMITTED BY LAW, ALLWINNER EXPRESSLY DISCLAIMS ALL WARRANTIES OF ANY KIND,
 * WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING WITHOUT LIMITATION REGARDING
 * THE TITLE, NON-INFRINGEMENT, ACCURACY, CONDITION, COMPLETENESS, PERFORMANCE
 * OR MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 * IN NO EVENT SHALL ALLWINNER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS, OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>
#include <string.h>
#include <interrupt.h>
#include <hal_cache.h>
#include <hal_mem.h>
#include <hal_osal.h>
#include <hal_sem.h>
#include <hal_timer.h>
#include <sunxi_hal_ce.h>
#include "ce_common.h"
#include "hal_ce.h"
#include "ce_reg.h"
#include "platform_ce.h"

//#define CE_NO_IRQ
#define CE_WAIT_TIME (50000)

#ifndef CE_NO_IRQ
static hal_sem_t ce_sem;
#endif
//static rt_wqueue_t ce_wqueue;

void ce_print_hex(char *_data, int _len, void *_addr)
{
    int i, j;
    char last[128] = {0};

    CE_DBG("---------------- The valid len = %d ----------------\n", _len);
    for (i = 0; i < _len / 8; i++) {
        CE_DBG("%p: %02X %02X %02X %02X %02X %02X %02X %02X\n",
               i * 8 + _addr,
               _data[i*8+0], _data[i*8+1], _data[i*8+2], _data[i*8+3],
               _data[i*8+4], _data[i*8+5], _data[i*8+6], _data[i*8+7]);
    }
    for (j = 0; j < _len % 8; j++) {
        if (j == 0)
            snprintf(last, 12, "%p:", i * 8 + _addr);
        snprintf(last + 11 + j * 3, 4, " %02X", _data[i*8 + j]);
        if (j == _len % 8 - 1)
            CE_DBG("%s\n", last);
    }
    CE_DBG("----------------------------------------------------\n");
}

void ce_print_task_info(ce_task_desc_t *task)
{
    CE_DBG("-----------task_info------\n");
    CE_DBG("task = 0x%lx\n", (uint32_t)task);
    CE_DBG("task->comm_ctl = 0x%lx\n", task->comm_ctl);
    CE_DBG("task->sym_ctl = 0x%lx\n", task->sym_ctl);
    CE_DBG("task->asym_ctl = 0x%lx\n", task->asym_ctl);
    CE_DBG("task->chan_id = 0x%lx\n", task->chan_id);
    CE_DBG("task->ctr_addr = 0x%lx\n", task->ctr_addr);
    CE_DBG("task->data_len = 0x%lx\n", task->data_len);
    CE_DBG("task->iv_addr = 0x%lx\n", task->iv_addr);
    CE_DBG("task->key_addr = 0x%lx\n", task->key_addr);
    CE_DBG("task->src[0].addr = 0x%lx\n", task->src[0].addr);
    CE_DBG("task->src[0].len = 0x%lx\n", task->src[0].len);
    CE_DBG("task->dst[0].addr = 0x%lx\n", task->dst[0].addr);
    CE_DBG("task->dst[0].len = 0x%lx\n", task->dst[0].len);
}

#ifndef CE_NO_IRQ
static hal_irqreturn_t ce_irq_handler(void *data)
{
    int i;
    int ret;
    int pending = 0;

    pending = hal_ce_pending_get();
    for (i = 0; i < CE_FLOW_NUM; i++) {
        if (pending & (CE_CHAN_PENDING << i)) {
            CE_DBG("Chan %d completed. pending: %#x\n", i, pending);
            hal_ce_pending_clear(i);
            ret = hal_sem_post(ce_sem);
            if (ret != 0) {
                CE_ERR("hal_sem_post FAIL\n");
            }
#if 0
            rt_wqueue_wakeup(&ce_wqueue, NULL);
#endif
        }
    }
    return HAL_IRQ_OK;
}

static int ce_irq_request(void)
{
#ifdef CONFIG_ARCH_SUN20IW2P1
    uint32_t irqn = SUNXI_IRQ_CE_NS;

    if (hal_request_irq(irqn, ce_irq_handler, "ce", NULL) < 0) {
        CE_ERR("request irq error\n");
        return -1;
    }
    hal_enable_irq(irqn);
#else
    uint32_t irqn = SUNXI_IRQ_CE;

    if (request_irq(irqn, ce_irq_handler, 0, "crypto", NULL) < 0) {
        CE_ERR("Cannot request IRQ\n");
        return -1;
    }
    enable_irq(irqn);
#endif
    return 0;
}
#endif

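/*
 * sunxi_ce_init()/sunxi_ce_uninit(): bring the Crypto Engine up and down.
 * Init enables the CE clock, registers the completion interrupt (unless
 * CE_NO_IRQ is defined, in which case the driver polls) and creates the
 * semaphore that the IRQ handler posts when a channel finishes.
 */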
int sunxi_ce_init(void)
{
    int ret = 0;

    hal_ce_clock_init();
#ifndef CE_NO_IRQ
    ret = ce_irq_request();
    if (ret < 0) {
        return -1;
    }
    ce_sem = hal_sem_create(0);
    if (ce_sem == NULL) {
        CE_ERR("hal_sem_create fail\n");
        return -1;
    }
#endif
#if 0
    rt_wqueue_init(&ce_wqueue);
#endif
    return 0;
}

int sunxi_ce_uninit(void)
{
#ifndef CE_NO_IRQ
    if (ce_sem)
        hal_sem_delete(ce_sem);
#ifdef CONFIG_ARCH_SUN20IW2P1
    hal_free_irq(SUNXI_IRQ_CE_NS);
#else
    hal_free_irq(SUNXI_IRQ_CE);
#endif
#endif
    return 0;
}

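/*
 * ce_task_desc_init()/ce_aes_config(): allocate and prepare a task
 * descriptor. ce_aes_config() sets the common AES parameters (direction,
 * method, block mode, key) on flow 1 and cleans the key buffer from the
 * D-cache so the engine's DMA reads the up-to-date key.
 */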
static void ce_task_desc_init(ce_task_desc_t *task, uint32_t flow)
{
    memset((void *)task, 0x0, sizeof(ce_task_desc_t));
    task->chan_id = flow;
    hal_ce_task_enable(task);
}

static ce_task_desc_t *ce_aes_config(uint32_t dir, uint32_t type, uint32_t mode,
                                     uint8_t *key_buf, uint32_t key_length)
{
    ce_task_desc_t *task = NULL;
    uint32_t flow = 1;

    task = (ce_task_desc_t *)hal_malloc(sizeof(ce_task_desc_t));
    if (task == NULL) {
        CE_ERR("hal_malloc fail\n");
        return NULL;
    }
    ce_task_desc_init(task, flow);
    hal_ce_method_set(dir, type, task);
    hal_ce_aes_mode_set(mode, task);
    hal_ce_key_set(key_buf, key_length, task);
    hal_dcache_clean((unsigned long)key_buf, key_length);
    return task;
}

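/*
 * ce_aes_sw_padding(): software PKCS#7-style padding. If the source is not
 * a multiple of AES_BLOCK_SIZE, the tail is copied into ctx->padding and
 * filled up to one full block with the padding count; the return value is
 * the block-aligned part of the source length.
 */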
static uint32_t ce_aes_sw_padding(crypto_aes_req_ctx_t *ctx)
{
    uint32_t blk_num = 0;
    uint32_t padding_size = 0;
    uint32_t last_blk_size = 0;

    blk_num = ctx->src_length / AES_BLOCK_SIZE;
    last_blk_size = ctx->src_length % AES_BLOCK_SIZE;
    if (last_blk_size) {
        padding_size = AES_BLOCK_SIZE - last_blk_size;
        memcpy(ctx->padding, ctx->src_buffer + blk_num * AES_BLOCK_SIZE, last_blk_size);
        memset(ctx->padding + last_blk_size, padding_size, padding_size);
        ctx->padding_len = AES_BLOCK_SIZE;
    } else {
        ctx->padding_len = 0;
    }
    return blk_num * AES_BLOCK_SIZE;
}

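/*
 * ce_aes_start(): build one CE task descriptor for an AES request, clean
 * the source/key/IV/destination buffers from the D-cache, start the engine
 * and wait for completion (semaphore posted by the IRQ handler, or polling
 * when CE_NO_IRQ is set), then invalidate the destination buffer and check
 * the engine error status.
 */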
static int ce_aes_start(crypto_aes_req_ctx_t *req_ctx)
{
    int ret = 0;
    ce_task_desc_t *task;
    uint32_t src_len = 0;
    uint32_t src_word_len = 0;

    src_len = ce_aes_sw_padding(req_ctx);
    src_word_len = src_len >> 2;

    /* ce task config */
    task = ce_aes_config(req_ctx->dir, CE_METHOD_AES, req_ctx->mode, req_ctx->key, req_ctx->key_length);
    if (task == NULL) {
        CE_ERR("ce_aes_config fail\n");
        return HAL_AES_INPUT_ERROR;
    }
    hal_ce_pending_clear(task->chan_id);

    if (req_ctx->iv) {
        hal_ce_iv_set(req_ctx->iv, AES_BLOCK_SIZE, task);
        hal_dcache_clean((unsigned long)req_ctx->iv, AES_BLOCK_SIZE);
    }
    if (CE_AES_MODE_CTR == req_ctx->mode) {
        hal_ce_cnt_set(req_ctx->iv_next, req_ctx->key_length, task);
        hal_dcache_clean((unsigned long)req_ctx->iv_next, req_ctx->key_length);
    } else if (CE_AES_MODE_CTS == req_ctx->mode) {
        hal_ce_cts_last(task);
    } else if (CE_AES_MODE_CFB == req_ctx->mode) {
        hal_ce_cfb_bitwidth_set(req_ctx->bitwidth, task);
    }

    if ((task->sym_ctl & 0xF00) == (CE_AES_MODE_CTS << CE_SYM_CTL_OP_MODE_SHIFT))
        task->data_len = src_len + req_ctx->padding_len;
    else
        hal_ce_data_len_set(src_len + req_ctx->padding_len, task);

    if (src_len) {
        task->src[0].addr = (uint32_t)__va_to_pa((unsigned long)req_ctx->src_buffer);
        task->src[0].len = src_word_len;
        if (req_ctx->padding_len) {
            task->src[1].addr = (uint32_t)__va_to_pa((unsigned long)req_ctx->padding);
            task->src[1].len = req_ctx->padding_len >> 2;
        }
    } else {
        task->src[0].addr = (uint32_t)__va_to_pa((unsigned long)req_ctx->padding);
        task->src[0].len = req_ctx->padding_len >> 2;
    }
    task->dst[0].addr = (uint32_t)__va_to_pa((unsigned long)req_ctx->dst_buffer);
    task->dst[0].len = (src_len + req_ctx->padding_len) >> 2;
    task->next = 0;

    hal_dcache_clean((unsigned long)task, sizeof(ce_task_desc_t));
    if (src_len) {
        hal_dcache_clean((unsigned long)req_ctx->src_buffer, src_len);
    }
    if (req_ctx->padding_len) {
        hal_dcache_clean((unsigned long)req_ctx->padding, req_ctx->padding_len);
    }
    hal_dcache_clean((unsigned long)req_ctx->dst_buffer, src_len + req_ctx->padding_len);

    //ce_print_task_info(task);
    hal_ce_set_task((unsigned long)task);
    hal_ce_irq_enable(task->chan_id);
    hal_ce_ctrl_start();
#ifdef CE_NO_IRQ
    hal_ce_wait_finish(task->chan_id);
    hal_ce_pending_clear(task->chan_id);
#else
#if 1
    ret = hal_sem_timedwait(ce_sem, CE_WAIT_TIME);
    if (ret != 0) {
        CE_ERR("Timed out\n");
        hal_free(task);
        return HAL_AES_TIME_OUT;
    }
#else
    rt_wqueue_wait(&ce_wqueue, 0, RT_WAITING_FOREVER);
#endif
#endif
    hal_dcache_invalidate((uint32_t)req_ctx->dst_buffer, src_len + req_ctx->padding_len);
    hal_ce_irq_disable(task->chan_id);
    if (hal_ce_get_erro() > 0) {
        CE_ERR("error\n");
        hal_ce_reg_printf();
        hal_free(task);
        return HAL_AES_CRYPTO_ERROR;
    }
    CE_DBG("do_aes_crypto success\n");
    hal_free(task);
    return HAL_AES_STATUS_OK;
}

static int ce_aes_check_ctx_valid(crypto_aes_req_ctx_t *req_ctx)
{
    if (req_ctx == NULL) {
        CE_ERR("aes req_ctx is NULL\n");
        return HAL_AES_INPUT_ERROR;
    }
    if ((req_ctx->src_buffer == NULL)
        || (req_ctx->dst_buffer == NULL)
        || (req_ctx->key == NULL)
        || ((req_ctx->mode != CE_AES_MODE_ECB) && (req_ctx->iv == NULL))) {
        CE_ERR("input is NULL\n");
        return HAL_AES_INPUT_ERROR;
    }
    if ((req_ctx->key_length != AES_KEYSIZE_16)
        && (req_ctx->key_length != AES_KEYSIZE_24)
        && (req_ctx->key_length != AES_KEYSIZE_32)) {
        CE_ERR("key length is %ld, invalid\n", req_ctx->key_length);
        return HAL_AES_INPUT_ERROR;
    }
    if ((((u32)req_ctx->src_buffer & (CE_ALIGN_SIZE - 1)) != 0)
        || (((u32)req_ctx->dst_buffer & (CE_ALIGN_SIZE - 1)) != 0)
        || (((u32)req_ctx->key & (CE_ALIGN_SIZE - 1)) != 0)) {
        CE_ERR("input buffer is not %d align\n", CE_ALIGN_SIZE);
        return HAL_AES_INPUT_ERROR;
    }
    if (req_ctx->dir == CE_DIR_DECRYPT) {
        if (((req_ctx->mode == CE_AES_MODE_ECB)
             || (req_ctx->mode == CE_AES_MODE_CBC)
             || (req_ctx->mode == CE_AES_MODE_CTS))
            && (req_ctx->src_length % AES_BLOCK_SIZE != 0)) {
            CE_ERR("src_length: %d is not %d align\n",
                   req_ctx->src_length, AES_BLOCK_SIZE);
            return HAL_AES_INPUT_ERROR;
        }
        if (req_ctx->src_length > req_ctx->dst_length) {
            CE_ERR("src_length: %d should not be bigger than dst_length: %d\n",
                   req_ctx->src_length, req_ctx->dst_length);
            return HAL_AES_INPUT_ERROR;
        }
    } else if (req_ctx->dir == CE_DIR_ENCRYPT) {
        if (req_ctx->dst_length < CE_ROUND_UP(req_ctx->src_length, AES_BLOCK_SIZE)) {
            CE_ERR("dst_length: %d should not be smaller than %d\n",
                   req_ctx->dst_length,
                   CE_ROUND_UP(req_ctx->src_length, AES_BLOCK_SIZE));
            return HAL_AES_INPUT_ERROR;
        }
    } else {
        CE_ERR("input crypt dir: %d is error\n", req_ctx->dir);
        return HAL_AES_INPUT_ERROR;
    }
    return HAL_AES_STATUS_OK;
}

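/*
 * do_aes_crypto(): public single-shot AES entry point; it validates the
 * request context and runs it through ce_aes_start().
 *
 * Illustrative caller sketch (not from this file). It only touches fields
 * this driver reads (dir, mode, key/key_length, iv, src/dst buffers and
 * lengths); buffer names, CE_ALIGN_SIZE alignment and buffer sizing are the
 * caller's responsibility and are assumed here:
 *
 *     crypto_aes_req_ctx_t ctx;
 *     memset(&ctx, 0, sizeof(ctx));
 *     ctx.dir = CE_DIR_ENCRYPT;
 *     ctx.mode = CE_AES_MODE_CBC;
 *     ctx.key = key_buf;              // CE_ALIGN_SIZE-aligned, 16/24/32 bytes
 *     ctx.key_length = AES_KEYSIZE_16;
 *     ctx.iv = iv_buf;                // 16-byte IV, required for non-ECB modes
 *     ctx.src_buffer = plain_buf;     // CE_ALIGN_SIZE-aligned input
 *     ctx.src_length = plain_len;
 *     ctx.dst_buffer = cipher_buf;    // room for src rounded up to a block
 *     ctx.dst_length = CE_ROUND_UP(plain_len, AES_BLOCK_SIZE);
 *     if (do_aes_crypto(&ctx) != HAL_AES_STATUS_OK)
 *         CE_ERR("aes encrypt fail\n");
 */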
int do_aes_crypto(crypto_aes_req_ctx_t *req_ctx)
{
    int ret;

    ret = ce_aes_check_ctx_valid(req_ctx);
    if (ret) {
        CE_ERR("ce_aes_check_ctx_valid fail\n");
        return ret;
    }
    ret = ce_aes_start(req_ctx);
    if (ret < 0) {
        CE_ERR("aes crypto fail\n");
        return ret;
    }
    return ret;
}

static uint32_t ce_hash_endian4(uint32_t data)
{
    uint32_t d1, d2, d3, d4;

    d1 = (data & 0xff) << 24;
    d2 = (data & 0xff00) << 8;
    d3 = (data & 0xff0000) >> 8;
    d4 = (data & 0xff000000) >> 24;
    return (d1 | d2 | d3 | d4);
}

static uint32_t ce_hash_blk_size(int type)
{
    if ((type == CE_METHOD_SHA384) || (type == CE_METHOD_SHA512))
        return SHA512_BLOCK_SIZE;
    return SHA1_BLOCK_SIZE;
}

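/*
 * ce_hash_sw_padding(): software Merkle-Damgard padding for MD5/SHA. The
 * unaligned tail of the message plus the 0x80 marker and the bit-length
 * words are built in ctx->padding; the length is byte-swapped for the
 * big-endian SHA variants, and the return value is the block-aligned part
 * of the source length.
 */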
static uint32_t ce_hash_sw_padding(crypto_hash_req_ctx_t *ctx)
{
    uint32_t blk_size = ce_hash_blk_size(ctx->type);
    uint32_t len_threshold = (blk_size == SHA512_BLOCK_SIZE) ? 112 : 56;
    uint32_t n = ctx->src_length % blk_size;
    uint8_t *p = ctx->padding;
    uint32_t len_l = ctx->src_length << 3; /* total len, in bits */
    uint32_t len_h = ctx->src_length >> 29;
    uint32_t big_endian = (ctx->type == CE_METHOD_MD5) ? 0 : 1;

    memset(ctx->padding, 0, 256);
    if (n) {
        memcpy(ctx->padding, ctx->src_buffer + ctx->src_length - n, n);
    }
    CE_DBG("ctx->type = %d, n = %d, ctx->src_length = %d\n", ctx->type, n, ctx->src_length);
    p[n] = 0x80;
    n++;
    if (n > len_threshold) { /* The pad data needs two blocks. */
        memset(p + n, 0, blk_size * 2 - n);
        p += blk_size * 2 - 8;
    } else {
        memset(p + n, 0, blk_size - n);
        p += blk_size - 8;
    }
    if (big_endian == 1) {
#if 0
        /* The length should be 64-bit in the SHA384/512 case.
         * The OpenSSL package is always smaller than 8K,
         * so we still use 32-bit lengths.
         */
        if (blk_size == SHA512_BLOCK_SIZE) {
            int len_hh = ctx->cnt >> 61;
            *(int *)(p - 4) = ce_hash_endian4(len_hh);
        }
#endif
        *(int *)p = ce_hash_endian4(len_h);
        *(int *)(p + 4) = ce_hash_endian4(len_l);
    } else {
        *(int *)p = len_l;
        *(int *)(p + 4) = len_h;
    }
    ctx->padding_len = (uint32_t)(p + 8 - ctx->padding);
    CE_DBG("After padding %d: %02x %02x %02x %02x %02x %02x %02x %02x\n",
           ctx->padding_len,
           p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
    return ctx->src_length - (ctx->src_length % blk_size);
}

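/*
 * ce_hash_start(): build a CE task descriptor on channel 1 for a hash
 * request. A previous digest can be fed back through req_ctx->md as the IV
 * (CE_HASH_IV_INPUT) to continue a multi-part hash; on success the result
 * is copied back into req_ctx->md.
 */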
static int ce_hash_start(crypto_hash_req_ctx_t *req_ctx)
{
    int ret = 0;
    int i = 0;
    uint8_t chan_id = 1;
    ce_task_desc_t *task = NULL;
    uint32_t src_word_len = 0;
    uint32_t src_length = ce_hash_sw_padding(req_ctx);

    src_word_len = src_length >> 2;
    task = (ce_task_desc_t *)hal_malloc(sizeof(ce_task_desc_t));
    if (task == NULL) {
        CE_ERR("hal_malloc fail\n");
        return HAL_HASH_MALLOC_ERROR;
    }
    CE_DBG("task addr = 0x%lx\n", (uint32_t)task);
    ce_task_desc_init(task, chan_id);
    hal_ce_pending_clear(chan_id);
    hal_ce_method_set(req_ctx->dir, req_ctx->type, task);
    hal_ce_data_len_set(src_length + req_ctx->padding_len, task);
    if (req_ctx->md_size) {
        hal_ce_iv_set(req_ctx->md, req_ctx->md_size, task);
        hal_dcache_clean((unsigned long)req_ctx->md, req_ctx->md_size);
        hal_ce_iv_mode_set(CE_HASH_IV_INPUT, task);
    }
    if (src_word_len != 0) {
        task->src[0].addr = (uint32_t)__va_to_pa((uint32_t)req_ctx->src_buffer);
        task->src[0].len = src_word_len;
        task->src[1].addr = (uint32_t)__va_to_pa((uint32_t)req_ctx->padding);
        task->src[1].len = req_ctx->padding_len >> 2;
    } else {
        task->src[0].addr = (uint32_t)__va_to_pa((uint32_t)req_ctx->padding);
        task->src[0].len = req_ctx->padding_len >> 2;
    }
    task->dst[0].addr = (uint32_t)__va_to_pa((uint32_t)req_ctx->dst_buffer);
    task->dst[0].len = req_ctx->dst_length >> 2;
    task->next = 0;

    hal_dcache_clean((uint32_t)task, sizeof(ce_task_desc_t));
    hal_dcache_clean((uint32_t)req_ctx->src_buffer, req_ctx->src_length);
    hal_dcache_clean((uint32_t)req_ctx->padding, req_ctx->padding_len);
    hal_dcache_clean((uint32_t)req_ctx->dst_buffer, req_ctx->dst_length);
    //FlushCacheAll();
    //ce_print_task_info(task);
    hal_ce_set_task((uint32_t)task);
    hal_ce_irq_enable(task->chan_id);
    hal_ce_ctrl_start();
#ifdef CE_NO_IRQ
    hal_ce_wait_finish(task->chan_id);
    hal_ce_pending_clear(task->chan_id);
#else
    ret = hal_sem_timedwait(ce_sem, CE_WAIT_TIME);
    if (ret != 0) {
        CE_ERR("Timed out\n");
        ret = HAL_HASH_TIME_OUT;
        goto fail;
    }
#endif
    hal_dcache_invalidate((uint32_t)req_ctx->dst_buffer, req_ctx->dst_length);
    if (hal_ce_get_erro() > 0) {
        hal_ce_reg_printf();
        ret = HAL_HASH_CRYPTO_ERROR;
        goto fail;
    }
    //ce_print_hex((char *)task->dst[0].addr, (task->dst[0].len * 4), (char *)task->dst[0].addr);
    //hal_ce_reg_printf();
    hal_ce_irq_disable(task->chan_id);
    memcpy(req_ctx->md, req_ctx->dst_buffer, req_ctx->dst_length);
    req_ctx->md_size = req_ctx->dst_length;
    hal_free(task);
    return HAL_HASH_STATUS_OK;

fail:
    if (task) {
        hal_free(task);
    }
    return ret;
}

static int ce_hash_check_ctx_valid(crypto_hash_req_ctx_t *req_ctx)
{
    if (req_ctx == NULL) {
        CE_ERR("sha req_ctx is NULL\n");
        return HAL_HASH_INPUT_ERROR;
    }
    if ((((u32)req_ctx->dst_buffer & (CE_ALIGN_SIZE - 1)) != 0)
        || (((u32)req_ctx->src_buffer & (CE_ALIGN_SIZE - 1)) != 0)) {
        CE_ERR("input buffer addr is not %d align\n", CE_ALIGN_SIZE);
        return HAL_HASH_INPUT_ERROR;
    }
    if (req_ctx->type == CE_METHOD_MD5) {
        if ((req_ctx->dst_length != MD5_DIGEST_SIZE)
            || ((req_ctx->md_size != 0)
                && (req_ctx->md_size != MD5_DIGEST_SIZE))) {
            CE_ERR("output length is not %d\n", MD5_DIGEST_SIZE);
            return HAL_HASH_INPUT_ERROR;
        }
    } else if (req_ctx->type == CE_METHOD_SHA1) {
        if ((req_ctx->dst_length != SHA1_DIGEST_SIZE)
            || ((req_ctx->md_size != 0)
                && (req_ctx->md_size != SHA1_DIGEST_SIZE))) {
            CE_ERR("output length is not %d\n", SHA1_DIGEST_SIZE);
            return HAL_HASH_INPUT_ERROR;
        }
    } else if (req_ctx->type == CE_METHOD_SHA224) {
        if ((req_ctx->dst_length != SHA256_DIGEST_SIZE)
            || ((req_ctx->md_size != 0)
                && (req_ctx->md_size != SHA256_DIGEST_SIZE))) {
            CE_ERR("output length is not %d\n", SHA256_DIGEST_SIZE);
            return HAL_HASH_INPUT_ERROR;
        }
    } else if (req_ctx->type == CE_METHOD_SHA256) {
        if ((req_ctx->dst_length != SHA256_DIGEST_SIZE)
            || ((req_ctx->md_size != 0)
                && (req_ctx->md_size != SHA256_DIGEST_SIZE))) {
            CE_ERR("output length is not %d\n", SHA256_DIGEST_SIZE);
            return HAL_HASH_INPUT_ERROR;
        }
    } else if (req_ctx->type == CE_METHOD_SHA384) {
        if ((req_ctx->dst_length != SHA512_DIGEST_SIZE)
            || ((req_ctx->md_size != 0)
                && (req_ctx->md_size != SHA512_DIGEST_SIZE))) {
            CE_ERR("output length is not %d\n", SHA512_DIGEST_SIZE);
            return HAL_HASH_INPUT_ERROR;
        }
    } else if (req_ctx->type == CE_METHOD_SHA512) {
        if ((req_ctx->dst_length != SHA512_DIGEST_SIZE)
            || ((req_ctx->md_size != 0)
                && (req_ctx->md_size != SHA512_DIGEST_SIZE))) {
            CE_ERR("output length is not %d\n", SHA512_DIGEST_SIZE);
            return HAL_HASH_INPUT_ERROR;
        }
    } else {
        CE_ERR("ce doesn't support hash mode: %d\n", req_ctx->type);
        return HAL_HASH_INPUT_ERROR;
    }
    return HAL_HASH_STATUS_OK;
}

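/*
 * do_hash_crypto(): public hash entry point; it validates the request
 * context (the digest buffer length must match the selected method) and
 * runs it through ce_hash_start().
 */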
int do_hash_crypto(crypto_hash_req_ctx_t *req_ctx)
{
    int ret = 0;

    ret = ce_hash_check_ctx_valid(req_ctx);
    if (ret < 0) {
        CE_ERR("ce_hash_check_ctx_valid fail: %d\n", ret);
        return ret;
    }
    ret = ce_hash_start(req_ctx);
    if (ret < 0) {
        CE_ERR("calc hash error num is %d\n", ret);
        return HAL_HASH_CRYPTO_ERROR;
    }
    return HAL_HASH_STATUS_OK;
}

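/*
 * ce_rsa_sw_padding(): byte-reverse a big number of data_len bytes and
 * left-pad it with zeros into a group_len-byte work buffer.
 */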
static void ce_rsa_sw_padding(uint8_t *dst_buf, uint8_t *src_buf, uint32_t data_len, uint32_t group_len)
{
    int i = 0;

    memset(dst_buf, 0, group_len);
    for (i = group_len - data_len; i < group_len; i++) {
        dst_buf[i] = src_buf[group_len - 1 - i];
    }
}

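/*
 * ce_rsa_start(): copy the operand, modulus n and (optionally) private
 * exponent d into freshly allocated, padded work buffers, build a task
 * descriptor on channel 2 and run the modular exponentiation; the result
 * is copied back into req_ctx->dst_buffer before all temporaries are freed.
 */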
static int ce_rsa_start(crypto_rsa_req_ctx_t *req_ctx)
{
    int ret = 0;
    uint8_t chan_id = 2;
    uint8_t *p_src = NULL;
    uint8_t *p_n = NULL;
    uint8_t *p_d = NULL;
    uint8_t *p_dst = NULL;
    ce_task_desc_t *task = NULL;
    uint32_t bitwidth_byte_len = 0;
    uint32_t bitwidth_word_len = 0;

    bitwidth_byte_len = req_ctx->bitwidth >> 3;
    bitwidth_word_len = req_ctx->bitwidth >> 5;

    p_src = hal_malloc(bitwidth_byte_len);
    if (p_src == NULL) {
        CE_ERR("rsa src hal_malloc fail\n");
        ret = HAL_RSA_MALLOC_ERROR;
        goto fail;
    }
    memset(p_src, 0x0, bitwidth_byte_len);
    ce_rsa_sw_padding(p_src, req_ctx->src_buffer, req_ctx->src_length, bitwidth_byte_len);

    p_n = hal_malloc(bitwidth_byte_len);
    if (p_n == NULL) {
        CE_ERR("rsa key n hal_malloc fail\n");
        ret = HAL_RSA_MALLOC_ERROR;
        goto fail;
    }
    memset(p_n, 0x0, bitwidth_byte_len);
    ce_rsa_sw_padding(p_n, req_ctx->key_n, req_ctx->n_len, bitwidth_byte_len);

    if (req_ctx->key_d) {
        p_d = hal_malloc(bitwidth_byte_len);
        if (p_d == NULL) {
            CE_ERR("rsa key d hal_malloc fail\n");
            ret = HAL_RSA_MALLOC_ERROR;
            goto fail;
        }
        memset(p_d, 0x0, bitwidth_byte_len);
        ce_rsa_sw_padding(p_d, req_ctx->key_d, req_ctx->d_len, bitwidth_byte_len);
        req_ctx->key_d = p_d;
    }

    p_dst = hal_malloc(bitwidth_byte_len);
    if (p_dst == NULL) {
        CE_ERR("hal_malloc fail\n");
        ret = HAL_RSA_MALLOC_ERROR;
        goto fail;
    }
    memset(p_dst, 0x0, bitwidth_byte_len);

    task = (ce_task_desc_t *)hal_malloc(sizeof(ce_task_desc_t));
    if (task == NULL) {
        CE_ERR("task hal_malloc fail\n");
        ret = HAL_RSA_MALLOC_ERROR;
        goto fail;
    }
    CE_DBG("task addr = 0x%lx\n", (uint32_t)task);
    ce_task_desc_init(task, chan_id);
    hal_ce_pending_clear(chan_id);
    hal_ce_method_set(req_ctx->dir, req_ctx->type, task);
    hal_ce_rsa_width_set(req_ctx->bitwidth, task);
    task->iv_addr = (uint32_t)__va_to_pa((uint32_t)p_n);
    if (req_ctx->key_d)
        task->key_addr = (uint32_t)__va_to_pa((uint32_t)p_d);
    else
        task->key_addr = (uint32_t)__va_to_pa((uint32_t)req_ctx->key_e);
    hal_ce_data_len_set(bitwidth_byte_len, task);
    task->src[0].addr = (uint32_t)__va_to_pa((uint32_t)p_src);
    task->src[0].len = bitwidth_word_len;
    task->dst[0].addr = (uint32_t)__va_to_pa((uint32_t)p_dst);
    task->dst[0].len = bitwidth_word_len;
    task->next = 0;

    hal_dcache_clean((uint32_t)task, sizeof(ce_task_desc_t));
    hal_dcache_clean((uint32_t)p_src, bitwidth_byte_len);
    hal_dcache_clean((uint32_t)p_n, bitwidth_byte_len);
    if (req_ctx->key_d)
        hal_dcache_clean((uint32_t)p_d, bitwidth_byte_len);
    else
        hal_dcache_clean((uint32_t)req_ctx->key_e, bitwidth_byte_len);
    hal_dcache_clean((uint32_t)p_dst, bitwidth_byte_len);
    //FlushCacheAll();
    /*ce_print_task_info(task);*/
    hal_ce_set_task((uint32_t)task);
    hal_ce_irq_enable(task->chan_id);
    hal_ce_ctrl_start();
#ifdef CE_NO_IRQ
    hal_ce_wait_finish(task->chan_id);
    hal_ce_pending_clear(task->chan_id);
#else
    ret = hal_sem_timedwait(ce_sem, CE_WAIT_TIME);
    if (ret != 0) {
        CE_ERR("Timed out\n");
        ret = HAL_RSA_CRYPTO_ERROR;
        goto fail;
    }
#endif
    hal_dcache_invalidate((uint32_t)p_dst, bitwidth_byte_len);
    /*ce_reg_printf();*/
    if (hal_ce_get_erro() > 0) {
        hal_ce_reg_printf();
        ret = HAL_RSA_CRYPTO_ERROR;
        goto fail;
    }
    hal_ce_irq_disable(task->chan_id);
    ce_rsa_sw_padding(req_ctx->dst_buffer, p_dst, req_ctx->dst_length, req_ctx->dst_length);

fail:
    if (p_src)
        hal_free(p_src);
    if (p_n)
        hal_free(p_n);
    if (p_d)
        hal_free(p_d);
    if (p_dst)
        hal_free(p_dst);
    if (task)
        hal_free(task);
    return ret;
}

static int ce_rsa_check_ctx_valid(crypto_rsa_req_ctx_t *req_ctx)
{
    if (req_ctx == NULL) {
        CE_ERR("rsa req_ctx is NULL\n");
        return HAL_RSA_INPUT_ERROR;
    }
    if ((((u32)req_ctx->key_n & (CE_ALIGN_SIZE - 1)) != 0)
        || (((u32)req_ctx->key_e & (CE_ALIGN_SIZE - 1)) != 0)
        || (((u32)req_ctx->key_d & (CE_ALIGN_SIZE - 1)) != 0)
        || (((u32)req_ctx->src_buffer & (CE_ALIGN_SIZE - 1)) != 0)
        || (((u32)req_ctx->dst_buffer & (CE_ALIGN_SIZE - 1)) != 0)) {
        printf("rsa req_ctx buffer is not %d align\n", CE_ALIGN_SIZE);
        return HAL_RSA_INPUT_ERROR;
    }
    if ((req_ctx->bitwidth == 512)
        || (req_ctx->bitwidth == 1024)
        || (req_ctx->bitwidth == 2048)) {
        if ((req_ctx->n_len > req_ctx->bitwidth / 8)
            || (req_ctx->e_len > req_ctx->bitwidth / 8)
            || (req_ctx->d_len > req_ctx->bitwidth / 8)
            || (req_ctx->src_length > req_ctx->bitwidth / 8)
            || (req_ctx->dst_length > req_ctx->bitwidth / 8)) {
            CE_ERR("rsa length should not be bigger than %d\n", req_ctx->bitwidth / 8);
            return HAL_RSA_INPUT_ERROR;
        }
    } else {
        CE_ERR("invalid bitwidth: %d\n", req_ctx->bitwidth);
        return HAL_RSA_INPUT_ERROR;
    }
    return HAL_RSA_STATUS_OK;
}

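/*
 * do_rsa_crypto(): public RSA entry point; only 512/1024/2048-bit widths
 * are accepted and every operand must fit in bitwidth/8 bytes.
 */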
int do_rsa_crypto(crypto_rsa_req_ctx_t *req_ctx)
{
    int ret = 0;

    ret = ce_rsa_check_ctx_valid(req_ctx);
    if (ret < 0) {
        CE_ERR("ce_rsa_check_ctx_valid fail: %d\n", ret);
        return ret;
    }
    ret = ce_rsa_start(req_ctx);
    if (ret < 0) {
        CE_ERR("calc rsa error num is %d\n", ret);
        return ret;
    }
    return HAL_RSA_STATUS_OK;
}

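/*
 * do_rng_gen(): generate random data with the CE. TRNG output is produced
 * in 32-byte chunks and PRNG output in 20-byte chunks, so the request is
 * rounded up internally and only rng_len bytes are copied back; PRNG mode
 * requires a 24-byte seed in req_ctx->key.
 */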
int do_rng_gen(crypto_rng_req_ctx_t *req_ctx)
{
    int ret = 0;
    uint8_t chan_id = 2;
    uint32_t dst_len = 0;
    uint8_t *dst_buf = NULL;
    ce_task_desc_t *task = NULL;

    if (req_ctx->mode == CE_METHOD_TRNG) {
        dst_len = CE_ROUND_UP(req_ctx->rng_len, 32); /* align with 32 bytes */
    } else if (req_ctx->mode == CE_METHOD_PRNG) {
        dst_len = CE_ROUND_UP(req_ctx->rng_len, 20); /* align with 20 bytes */
    } else {
        CE_ERR("Error: unsupported mode %d\n", req_ctx->mode);
        ret = HAL_RNG_INPUT_ERROR;
        goto fail;
    }
    if (dst_len > SS_RNG_MAX_LEN) {
        CE_ERR("Error: The RNG length is too large: %d\n", dst_len);
        ret = HAL_RNG_INPUT_ERROR;
        goto fail;
    }
    dst_buf = (uint8_t *)hal_malloc(dst_len);
    if (dst_buf == NULL) {
        CE_ERR("hal_malloc dst_buf fail\n");
        ret = HAL_RNG_MALLOC_ERROR;
        goto fail;
    }
    task = (ce_task_desc_t *)hal_malloc(sizeof(ce_task_desc_t));
    if (task == NULL) {
        CE_ERR("hal_malloc task fail\n");
        ret = HAL_RNG_MALLOC_ERROR;
        goto fail;
    }
    CE_DBG("task addr = 0x%lx, rng_buf = 0x%lx, rng_len = %d\n",
           (uint32_t)task, (uint32_t)req_ctx->rng_buf, req_ctx->rng_len);
    ce_task_desc_init(task, chan_id);
    hal_ce_pending_clear(chan_id);
    hal_ce_method_set(0, req_ctx->mode, task);
    hal_ce_data_len_set(dst_len, task);
    if (req_ctx->mode == CE_METHOD_PRNG) {
        /* The PRNG must be seeded: a 24-byte key is required. */
        if (req_ctx->key && (req_ctx->key_len == 24)) {
            hal_ce_key_set(req_ctx->key, req_ctx->key_len, task);
            hal_dcache_clean((uint32_t)req_ctx->key, req_ctx->key_len);
        } else {
            CE_ERR("Error: PRNG must set a seed, and the seed size must be 24!\n");
            ret = HAL_RNG_INPUT_ERROR;
            goto fail;
        }
    }
    task->src[0].addr = 0;
    task->src[0].len = 0;
    task->dst[0].addr = (uint32_t)__va_to_pa((uint32_t)dst_buf);
    task->dst[0].len = dst_len >> 2;

    hal_dcache_clean((uint32_t)task, sizeof(ce_task_desc_t));
    hal_dcache_clean((uint32_t)dst_buf, dst_len);
    //ce_print_task_info(task);
    hal_ce_set_task((uint32_t)task);
    hal_ce_irq_enable(task->chan_id);
    hal_ce_ctrl_start();
#ifdef CE_NO_IRQ
    hal_ce_wait_finish(task->chan_id);
    hal_ce_pending_clear(task->chan_id);
#else
    ret = hal_sem_timedwait(ce_sem, CE_WAIT_TIME);
    if (ret != 0) {
        CE_ERR("Timed out\n");
        ret = HAL_RNG_TIME_OUT;
        goto fail;
    }
#endif
    hal_dcache_invalidate((uint32_t)dst_buf, dst_len);
    if (req_ctx->mode == CE_METHOD_PRNG) {
        hal_dcache_invalidate((uint32_t)req_ctx->key, req_ctx->key_len);
    }
    if (hal_ce_get_erro() > 0) {
        hal_ce_reg_printf();
        ret = HAL_RNG_CRYPTO_ERROR;
        goto fail;
    }
    //ce_print_hex((char *)task->dst[0].addr, (task->dst[0].len * 4), (char *)task->dst[0].addr);
    /*ce_reg_printf();*/
    memcpy(req_ctx->rng_buf, dst_buf, req_ctx->rng_len);
    hal_ce_irq_disable(task->chan_id);
    hal_free(task);
    hal_free(dst_buf);
    return HAL_RNG_STATUS_OK;

fail:
    if (task) {
        hal_free(task);
    }
    if (dst_buf) {
        hal_free(dst_buf);
    }
    return ret;
}