  1. /*
  2. ---------------------------------------------------------------------------
  3. Copyright (c) 1998-2008, Brian Gladman, Worcester, UK. All rights reserved.
  4. LICENSE TERMS
  5. The redistribution and use of this software (with or without changes)
  6. is allowed without the payment of fees or royalties provided that:
  7. 1. source code distributions include the above copyright notice, this
  8. list of conditions and the following disclaimer;
  9. 2. binary distributions include the above copyright notice, this list
  10. of conditions and the following disclaimer in their documentation;
  11. 3. the name of the copyright holder is not used to endorse products
  12. built using this software without specific written permission.
  13. DISCLAIMER
  14. This software is provided 'as is' with no explicit or implied warranties
  15. in respect of its properties, including, but not limited to, correctness
  16. and/or fitness for purpose.
  17. ---------------------------------------------------------------------------
  18. Issue 09/09/2006
  19. This is an AES implementation that uses only 8-bit byte operations on the
  20. cipher state (there are options to use 32-bit types if available).
  21. The combination of mix columns and byte substitution used here is based on
  22. that developed by Karl Malbrain. His contribution is acknowledged.
  23. */
  24. /* define if you have a fast memcpy function on your system */
  25. #if 1
  26. # define HAVE_MEMCPY
  27. # include <string.h>
  28. #if 0
  29. # if defined( _MSC_VER )
  30. # include <intrin.h>
  31. # pragma intrinsic( memcpy )
  32. # endif
  33. #endif
  34. #endif
  35. #include <stdlib.h>
  36. /* add the target configuration to allow using internal data types and compilation options */
  37. #include "common/bt_target.h"
  38. /* define if you have fast 32-bit types on your system */
  39. #if 1
  40. # define HAVE_UINT_32T
  41. #endif
  42. /* define if you don't want any tables */
  43. #if 1
  44. # define USE_TABLES
  45. #endif
  46. /* On Intel Core 2 duo VERSION_1 is faster */
  47. /* alternative versions (test for performance on your system) */
  48. #if 1
  49. # define VERSION_1
  50. #endif
  51. #include "aes.h"
  52. #if defined( HAVE_UINT_32T )
  53. typedef UINT32 uint_32t;
  54. #endif
  55. /* functions for finite field multiplication in the AES Galois field */
  56. #define WPOLY 0x011b
  57. #define BPOLY 0x1b
  58. #define DPOLY 0x008d
  59. #define f1(x) (x)
  60. #define f2(x) ((x << 1) ^ (((x >> 7) & 1) * WPOLY))
  61. #define f4(x) ((x << 2) ^ (((x >> 6) & 1) * WPOLY) ^ (((x >> 6) & 2) * WPOLY))
  62. #define f8(x) ((x << 3) ^ (((x >> 5) & 1) * WPOLY) ^ (((x >> 5) & 2) * WPOLY) \
  63. ^ (((x >> 5) & 4) * WPOLY))
  64. #define d2(x) (((x) >> 1) ^ ((x) & 1 ? DPOLY : 0))
  65. #define f3(x) (f2(x) ^ x)
  66. #define f9(x) (f8(x) ^ x)
  67. #define fb(x) (f8(x) ^ f2(x) ^ x)
  68. #define fd(x) (f8(x) ^ f4(x) ^ x)
  69. #define fe(x) (f8(x) ^ f4(x) ^ f2(x))
  70. #if defined( USE_TABLES )
  71. #define sb_data(w) { /* S Box data values */ \
  72. w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), w(0xc5),\
  73. w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), w(0xab), w(0x76),\
  74. w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), w(0x59), w(0x47), w(0xf0),\
  75. w(0xad), w(0xd4), w(0xa2), w(0xaf), w(0x9c), w(0xa4), w(0x72), w(0xc0),\
  76. w(0xb7), w(0xfd), w(0x93), w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc),\
  77. w(0x34), w(0xa5), w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15),\
  78. w(0x04), w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a),\
  79. w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), w(0x75),\
  80. w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), w(0x5a), w(0xa0),\
  81. w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), w(0xe3), w(0x2f), w(0x84),\
  82. w(0x53), w(0xd1), w(0x00), w(0xed), w(0x20), w(0xfc), w(0xb1), w(0x5b),\
  83. w(0x6a), w(0xcb), w(0xbe), w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf),\
  84. w(0xd0), w(0xef), w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85),\
  85. w(0x45), w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8),\
  86. w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), w(0xf5),\
  87. w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), w(0xf3), w(0xd2),\
  88. w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), w(0x97), w(0x44), w(0x17),\
  89. w(0xc4), w(0xa7), w(0x7e), w(0x3d), w(0x64), w(0x5d), w(0x19), w(0x73),\
  90. w(0x60), w(0x81), w(0x4f), w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88),\
  91. w(0x46), w(0xee), w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb),\
  92. w(0xe0), w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c),\
  93. w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), w(0x79),\
  94. w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), w(0x4e), w(0xa9),\
  95. w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), w(0x7a), w(0xae), w(0x08),\
  96. w(0xba), w(0x78), w(0x25), w(0x2e), w(0x1c), w(0xa6), w(0xb4), w(0xc6),\
  97. w(0xe8), w(0xdd), w(0x74), w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a),\
  98. w(0x70), w(0x3e), w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e),\
  99. w(0x61), w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e),\
  100. w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), w(0x94),\
  101. w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), w(0x28), w(0xdf),\
  102. w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), w(0xe6), w(0x42), w(0x68),\
  103. w(0x41), w(0x99), w(0x2d), w(0x0f), w(0xb0), w(0x54), w(0xbb), w(0x16) }
  104. #define isb_data(w) { /* inverse S Box data values */ \
  105. w(0x52), w(0x09), w(0x6a), w(0xd5), w(0x30), w(0x36), w(0xa5), w(0x38),\
  106. w(0xbf), w(0x40), w(0xa3), w(0x9e), w(0x81), w(0xf3), w(0xd7), w(0xfb),\
  107. w(0x7c), w(0xe3), w(0x39), w(0x82), w(0x9b), w(0x2f), w(0xff), w(0x87),\
  108. w(0x34), w(0x8e), w(0x43), w(0x44), w(0xc4), w(0xde), w(0xe9), w(0xcb),\
  109. w(0x54), w(0x7b), w(0x94), w(0x32), w(0xa6), w(0xc2), w(0x23), w(0x3d),\
  110. w(0xee), w(0x4c), w(0x95), w(0x0b), w(0x42), w(0xfa), w(0xc3), w(0x4e),\
  111. w(0x08), w(0x2e), w(0xa1), w(0x66), w(0x28), w(0xd9), w(0x24), w(0xb2),\
  112. w(0x76), w(0x5b), w(0xa2), w(0x49), w(0x6d), w(0x8b), w(0xd1), w(0x25),\
  113. w(0x72), w(0xf8), w(0xf6), w(0x64), w(0x86), w(0x68), w(0x98), w(0x16),\
  114. w(0xd4), w(0xa4), w(0x5c), w(0xcc), w(0x5d), w(0x65), w(0xb6), w(0x92),\
  115. w(0x6c), w(0x70), w(0x48), w(0x50), w(0xfd), w(0xed), w(0xb9), w(0xda),\
  116. w(0x5e), w(0x15), w(0x46), w(0x57), w(0xa7), w(0x8d), w(0x9d), w(0x84),\
  117. w(0x90), w(0xd8), w(0xab), w(0x00), w(0x8c), w(0xbc), w(0xd3), w(0x0a),\
  118. w(0xf7), w(0xe4), w(0x58), w(0x05), w(0xb8), w(0xb3), w(0x45), w(0x06),\
  119. w(0xd0), w(0x2c), w(0x1e), w(0x8f), w(0xca), w(0x3f), w(0x0f), w(0x02),\
  120. w(0xc1), w(0xaf), w(0xbd), w(0x03), w(0x01), w(0x13), w(0x8a), w(0x6b),\
  121. w(0x3a), w(0x91), w(0x11), w(0x41), w(0x4f), w(0x67), w(0xdc), w(0xea),\
  122. w(0x97), w(0xf2), w(0xcf), w(0xce), w(0xf0), w(0xb4), w(0xe6), w(0x73),\
  123. w(0x96), w(0xac), w(0x74), w(0x22), w(0xe7), w(0xad), w(0x35), w(0x85),\
  124. w(0xe2), w(0xf9), w(0x37), w(0xe8), w(0x1c), w(0x75), w(0xdf), w(0x6e),\
  125. w(0x47), w(0xf1), w(0x1a), w(0x71), w(0x1d), w(0x29), w(0xc5), w(0x89),\
  126. w(0x6f), w(0xb7), w(0x62), w(0x0e), w(0xaa), w(0x18), w(0xbe), w(0x1b),\
  127. w(0xfc), w(0x56), w(0x3e), w(0x4b), w(0xc6), w(0xd2), w(0x79), w(0x20),\
  128. w(0x9a), w(0xdb), w(0xc0), w(0xfe), w(0x78), w(0xcd), w(0x5a), w(0xf4),\
  129. w(0x1f), w(0xdd), w(0xa8), w(0x33), w(0x88), w(0x07), w(0xc7), w(0x31),\
  130. w(0xb1), w(0x12), w(0x10), w(0x59), w(0x27), w(0x80), w(0xec), w(0x5f),\
  131. w(0x60), w(0x51), w(0x7f), w(0xa9), w(0x19), w(0xb5), w(0x4a), w(0x0d),\
  132. w(0x2d), w(0xe5), w(0x7a), w(0x9f), w(0x93), w(0xc9), w(0x9c), w(0xef),\
  133. w(0xa0), w(0xe0), w(0x3b), w(0x4d), w(0xae), w(0x2a), w(0xf5), w(0xb0),\
  134. w(0xc8), w(0xeb), w(0xbb), w(0x3c), w(0x83), w(0x53), w(0x99), w(0x61),\
  135. w(0x17), w(0x2b), w(0x04), w(0x7e), w(0xba), w(0x77), w(0xd6), w(0x26),\
  136. w(0xe1), w(0x69), w(0x14), w(0x63), w(0x55), w(0x21), w(0x0c), w(0x7d) }
  137. #define mm_data(w) { /* basic data for forming finite field tables */ \
  138. w(0x00), w(0x01), w(0x02), w(0x03), w(0x04), w(0x05), w(0x06), w(0x07),\
  139. w(0x08), w(0x09), w(0x0a), w(0x0b), w(0x0c), w(0x0d), w(0x0e), w(0x0f),\
  140. w(0x10), w(0x11), w(0x12), w(0x13), w(0x14), w(0x15), w(0x16), w(0x17),\
  141. w(0x18), w(0x19), w(0x1a), w(0x1b), w(0x1c), w(0x1d), w(0x1e), w(0x1f),\
  142. w(0x20), w(0x21), w(0x22), w(0x23), w(0x24), w(0x25), w(0x26), w(0x27),\
  143. w(0x28), w(0x29), w(0x2a), w(0x2b), w(0x2c), w(0x2d), w(0x2e), w(0x2f),\
  144. w(0x30), w(0x31), w(0x32), w(0x33), w(0x34), w(0x35), w(0x36), w(0x37),\
  145. w(0x38), w(0x39), w(0x3a), w(0x3b), w(0x3c), w(0x3d), w(0x3e), w(0x3f),\
  146. w(0x40), w(0x41), w(0x42), w(0x43), w(0x44), w(0x45), w(0x46), w(0x47),\
  147. w(0x48), w(0x49), w(0x4a), w(0x4b), w(0x4c), w(0x4d), w(0x4e), w(0x4f),\
  148. w(0x50), w(0x51), w(0x52), w(0x53), w(0x54), w(0x55), w(0x56), w(0x57),\
  149. w(0x58), w(0x59), w(0x5a), w(0x5b), w(0x5c), w(0x5d), w(0x5e), w(0x5f),\
  150. w(0x60), w(0x61), w(0x62), w(0x63), w(0x64), w(0x65), w(0x66), w(0x67),\
  151. w(0x68), w(0x69), w(0x6a), w(0x6b), w(0x6c), w(0x6d), w(0x6e), w(0x6f),\
  152. w(0x70), w(0x71), w(0x72), w(0x73), w(0x74), w(0x75), w(0x76), w(0x77),\
  153. w(0x78), w(0x79), w(0x7a), w(0x7b), w(0x7c), w(0x7d), w(0x7e), w(0x7f),\
  154. w(0x80), w(0x81), w(0x82), w(0x83), w(0x84), w(0x85), w(0x86), w(0x87),\
  155. w(0x88), w(0x89), w(0x8a), w(0x8b), w(0x8c), w(0x8d), w(0x8e), w(0x8f),\
  156. w(0x90), w(0x91), w(0x92), w(0x93), w(0x94), w(0x95), w(0x96), w(0x97),\
  157. w(0x98), w(0x99), w(0x9a), w(0x9b), w(0x9c), w(0x9d), w(0x9e), w(0x9f),\
  158. w(0xa0), w(0xa1), w(0xa2), w(0xa3), w(0xa4), w(0xa5), w(0xa6), w(0xa7),\
  159. w(0xa8), w(0xa9), w(0xaa), w(0xab), w(0xac), w(0xad), w(0xae), w(0xaf),\
  160. w(0xb0), w(0xb1), w(0xb2), w(0xb3), w(0xb4), w(0xb5), w(0xb6), w(0xb7),\
  161. w(0xb8), w(0xb9), w(0xba), w(0xbb), w(0xbc), w(0xbd), w(0xbe), w(0xbf),\
  162. w(0xc0), w(0xc1), w(0xc2), w(0xc3), w(0xc4), w(0xc5), w(0xc6), w(0xc7),\
  163. w(0xc8), w(0xc9), w(0xca), w(0xcb), w(0xcc), w(0xcd), w(0xce), w(0xcf),\
  164. w(0xd0), w(0xd1), w(0xd2), w(0xd3), w(0xd4), w(0xd5), w(0xd6), w(0xd7),\
  165. w(0xd8), w(0xd9), w(0xda), w(0xdb), w(0xdc), w(0xdd), w(0xde), w(0xdf),\
  166. w(0xe0), w(0xe1), w(0xe2), w(0xe3), w(0xe4), w(0xe5), w(0xe6), w(0xe7),\
  167. w(0xe8), w(0xe9), w(0xea), w(0xeb), w(0xec), w(0xed), w(0xee), w(0xef),\
  168. w(0xf0), w(0xf1), w(0xf2), w(0xf3), w(0xf4), w(0xf5), w(0xf6), w(0xf7),\
  169. w(0xf8), w(0xf9), w(0xfa), w(0xfb), w(0xfc), w(0xfd), w(0xfe), w(0xff) }
  170. static const uint_8t sbox[256] = sb_data(f1);
  171. static const uint_8t isbox[256] = isb_data(f1);
  172. static const uint_8t gfm2_sbox[256] = sb_data(f2);
  173. static const uint_8t gfm3_sbox[256] = sb_data(f3);
  174. static const uint_8t gfmul_9[256] = mm_data(f9);
  175. static const uint_8t gfmul_b[256] = mm_data(fb);
  176. static const uint_8t gfmul_d[256] = mm_data(fd);
  177. static const uint_8t gfmul_e[256] = mm_data(fe);
  178. #define s_box(x) sbox[(x)]
  179. #define is_box(x) isbox[(x)]
  180. #define gfm2_sb(x) gfm2_sbox[(x)]
  181. #define gfm3_sb(x) gfm3_sbox[(x)]
  182. #define gfm_9(x) gfmul_9[(x)]
  183. #define gfm_b(x) gfmul_b[(x)]
  184. #define gfm_d(x) gfmul_d[(x)]
  185. #define gfm_e(x) gfmul_e[(x)]
  186. #else
  187. /* this is the high bit of x right shifted by 1 */
  188. /* position. Since the starting polynomial has */
  189. /* 9 bits (0x11b), this right shift keeps the */
  190. /* values of all top bits within a byte */
  191. static uint_8t hibit(const uint_8t x)
  192. {
  193. uint_8t r = (uint_8t)((x >> 1) | (x >> 2));
  194. r |= (r >> 2);
  195. r |= (r >> 4);
  196. return (r + 1) >> 1;
  197. }
  198. /* return the inverse of the finite field element x */
/* Return the multiplicative inverse of x in GF(2^8) using a binary
 * extended Euclidean algorithm over the field polynomial 0x11b
 * (BPOLY is its low byte; n2 = 0x80 stands for the implicit ninth
 * bit after hibit's right-shift-by-one convention).
 * 0 and 1 are returned unchanged (0 has no inverse; 1 is its own).
 * p1/p2 track the polynomial remainders, v1/v2 the accumulated
 * quotient products, n1/n2 the (shifted) top bits of p1/p2.
 */
static uint_8t gf_inv(const uint_8t x)
{
    uint_8t p1 = x, p2 = BPOLY, n1 = hibit(x), n2 = 0x80, v1 = 1, v2 = 0;
    if (x < 2) {
        return x;
    }
    for ( ; ; ) {
        if (n1) {
            while (n2 >= n1) { /* divide polynomial p2 by p1 */
                n2 /= n1; /* shift smaller polynomial left */
                p2 ^= (p1 * n2) & 0xff; /* and remove from larger one */
                v2 ^= (v1 * n2); /* shift accumulated value and */
                n2 = hibit(p2); /* add into result */
            }
        } else {
            return v1; /* p1 reached 1: v1 is the inverse */
        }
        if (n2) { /* repeat with values swapped */
            while (n1 >= n2) {
                n1 /= n2;
                p1 ^= p2 * n1;
                v1 ^= v2 * n1;
                n1 = hibit(p1);
            }
        } else {
            return v2; /* p2 reached 1: v2 is the inverse */
        }
    }
}
  228. /* The forward and inverse affine transformations used in the S-box */
  229. uint_8t fwd_affine(const uint_8t x)
  230. {
  231. #if defined( HAVE_UINT_32T )
  232. uint_32t w = x;
  233. w ^= (w << 1) ^ (w << 2) ^ (w << 3) ^ (w << 4);
  234. return 0x63 ^ ((w ^ (w >> 8)) & 0xff);
  235. #else
  236. return 0x63 ^ x ^ (x << 1) ^ (x << 2) ^ (x << 3) ^ (x << 4)
  237. ^ (x >> 7) ^ (x >> 6) ^ (x >> 5) ^ (x >> 4);
  238. #endif
  239. }
  240. uint_8t inv_affine(const uint_8t x)
  241. {
  242. #if defined( HAVE_UINT_32T )
  243. uint_32t w = x;
  244. w = (w << 1) ^ (w << 3) ^ (w << 6);
  245. return 0x05 ^ ((w ^ (w >> 8)) & 0xff);
  246. #else
  247. return 0x05 ^ (x << 1) ^ (x << 3) ^ (x << 6)
  248. ^ (x >> 7) ^ (x >> 5) ^ (x >> 2);
  249. #endif
  250. }
  251. #define s_box(x) fwd_affine(gf_inv(x))
  252. #define is_box(x) gf_inv(inv_affine(x))
  253. #define gfm2_sb(x) f2(s_box(x))
  254. #define gfm3_sb(x) f3(s_box(x))
  255. #define gfm_9(x) f9(x)
  256. #define gfm_b(x) fb(x)
  257. #define gfm_d(x) fd(x)
  258. #define gfm_e(x) fe(x)
  259. #endif
  260. #if defined( HAVE_MEMCPY )
  261. # define block_copy_nn(d, s, l) memcpy(d, s, l)
  262. # define block_copy(d, s) memcpy(d, s, N_BLOCK)
  263. #else
  264. # define block_copy_nn(d, s, l) copy_block_nn(d, s, l)
  265. # define block_copy(d, s) copy_block(d, s)
  266. #endif
  267. #if !defined( HAVE_MEMCPY )
  268. static void copy_block( void *d, const void *s )
  269. {
  270. #if defined( HAVE_UINT_32T )
  271. ((uint_32t *)d)[ 0] = ((uint_32t *)s)[ 0];
  272. ((uint_32t *)d)[ 1] = ((uint_32t *)s)[ 1];
  273. ((uint_32t *)d)[ 2] = ((uint_32t *)s)[ 2];
  274. ((uint_32t *)d)[ 3] = ((uint_32t *)s)[ 3];
  275. #else
  276. ((uint_8t *)d)[ 0] = ((uint_8t *)s)[ 0];
  277. ((uint_8t *)d)[ 1] = ((uint_8t *)s)[ 1];
  278. ((uint_8t *)d)[ 2] = ((uint_8t *)s)[ 2];
  279. ((uint_8t *)d)[ 3] = ((uint_8t *)s)[ 3];
  280. ((uint_8t *)d)[ 4] = ((uint_8t *)s)[ 4];
  281. ((uint_8t *)d)[ 5] = ((uint_8t *)s)[ 5];
  282. ((uint_8t *)d)[ 6] = ((uint_8t *)s)[ 6];
  283. ((uint_8t *)d)[ 7] = ((uint_8t *)s)[ 7];
  284. ((uint_8t *)d)[ 8] = ((uint_8t *)s)[ 8];
  285. ((uint_8t *)d)[ 9] = ((uint_8t *)s)[ 9];
  286. ((uint_8t *)d)[10] = ((uint_8t *)s)[10];
  287. ((uint_8t *)d)[11] = ((uint_8t *)s)[11];
  288. ((uint_8t *)d)[12] = ((uint_8t *)s)[12];
  289. ((uint_8t *)d)[13] = ((uint_8t *)s)[13];
  290. ((uint_8t *)d)[14] = ((uint_8t *)s)[14];
  291. ((uint_8t *)d)[15] = ((uint_8t *)s)[15];
  292. #endif
  293. }
  294. static void copy_block_nn( void *d, const void *s, uint_8t nn )
  295. {
  296. while ( nn-- ) {
  297. *((uint_8t *)d)++ = *((uint_8t *)s)++;
  298. }
  299. }
  300. #endif
  301. static void xor_block( void *d, const void *s )
  302. {
  303. #if defined( HAVE_UINT_32T )
  304. ((uint_32t *)d)[ 0] ^= ((uint_32t *)s)[ 0];
  305. ((uint_32t *)d)[ 1] ^= ((uint_32t *)s)[ 1];
  306. ((uint_32t *)d)[ 2] ^= ((uint_32t *)s)[ 2];
  307. ((uint_32t *)d)[ 3] ^= ((uint_32t *)s)[ 3];
  308. #else
  309. ((uint_8t *)d)[ 0] ^= ((uint_8t *)s)[ 0];
  310. ((uint_8t *)d)[ 1] ^= ((uint_8t *)s)[ 1];
  311. ((uint_8t *)d)[ 2] ^= ((uint_8t *)s)[ 2];
  312. ((uint_8t *)d)[ 3] ^= ((uint_8t *)s)[ 3];
  313. ((uint_8t *)d)[ 4] ^= ((uint_8t *)s)[ 4];
  314. ((uint_8t *)d)[ 5] ^= ((uint_8t *)s)[ 5];
  315. ((uint_8t *)d)[ 6] ^= ((uint_8t *)s)[ 6];
  316. ((uint_8t *)d)[ 7] ^= ((uint_8t *)s)[ 7];
  317. ((uint_8t *)d)[ 8] ^= ((uint_8t *)s)[ 8];
  318. ((uint_8t *)d)[ 9] ^= ((uint_8t *)s)[ 9];
  319. ((uint_8t *)d)[10] ^= ((uint_8t *)s)[10];
  320. ((uint_8t *)d)[11] ^= ((uint_8t *)s)[11];
  321. ((uint_8t *)d)[12] ^= ((uint_8t *)s)[12];
  322. ((uint_8t *)d)[13] ^= ((uint_8t *)s)[13];
  323. ((uint_8t *)d)[14] ^= ((uint_8t *)s)[14];
  324. ((uint_8t *)d)[15] ^= ((uint_8t *)s)[15];
  325. #endif
  326. }
  327. static void copy_and_key( void *d, const void *s, const void *k )
  328. {
  329. #if defined( HAVE_UINT_32T )
  330. ((uint_32t *)d)[ 0] = ((uint_32t *)s)[ 0] ^ ((uint_32t *)k)[ 0];
  331. ((uint_32t *)d)[ 1] = ((uint_32t *)s)[ 1] ^ ((uint_32t *)k)[ 1];
  332. ((uint_32t *)d)[ 2] = ((uint_32t *)s)[ 2] ^ ((uint_32t *)k)[ 2];
  333. ((uint_32t *)d)[ 3] = ((uint_32t *)s)[ 3] ^ ((uint_32t *)k)[ 3];
  334. #elif 1
  335. ((uint_8t *)d)[ 0] = ((uint_8t *)s)[ 0] ^ ((uint_8t *)k)[ 0];
  336. ((uint_8t *)d)[ 1] = ((uint_8t *)s)[ 1] ^ ((uint_8t *)k)[ 1];
  337. ((uint_8t *)d)[ 2] = ((uint_8t *)s)[ 2] ^ ((uint_8t *)k)[ 2];
  338. ((uint_8t *)d)[ 3] = ((uint_8t *)s)[ 3] ^ ((uint_8t *)k)[ 3];
  339. ((uint_8t *)d)[ 4] = ((uint_8t *)s)[ 4] ^ ((uint_8t *)k)[ 4];
  340. ((uint_8t *)d)[ 5] = ((uint_8t *)s)[ 5] ^ ((uint_8t *)k)[ 5];
  341. ((uint_8t *)d)[ 6] = ((uint_8t *)s)[ 6] ^ ((uint_8t *)k)[ 6];
  342. ((uint_8t *)d)[ 7] = ((uint_8t *)s)[ 7] ^ ((uint_8t *)k)[ 7];
  343. ((uint_8t *)d)[ 8] = ((uint_8t *)s)[ 8] ^ ((uint_8t *)k)[ 8];
  344. ((uint_8t *)d)[ 9] = ((uint_8t *)s)[ 9] ^ ((uint_8t *)k)[ 9];
  345. ((uint_8t *)d)[10] = ((uint_8t *)s)[10] ^ ((uint_8t *)k)[10];
  346. ((uint_8t *)d)[11] = ((uint_8t *)s)[11] ^ ((uint_8t *)k)[11];
  347. ((uint_8t *)d)[12] = ((uint_8t *)s)[12] ^ ((uint_8t *)k)[12];
  348. ((uint_8t *)d)[13] = ((uint_8t *)s)[13] ^ ((uint_8t *)k)[13];
  349. ((uint_8t *)d)[14] = ((uint_8t *)s)[14] ^ ((uint_8t *)k)[14];
  350. ((uint_8t *)d)[15] = ((uint_8t *)s)[15] ^ ((uint_8t *)k)[15];
  351. #else
  352. block_copy(d, s);
  353. xor_block(d, k);
  354. #endif
  355. }
/* AddRoundKey: XOR the round key k into the state d in place. */
static void add_round_key( uint_8t d[N_BLOCK], const uint_8t k[N_BLOCK] )
{
    xor_block(d, k);
}
/* Combined SubBytes + ShiftRows on the state, stored column-major
 * (st[4*c + r] is row r of column c).  Row 0 is substituted in place;
 * rows 1, 2 and 3 are rotated left by 1, 2 and 3 columns while each
 * byte passes through the S-box. */
static void shift_sub_rows( uint_8t st[N_BLOCK] )
{
    uint_8t tt;
    /* row 0: substitution only, no rotation */
    st[ 0] = s_box(st[ 0]); st[ 4] = s_box(st[ 4]);
    st[ 8] = s_box(st[ 8]); st[12] = s_box(st[12]);
    /* row 1: rotate left by one column */
    tt = st[1]; st[ 1] = s_box(st[ 5]); st[ 5] = s_box(st[ 9]);
    st[ 9] = s_box(st[13]); st[13] = s_box( tt );
    /* row 2: rotate by two columns (two independent swaps) */
    tt = st[2]; st[ 2] = s_box(st[10]); st[10] = s_box( tt );
    tt = st[6]; st[ 6] = s_box(st[14]); st[14] = s_box( tt );
    /* row 3: rotate left by three columns */
    tt = st[15]; st[15] = s_box(st[11]); st[11] = s_box(st[ 7]);
    st[ 7] = s_box(st[ 3]); st[ 3] = s_box( tt );
}
/* Combined InvSubBytes + InvShiftRows: undoes shift_sub_rows by
 * rotating rows 1-3 in the opposite direction while each byte passes
 * through the inverse S-box.  State layout is column-major. */
static void inv_shift_sub_rows( uint_8t st[N_BLOCK] )
{
    uint_8t tt;
    /* row 0: substitution only, no rotation */
    st[ 0] = is_box(st[ 0]); st[ 4] = is_box(st[ 4]);
    st[ 8] = is_box(st[ 8]); st[12] = is_box(st[12]);
    /* row 1: rotate right by one column */
    tt = st[13]; st[13] = is_box(st[9]); st[ 9] = is_box(st[5]);
    st[ 5] = is_box(st[1]); st[ 1] = is_box( tt );
    /* row 2: rotate by two columns (two independent swaps) */
    tt = st[2]; st[ 2] = is_box(st[10]); st[10] = is_box( tt );
    tt = st[6]; st[ 6] = is_box(st[14]); st[14] = is_box( tt );
    /* row 3: rotate right by three columns */
    tt = st[3]; st[ 3] = is_box(st[ 7]); st[ 7] = is_box(st[11]);
    st[11] = is_box(st[15]); st[15] = is_box( tt );
}
#if defined( VERSION_1 )
/* Combined SubBytes + ShiftRows + MixColumns for one encryption round.
 * gfm2_sb/gfm3_sb are S-box lookups pre-multiplied by 2 and 3 in
 * GF(2^8); the shifted source indices (0,5,10,15 etc.) fold ShiftRows
 * into the column mix.  VERSION_1 transforms the state in place via a
 * local copy. */
static void mix_sub_columns( uint_8t dt[N_BLOCK] )
{
    uint_8t st[N_BLOCK];
    block_copy(st, dt);
#else
/* Two-array variant: reads st, writes the transformed round into dt. */
static void mix_sub_columns( uint_8t dt[N_BLOCK], uint_8t st[N_BLOCK] )
{
#endif
    dt[ 0] = gfm2_sb(st[0]) ^ gfm3_sb(st[5]) ^ s_box(st[10]) ^ s_box(st[15]);
    dt[ 1] = s_box(st[0]) ^ gfm2_sb(st[5]) ^ gfm3_sb(st[10]) ^ s_box(st[15]);
    dt[ 2] = s_box(st[0]) ^ s_box(st[5]) ^ gfm2_sb(st[10]) ^ gfm3_sb(st[15]);
    dt[ 3] = gfm3_sb(st[0]) ^ s_box(st[5]) ^ s_box(st[10]) ^ gfm2_sb(st[15]);
    dt[ 4] = gfm2_sb(st[4]) ^ gfm3_sb(st[9]) ^ s_box(st[14]) ^ s_box(st[3]);
    dt[ 5] = s_box(st[4]) ^ gfm2_sb(st[9]) ^ gfm3_sb(st[14]) ^ s_box(st[3]);
    dt[ 6] = s_box(st[4]) ^ s_box(st[9]) ^ gfm2_sb(st[14]) ^ gfm3_sb(st[3]);
    dt[ 7] = gfm3_sb(st[4]) ^ s_box(st[9]) ^ s_box(st[14]) ^ gfm2_sb(st[3]);
    dt[ 8] = gfm2_sb(st[8]) ^ gfm3_sb(st[13]) ^ s_box(st[2]) ^ s_box(st[7]);
    dt[ 9] = s_box(st[8]) ^ gfm2_sb(st[13]) ^ gfm3_sb(st[2]) ^ s_box(st[7]);
    dt[10] = s_box(st[8]) ^ s_box(st[13]) ^ gfm2_sb(st[2]) ^ gfm3_sb(st[7]);
    dt[11] = gfm3_sb(st[8]) ^ s_box(st[13]) ^ s_box(st[2]) ^ gfm2_sb(st[7]);
    dt[12] = gfm2_sb(st[12]) ^ gfm3_sb(st[1]) ^ s_box(st[6]) ^ s_box(st[11]);
    dt[13] = s_box(st[12]) ^ gfm2_sb(st[1]) ^ gfm3_sb(st[6]) ^ s_box(st[11]);
    dt[14] = s_box(st[12]) ^ s_box(st[1]) ^ gfm2_sb(st[6]) ^ gfm3_sb(st[11]);
    dt[15] = gfm3_sb(st[12]) ^ s_box(st[1]) ^ s_box(st[6]) ^ gfm2_sb(st[11]);
}
#if defined( VERSION_1 )
/* Combined InvMixColumns + InvSubBytes for one decryption round, with
 * InvShiftRows folded into the scattered destination indices.
 * gfm_9/gfm_b/gfm_d/gfm_e are GF(2^8) multiplications by 0x09, 0x0b,
 * 0x0d and 0x0e.  VERSION_1 transforms the state in place via a local
 * copy. */
static void inv_mix_sub_columns( uint_8t dt[N_BLOCK] )
{
    uint_8t st[N_BLOCK];
    block_copy(st, dt);
#else
/* Two-array variant: reads st, writes the transformed round into dt. */
static void inv_mix_sub_columns( uint_8t dt[N_BLOCK], uint_8t st[N_BLOCK] )
{
#endif
    dt[ 0] = is_box(gfm_e(st[ 0]) ^ gfm_b(st[ 1]) ^ gfm_d(st[ 2]) ^ gfm_9(st[ 3]));
    dt[ 5] = is_box(gfm_9(st[ 0]) ^ gfm_e(st[ 1]) ^ gfm_b(st[ 2]) ^ gfm_d(st[ 3]));
    dt[10] = is_box(gfm_d(st[ 0]) ^ gfm_9(st[ 1]) ^ gfm_e(st[ 2]) ^ gfm_b(st[ 3]));
    dt[15] = is_box(gfm_b(st[ 0]) ^ gfm_d(st[ 1]) ^ gfm_9(st[ 2]) ^ gfm_e(st[ 3]));
    dt[ 4] = is_box(gfm_e(st[ 4]) ^ gfm_b(st[ 5]) ^ gfm_d(st[ 6]) ^ gfm_9(st[ 7]));
    dt[ 9] = is_box(gfm_9(st[ 4]) ^ gfm_e(st[ 5]) ^ gfm_b(st[ 6]) ^ gfm_d(st[ 7]));
    dt[14] = is_box(gfm_d(st[ 4]) ^ gfm_9(st[ 5]) ^ gfm_e(st[ 6]) ^ gfm_b(st[ 7]));
    dt[ 3] = is_box(gfm_b(st[ 4]) ^ gfm_d(st[ 5]) ^ gfm_9(st[ 6]) ^ gfm_e(st[ 7]));
    dt[ 8] = is_box(gfm_e(st[ 8]) ^ gfm_b(st[ 9]) ^ gfm_d(st[10]) ^ gfm_9(st[11]));
    dt[13] = is_box(gfm_9(st[ 8]) ^ gfm_e(st[ 9]) ^ gfm_b(st[10]) ^ gfm_d(st[11]));
    dt[ 2] = is_box(gfm_d(st[ 8]) ^ gfm_9(st[ 9]) ^ gfm_e(st[10]) ^ gfm_b(st[11]));
    dt[ 7] = is_box(gfm_b(st[ 8]) ^ gfm_d(st[ 9]) ^ gfm_9(st[10]) ^ gfm_e(st[11]));
    dt[12] = is_box(gfm_e(st[12]) ^ gfm_b(st[13]) ^ gfm_d(st[14]) ^ gfm_9(st[15]));
    dt[ 1] = is_box(gfm_9(st[12]) ^ gfm_e(st[13]) ^ gfm_b(st[14]) ^ gfm_d(st[15]));
    dt[ 6] = is_box(gfm_d(st[12]) ^ gfm_9(st[13]) ^ gfm_e(st[14]) ^ gfm_b(st[15]));
    dt[11] = is_box(gfm_b(st[12]) ^ gfm_d(st[13]) ^ gfm_9(st[14]) ^ gfm_e(st[15]));
}
  436. #if defined( AES_ENC_PREKEYED ) || defined( AES_DEC_PREKEYED )
  437. /* Set the cipher key for the pre-keyed version */
  438. /* NOTE: If the length_type used for the key length is an
  439. unsigned 8-bit character, a key length of 256 bits must
  440. be entered as a length in bytes (valid inputs are hence
  441. 128, 192, 16, 24 and 32).
  442. */
/* Expand 'key' (16, 24 or 32 bytes; 128 or 192 also accepted as bit
 * counts) into the round-key schedule ctx->ksch and set ctx->rnd to
 * the round count (10/12/14).  Returns 0 on success or (return_type)-1
 * (with ctx->rnd = 0) for an invalid length. */
return_type aes_set_key( const unsigned char key[], length_type keylen, aes_context ctx[1] )
{
    uint_8t cc, rc, hi;
    /* normalise the key length to bytes */
    switch ( keylen ) {
    case 16:
    case 128: /* length in bits (128 = 8*16) */
        keylen = 16;
        break;
    case 24:
    case 192: /* length in bits (192 = 8*24) */
        keylen = 24;
        break;
    case 32:
        /* case 256: length in bits (256 = 8*32) */
        keylen = 32;
        break;
    default:
        ctx->rnd = 0;
        return (return_type) - 1;
    }
    block_copy_nn(ctx->ksch, key, keylen);
    hi = (keylen + 28) << 2;  /* total schedule size in bytes: 16*(Nr+1) */
    ctx->rnd = (hi >> 4) - 1; /* number of rounds Nr */
    for ( cc = keylen, rc = 1; cc < hi; cc += 4 ) {
        uint_8t tt, t0, t1, t2, t3;
        /* previous schedule word */
        t0 = ctx->ksch[cc - 4];
        t1 = ctx->ksch[cc - 3];
        t2 = ctx->ksch[cc - 2];
        t3 = ctx->ksch[cc - 1];
        if ( cc % keylen == 0 ) { /* start of a key-length group: RotWord + SubWord + Rcon */
            tt = t0;
            t0 = s_box(t1) ^ rc;
            t1 = s_box(t2);
            t2 = s_box(t3);
            t3 = s_box(tt);
            rc = f2(rc); /* next round constant: multiply by x in GF(2^8) */
        } else if ( keylen > 24 && cc % keylen == 16 ) { /* extra SubWord for 256-bit keys */
            t0 = s_box(t0);
            t1 = s_box(t1);
            t2 = s_box(t2);
            t3 = s_box(t3);
        }
        /* new word = word one key-length back XOR transformed word */
        tt = cc - keylen;
        ctx->ksch[cc + 0] = ctx->ksch[tt + 0] ^ t0;
        ctx->ksch[cc + 1] = ctx->ksch[tt + 1] ^ t1;
        ctx->ksch[cc + 2] = ctx->ksch[tt + 2] ^ t2;
        ctx->ksch[cc + 3] = ctx->ksch[tt + 3] ^ t3;
    }
    return 0;
}
  493. #endif
  494. #if defined( AES_ENC_PREKEYED )
  495. /* Encrypt a single block of 16 bytes */
/* @brief renamed with the bluedroid_ prefix to avoid a symbol conflict with libcrypto */
/* Encrypt one 16-byte block with a previously expanded key.  Returns 0
 * on success or (return_type)-1 if aes_set_key has not been run on ctx
 * (ctx->rnd == 0).  in and out may point to the same buffer. */
return_type bluedroid_aes_encrypt( const unsigned char in[N_BLOCK], unsigned char out[N_BLOCK], const aes_context ctx[1] )
{
    if ( ctx->rnd ) {
        uint_8t s1[N_BLOCK], r;
        copy_and_key( s1, in, ctx->ksch ); /* initial AddRoundKey */
        for ( r = 1 ; r < ctx->rnd ; ++r )
#if defined( VERSION_1 )
        {
            mix_sub_columns( s1 );
            add_round_key( s1, ctx->ksch + r * N_BLOCK);
        }
#else
        {
            uint_8t s2[N_BLOCK];
            mix_sub_columns( s2, s1 );
            copy_and_key( s1, s2, ctx->ksch + r * N_BLOCK);
        }
#endif
        /* final round omits MixColumns */
        shift_sub_rows( s1 );
        copy_and_key( out, s1, ctx->ksch + r * N_BLOCK );
    } else {
        return (return_type) - 1;
    }
    return 0;
}
  522. /* CBC encrypt a number of blocks (input and return an IV) */
  523. return_type aes_cbc_encrypt( const unsigned char *in, unsigned char *out,
  524. int n_block, unsigned char iv[N_BLOCK], const aes_context ctx[1] )
  525. {
  526. while (n_block--) {
  527. xor_block(iv, in);
  528. if (bluedroid_aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS) {
  529. return EXIT_FAILURE;
  530. }
  531. memcpy(out, iv, N_BLOCK);
  532. in += N_BLOCK;
  533. out += N_BLOCK;
  534. }
  535. return EXIT_SUCCESS;
  536. }
  537. #endif
  538. #if defined( AES_DEC_PREKEYED )
  539. /* Decrypt a single block of 16 bytes */
/* Decrypt one 16-byte block with a previously expanded key.  Returns 0
 * on success or (return_type)-1 if aes_set_key has not been run on ctx
 * (ctx->rnd == 0).  in and out may point to the same buffer. */
return_type bluedroid_aes_decrypt( const unsigned char in[N_BLOCK], unsigned char out[N_BLOCK], const aes_context ctx[1] )
{
    if ( ctx->rnd ) {
        uint_8t s1[N_BLOCK], r;
        copy_and_key( s1, in, ctx->ksch + ctx->rnd * N_BLOCK ); /* undo final AddRoundKey */
        inv_shift_sub_rows( s1 );
        for ( r = ctx->rnd ; --r ; )
#if defined( VERSION_1 )
        {
            add_round_key( s1, ctx->ksch + r * N_BLOCK );
            inv_mix_sub_columns( s1 );
        }
#else
        {
            uint_8t s2[N_BLOCK];
            copy_and_key( s2, s1, ctx->ksch + r * N_BLOCK );
            inv_mix_sub_columns( s1, s2 );
        }
#endif
        copy_and_key( out, s1, ctx->ksch ); /* undo initial AddRoundKey */
    } else {
        return (return_type) - 1;
    }
    return 0;
}
  565. /* CBC decrypt a number of blocks (input and return an IV) */
  566. return_type aes_cbc_decrypt( const unsigned char *in, unsigned char *out,
  567. int n_block, unsigned char iv[N_BLOCK], const aes_context ctx[1] )
  568. {
  569. while (n_block--) {
  570. uint_8t tmp[N_BLOCK];
  571. memcpy(tmp, in, N_BLOCK);
  572. if (bluedroid_aes_decrypt(in, out, ctx) != EXIT_SUCCESS) {
  573. return EXIT_FAILURE;
  574. }
  575. xor_block(out, iv);
  576. memcpy(iv, tmp, N_BLOCK);
  577. in += N_BLOCK;
  578. out += N_BLOCK;
  579. }
  580. return EXIT_SUCCESS;
  581. }
  582. #endif
  583. #if defined( AES_ENC_128_OTFK )
  584. /* The 'on the fly' encryption key update for for 128 bit keys */
  585. static void update_encrypt_key_128( uint_8t k[N_BLOCK], uint_8t *rc )
  586. {
  587. uint_8t cc;
  588. k[0] ^= s_box(k[13]) ^ *rc;
  589. k[1] ^= s_box(k[14]);
  590. k[2] ^= s_box(k[15]);
  591. k[3] ^= s_box(k[12]);
  592. *rc = f2( *rc );
  593. for (cc = 4; cc < 16; cc += 4 ) {
  594. k[cc + 0] ^= k[cc - 4];
  595. k[cc + 1] ^= k[cc - 3];
  596. k[cc + 2] ^= k[cc - 2];
  597. k[cc + 3] ^= k[cc - 1];
  598. }
  599. }
  600. /* Encrypt a single block of 16 bytes with 'on the fly' 128 bit keying */
/* Encrypt one 16-byte block with 'on the fly' 128-bit keying: round
 * keys are derived in o_key as encryption proceeds, so no pre-expanded
 * schedule is needed.  key is first copied to o_key (unless they
 * alias); o_key is updated in place and ends up holding the last round
 * key, which bluedroid_aes_decrypt_128 expects as its input key. */
void bluedroid_aes_encrypt_128( const unsigned char in[N_BLOCK], unsigned char out[N_BLOCK],
                                const unsigned char key[N_BLOCK], unsigned char o_key[N_BLOCK] )
{
    uint_8t s1[N_BLOCK], r, rc = 1;
    if (o_key != key) {
        block_copy( o_key, key );
    }
    copy_and_key( s1, in, o_key ); /* initial AddRoundKey */
    for ( r = 1 ; r < 10 ; ++r )
#if defined( VERSION_1 )
    {
        mix_sub_columns( s1 );
        update_encrypt_key_128( o_key, &rc );
        add_round_key( s1, o_key );
    }
#else
    {
        uint_8t s2[N_BLOCK];
        mix_sub_columns( s2, s1 );
        update_encrypt_key_128( o_key, &rc );
        copy_and_key( s1, s2, o_key );
    }
#endif
    /* final round omits MixColumns */
    shift_sub_rows( s1 );
    update_encrypt_key_128( o_key, &rc );
    copy_and_key( out, s1, o_key );
}
  628. #endif
  629. #if defined( AES_DEC_128_OTFK )
  630. /* The 'on the fly' decryption key update for for 128 bit keys */
  631. static void update_decrypt_key_128( uint_8t k[N_BLOCK], uint_8t *rc )
  632. {
  633. uint_8t cc;
  634. for ( cc = 12; cc > 0; cc -= 4 ) {
  635. k[cc + 0] ^= k[cc - 4];
  636. k[cc + 1] ^= k[cc - 3];
  637. k[cc + 2] ^= k[cc - 2];
  638. k[cc + 3] ^= k[cc - 1];
  639. }
  640. *rc = d2(*rc);
  641. k[0] ^= s_box(k[13]) ^ *rc;
  642. k[1] ^= s_box(k[14]);
  643. k[2] ^= s_box(k[15]);
  644. k[3] ^= s_box(k[12]);
  645. }
/* Decrypt a single block of 16 bytes with 'on the fly' 128 bit keying.
 *
 * in:    16-byte ciphertext block.
 * out:   16-byte plaintext block.
 * key:   16-byte key to unwind from.  NOTE(review): the schedule is stepped
 *        *backwards* (update_decrypt_key_128; rc starts at 0x6c and shrinks
 *        via d2), so this appears to be the final round key of the forward
 *        schedule rather than the raw cipher key — confirm against callers.
 * o_key: working key buffer, evolved in place; may equal 'key'.
 */
void bluedroid_aes_decrypt_128( const unsigned char in[N_BLOCK], unsigned char out[N_BLOCK],
                                const unsigned char key[N_BLOCK], unsigned char o_key[N_BLOCK] )
{
    uint_8t s1[N_BLOCK], r, rc = 0x6c;

    if (o_key != key) {
        block_copy( o_key, key );
    }
    copy_and_key( s1, in, o_key );            /* undo the final AddRoundKey */
    inv_shift_sub_rows( s1 );

    /* r counts 9 down to 1: inverse rounds, deriving keys backwards.
       The two bodies differ only in in-place vs. two-buffer transform. */
    for ( r = 10 ; --r ; )
#if defined( VERSION_1 )
    {
        update_decrypt_key_128( o_key, &rc );
        add_round_key( s1, o_key );
        inv_mix_sub_columns( s1 );
    }
#else
    {
        uint_8t s2[N_BLOCK];
        update_decrypt_key_128( o_key, &rc );
        copy_and_key( s2, s1, o_key );
        inv_mix_sub_columns( s1, s2 );
    }
#endif
    /* Round 0: one final backwards key step, then the last AddRoundKey. */
    update_decrypt_key_128( o_key, &rc );
    copy_and_key( out, s1, o_key );
}
  674. #endif
  675. #if defined( AES_ENC_256_OTFK )
  676. /* The 'on the fly' encryption key update for for 256 bit keys */
  677. static void update_encrypt_key_256( uint_8t k[2 * N_BLOCK], uint_8t *rc )
  678. {
  679. uint_8t cc;
  680. k[0] ^= s_box(k[29]) ^ *rc;
  681. k[1] ^= s_box(k[30]);
  682. k[2] ^= s_box(k[31]);
  683. k[3] ^= s_box(k[28]);
  684. *rc = f2( *rc );
  685. for (cc = 4; cc < 16; cc += 4) {
  686. k[cc + 0] ^= k[cc - 4];
  687. k[cc + 1] ^= k[cc - 3];
  688. k[cc + 2] ^= k[cc - 2];
  689. k[cc + 3] ^= k[cc - 1];
  690. }
  691. k[16] ^= s_box(k[12]);
  692. k[17] ^= s_box(k[13]);
  693. k[18] ^= s_box(k[14]);
  694. k[19] ^= s_box(k[15]);
  695. for ( cc = 20; cc < 32; cc += 4 ) {
  696. k[cc + 0] ^= k[cc - 4];
  697. k[cc + 1] ^= k[cc - 3];
  698. k[cc + 2] ^= k[cc - 2];
  699. k[cc + 3] ^= k[cc - 1];
  700. }
  701. }
/* Encrypt a single block of 16 bytes with 'on the fly' 256 bit keying.
 *
 * in:    16-byte plaintext block.
 * out:   16-byte ciphertext block.
 * key:   32-byte AES-256 key (left untouched when o_key is a distinct buffer).
 * o_key: 32-byte working key buffer, evolved in place; may equal 'key'.
 *
 * AES-256 runs 14 rounds; a fresh pair of round keys is derived only every
 * second round, with odd rounds using the upper half of the 32-byte block.
 */
void bluedroid_aes_encrypt_256( const unsigned char in[N_BLOCK], unsigned char out[N_BLOCK],
                                const unsigned char key[2 * N_BLOCK], unsigned char o_key[2 * N_BLOCK] )
{
    /* rc: round-constant byte, starts at 0x01, stepped by f2() per key update. */
    uint_8t s1[N_BLOCK], r, rc = 1;

    if (o_key != key) {
        block_copy( o_key, key );
        block_copy( o_key + 16, key + 16 );
    }
    copy_and_key( s1, in, o_key );            /* round 0: initial AddRoundKey */

    /* Rounds 1..13; bodies differ only in in-place vs. two-buffer transform. */
    for ( r = 1 ; r < 14 ; ++r )
#if defined( VERSION_1 )
    {
        mix_sub_columns(s1);
        if ( r & 1 ) {
            add_round_key( s1, o_key + 16 );      /* odd round: upper-half key */
        } else {
            update_encrypt_key_256( o_key, &rc ); /* even round: derive next pair */
            add_round_key( s1, o_key );
        }
    }
#else
    {
        uint_8t s2[N_BLOCK];
        mix_sub_columns( s2, s1 );
        if ( r & 1 ) {
            copy_and_key( s1, s2, o_key + 16 );   /* odd round: upper-half key */
        } else {
            update_encrypt_key_256( o_key, &rc ); /* even round: derive next pair */
            copy_and_key( s1, s2, o_key );
        }
    }
#endif
    /* Final round omits the column-mixing step. */
    shift_sub_rows( s1 );
    update_encrypt_key_256( o_key, &rc );
    copy_and_key( out, s1, o_key );
}
  739. #endif
  740. #if defined( AES_DEC_256_OTFK )
  741. /* The 'on the fly' encryption key update for for 256 bit keys */
  742. static void update_decrypt_key_256( uint_8t k[2 * N_BLOCK], uint_8t *rc )
  743. {
  744. uint_8t cc;
  745. for (cc = 28; cc > 16; cc -= 4) {
  746. k[cc + 0] ^= k[cc - 4];
  747. k[cc + 1] ^= k[cc - 3];
  748. k[cc + 2] ^= k[cc - 2];
  749. k[cc + 3] ^= k[cc - 1];
  750. }
  751. k[16] ^= s_box(k[12]);
  752. k[17] ^= s_box(k[13]);
  753. k[18] ^= s_box(k[14]);
  754. k[19] ^= s_box(k[15]);
  755. for (cc = 12; cc > 0; cc -= 4) {
  756. k[cc + 0] ^= k[cc - 4];
  757. k[cc + 1] ^= k[cc - 3];
  758. k[cc + 2] ^= k[cc - 2];
  759. k[cc + 3] ^= k[cc - 1];
  760. }
  761. *rc = d2(*rc);
  762. k[0] ^= s_box(k[29]) ^ *rc;
  763. k[1] ^= s_box(k[30]);
  764. k[2] ^= s_box(k[31]);
  765. k[3] ^= s_box(k[28]);
  766. }
/* Decrypt a single block of 16 bytes with 'on the fly' 256 bit keying.
 *
 * in:    16-byte ciphertext block.
 * out:   16-byte plaintext block.
 * key:   32-byte key to unwind from.  NOTE(review): the schedule is stepped
 *        *backwards* (update_decrypt_key_256; rc starts at 0x80 and shrinks
 *        via d2), so this appears to be the tail of the forward key schedule
 *        rather than the raw cipher key — confirm against callers.
 * o_key: 32-byte working key buffer, evolved in place; may equal 'key'.
 */
void bluedroid_aes_decrypt_256( const unsigned char in[N_BLOCK], unsigned char out[N_BLOCK],
                                const unsigned char key[2 * N_BLOCK], unsigned char o_key[2 * N_BLOCK] )
{
    uint_8t s1[N_BLOCK], r, rc = 0x80;

    if (o_key != key) {
        block_copy( o_key, key );
        block_copy( o_key + 16, key + 16 );
    }
    copy_and_key( s1, in, o_key );            /* undo the final AddRoundKey */
    inv_shift_sub_rows( s1 );

    /* r counts 13 down to 1: odd rounds use the upper-half key and trigger a
       backwards key-schedule step; even rounds reuse the lower half.  Bodies
       differ only in in-place vs. two-buffer transform. */
    for ( r = 14 ; --r ; )
#if defined( VERSION_1 )
    {
        if ( ( r & 1 ) ) {
            update_decrypt_key_256( o_key, &rc );
            add_round_key( s1, o_key + 16 );
        } else {
            add_round_key( s1, o_key );
        }
        inv_mix_sub_columns( s1 );
    }
#else
    {
        uint_8t s2[N_BLOCK];
        if ( ( r & 1 ) ) {
            update_decrypt_key_256( o_key, &rc );
            copy_and_key( s2, s1, o_key + 16 );
        } else {
            copy_and_key( s2, s1, o_key );
        }
        inv_mix_sub_columns( s1, s2 );
    }
#endif
    /* Round 0: last AddRoundKey with the (already unwound) lower-half key. */
    copy_and_key( out, s1, o_key );
}
  805. #endif