utils.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803
  1. #ifndef __STDC_WANT_LIB_EXT1__
  2. # define __STDC_WANT_LIB_EXT1__ 1
  3. #endif
  4. #include <assert.h>
  5. #include <errno.h>
  6. #include <limits.h>
  7. #include <stddef.h>
  8. #include <stdint.h>
  9. #include <stdlib.h>
  10. #include <string.h>
  11. #ifndef __wasm__
  12. # include <signal.h>
  13. #endif
  14. #ifdef HAVE_SYS_MMAN_H
  15. # include <sys/mman.h>
  16. #endif
  17. #ifdef _WIN32
  18. # include <windows.h>
  19. # include <wincrypt.h>
  20. #else
  21. # include <unistd.h>
  22. #endif
  23. #ifndef HAVE_C_VARARRAYS
  24. # ifdef HAVE_ALLOCA_H
  25. # include <alloca.h>
  26. # elif !defined(alloca)
  27. # if defined(__clang__) || defined(__GNUC__)
  28. # define alloca __builtin_alloca
  29. # elif defined _AIX
  30. # define alloca __alloca
  31. # elif defined _MSC_VER
  32. # include <malloc.h>
  33. # define alloca _alloca
  34. # else
  35. # include <stddef.h>
  36. # ifdef __cplusplus
  37. extern "C"
  38. # endif
  39. void *alloca (size_t);
  40. # endif
  41. # endif
  42. #endif
  43. #include "core.h"
  44. #include "crypto_generichash.h"
  45. #include "crypto_stream.h"
  46. #include "randombytes.h"
  47. #include "private/common.h"
  48. #include "utils.h"
  49. #ifndef ENOSYS
  50. # define ENOSYS ENXIO
  51. #endif
  52. #if defined(_WIN32) && \
  53. (!defined(WINAPI_FAMILY) || WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP)
  54. # define WINAPI_DESKTOP
  55. #endif
  56. #define CANARY_SIZE 16U
  57. #define GARBAGE_VALUE 0xdb
  58. #ifndef MAP_NOCORE
  59. # ifdef MAP_CONCEAL
  60. # define MAP_NOCORE MAP_CONCEAL
  61. # else
  62. # define MAP_NOCORE 0
  63. # endif
  64. #endif
  65. #if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
  66. # define MAP_ANON MAP_ANONYMOUS
  67. #endif
  68. #if defined(WINAPI_DESKTOP) || (defined(MAP_ANON) && defined(HAVE_MMAP)) || \
  69. defined(HAVE_POSIX_MEMALIGN)
  70. # define HAVE_ALIGNED_MALLOC
  71. #endif
  72. #if defined(HAVE_MPROTECT) && \
  73. !(defined(PROT_NONE) && defined(PROT_READ) && defined(PROT_WRITE))
  74. # undef HAVE_MPROTECT
  75. #endif
  76. #if defined(HAVE_ALIGNED_MALLOC) && \
  77. (defined(WINAPI_DESKTOP) || defined(HAVE_MPROTECT))
  78. # define HAVE_PAGE_PROTECTION
  79. #endif
  80. #if !defined(MADV_DODUMP) && defined(MADV_CORE)
  81. # define MADV_DODUMP MADV_CORE
  82. # define MADV_DONTDUMP MADV_NOCORE
  83. #endif
  84. #ifndef DEFAULT_PAGE_SIZE
  85. # ifdef PAGE_SIZE
  86. # define DEFAULT_PAGE_SIZE PAGE_SIZE
  87. # else
  88. # define DEFAULT_PAGE_SIZE 0x10000
  89. # endif
  90. #endif
/* system page size used to align guarded allocations; starts at a safe
 * default and is refined at runtime by _sodium_alloc_init() */
static size_t page_size = DEFAULT_PAGE_SIZE;
/* random canary copied in front of user data by _sodium_malloc();
 * filled with random bytes by _sodium_alloc_init() */
static unsigned char canary[CANARY_SIZE];
/* LCOV_EXCL_START */
#ifdef HAVE_WEAK_SYMBOLS
/*
 * Weak, empty function used as an opaque call target: calling it after
 * memset() in sodium_memzero() prevents link-time optimization from
 * proving the memset() dead and removing it.
 */
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_memzero_lto(void *const pnt,
                                            const size_t len);
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_memzero_lto(void *const pnt,
                                            const size_t len)
{
    (void) pnt; /* LCOV_EXCL_LINE */
    (void) len; /* LCOV_EXCL_LINE */
}
#endif
/* LCOV_EXCL_STOP */
  107. void
  108. sodium_memzero(void * const pnt, const size_t len)
  109. {
  110. #ifdef _WIN32
  111. SecureZeroMemory(pnt, len);
  112. #elif defined(HAVE_MEMSET_S)
  113. if (len > 0U && memset_s(pnt, (rsize_t) len, 0, (rsize_t) len) != 0) {
  114. sodium_misuse(); /* LCOV_EXCL_LINE */
  115. }
  116. #elif defined(HAVE_EXPLICIT_BZERO)
  117. explicit_bzero(pnt, len);
  118. #elif defined(HAVE_EXPLICIT_MEMSET)
  119. explicit_memset(pnt, 0, len);
  120. #elif HAVE_WEAK_SYMBOLS
  121. if (len > 0U) {
  122. memset(pnt, 0, len);
  123. _sodium_dummy_symbol_to_prevent_memzero_lto(pnt, len);
  124. }
  125. # ifdef HAVE_INLINE_ASM
  126. __asm__ __volatile__ ("" : : "r"(pnt) : "memory");
  127. # endif
  128. #else
  129. volatile unsigned char *volatile pnt_ =
  130. (volatile unsigned char *volatile) pnt;
  131. size_t i = (size_t) 0U;
  132. while (i < len) {
  133. pnt_[i++] = 0U;
  134. }
  135. #endif
  136. }
  137. void
  138. sodium_stackzero(const size_t len)
  139. {
  140. #ifdef HAVE_C_VARARRAYS
  141. unsigned char fodder[len];
  142. sodium_memzero(fodder, len);
  143. #elif HAVE_ALLOCA
  144. sodium_memzero(alloca(len), len);
  145. #endif
  146. }
#ifdef HAVE_WEAK_SYMBOLS
/*
 * Weak, empty function: calling it before the comparison loop in
 * sodium_memcmp() stops link-time optimization from analyzing (and
 * potentially short-circuiting) the constant-time loop.
 */
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_memcmp_lto(const unsigned char *b1,
                                           const unsigned char *b2,
                                           const size_t len);
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_memcmp_lto(const unsigned char *b1,
                                           const unsigned char *b2,
                                           const size_t len)
{
    (void) b1;
    (void) b2;
    (void) len;
}
#endif
  162. int
  163. sodium_memcmp(const void *const b1_, const void *const b2_, size_t len)
  164. {
  165. #ifdef HAVE_WEAK_SYMBOLS
  166. const unsigned char *b1 = (const unsigned char *) b1_;
  167. const unsigned char *b2 = (const unsigned char *) b2_;
  168. #else
  169. const volatile unsigned char *volatile b1 =
  170. (const volatile unsigned char *volatile) b1_;
  171. const volatile unsigned char *volatile b2 =
  172. (const volatile unsigned char *volatile) b2_;
  173. #endif
  174. size_t i;
  175. volatile unsigned char d = 0U;
  176. #if HAVE_WEAK_SYMBOLS
  177. _sodium_dummy_symbol_to_prevent_memcmp_lto(b1, b2, len);
  178. #endif
  179. for (i = 0U; i < len; i++) {
  180. d |= b1[i] ^ b2[i];
  181. }
  182. return (1 & ((d - 1) >> 8)) - 1;
  183. }
#ifdef HAVE_WEAK_SYMBOLS
/*
 * Weak, empty function: calling it before the comparison loop in
 * sodium_compare() stops link-time optimization from analyzing (and
 * potentially short-circuiting) the constant-time loop.
 */
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_compare_lto(const unsigned char *b1,
                                            const unsigned char *b2,
                                            const size_t len);
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_compare_lto(const unsigned char *b1,
                                            const unsigned char *b2,
                                            const size_t len)
{
    (void) b1;
    (void) b2;
    (void) len;
}
#endif
  199. int
  200. sodium_compare(const unsigned char *b1_, const unsigned char *b2_, size_t len)
  201. {
  202. #ifdef HAVE_WEAK_SYMBOLS
  203. const unsigned char *b1 = b1_;
  204. const unsigned char *b2 = b2_;
  205. #else
  206. const volatile unsigned char *volatile b1 =
  207. (const volatile unsigned char *volatile) b1_;
  208. const volatile unsigned char *volatile b2 =
  209. (const volatile unsigned char *volatile) b2_;
  210. #endif
  211. size_t i;
  212. volatile unsigned char gt = 0U;
  213. volatile unsigned char eq = 1U;
  214. uint16_t x1, x2;
  215. #if HAVE_WEAK_SYMBOLS
  216. _sodium_dummy_symbol_to_prevent_compare_lto(b1, b2, len);
  217. #endif
  218. i = len;
  219. while (i != 0U) {
  220. i--;
  221. x1 = b1[i];
  222. x2 = b2[i];
  223. gt |= ((x2 - x1) >> 8) & eq;
  224. eq &= ((x2 ^ x1) - 1) >> 8;
  225. }
  226. return (int) (gt + gt + eq) - 1;
  227. }
  228. int
  229. sodium_is_zero(const unsigned char *n, const size_t nlen)
  230. {
  231. size_t i;
  232. volatile unsigned char d = 0U;
  233. for (i = 0U; i < nlen; i++) {
  234. d |= n[i];
  235. }
  236. return 1 & ((d - 1) >> 8);
  237. }
/*
 * Increment n[0..nlen), interpreted as a little-endian unsigned
 * integer, by 1 in place (wrapping to zero on overflow).  The scalar
 * path runs in time that depends only on nlen, making it suitable for
 * nonce/counter updates.
 */
void
sodium_increment(unsigned char *n, const size_t nlen)
{
    size_t        i = 0U;
    uint_fast16_t c = 1U; /* incoming carry: the +1 being added */

#ifdef HAVE_AMD64_ASM
    /* fixed-size fast paths for common nonce/counter lengths */
    uint64_t t64, t64_2;
    uint32_t t32;

    if (nlen == 12U) {
        __asm__ __volatile__(
            "xorq %[t64], %[t64] \n"
            "xorl %[t32], %[t32] \n"
            "stc \n" /* set carry so the adc chain adds exactly 1 */
            "adcq %[t64], (%[out]) \n"
            "adcl %[t32], 8(%[out]) \n"
            : [t64] "=&r"(t64), [t32] "=&r"(t32)
            : [out] "D"(n)
            : "memory", "flags", "cc");
        return;
    } else if (nlen == 24U) {
        __asm__ __volatile__(
            "movq $1, %[t64] \n"
            "xorq %[t64_2], %[t64_2] \n"
            "addq %[t64], (%[out]) \n"
            "adcq %[t64_2], 8(%[out]) \n"
            "adcq %[t64_2], 16(%[out]) \n"
            : [t64] "=&r"(t64), [t64_2] "=&r"(t64_2)
            : [out] "D"(n)
            : "memory", "flags", "cc");
        return;
    } else if (nlen == 8U) {
        __asm__ __volatile__("incq (%[out]) \n"
                             :
                             : [out] "D"(n)
                             : "memory", "flags", "cc");
        return;
    }
#endif
    /* generic path: ripple the carry through each byte */
    for (; i < nlen; i++) {
        c += (uint_fast16_t) n[i];
        n[i] = (unsigned char) c;
        c >>= 8;
    }
}
/*
 * a += b over `len`-byte little-endian unsigned integers, in place,
 * wrapping on overflow.  The scalar path runs in time that depends
 * only on `len`.
 */
void
sodium_add(unsigned char *a, const unsigned char *b, const size_t len)
{
    size_t        i;
    uint_fast16_t c = 0U; /* inter-byte carry */

#ifdef HAVE_AMD64_ASM
    /* fixed-size fast paths for common nonce/counter lengths */
    uint64_t t64, t64_2, t64_3;
    uint32_t t32;

    if (len == 12U) {
        __asm__ __volatile__(
            "movq (%[in]), %[t64] \n"
            "movl 8(%[in]), %[t32] \n"
            "addq %[t64], (%[out]) \n"
            "adcl %[t32], 8(%[out]) \n"
            : [t64] "=&r"(t64), [t32] "=&r"(t32)
            : [in] "S"(b), [out] "D"(a)
            : "memory", "flags", "cc");
        return;
    } else if (len == 24U) {
        __asm__ __volatile__(
            "movq (%[in]), %[t64] \n"
            "movq 8(%[in]), %[t64_2] \n"
            "movq 16(%[in]), %[t64_3] \n"
            "addq %[t64], (%[out]) \n"
            "adcq %[t64_2], 8(%[out]) \n"
            "adcq %[t64_3], 16(%[out]) \n"
            : [t64] "=&r"(t64), [t64_2] "=&r"(t64_2), [t64_3] "=&r"(t64_3)
            : [in] "S"(b), [out] "D"(a)
            : "memory", "flags", "cc");
        return;
    } else if (len == 8U) {
        __asm__ __volatile__(
            "movq (%[in]), %[t64] \n"
            "addq %[t64], (%[out]) \n"
            : [t64] "=&r"(t64)
            : [in] "S"(b), [out] "D"(a)
            : "memory", "flags", "cc");
        return;
    }
#endif
    /* generic path: byte-wise addition with carry propagation */
    for (i = 0U; i < len; i++) {
        c += (uint_fast16_t) a[i] + (uint_fast16_t) b[i];
        a[i] = (unsigned char) c;
        c >>= 8;
    }
}
/*
 * a -= b over `len`-byte little-endian unsigned integers, in place,
 * wrapping around zero on underflow.  The scalar path runs in time
 * that depends only on `len`.
 */
void
sodium_sub(unsigned char *a, const unsigned char *b, const size_t len)
{
    uint_fast16_t c = 0U; /* inter-byte borrow */
    size_t        i;

#ifdef HAVE_AMD64_ASM
    /* fixed-size fast path for 64-byte operands */
    uint64_t t64_1, t64_2, t64_3, t64_4;
    uint64_t t64_5, t64_6, t64_7, t64_8;
    uint32_t t32;

    if (len == 64U) {
        __asm__ __volatile__(
            "movq (%[in]), %[t64_1] \n"
            "movq 8(%[in]), %[t64_2] \n"
            "movq 16(%[in]), %[t64_3] \n"
            "movq 24(%[in]), %[t64_4] \n"
            "movq 32(%[in]), %[t64_5] \n"
            "movq 40(%[in]), %[t64_6] \n"
            "movq 48(%[in]), %[t64_7] \n"
            "movq 56(%[in]), %[t64_8] \n"
            "subq %[t64_1], (%[out]) \n"
            "sbbq %[t64_2], 8(%[out]) \n"
            "sbbq %[t64_3], 16(%[out]) \n"
            "sbbq %[t64_4], 24(%[out]) \n"
            "sbbq %[t64_5], 32(%[out]) \n"
            "sbbq %[t64_6], 40(%[out]) \n"
            "sbbq %[t64_7], 48(%[out]) \n"
            "sbbq %[t64_8], 56(%[out]) \n"
            : [t64_1] "=&r"(t64_1), [t64_2] "=&r"(t64_2), [t64_3] "=&r"(t64_3), [t64_4] "=&r"(t64_4),
              [t64_5] "=&r"(t64_5), [t64_6] "=&r"(t64_6), [t64_7] "=&r"(t64_7), [t64_8] "=&r"(t64_8)
            : [in] "S"(b), [out] "D"(a)
            : "memory", "flags", "cc");
        return;
    }
#endif
    /* generic path: byte-wise subtraction with borrow propagation */
    for (i = 0U; i < len; i++) {
        c = (uint_fast16_t) a[i] - (uint_fast16_t) b[i] - c;
        a[i] = (unsigned char) c;
        c = (c >> 8) & 1U;
    }
}
/*
 * One-time allocator initialization: query the real system page size
 * (used to align guarded allocations) and fill the global canary with
 * random bytes.  Always returns 0.
 */
int
_sodium_alloc_init(void)
{
#ifdef HAVE_ALIGNED_MALLOC
# if defined(_SC_PAGESIZE)
    long page_size_ = sysconf(_SC_PAGESIZE);
    if (page_size_ > 0L) {
        page_size = (size_t) page_size_;
    }
# elif defined(WINAPI_DESKTOP)
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    page_size = (size_t) si.dwPageSize;
# else
# warning Unknown page size
# endif
    /* a page must be able to hold both the canary and a stored size_t,
     * otherwise the guarded-allocation layout cannot work */
    if (page_size < CANARY_SIZE || page_size < sizeof(size_t)) {
        sodium_misuse(); /* LCOV_EXCL_LINE */
    }
#endif
    randombytes_buf(canary, CANARY_SIZE);
    return 0;
}
  391. int
  392. sodium_mlock(void *const addr, const size_t len)
  393. {
  394. #if defined(MADV_DONTDUMP) && defined(HAVE_MADVISE)
  395. (void) madvise(addr, len, MADV_DONTDUMP);
  396. #endif
  397. #ifdef HAVE_MLOCK
  398. return mlock(addr, len);
  399. #elif defined(WINAPI_DESKTOP)
  400. return -(VirtualLock(addr, len) == 0);
  401. #else
  402. errno = ENOSYS;
  403. return -1;
  404. #endif
  405. }
/*
 * Undo sodium_mlock(): the region is zeroed first so secrets are never
 * left in memory that can be swapped or reused, then page locking and
 * the core-dump exclusion are reverted.  Returns 0 on success, -1 on
 * failure or when no locking primitive exists (errno = ENOSYS).
 */
int
sodium_munlock(void *const addr, const size_t len)
{
    /* wipe before the pages become swappable again */
    sodium_memzero(addr, len);
#if defined(MADV_DODUMP) && defined(HAVE_MADVISE)
    /* re-allow the region in core dumps, reversing sodium_mlock() */
    (void) madvise(addr, len, MADV_DODUMP);
#endif
#ifdef HAVE_MLOCK
    return munlock(addr, len);
#elif defined(WINAPI_DESKTOP)
    return -(VirtualUnlock(addr, len) == 0);
#else
    errno = ENOSYS;
    return -1;
#endif
}
  422. static int
  423. _mprotect_noaccess(void *ptr, size_t size)
  424. {
  425. #ifdef HAVE_MPROTECT
  426. return mprotect(ptr, size, PROT_NONE);
  427. #elif defined(WINAPI_DESKTOP)
  428. DWORD old;
  429. return -(VirtualProtect(ptr, size, PAGE_NOACCESS, &old) == 0);
  430. #else
  431. errno = ENOSYS;
  432. return -1;
  433. #endif
  434. }
  435. static int
  436. _mprotect_readonly(void *ptr, size_t size)
  437. {
  438. #ifdef HAVE_MPROTECT
  439. return mprotect(ptr, size, PROT_READ);
  440. #elif defined(WINAPI_DESKTOP)
  441. DWORD old;
  442. return -(VirtualProtect(ptr, size, PAGE_READONLY, &old) == 0);
  443. #else
  444. errno = ENOSYS;
  445. return -1;
  446. #endif
  447. }
  448. static int
  449. _mprotect_readwrite(void *ptr, size_t size)
  450. {
  451. #ifdef HAVE_MPROTECT
  452. return mprotect(ptr, size, PROT_READ | PROT_WRITE);
  453. #elif defined(WINAPI_DESKTOP)
  454. DWORD old;
  455. return -(VirtualProtect(ptr, size, PAGE_READWRITE, &old) == 0);
  456. #else
  457. errno = ENOSYS;
  458. return -1;
  459. #endif
  460. }
  461. #ifdef HAVE_ALIGNED_MALLOC
/*
 * Kill the process after a canary mismatch: raise a fatal signal when
 * one is available, then abort().  Never returns — heap corruption is
 * not an error any higher-level API should be able to catch.
 */
__attribute__((noreturn)) static void
_out_of_bounds(void)
{
# ifndef __wasm__
#  ifdef SIGSEGV
    raise(SIGSEGV);
#  elif defined(SIGKILL)
    raise(SIGKILL);
#  endif
# endif
    abort(); /* not something we want any higher-level API to catch */
} /* LCOV_EXCL_LINE */
  474. static inline size_t
  475. _page_round(const size_t size)
  476. {
  477. const size_t page_mask = page_size - 1U;
  478. return (size + page_mask) & ~page_mask;
  479. }
/*
 * Grab `size` bytes of page-aligned, readable/writable memory, or
 * return NULL on failure.  MAP_NOCORE/MAP_CONCEAL keeps the mapping
 * out of core dumps on platforms that define it.
 */
static __attribute__((malloc)) unsigned char *
_alloc_aligned(const size_t size)
{
    void *ptr;

# if defined(MAP_ANON) && defined(HAVE_MMAP)
    if ((ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_ANON | MAP_PRIVATE | MAP_NOCORE, -1, 0)) ==
        MAP_FAILED) {
        ptr = NULL; /* LCOV_EXCL_LINE */
    }               /* LCOV_EXCL_LINE */
# elif defined(HAVE_POSIX_MEMALIGN)
    if (posix_memalign(&ptr, page_size, size) != 0) {
        ptr = NULL; /* LCOV_EXCL_LINE */
    }               /* LCOV_EXCL_LINE */
# elif defined(WINAPI_DESKTOP)
    ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
# else
# error Bug
# endif
    return (unsigned char *) ptr;
}
  501. static void
  502. _free_aligned(unsigned char *const ptr, const size_t size)
  503. {
  504. # if defined(MAP_ANON) && defined(HAVE_MMAP)
  505. (void) munmap(ptr, size);
  506. # elif defined(HAVE_POSIX_MEMALIGN)
  507. free(ptr);
  508. # elif defined(WINAPI_DESKTOP)
  509. VirtualFree(ptr, 0U, MEM_RELEASE);
  510. # else
  511. # error Bug
  512. #endif
  513. }
/*
 * Map a user pointer returned by sodium_malloc() back to the start of
 * its page-aligned unprotected region: the canary sits immediately
 * before the user data, and rounding the canary's address down to a
 * page boundary yields the region start.
 */
static unsigned char *
_unprotected_ptr_from_user_ptr(void *const ptr)
{
    uintptr_t      unprotected_ptr_u;
    unsigned char *canary_ptr;
    size_t         page_mask;

    canary_ptr = ((unsigned char *) ptr) - sizeof canary;
    page_mask = page_size - 1U;
    /* round the canary address down to its page boundary */
    unprotected_ptr_u = ((uintptr_t) canary_ptr & (uintptr_t) ~page_mask);
    /* addresses within the first two pages can never come from a
     * guarded allocation (they precede any possible base region) */
    if (unprotected_ptr_u <= page_size * 2U) {
        sodium_misuse(); /* LCOV_EXCL_LINE */
    }
    return (unsigned char *) unprotected_ptr_u;
}
  528. #endif /* HAVE_ALIGNED_MALLOC */
#ifndef HAVE_ALIGNED_MALLOC
/* Fallback when no page-aligned allocator exists: plain malloc(),
 * requesting at least one byte so a unique pointer is returned even
 * for size 0. */
static __attribute__((malloc)) void *
_sodium_malloc(const size_t size)
{
    return malloc(size > (size_t) 0U ? size : (size_t) 1U);
}
#else
/*
 * Guarded allocation.  Layout of the region returned by
 * _alloc_aligned() (all sections page-aligned):
 *
 *   base_ptr              one page storing unprotected_size, later
 *                         made read-only
 *   base_ptr + page       one guard page (no access)
 *   unprotected_ptr       unprotected_size bytes, mlock()ed; the user
 *                         data sits at the END, preceded by the canary
 *   + unprotected_size    one guard page (no access)
 *
 * Returns a pointer to `size` usable bytes placed so the byte just
 * past them falls on the trailing guard page, or NULL with errno set.
 */
static __attribute__((malloc)) void *
_sodium_malloc(const size_t size)
{
    void          *user_ptr;
    unsigned char *base_ptr;
    unsigned char *canary_ptr;
    unsigned char *unprotected_ptr;
    size_t         size_with_canary;
    size_t         total_size;
    size_t         unprotected_size;

    /* refuse sizes that would overflow the page arithmetic below */
    if (size >= (size_t) SIZE_MAX - page_size * 4U) {
        errno = ENOMEM;
        return NULL;
    }
    if (page_size <= sizeof canary || page_size < sizeof unprotected_size) {
        sodium_misuse(); /* LCOV_EXCL_LINE */
    }
    size_with_canary = (sizeof canary) + size;
    unprotected_size = _page_round(size_with_canary);
    total_size = page_size + page_size + unprotected_size + page_size;
    if ((base_ptr = _alloc_aligned(total_size)) == NULL) {
        return NULL; /* LCOV_EXCL_LINE */
    }
    unprotected_ptr = base_ptr + page_size * 2U;
    _mprotect_noaccess(base_ptr + page_size, page_size);
# ifndef HAVE_PAGE_PROTECTION
    /* no guard pages available: place a trailing canary instead, so
     * sodium_free() can still detect overflows */
    memcpy(unprotected_ptr + unprotected_size, canary, sizeof canary);
# endif
    _mprotect_noaccess(unprotected_ptr + unprotected_size, page_size);
    sodium_mlock(unprotected_ptr, unprotected_size);
    /* push the user data flush against the trailing guard page, with
     * the canary immediately before it to catch underflows */
    canary_ptr =
        unprotected_ptr + _page_round(size_with_canary) - size_with_canary;
    user_ptr = canary_ptr + sizeof canary;
    memcpy(canary_ptr, canary, sizeof canary);
    /* record the region size in the first page, then lock it down */
    memcpy(base_ptr, &unprotected_size, sizeof unprotected_size);
    _mprotect_readonly(base_ptr, page_size);
    assert(_unprotected_ptr_from_user_ptr(user_ptr) == unprotected_ptr);
    return user_ptr;
}
#endif /* !HAVE_ALIGNED_MALLOC */
  576. __attribute__((malloc)) void *
  577. sodium_malloc(const size_t size)
  578. {
  579. void *ptr;
  580. if ((ptr = _sodium_malloc(size)) == NULL) {
  581. return NULL;
  582. }
  583. memset(ptr, (int) GARBAGE_VALUE, size);
  584. return ptr;
  585. }
  586. __attribute__((malloc)) void *
  587. sodium_allocarray(size_t count, size_t size)
  588. {
  589. if (count > (size_t) 0U && size >= (size_t) SIZE_MAX / count) {
  590. errno = ENOMEM;
  591. return NULL;
  592. }
  593. return sodium_malloc(count * size);
  594. }
#ifndef HAVE_ALIGNED_MALLOC
/* No guarded allocator in this build: _sodium_malloc() was a plain
 * malloc(), so release with a plain free(). */
void
sodium_free(void *ptr)
{
    free(ptr);
}
#else
/*
 * Release a guarded allocation made by _sodium_malloc(): recover the
 * base pointer and stored region size, restore read/write access,
 * verify both canaries (terminating the process via _out_of_bounds()
 * on a mismatch), then wipe, munlock and free the whole region
 * including its guard pages.  Like free(), a NULL ptr is a no-op.
 */
void
sodium_free(void *ptr)
{
    unsigned char *base_ptr;
    unsigned char *canary_ptr;
    unsigned char *unprotected_ptr;
    size_t         total_size;
    size_t         unprotected_size;

    if (ptr == NULL) {
        return;
    }
    canary_ptr = ((unsigned char *) ptr) - sizeof canary;
    unprotected_ptr = _unprotected_ptr_from_user_ptr(ptr);
    base_ptr = unprotected_ptr - page_size * 2U;
    /* the first page of the allocation stores the region size */
    memcpy(&unprotected_size, base_ptr, sizeof unprotected_size);
    total_size = page_size + page_size + unprotected_size + page_size;
    _mprotect_readwrite(base_ptr, total_size);
    /* the canary right before the user data detects underflows */
    if (sodium_memcmp(canary_ptr, canary, sizeof canary) != 0) {
        _out_of_bounds();
    }
# ifndef HAVE_PAGE_PROTECTION
    /* without guard pages, a trailing canary detects overflows */
    if (sodium_memcmp(unprotected_ptr + unprotected_size, canary,
                      sizeof canary) != 0) {
        _out_of_bounds();
    }
# endif
    sodium_munlock(unprotected_ptr, unprotected_size);
    _free_aligned(base_ptr, total_size);
}
#endif /* HAVE_ALIGNED_MALLOC */
#ifndef HAVE_PAGE_PROTECTION
/* Page protection is unavailable in this build: always fail with
 * ENOSYS. */
static int
_sodium_mprotect(void *ptr, int (*cb)(void *ptr, size_t size))
{
    (void) ptr;
    (void) cb;
    errno = ENOSYS;
    return -1;
}
#else
/*
 * Apply the protection callback `cb` to the whole unprotected region
 * backing the sodium_malloc() allocation that `ptr` belongs to.  The
 * region size is read back from the allocation's first (read-only)
 * page.  Returns cb's result.
 */
static int
_sodium_mprotect(void *ptr, int (*cb)(void *ptr, size_t size))
{
    unsigned char *base_ptr;
    unsigned char *unprotected_ptr;
    size_t         unprotected_size;

    unprotected_ptr = _unprotected_ptr_from_user_ptr(ptr);
    base_ptr = unprotected_ptr - page_size * 2U;
    memcpy(&unprotected_size, base_ptr, sizeof unprotected_size);
    return cb(unprotected_ptr, unprotected_size);
}
#endif
/* Forbid all access to a sodium_malloc()ed region; 0 on success. */
int
sodium_mprotect_noaccess(void *ptr)
{
    return _sodium_mprotect(ptr, _mprotect_noaccess);
}
/* Make a sodium_malloc()ed region read-only; 0 on success. */
int
sodium_mprotect_readonly(void *ptr)
{
    return _sodium_mprotect(ptr, _mprotect_readonly);
}
/* Make a sodium_malloc()ed region readable and writable again; 0 on
 * success. */
int
sodium_mprotect_readwrite(void *ptr)
{
    return _sodium_mprotect(ptr, _mprotect_readwrite);
}
/*
 * Pad buf[0..unpadded_buflen) to a multiple of `blocksize` by
 * appending a 0x80 byte followed by zero bytes (ISO/IEC 7816-4 style
 * padding).  On success, stores the padded length into
 * *padded_buflen_p (when non-NULL) and returns 0.  Returns -1 when
 * blocksize is 0 or when the padded result would not fit within
 * max_buflen bytes.  Each of the last `blocksize` bytes is rewritten
 * exactly once, so the memory-access pattern does not depend on
 * unpadded_buflen.
 */
int
sodium_pad(size_t *padded_buflen_p, unsigned char *buf,
           size_t unpadded_buflen, size_t blocksize, size_t max_buflen)
{
    unsigned char          *tail;
    size_t                  i;
    size_t                  xpadlen;     /* padding length minus one */
    size_t                  xpadded_len; /* index of the final 0 byte */
    volatile unsigned char  mask;
    unsigned char           barrier_mask;

    if (blocksize <= 0U) {
        return -1;
    }
    /* xpadlen = blocksize - 1 - (unpadded_buflen mod blocksize);
     * use a bit mask when blocksize is a power of two */
    xpadlen = blocksize - 1U;
    if ((blocksize & (blocksize - 1U)) == 0U) {
        xpadlen -= unpadded_buflen & (blocksize - 1U);
    } else {
        xpadlen -= unpadded_buflen % blocksize;
    }
    if ((size_t) SIZE_MAX - unpadded_buflen <= xpadlen) {
        sodium_misuse();
    }
    xpadded_len = unpadded_buflen + xpadlen;
    /* need room for xpadded_len + 1 bytes in total */
    if (xpadded_len >= max_buflen) {
        return -1;
    }
    tail = &buf[xpadded_len];
    if (padded_buflen_p != NULL) {
        *padded_buflen_p = xpadded_len + 1U;
    }
    /* walk the final block backwards from `tail`: bytes after the
     * barrier are zeroed (mask still 0), the barrier byte at index
     * unpadded_buflen becomes 0x80 (i == xpadlen), and once mask is
     * all-ones the remaining original data bytes are preserved */
    mask = 0U;
    for (i = 0; i < blocksize; i++) {
        /* barrier_mask is 0xff when i == xpadlen, 0x00 otherwise */
        barrier_mask = (unsigned char) (((i ^ xpadlen) - 1U)
                                        >> ((sizeof(size_t) - 1) * CHAR_BIT));
        *(tail - i) = ((*(tail - i)) & mask) | (0x80 & barrier_mask);
        mask |= barrier_mask;
    }
    return 0;
}
/*
 * Locate and strip ISO/IEC 7816-4 padding: scan the last `blocksize`
 * bytes for the 0x80 barrier, which must be followed only by zero
 * bytes.  On success stores the unpadded length into
 * *unpadded_buflen_p and returns 0; returns -1 when no valid padding
 * is found.  Exactly `blocksize` bytes are always examined, regardless
 * of their content, so timing does not leak the padding length.
 */
int
sodium_unpad(size_t *unpadded_buflen_p, const unsigned char *buf,
             size_t padded_buflen, size_t blocksize)
{
    const unsigned char *tail;
    unsigned char        acc = 0U;   /* OR of all bytes scanned so far */
    unsigned char        c;
    unsigned char        valid = 0U; /* nonzero once the barrier is found */
    volatile size_t      pad_len = 0U;
    size_t               i;
    size_t               is_barrier;

    if (padded_buflen < blocksize || blocksize <= 0U) {
        return -1;
    }
    tail = &buf[padded_buflen - 1U];
    /* scan backwards from the last byte, branch-free */
    for (i = 0U; i < blocksize; i++) {
        c = *(tail - i);
        /* is_barrier == 1 when every byte after c was zero (acc == 0),
         * no barrier was recorded yet (pad_len == 0) and c == 0x80 */
        is_barrier =
            (( (acc - 1U) & (pad_len - 1U) & ((c ^ 0x80) - 1U) ) >> 8) & 1U;
        acc |= c;
        /* record the barrier offset; (1U + ~is_barrier) is 0 or ~0 */
        pad_len |= i & (1U + ~is_barrier);
        valid |= (unsigned char) is_barrier;
    }
    *unpadded_buflen_p = padded_buflen - 1U - pad_len;
    return (int) (valid - 1U);
}