/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 */
/*
 * File      : slab.c
 *
 * Change Logs:
 * Date           Author       Notes
 * 2008-07-12     Bernard      the first version
 * 2010-07-13     Bernard      fix RT_ALIGN issue found by kuronca
 * 2010-10-23     yi.qiu       add module memory allocator
 * 2010-12-18     yi.qiu       fix zone release bug
 */
/*
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include <rthw.h>
#include <rtthread.h>

#define RT_MEM_STATS

#if defined (RT_USING_HEAP) && defined (RT_USING_SLAB)

/* some statistical variables */
#ifdef RT_MEM_STATS
static rt_size_t used_mem, max_mem;
#endif /* RT_MEM_STATS */

#ifdef RT_USING_HOOK
static void (*rt_malloc_hook)(void *ptr, rt_size_t size);
static void (*rt_free_hook)(void *ptr);

/**
 * @addtogroup Hook
 */

/**@{*/

/**
 * @brief This function will set a hook function, which will be invoked when a memory
 *        block is allocated from heap memory.
 *
 * @param hook the hook function.
 */
void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size))
{
    rt_malloc_hook = hook;
}
RTM_EXPORT(rt_malloc_sethook);

/**
 * @brief This function will set a hook function, which will be invoked when a memory
 *        block is released to heap memory.
 *
 * @param hook the hook function.
 */
void rt_free_sethook(void (*hook)(void *ptr))
{
    rt_free_hook = hook;
}
RTM_EXPORT(rt_free_sethook);

/**@}*/

#endif /* RT_USING_HOOK */
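
/*
 * Usage sketch (illustrative only; my_malloc_trace and my_free_trace are
 * hypothetical names, not part of this file):
 *
 *     static void my_malloc_trace(void *ptr, rt_size_t size)
 *     {
 *         rt_kprintf("malloc %d bytes -> 0x%x\n", size, (rt_ubase_t)ptr);
 *     }
 *
 *     static void my_free_trace(void *ptr)
 *     {
 *         rt_kprintf("free 0x%x\n", (rt_ubase_t)ptr);
 *     }
 *
 *     rt_malloc_sethook(my_malloc_trace);
 *     rt_free_sethook(my_free_trace);
 */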
/*
 * slab allocator implementation
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *      Alloc Size      Chunking        Number of zones
 *      0-127           8               16
 *      128-255         16              8
 *      256-511         32              8
 *      512-1023        64              8
 *      1024-2047       128             8
 *      2048-4095       256             8
 *      4096-8191       512             8
 *      8192-16383      1024            8
 *      16384-32767     2048            8
 *      (if RT_MM_PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *      Allocations >= zone_limit go directly to kmem.
 *
 *                      API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-RT_NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */
/*
 * Chunk structure for free elements
 */
typedef struct slab_chunk
{
    struct slab_chunk *c_next;
} slab_chunk;

/*
 * The IN-BAND zone header is placed at the beginning of each zone.
 */
typedef struct slab_zone
{
    rt_int32_t  z_magic;        /* magic number for sanity check */
    rt_int32_t  z_nfree;        /* total free chunks / ualloc space in zone */
    rt_int32_t  z_nmax;         /* maximum free chunks */

    struct slab_zone *z_next;   /* zoneary[] link if z_nfree non-zero */
    rt_uint8_t  *z_baseptr;     /* pointer to start of chunk array */

    rt_int32_t  z_uindex;       /* current initial allocation index */
    rt_int32_t  z_chunksize;    /* chunk size for validation */

    rt_int32_t  z_zoneindex;    /* zone index */
    slab_chunk  *z_freechunk;   /* free chunk list */
} slab_zone;

#define ZALLOC_SLAB_MAGIC       0x51ab51ab
#define ZALLOC_ZONE_LIMIT       (16 * 1024)     /* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE    (32 * 1024)     /* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE    (128 * 1024)    /* maximum zone size */
#define NZONES                  72              /* number of zones */
#define ZONE_RELEASE_THRESH     2               /* threshold number of zones */

static slab_zone *zone_array[NZONES];   /* linked list of zones NFree > 0 */
static slab_zone *zone_free;            /* whole zones that have become free */

static int zone_free_cnt;
static int zone_size;
static int zone_limit;
static int zone_page_cnt;

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * RT_MM_PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define MIN_CHUNK_SIZE      8       /* in bytes */
#define MIN_CHUNK_MASK      (MIN_CHUNK_SIZE - 1)

/*
 * Array of descriptors that describe the contents of each page
 */
#define PAGE_TYPE_FREE      0x00
#define PAGE_TYPE_SMALL     0x01
#define PAGE_TYPE_LARGE     0x02

struct memusage
{
    rt_uint32_t type: 2 ;       /* page type */
    rt_uint32_t size: 30;       /* pages allocated or offset from zone */
};
static struct memusage *memusage = RT_NULL;

#define btokup(addr)    \
    (&memusage[((rt_ubase_t)(addr) - heap_start) >> RT_MM_PAGE_BITS])

static rt_ubase_t heap_start, heap_end;
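
/*
 * Example (illustrative only, assuming 4 KB pages, i.e. RT_MM_PAGE_BITS == 12):
 * with heap_start == 0x20000000, btokup(0x20003a10) evaluates to
 * &memusage[3], the descriptor of the page that contains that address.
 */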
/* page allocator */
struct rt_page_head
{
    struct rt_page_head *next;      /* next valid page */
    rt_size_t page;                 /* number of pages */

    /* dummy */
    char dummy[RT_MM_PAGE_SIZE - (sizeof(struct rt_page_head *) + sizeof(rt_size_t))];
};
static struct rt_page_head *rt_page_list;
static struct rt_semaphore heap_sem;
/**
 * @brief Allocate memory by pages.
 *
 * @param npages the number of pages to allocate.
 *
 * @return the address of the allocated pages, or RT_NULL on failure.
 */
void *rt_page_alloc(rt_size_t npages)
{
    struct rt_page_head *b, *n;
    struct rt_page_head **prev;

    if (npages == 0)
        return RT_NULL;

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
    for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
    {
        if (b->page > npages)
        {
            /* split this block: take the first npages, keep the rest free */
            n       = b + npages;
            n->next = b->next;
            n->page = b->page - npages;
            *prev   = n;
            break;
        }

        if (b->page == npages)
        {
            /* this node fits exactly, remove it from the free list */
            *prev = b->next;
            break;
        }
    }

    /* unlock heap */
    rt_sem_release(&heap_sem);

    return b;
}
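
/*
 * Usage sketch (illustrative only):
 *
 *     void *pages = rt_page_alloc(4);        // four contiguous pages
 *     if (pages != RT_NULL)
 *         rt_page_free(pages, 4);            // give the same run back
 */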
/**
 * @brief Free memory by pages.
 *
 * @param addr is the head address of the first page.
 *
 * @param npages is the number of pages.
 */
void rt_page_free(void *addr, rt_size_t npages)
{
    struct rt_page_head *b, *n;
    struct rt_page_head **prev;

    RT_ASSERT(addr != RT_NULL);
    RT_ASSERT((rt_ubase_t)addr % RT_MM_PAGE_SIZE == 0);
    RT_ASSERT(npages != 0);

    n = (struct rt_page_head *)addr;

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
    {
        RT_ASSERT(b->page > 0);
        RT_ASSERT(b > n || b + b->page <= n);

        if (b + b->page == n)
        {
            /* merge with the preceding free block */
            if (b + (b->page += npages) == b->next)
            {
                b->page += b->next->page;
                b->next  = b->next->next;
            }
            goto _return;
        }

        if (b == n + npages)
        {
            /* merge with the following free block */
            n->page = b->page + npages;
            n->next = b->next;
            *prev   = n;
            goto _return;
        }

        if (b > n + npages)
            break;
    }

    n->page = npages;
    n->next = b;
    *prev   = n;

_return:
    /* unlock heap */
    rt_sem_release(&heap_sem);
}

/*
 * Initialize the page allocator
 */
static void rt_page_init(void *addr, rt_size_t npages)
{
    RT_ASSERT(addr != RT_NULL);
    RT_ASSERT(npages != 0);

    rt_page_list = RT_NULL;
    rt_page_free(addr, npages);
}
/**
 * @brief This function will initialize the system heap.
 *
 * @param begin_addr the beginning address of the system heap memory.
 *
 * @param end_addr the end address of the system heap memory.
 */
void rt_system_heap_init(void *begin_addr, void *end_addr)
{
    rt_uint32_t limsize, npages;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* align begin and end addr to page boundaries */
    heap_start = RT_ALIGN((rt_ubase_t)begin_addr, RT_MM_PAGE_SIZE);
    heap_end   = RT_ALIGN_DOWN((rt_ubase_t)end_addr, RT_MM_PAGE_SIZE);

    if (heap_start >= heap_end)
    {
        rt_kprintf("rt_system_heap_init, wrong address[0x%x - 0x%x]\n",
                   (rt_ubase_t)begin_addr, (rt_ubase_t)end_addr);

        return;
    }

    limsize = heap_end - heap_start;
    npages  = limsize / RT_MM_PAGE_SIZE;

    /* initialize heap semaphore */
    rt_sem_init(&heap_sem, "heap", 1, RT_IPC_FLAG_PRIO);

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("heap[0x%x - 0x%x], size 0x%x, 0x%x pages\n",
                                 heap_start, heap_end, limsize, npages));

    /* init pages */
    rt_page_init((void *)heap_start, npages);

    /* calculate zone size */
    zone_size = ZALLOC_MIN_ZONE_SIZE;
    while (zone_size < ZALLOC_MAX_ZONE_SIZE && (zone_size << 1) < (limsize / 1024))
        zone_size <<= 1;

    zone_limit = zone_size / 4;
    if (zone_limit > ZALLOC_ZONE_LIMIT)
        zone_limit = ZALLOC_ZONE_LIMIT;

    zone_page_cnt = zone_size / RT_MM_PAGE_SIZE;

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("zone size 0x%x, zone page count 0x%x\n",
                                 zone_size, zone_page_cnt));

    /* allocate memusage array */
    limsize  = npages * sizeof(struct memusage);
    limsize  = RT_ALIGN(limsize, RT_MM_PAGE_SIZE);
    memusage = rt_page_alloc(limsize / RT_MM_PAGE_SIZE);

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("memusage 0x%x, size 0x%x\n",
                                 (rt_ubase_t)memusage, limsize));
}
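
/*
 * Typical call site (illustrative only; the heap bounds are board-specific
 * and the symbol/macro names below are hypothetical):
 *
 *     extern int __bss_end;
 *     #define BOARD_HEAP_END  0x20080000
 *
 *     rt_system_heap_init((void *)&__bss_end, (void *)BOARD_HEAP_END);
 */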
/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
rt_inline int zoneindex(rt_size_t *bytes)
{
    /* unsigned for shift opt */
    rt_ubase_t n = (rt_ubase_t)(*bytes);

    if (n < 128)
    {
        *bytes = n = (n + 7) & ~7;

        /* 8 byte chunks, 16 zones */
        return (n / 8 - 1);
    }
    if (n < 256)
    {
        *bytes = n = (n + 15) & ~15;

        return (n / 16 + 7);
    }
    if (n < 8192)
    {
        if (n < 512)
        {
            *bytes = n = (n + 31) & ~31;

            return (n / 32 + 15);
        }
        if (n < 1024)
        {
            *bytes = n = (n + 63) & ~63;

            return (n / 64 + 23);
        }
        if (n < 2048)
        {
            *bytes = n = (n + 127) & ~127;

            return (n / 128 + 31);
        }
        if (n < 4096)
        {
            *bytes = n = (n + 255) & ~255;

            return (n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;

        return (n / 512 + 47);
    }
    if (n < 16384)
    {
        *bytes = n = (n + 1023) & ~1023;

        return (n / 1024 + 55);
    }

    rt_kprintf("Unexpected byte count %d", n);

    return 0;
}
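
/*
 * Worked example (illustrative): a 100-byte request is rounded up to 104
 * (8-byte chunking) and lands in zone 12; a 3000-byte request is rounded up
 * to 3072 (256-byte chunking) and lands in zone 51.
 */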
/**
 * @addtogroup MM
 */

/**@{*/

/**
 * @brief This function will allocate a block from the system heap memory.
 *
 * @note RT_NULL is returned if
 *         - the size is 0.
 *         - there is not enough memory available in the system.
 *
 * @param size is the size of memory to be allocated.
 *
 * @return the address of the allocated memory block, or RT_NULL on failure.
 */
void *rt_malloc(rt_size_t size)
{
    slab_zone *z;
    rt_int32_t zi;
    slab_chunk *chunk;
    struct memusage *kup;

    /* zero size, return RT_NULL */
    if (size == 0)
        return RT_NULL;

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     */
    if (size >= zone_limit)
    {
        size  = RT_ALIGN(size, RT_MM_PAGE_SIZE);
        chunk = rt_page_alloc(size >> RT_MM_PAGE_BITS);
        if (chunk == RT_NULL)
            return RT_NULL;

        /* set kup */
        kup = btokup(chunk);
        kup->type = PAGE_TYPE_LARGE;
        kup->size = size >> RT_MM_PAGE_BITS;

        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("malloc a large memory 0x%x, page cnt %d, kup %d\n",
                      size,
                      size >> RT_MM_PAGE_BITS,
                      ((rt_ubase_t)chunk - heap_start) >> RT_MM_PAGE_BITS));

        /* lock heap */
        rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

#ifdef RT_MEM_STATS
        used_mem += size;
        if (used_mem > max_mem)
            max_mem = used_mem;
#endif /* RT_MEM_STATS */

        goto done;
    }

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    RT_ASSERT(zi < NZONES);

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("try to malloc 0x%x on zone: %d\n", size, zi));

    if ((z = zone_array[zi]) != RT_NULL)
    {
        RT_ASSERT(z->z_nfree > 0);

        /* Remove us from the zone_array[] when we become full */
        if (--z->z_nfree == 0)
        {
            zone_array[zi] = z->z_next;
            z->z_next = RT_NULL;
        }

        /*
         * No chunks are available but nfree said we had some memory, so
         * it must be available in the never-before-used-memory area
         * governed by uindex.  The consequences are very serious if our zone
         * got corrupted so we use an explicit rt_kprintf rather than a KASSERT.
         */
        if (z->z_uindex + 1 != z->z_nmax)
        {
            z->z_uindex = z->z_uindex + 1;
            chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);
        }
        else
        {
            /* find on free chunk list */
            chunk = z->z_freechunk;

            /* remove this chunk from list */
            z->z_freechunk = z->z_freechunk->c_next;
        }

#ifdef RT_MEM_STATS
        used_mem += z->z_chunksize;
        if (used_mem > max_mem)
            max_mem = used_mem;
#endif /* RT_MEM_STATS */

        goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        rt_int32_t off;

        if ((z = zone_free) != RT_NULL)
        {
            /* remove zone from free zone list */
            zone_free = z->z_next;
            -- zone_free_cnt;
        }
        else
        {
            /* unlock heap, since page allocator will think about lock */
            rt_sem_release(&heap_sem);

            /* allocate a zone from page */
            z = rt_page_alloc(zone_size / RT_MM_PAGE_SIZE);
            if (z == RT_NULL)
            {
                chunk = RT_NULL;
                goto __exit;
            }

            /* lock heap */
            rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

            RT_DEBUG_LOG(RT_DEBUG_SLAB, ("alloc a new zone: 0x%x\n",
                                         (rt_ubase_t)z));

            /* set memusage for the pages of this zone */
            for (off = 0, kup = btokup(z); off < zone_page_cnt; off ++)
            {
                kup->type = PAGE_TYPE_SMALL;
                kup->size = off;

                kup ++;
            }
        }

        /* clear to zero */
        rt_memset(z, 0, sizeof(slab_zone));

        /* offset of slab zone struct in zone */
        off = sizeof(slab_zone);

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
         */
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (off + size - 1) & ~(size - 1);
        else
            off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;

        z->z_magic     = ZALLOC_SLAB_MAGIC;
        z->z_zoneindex = zi;
        z->z_nmax      = (zone_size - off) / size;
        z->z_nfree     = z->z_nmax - 1;
        z->z_baseptr   = (rt_uint8_t *)z + off;
        z->z_uindex    = 0;
        z->z_chunksize = size;

        chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);

        /* link to zone array */
        z->z_next = zone_array[zi];
        zone_array[zi] = z;

#ifdef RT_MEM_STATS
        used_mem += z->z_chunksize;
        if (used_mem > max_mem)
            max_mem = used_mem;
#endif /* RT_MEM_STATS */
    }

done:
    rt_sem_release(&heap_sem);
    RT_OBJECT_HOOK_CALL(rt_malloc_hook, ((char *)chunk, size));

__exit:
    return chunk;
}
RTM_EXPORT(rt_malloc);
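
/*
 * Usage sketch (illustrative only):
 *
 *     rt_uint8_t *buf = rt_malloc(100);   // rounded up to a 104-byte chunk
 *     if (buf != RT_NULL)
 *     {
 *         rt_memset(buf, 0x55, 100);
 *         rt_free(buf);
 *     }
 */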
/**
 * @brief This function will change the size of a previously allocated memory block.
 *
 * @param ptr is the previously allocated memory block.
 *
 * @param size is the new size of the memory block.
 *
 * @return the address of the reallocated memory block, or RT_NULL on failure.
 */
void *rt_realloc(void *ptr, rt_size_t size)
{
    void *nptr;
    slab_zone *z;
    struct memusage *kup;

    if (ptr == RT_NULL)
        return rt_malloc(size);

    if (size == 0)
    {
        rt_free(ptr);
        return RT_NULL;
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    kup = btokup((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
    if (kup->type == PAGE_TYPE_LARGE)
    {
        rt_size_t osize;

        osize = kup->size << RT_MM_PAGE_BITS;
        if ((nptr = rt_malloc(size)) == RT_NULL)
            return RT_NULL;

        rt_memcpy(nptr, ptr, size > osize ? osize : size);
        rt_free(ptr);

        return nptr;
    }
    else if (kup->type == PAGE_TYPE_SMALL)
    {
        z = (slab_zone *)(((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK) -
                          kup->size * RT_MM_PAGE_SIZE);
        RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

        zoneindex(&size);
        if (z->z_chunksize == size)
            return (ptr); /* same chunk */

        /*
         * Allocate memory for the new request size.  Note that zoneindex has
         * already adjusted the request size to the appropriate chunk size, which
         * should optimize our bcopy().  Then copy and return the new pointer.
         */
        if ((nptr = rt_malloc(size)) == RT_NULL)
            return RT_NULL;

        rt_memcpy(nptr, ptr, size > z->z_chunksize ? z->z_chunksize : size);
        rt_free(ptr);

        return nptr;
    }

    return RT_NULL;
}
RTM_EXPORT(rt_realloc);
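
/*
 * Usage sketch (illustrative only):
 *
 *     char *p = rt_malloc(32);
 *     if (p != RT_NULL)
 *     {
 *         char *np = rt_realloc(p, 256);  // may move to a larger chunk/zone
 *         if (np != RT_NULL)
 *             p = np;
 *         rt_free(p);
 *     }
 */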
/**
 * @brief This function will contiguously allocate enough space for count objects
 *        that are size bytes of memory each and returns a pointer to the allocated
 *        memory.
 *
 * @note The allocated memory is filled with bytes of value zero.
 *
 * @param count is the number of objects to allocate.
 *
 * @param size is the size of the objects to allocate.
 *
 * @return pointer to the allocated memory, or RT_NULL if there is an error.
 */
void *rt_calloc(rt_size_t count, rt_size_t size)
{
    void *p;

    /* allocate 'count' objects of size 'size' */
    p = rt_malloc(count * size);

    /* zero the memory */
    if (p)
        rt_memset(p, 0, count * size);

    return p;
}
RTM_EXPORT(rt_calloc);
/**
 * @brief This function will release the memory block previously allocated by rt_malloc.
 *
 * @note The released memory block is taken back to the system heap.
 *
 * @param ptr is the address of the memory block which will be released.
 */
void rt_free(void *ptr)
{
    slab_zone *z;
    slab_chunk *chunk;
    struct memusage *kup;

    /* free a RT_NULL pointer */
    if (ptr == RT_NULL)
        return ;

    RT_OBJECT_HOOK_CALL(rt_free_hook, (ptr));

    /* get memory usage */
#if RT_DEBUG_SLAB
    {
        rt_ubase_t addr = ((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("free a memory 0x%x and align to 0x%x, kup index %d\n",
                      (rt_ubase_t)ptr,
                      (rt_ubase_t)addr,
                      ((rt_ubase_t)(addr) - heap_start) >> RT_MM_PAGE_BITS));
    }
#endif /* RT_DEBUG_SLAB */

    kup = btokup((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
    /* release large allocation */
    if (kup->type == PAGE_TYPE_LARGE)
    {
        rt_ubase_t size;

        /* lock heap */
        rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
        /* clear page counter */
        size = kup->size;
        kup->size = 0;

#ifdef RT_MEM_STATS
        used_mem -= size * RT_MM_PAGE_SIZE;
#endif /* RT_MEM_STATS */
        rt_sem_release(&heap_sem);

        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("free large memory block 0x%x, page count %d\n",
                      (rt_ubase_t)ptr, size));

        /* free this page */
        rt_page_free(ptr, size);

        return;
    }

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* zone case. get out zone. */
    z = (slab_zone *)(((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK) -
                      kup->size * RT_MM_PAGE_SIZE);
    RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

    chunk          = (slab_chunk *)ptr;
    chunk->c_next  = z->z_freechunk;
    z->z_freechunk = chunk;

#ifdef RT_MEM_STATS
    used_mem -= z->z_chunksize;
#endif /* RT_MEM_STATS */

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_nfree++ == 0)
    {
        z->z_next = zone_array[z->z_zoneindex];
        zone_array[z->z_zoneindex] = z;
    }

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_nfree == z->z_nmax &&
        (z->z_next || zone_array[z->z_zoneindex] != z))
    {
        slab_zone **pz;

        RT_DEBUG_LOG(RT_DEBUG_SLAB, ("free zone 0x%x, zoneindex %d\n",
                                     (rt_ubase_t)z, z->z_zoneindex));

        /* remove zone from zone array list */
        for (pz = &zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next)
            ;
        *pz = z->z_next;

        /* reset zone */
        z->z_magic = -1;

        /* insert to free zone list */
        z->z_next = zone_free;
        zone_free = z;

        ++ zone_free_cnt;

        /* release zone to page allocator */
        if (zone_free_cnt > ZONE_RELEASE_THRESH)
        {
            register rt_base_t i;

            z         = zone_free;
            zone_free = z->z_next;

            -- zone_free_cnt;

            /* mark the zone's pages as free in memusage */
            for (i = 0, kup = btokup(z); i < zone_page_cnt; i ++)
            {
                kup->type = PAGE_TYPE_FREE;
                kup->size = 0;

                kup ++;
            }

            /* unlock heap */
            rt_sem_release(&heap_sem);

            /* release pages */
            rt_page_free(z, zone_size / RT_MM_PAGE_SIZE);

            return;
        }
    }
    /* unlock heap */
    rt_sem_release(&heap_sem);
}
RTM_EXPORT(rt_free);
#ifdef RT_MEM_STATS
/**
 * @brief This function will calculate the total memory, the used memory, and
 *        the maximum memory used.
 *
 * @param total is a pointer to get the total size of the memory.
 *
 * @param used is a pointer to get the size of memory used.
 *
 * @param max_used is a pointer to get the maximum memory used.
 */
void rt_memory_info(rt_uint32_t *total,
                    rt_uint32_t *used,
                    rt_uint32_t *max_used)
{
    if (total != RT_NULL)
        *total = heap_end - heap_start;

    if (used != RT_NULL)
        *used = used_mem;

    if (max_used != RT_NULL)
        *max_used = max_mem;
}
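
/*
 * Usage sketch (illustrative only):
 *
 *     rt_uint32_t total, used, max_used;
 *
 *     rt_memory_info(&total, &used, &max_used);
 *     rt_kprintf("heap %d bytes, %d in use, peak %d\n", total, used, max_used);
 */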
#ifdef RT_USING_FINSH
#include <finsh.h>

void list_mem(void)
{
    rt_kprintf("total memory: %d\n", heap_end - heap_start);
    rt_kprintf("used memory : %d\n", used_mem);
    rt_kprintf("maximum allocated memory: %d\n", max_mem);
}
FINSH_FUNCTION_EXPORT(list_mem, list memory usage information)
#endif /* RT_USING_FINSH */
#endif /* RT_MEM_STATS */

/**@}*/

#endif /* defined (RT_USING_HEAP) && defined (RT_USING_SLAB) */