/*
 * File      : slab.c
 * This file is part of RT-Thread RTOS
 * COPYRIGHT (C) 2008 - 2009, RT-Thread Development Team
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rt-thread.org/license/LICENSE
 *
 * Change Logs:
 * Date           Author       Notes
 * 2008-07-12     Bernard      the first version
 * 2010-07-13     Bernard      fix RT_ALIGN issue found by kuronca
 * 2010-10-23     yi.qiu       add module memory allocator
 * 2010-12-18     yi.qiu       fix zone release bug
 */

/*
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <rthw.h>
#include <rtthread.h>

#include "kservice.h"

/* #define RT_SLAB_DEBUG */
#define RT_MEM_STATS

#if defined (RT_USING_HEAP) && defined (RT_USING_SLAB)

/* some statistical variables */
#ifdef RT_MEM_STATS
static rt_size_t used_mem, max_mem;
#endif

#ifdef RT_USING_HOOK
static void (*rt_malloc_hook)(void *ptr, rt_size_t size);
static void (*rt_free_hook)(void *ptr);

/**
 * @addtogroup Hook
 */
/*@{*/

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is allocated from heap memory.
 *
 * @param hook the hook function
 */
void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size))
{
    rt_malloc_hook = hook;
}

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is released to heap memory.
 *
 * @param hook the hook function
 */
void rt_free_sethook(void (*hook)(void *ptr))
{
    rt_free_hook = hook;
}

/*@}*/
#endif
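/*
 * Illustrative sketch (an assumption, not part of the original file): how an
 * application might register the malloc/free hooks declared above to trace
 * heap activity. The hook names and the use of rt_kprintf() here are for
 * demonstration only.
 */
#if 0
static void trace_malloc_hook(void *ptr, rt_size_t size)
{
    /* invoked after each successful rt_malloc() */
    rt_kprintf("malloc: %d bytes at 0x%x\n", size, (rt_uint32_t)ptr);
}

static void trace_free_hook(void *ptr)
{
    /* invoked at the start of each rt_free() */
    rt_kprintf("free  : 0x%x\n", (rt_uint32_t)ptr);
}

static void install_heap_hooks(void)
{
    rt_malloc_sethook(trace_malloc_hook);
    rt_free_sethook(trace_free_hook);
}
#endif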
/*
 * slab allocator implementation
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *      Alloc Size      Chunking        Number of zones
 *      0-127           8               16
 *      128-255         16              8
 *      256-511         32              8
 *      512-1023        64              8
 *      1024-2047       128             8
 *      2048-4095       256             8
 *      4096-8191       512             8
 *      8192-16383      1024            8
 *      16384-32767     2048            8
 *      (if RT_MM_PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *      Allocations >= zone_limit go directly to kmem.
 *
 *                      API REQUIREMENTS AND SIDE EFFECTS
 *
 *      To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *      have remained compatible with the following API requirements:
 *
 *      + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *      + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *      + malloc(0) is allowed and returns non-RT_NULL (ahc driver)
 *      + ability to allocate arbitrarily large chunks of memory
 */
/*
 * Chunk structure for free elements
 */
typedef struct slab_chunk
{
    struct slab_chunk *c_next;
} slab_chunk;

/*
 * The IN-BAND zone header is placed at the beginning of each zone.
 */
typedef struct slab_zone {
    rt_int32_t  z_magic;        /* magic number for sanity check */
    rt_int32_t  z_nfree;        /* total free chunks / ualloc space in zone */
    rt_int32_t  z_nmax;         /* maximum free chunks */

    struct slab_zone *z_next;   /* zoneary[] link if z_nfree non-zero */
    rt_uint8_t  *z_baseptr;     /* pointer to start of chunk array */

    rt_int32_t  z_uindex;       /* current initial allocation index */
    rt_int32_t  z_chunksize;    /* chunk size for validation */

    rt_int32_t  z_zoneindex;    /* zone index */
    slab_chunk  *z_freechunk;   /* free chunk list */
} slab_zone;

#define ZALLOC_SLAB_MAGIC       0x51ab51ab
#define ZALLOC_ZONE_LIMIT       (16 * 1024)     /* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE    (32 * 1024)     /* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE    (128 * 1024)    /* maximum zone size */
#define NZONES                  72              /* number of zones */
#define ZONE_RELEASE_THRESH     2               /* threshold number of zones */

static slab_zone *zone_array[NZONES];   /* linked list of zones NFree > 0 */
static slab_zone *zone_free;            /* whole zones that have become free */

static int zone_free_cnt;
static int zone_size;
static int zone_limit;
static int zone_page_cnt;

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * RT_MM_PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define MIN_CHUNK_SIZE      8       /* in bytes */
#define MIN_CHUNK_MASK      (MIN_CHUNK_SIZE - 1)

/*
 * Array of descriptors that describe the contents of each page
 */
#define PAGE_TYPE_FREE      0x00
#define PAGE_TYPE_SMALL     0x01
#define PAGE_TYPE_LARGE     0x02

struct memusage {
    rt_uint32_t type:2;     /* page type */
    rt_uint32_t size:30;    /* pages allocated or offset from zone */
};
static struct memusage *memusage = RT_NULL;
#define btokup(addr) (&memusage[((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS])

static rt_uint32_t heap_start, heap_end;

/* page allocator */
struct rt_page_head
{
    struct rt_page_head *next;  /* next valid page */
    rt_size_t page;             /* number of pages */

    /* dummy */
    char dummy[RT_MM_PAGE_SIZE - (sizeof(struct rt_page_head *) + sizeof(rt_size_t))];
};
static struct rt_page_head *rt_page_list;

static struct rt_semaphore heap_sem;
void *rt_page_alloc(rt_size_t npages)
{
    struct rt_page_head *b, *n;
    struct rt_page_head **prev;

    if (npages == 0) return RT_NULL;

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
    for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
    {
        if (b->page > npages)
        {
            /* split pages */
            n = b + npages;
            n->next = b->next;
            n->page = b->page - npages;
            *prev = n;
            break;
        }

        if (b->page == npages)
        {
            /* this node fits, remove this node */
            *prev = b->next;
            break;
        }
    }

    /* unlock heap */
    rt_sem_release(&heap_sem);

    return b;
}
void rt_page_free(void *addr, rt_size_t npages)
{
    struct rt_page_head *b, *n;
    struct rt_page_head **prev;

    RT_ASSERT(addr != RT_NULL);
    RT_ASSERT((rt_uint32_t)addr % RT_MM_PAGE_SIZE == 0);
    RT_ASSERT(npages != 0);

    n = (struct rt_page_head *)addr;

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
    {
        RT_ASSERT(b->page > 0);
        RT_ASSERT(b > n || b + b->page <= n);

        if (b + b->page == n)
        {
            if (b + (b->page += npages) == b->next)
            {
                b->page += b->next->page;
                b->next  = b->next->next;
            }

            goto _return;
        }

        if (b == n + npages)
        {
            n->page = b->page + npages;
            n->next = b->next;
            *prev = n;

            goto _return;
        }

        if (b > n + npages) break;
    }

    n->page = npages;
    n->next = b;
    *prev = n;

_return:
    /* unlock heap */
    rt_sem_release(&heap_sem);
}
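/*
 * Illustrative sketch (an assumption, not part of the original file):
 * allocating and releasing raw pages with the page allocator above. The
 * caller must remember the page count itself, since rt_page_free() takes
 * it as a parameter.
 */
#if 0
static void page_alloc_example(void)
{
    void *pages;

    /* grab 4 contiguous pages (4 * RT_MM_PAGE_SIZE bytes) */
    pages = rt_page_alloc(4);
    if (pages != RT_NULL)
    {
        /* ... use the pages ... */

        /* give the same 4 pages back to the free page list */
        rt_page_free(pages, 4);
    }
}
#endif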
/*
 * Initialize the page allocator
 */
static void rt_page_init(void *addr, rt_size_t npages)
{
    RT_ASSERT(addr != RT_NULL);
    RT_ASSERT(npages != 0);

    rt_page_list = RT_NULL;
    rt_page_free(addr, npages);
}

/**
 * @ingroup SystemInit
 *
 * This function will initialize the system heap.
 *
 * @param begin_addr the beginning address of the system heap
 * @param end_addr the end address of the system heap
 *
 */
void rt_system_heap_init(void *begin_addr, void *end_addr)
{
    rt_uint32_t limsize, npages;

    /* align begin and end addr to page boundaries */
    heap_start = RT_ALIGN((rt_uint32_t)begin_addr, RT_MM_PAGE_SIZE);
    heap_end   = RT_ALIGN_DOWN((rt_uint32_t)end_addr, RT_MM_PAGE_SIZE);

    if (heap_start >= heap_end)
    {
        rt_kprintf("rt_system_heap_init, wrong address[0x%x - 0x%x]\n",
            (rt_uint32_t)begin_addr, (rt_uint32_t)end_addr);
        return;
    }

    limsize = heap_end - heap_start;
    npages  = limsize / RT_MM_PAGE_SIZE;

    /* initialize heap semaphore */
    rt_sem_init(&heap_sem, "heap", 1, RT_IPC_FLAG_FIFO);

#ifdef RT_SLAB_DEBUG
    rt_kprintf("heap[0x%x - 0x%x], size 0x%x, 0x%x pages\n", heap_start, heap_end,
        limsize, npages);
#endif

    /* init pages */
    rt_page_init((void *)heap_start, npages);

    /* calculate zone size */
    zone_size = ZALLOC_MIN_ZONE_SIZE;
    while (zone_size < ZALLOC_MAX_ZONE_SIZE && (zone_size << 1) < (limsize / 1024))
        zone_size <<= 1;

    zone_limit = zone_size / 4;
    if (zone_limit > ZALLOC_ZONE_LIMIT) zone_limit = ZALLOC_ZONE_LIMIT;

    zone_page_cnt = zone_size / RT_MM_PAGE_SIZE;

#ifdef RT_SLAB_DEBUG
    rt_kprintf("zone size 0x%x, zone page count 0x%x\n", zone_size, zone_page_cnt);
#endif

    /* allocate memusage array */
    limsize  = npages * sizeof(struct memusage);
    limsize  = RT_ALIGN(limsize, RT_MM_PAGE_SIZE);
    memusage = rt_page_alloc(limsize / RT_MM_PAGE_SIZE);

#ifdef RT_SLAB_DEBUG
    rt_kprintf("memusage 0x%x, size 0x%x\n", (rt_uint32_t)memusage, limsize);
#endif
}
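/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * heap is normally initialized once during board bring-up, before any
 * rt_malloc() call. The HEAP_BEGIN/HEAP_END symbols and the function name
 * below are hypothetical placeholders; real boards derive the addresses
 * from the linker script or the end of the RAM region.
 */
#if 0
#define HEAP_BEGIN  ((void *)0x20001000)    /* hypothetical start of free RAM */
#define HEAP_END    ((void *)0x20020000)    /* hypothetical end of free RAM */

void board_heap_init(void)
{
    /* hand the [HEAP_BEGIN, HEAP_END) region to the page/slab allocator */
    rt_system_heap_init(HEAP_BEGIN, HEAP_END);
}
#endif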
/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
rt_inline int zoneindex(rt_uint32_t *bytes)
{
    rt_uint32_t n = (rt_uint32_t)*bytes;    /* unsigned for shift opt */

    if (n < 128)
    {
        *bytes = n = (n + 7) & ~7;
        return (n / 8 - 1);     /* 8 byte chunks, 16 zones */
    }
    if (n < 256)
    {
        *bytes = n = (n + 15) & ~15;
        return (n / 16 + 7);
    }
    if (n < 8192)
    {
        if (n < 512)
        {
            *bytes = n = (n + 31) & ~31;
            return (n / 32 + 15);
        }
        if (n < 1024)
        {
            *bytes = n = (n + 63) & ~63;
            return (n / 64 + 23);
        }
        if (n < 2048)
        {
            *bytes = n = (n + 127) & ~127;
            return (n / 128 + 31);
        }
        if (n < 4096)
        {
            *bytes = n = (n + 255) & ~255;
            return (n / 256 + 39);
        }

        *bytes = n = (n + 511) & ~511;
        return (n / 512 + 47);
    }
    if (n < 16384)
    {
        *bytes = n = (n + 1023) & ~1023;
        return (n / 1024 + 55);
    }

    rt_kprintf("Unexpected byte count %d\n", n);
    return 0;
}
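/*
 * Illustrative sketch (an assumption, not part of the original file): how
 * zoneindex() rounds a request up to its zone's chunk size. A 20-byte
 * request becomes 24 bytes (zone 2), and a 300-byte request becomes
 * 320 bytes (zone 25: round 300 up to the next multiple of 32 = 320,
 * then 320/32 + 15 = 25).
 */
#if 0
static void zoneindex_example(void)
{
    rt_uint32_t size;
    int zi;

    size = 20;
    zi = zoneindex(&size);      /* size becomes 24, zi == 2 */
    rt_kprintf("20 bytes -> chunk %d, zone %d\n", size, zi);

    size = 300;
    zi = zoneindex(&size);      /* size becomes 320, zi == 25 */
    rt_kprintf("300 bytes -> chunk %d, zone %d\n", size, zi);
}
#endif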
/**
 * @addtogroup MM
 */
/*@{*/

/*
 * This function will allocate the specified number of pages from page memory.
 *
 * @param npages the number of pages to be allocated
 *
 * @note this function is used for RT-Thread Application Module
 */
void *rt_malloc_page(rt_size_t npages)
{
    void *chunk;

    chunk = rt_page_alloc(npages);
    if (chunk == RT_NULL) return RT_NULL;

    /* update memory usage */
#ifdef RT_MEM_STATS
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
    used_mem += npages * RT_MM_PAGE_SIZE;
    if (used_mem > max_mem) max_mem = used_mem;
    rt_sem_release(&heap_sem);
#endif

    return chunk;
}

/*
 * This function will release the memory pages previously allocated by
 * rt_malloc_page.
 *
 * @param page_ptr the page address to be released
 * @param npages the number of pages to be released
 *
 * @note this function is used for RT-Thread Application Module
 */
void rt_free_page(void *page_ptr, rt_size_t npages)
{
    rt_page_free(page_ptr, npages);

    /* update memory usage */
#ifdef RT_MEM_STATS
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
    used_mem -= npages * RT_MM_PAGE_SIZE;
    rt_sem_release(&heap_sem);
#endif
}
/**
 * This function will allocate a block from the system heap memory.
 * - If size is zero, or
 * - if no memory of the requested size is available in the system,
 * RT_NULL is returned.
 *
 * @param size the size of memory to be allocated
 *
 * @return the allocated memory
 *
 */
void *rt_malloc(rt_size_t size)
{
    slab_zone *z;
    rt_int32_t zi;
    slab_chunk *chunk;
    struct memusage *kup;

    /* zero size, return RT_NULL */
    if (size == 0) return RT_NULL;

#ifdef RT_USING_MODULE
    if (rt_module_self() != RT_NULL) return rt_module_malloc(size);
#endif

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     */
    if (size >= zone_limit)
    {
        size = RT_ALIGN(size, RT_MM_PAGE_SIZE);

        chunk = rt_page_alloc(size >> RT_MM_PAGE_BITS);
        if (chunk == RT_NULL) return RT_NULL;

        /* set kup */
        kup = btokup(chunk);
        kup->type = PAGE_TYPE_LARGE;
        kup->size = size >> RT_MM_PAGE_BITS;

#ifdef RT_SLAB_DEBUG
        rt_kprintf("malloc a large memory 0x%x, page cnt %d, kup %d\n",
            size,
            size >> RT_MM_PAGE_BITS,
            ((rt_uint32_t)chunk - heap_start) >> RT_MM_PAGE_BITS);
#endif

        /* lock heap */
        rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

#ifdef RT_MEM_STATS
        used_mem += size;
        if (used_mem > max_mem) max_mem = used_mem;
#endif

        goto done;
    }

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    RT_ASSERT(zi < NZONES);

#ifdef RT_SLAB_DEBUG
    rt_kprintf("try to malloc 0x%x on zone: %d\n", size, zi);
#endif

    if ((z = zone_array[zi]) != RT_NULL)
    {
        RT_ASSERT(z->z_nfree > 0);

        /* Remove us from the zone_array[] when we become empty */
        if (--z->z_nfree == 0)
        {
            zone_array[zi] = z->z_next;
            z->z_next = RT_NULL;
        }

        /*
         * No chunks are available but nfree said we had some memory, so
         * it must be available in the never-before-used-memory area
         * governed by uindex.  The consequences are very serious if our zone
         * got corrupted so we use an explicit rt_kprintf rather than a KASSERT.
         */
        if (z->z_uindex + 1 != z->z_nmax)
        {
            z->z_uindex = z->z_uindex + 1;
            chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);
        }
        else
        {
            /* find on free chunk list */
            chunk = z->z_freechunk;

            /* remove this chunk from list */
            z->z_freechunk = z->z_freechunk->c_next;
        }

#ifdef RT_MEM_STATS
        used_mem += z->z_chunksize;
        if (used_mem > max_mem) max_mem = used_mem;
#endif

        goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        rt_int32_t off;

        if ((z = zone_free) != RT_NULL)
        {
            /* remove zone from free zone list */
            zone_free = z->z_next;
            --zone_free_cnt;
        }
        else
        {
            /* unlock heap, since the page allocator takes the lock itself */
            rt_sem_release(&heap_sem);

            /* allocate a zone from page */
            z = rt_page_alloc(zone_size / RT_MM_PAGE_SIZE);
            if (z == RT_NULL) goto fail;

            /* lock heap */
            rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

#ifdef RT_SLAB_DEBUG
            rt_kprintf("alloc a new zone: 0x%x\n", (rt_uint32_t)z);
#endif

            /* set memory usage for the pages of this zone */
            for (off = 0, kup = btokup(z); off < zone_page_cnt; off ++)
            {
                kup->type = PAGE_TYPE_SMALL;
                kup->size = off;

                kup ++;
            }
        }

        /* clear to zero */
        rt_memset(z, 0, sizeof(slab_zone));

        /* offset of slab zone struct in zone */
        off = sizeof(slab_zone);

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
         */
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (off + size - 1) & ~(size - 1);
        else
            off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;

        z->z_magic     = ZALLOC_SLAB_MAGIC;
        z->z_zoneindex = zi;
        z->z_nmax      = (zone_size - off) / size;
        z->z_nfree     = z->z_nmax - 1;
        z->z_baseptr   = (rt_uint8_t *)z + off;
        z->z_uindex    = 0;
        z->z_chunksize = size;

        chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);

        /* link to zone array */
        z->z_next = zone_array[zi];
        zone_array[zi] = z;

#ifdef RT_MEM_STATS
        used_mem += z->z_chunksize;
        if (used_mem > max_mem) max_mem = used_mem;
#endif
    }

done:
    rt_sem_release(&heap_sem);

#ifdef RT_USING_HOOK
    if (rt_malloc_hook != RT_NULL) rt_malloc_hook((char *)chunk, size);
#endif

    return chunk;

fail:
    rt_sem_release(&heap_sem);

    return RT_NULL;
}
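/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * typical rt_malloc()/rt_free() pair. Requests below zone_limit are served
 * from a slab zone; larger requests go straight to the page allocator.
 */
#if 0
static void malloc_example(void)
{
    char *buf;

    buf = (char *)rt_malloc(64);    /* served from the 64-byte chunk zone */
    if (buf == RT_NULL)
    {
        rt_kprintf("out of memory\n");
        return;
    }

    rt_memset(buf, 0, 64);
    /* ... use buf ... */

    rt_free(buf);                   /* chunk returns to its zone's free list */
}
#endif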
/**
 * This function will change the size of a previously allocated memory block.
 *
 * @param ptr the previously allocated memory block
 * @param size the new size of the memory block
 *
 * @return the allocated memory
 */
void *rt_realloc(void *ptr, rt_size_t size)
{
    void *nptr;
    slab_zone *z;
    struct memusage *kup;

    if (ptr == RT_NULL) return rt_malloc(size);
    if (size == 0)
    {
        rt_free(ptr);
        return RT_NULL;
    }

#ifdef RT_USING_MODULE
    if (rt_module_self() != RT_NULL) return rt_module_realloc(ptr, size);
#endif

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
    if (kup->type == PAGE_TYPE_LARGE)
    {
        rt_size_t osize;

        osize = kup->size << RT_MM_PAGE_BITS;
        if ((nptr = rt_malloc(size)) == RT_NULL) return RT_NULL;
        rt_memcpy(nptr, ptr, size > osize ? osize : size);
        rt_free(ptr);

        return nptr;
    }
    else if (kup->type == PAGE_TYPE_SMALL)
    {
        z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) - kup->size * RT_MM_PAGE_SIZE);
        RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

        zoneindex(&size);
        if (z->z_chunksize == size) return ptr;     /* same chunk */

        /*
         * Allocate memory for the new request size.  Note that zoneindex has
         * already adjusted the request size to the appropriate chunk size, which
         * should optimize our bcopy().  Then copy and return the new pointer.
         */
        if ((nptr = rt_malloc(size)) == RT_NULL) return RT_NULL;

        rt_memcpy(nptr, ptr, size > z->z_chunksize ? z->z_chunksize : size);
        rt_free(ptr);

        return nptr;
    }

    return RT_NULL;
}
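/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * usual defensive rt_realloc() pattern. Assigning the result to a temporary
 * keeps the original block reachable if the resize fails.
 */
#if 0
static void realloc_example(void)
{
    char *buf, *tmp;

    buf = (char *)rt_malloc(32);
    if (buf == RT_NULL) return;

    tmp = (char *)rt_realloc(buf, 128);
    if (tmp == RT_NULL)
    {
        /* resize failed: buf is still valid and must still be freed */
        rt_free(buf);
        return;
    }
    buf = tmp;

    /* ... use the enlarged buffer ... */
    rt_free(buf);
}
#endif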
/**
 * This function will contiguously allocate enough space for count objects
 * that are size bytes of memory each and returns a pointer to the allocated
 * memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 *
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *rt_calloc(rt_size_t count, rt_size_t size)
{
    void *p;

    /* allocate 'count' objects of size 'size' */
    p = rt_malloc(count * size);

    /* zero the memory */
    if (p) rt_memset(p, 0, count * size);

    return p;
}
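/*
 * Illustrative sketch (an assumption, not part of the original file):
 * rt_calloc() is convenient for arrays that must start zeroed. Note that
 * count * size is computed without an overflow check above, so callers
 * should keep both factors within sane bounds.
 */
#if 0
static void calloc_example(void)
{
    rt_uint32_t *table;

    /* 16 zero-initialized 32-bit entries */
    table = (rt_uint32_t *)rt_calloc(16, sizeof(rt_uint32_t));
    if (table != RT_NULL)
    {
        /* ... fill and use the table ... */
        rt_free(table);
    }
}
#endif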
/**
 * This function will release the memory block previously allocated by rt_malloc.
 * The released memory block is returned to the system heap.
 *
 * @param ptr the address of memory which will be released
 */
void rt_free(void *ptr)
{
    slab_zone *z;
    slab_chunk *chunk;
    struct memusage *kup;

    /* free a RT_NULL pointer */
    if (ptr == RT_NULL) return;

#ifdef RT_USING_HOOK
    if (rt_free_hook != RT_NULL) rt_free_hook(ptr);
#endif

#ifdef RT_USING_MODULE
    if (rt_module_self() != RT_NULL)
    {
        rt_module_free(rt_module_self(), ptr);
        return;
    }
#endif

    /* get memory usage */
#ifdef RT_SLAB_DEBUG
    {
        rt_uint32_t addr = ((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
        rt_kprintf("free a memory 0x%x and align to 0x%x, kup index %d\n",
            (rt_uint32_t)ptr,
            (rt_uint32_t)addr,
            ((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS);
    }
#endif

    kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
    /* release large allocation */
    if (kup->type == PAGE_TYPE_LARGE)
    {
        rt_uint32_t size;

        /* lock heap */
        rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
        /* clear page counter */
        size = kup->size;
        kup->size = 0;

#ifdef RT_MEM_STATS
        used_mem -= size * RT_MM_PAGE_SIZE;
#endif
        rt_sem_release(&heap_sem);

#ifdef RT_SLAB_DEBUG
        rt_kprintf("free large memory block 0x%x, page count %d\n", (rt_uint32_t)ptr, size);
#endif

        /* free this page */
        rt_page_free(ptr, size);

        return;
    }

    /* lock heap */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* slab zone case: get the zone from the page descriptor */
    z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) - kup->size * RT_MM_PAGE_SIZE);
    RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

    chunk = (slab_chunk *)ptr;
    chunk->c_next = z->z_freechunk;
    z->z_freechunk = chunk;

#ifdef RT_MEM_STATS
    used_mem -= z->z_chunksize;
#endif

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_nfree++ == 0)
    {
        z->z_next = zone_array[z->z_zoneindex];
        zone_array[z->z_zoneindex] = z;
    }

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_nfree == z->z_nmax &&
        (z->z_next || zone_array[z->z_zoneindex] != z))
    {
        slab_zone **pz;

#ifdef RT_SLAB_DEBUG
        rt_kprintf("free zone 0x%x, zone index %d\n", (rt_uint32_t)z, z->z_zoneindex);
#endif

        /* remove zone from zone array list */
        for (pz = &zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next) ;
        *pz = z->z_next;

        /* reset zone */
        z->z_magic = -1;

        /* insert to free zone list */
        z->z_next = zone_free;
        zone_free = z;

        ++zone_free_cnt;

        /* release zone to page allocator */
        if (zone_free_cnt > ZONE_RELEASE_THRESH)
        {
            register rt_base_t i;

            z = zone_free;
            zone_free = z->z_next;
            --zone_free_cnt;

            /* set memory usage for the pages of this zone */
            for (i = 0, kup = btokup(z); i < zone_page_cnt; i ++)
            {
                kup->type = PAGE_TYPE_FREE;
                kup->size = 0;
                kup ++;
            }

            /* unlock heap */
            rt_sem_release(&heap_sem);

            /* release pages */
            rt_page_free(z, zone_size / RT_MM_PAGE_SIZE);

            return;
        }
    }

    /* unlock heap */
    rt_sem_release(&heap_sem);
}
#ifdef RT_MEM_STATS
void rt_memory_info(rt_uint32_t *total,
    rt_uint32_t *used,
    rt_uint32_t *max_used)
{
    if (total != RT_NULL) *total = heap_end - heap_start;
    if (used != RT_NULL) *used = used_mem;
    if (max_used != RT_NULL) *max_used = max_mem;
}
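/*
 * Illustrative sketch (an assumption, not part of the original file):
 * querying heap statistics with rt_memory_info(). Any of the pointers may
 * be RT_NULL if that particular figure is not needed.
 */
#if 0
static void memory_info_example(void)
{
    rt_uint32_t total, used, max_used;

    rt_memory_info(&total, &used, &max_used);
    rt_kprintf("heap: %d bytes total, %d in use, %d peak\n",
        total, used, max_used);
}
#endif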
#ifdef RT_USING_FINSH
#include <finsh.h>
void list_mem(void)
{
    rt_kprintf("total memory: %d\n", heap_end - heap_start);
    rt_kprintf("used memory : %d\n", used_mem);
    rt_kprintf("maximum allocated memory: %d\n", max_mem);
}
FINSH_FUNCTION_EXPORT(list_mem, list memory usage information)
#endif
#endif

/*@}*/

#endif