 * Copyright (c) 2006-2018, RT-Thread Development Team
 * SPDX-License-Identifier: Apache-2.0
 * 2008-07-12     Bernard      the first version
 * 2010-07-13     Bernard      fix RT_ALIGN issue found by kuronca
 * 2010-10-23     yi.qiu       add module memory allocator
 * 2010-12-18     yi.qiu       fix zone release bug
 *
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *    notice, this list of conditions and the following disclaimer in
 * 3. Neither the name of The DragonFly Project nor the names of its
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * chunks out in an array within the zone.  Allocation and deallocation
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * the linked list of chunks.
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * Alloc Size       Chunking        Number of zones
 *
 *     0-127            8               16
 *   128-255           16                8
 *   256-511           32                8
 *   512-1023          64                8
 *  1024-2047         128                8
 *  2048-4095         256                8
 *  4096-8191         512                8
 *  8192-16383       1024                8
 * 16384-32767       2048                8
 *
 * (a sketch of how a request size is mapped onto this table follows this
 *  header comment)
 * To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *  + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *  + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *  + malloc(0) is allowed and returns non-RT_NULL (ahc driver)
 *  + ability to allocate arbitrarily large chunks of memory
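
/*
 * Illustrative sketch, not part of the original file: one plausible way the
 * table above maps a request size to a chunk size and zone index.  The name
 * zoneindex_sketch and the exact rounding are assumptions; only the first two
 * table rows are spelled out, the remaining rows follow the same pattern.
 */
rt_inline int zoneindex_sketch(rt_size_t *bytes)
{
    rt_uint32_t n = (rt_uint32_t)*bytes;    /* requested size */

    if (n < 128)
    {
        *bytes = n = (n + 7) & ~7;          /* round up to an 8-byte chunk */
        return (n / 8 - 1);                 /* 16 zones cover sizes 0-127  */
    }
    if (n < 256)
    {
        *bytes = n = (n + 15) & ~15;        /* 16-byte chunking            */
        return (n / 16 + 7);                /* 8 zones cover sizes 128-255 */
    }
    /* ... 256-511 uses 32-byte chunks, 512-1023 uses 64, and so on ... */
    return -1;                              /* beyond the table: not slab-managed */
}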
 * The IN-BAND zone header is placed at the beginning of each zone.

    struct slab_zone *z_next;      /* zoneary[] link if z_nfree non-zero */
    rt_uint8_t       *z_baseptr;   /* pointer to start of chunk array */
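
/*
 * Sketch of the remaining zone-header fields that the code below relies on
 * (z_magic, z_nfree, z_nmax, z_uindex, z_chunksize, z_zoneindex,
 * z_freechunk).  Field order and exact types are assumptions; only the two
 * members quoted above come from the original excerpt.
 */
typedef struct slab_zone
{
    rt_int32_t         z_magic;      /* magic number for sanity check       */
    rt_int32_t         z_nfree;      /* free chunks left in this zone       */
    rt_int32_t         z_nmax;       /* maximum number of chunks            */
    struct slab_zone  *z_next;       /* zoneary[] link if z_nfree non-zero  */
    rt_uint8_t        *z_baseptr;    /* pointer to start of chunk array     */
    rt_int32_t         z_uindex;     /* current initial allocation index    */
    rt_int32_t         z_chunksize;  /* chunk size for validation           */
    rt_int32_t         z_zoneindex;  /* zone index                          */
    struct slab_chunk *z_freechunk;  /* free chunk list                     */
} slab_zone;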
#define ZALLOC_ZONE_LIMIT       (16 * 1024)     /* max slab-managed alloc */
#define NZONES                  72              /* number of zones */
#define ZONE_RELEASE_THRESH     2               /* threshold number of zones */

static slab_zone *zone_array[NZONES];   /* linked list of zones NFree > 0 */
 * Misc constants.  Note that allocations that are exact multiples of

#define MIN_CHUNK_MASK          (MIN_CHUNK_SIZE - 1)

 * Array of descriptors that describe the contents of each page

    (&memusage[((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS])
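
/*
 * Sketch (an assumption, not quoted from the file) of the per-page usage
 * descriptor that the expression above indexes: the rest of the excerpt only
 * relies on a 'type' tag (PAGE_TYPE_FREE/SMALL/LARGE) and a 'size' field.
 */
struct memusage
{
    rt_uint32_t type: 2;    /* page type: free, small (slab) or large       */
    rt_uint32_t size: 30;   /* pages allocated, or offset back to the zone  */
};
static struct memusage *memusage = RT_NULL;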
    rt_size_t page;     /* number of pages */
    char dummy[RT_MM_PAGE_SIZE - (sizeof(struct rt_page_head *) + sizeof(rt_size_t))];
/* in rt_page_alloc() */
    for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
        if (b->page > npages)
            n->next = b->next;
            n->page = b->page - npages;
        if (b->page == npages)
            *prev = b->next;

/* in rt_page_free() */
    for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
        RT_ASSERT(b->page > 0);
        RT_ASSERT(b > n || b + b->page <= n);
        if (b + b->page == n)
            if (b + (b->page += npages) == b->next)
                b->page += b->next->page;
                b->next = b->next->next;
            n->page = b->page + npages;
            n->next = b->next;

    n->page = npages;
    n->next = b;
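
/*
 * Usage sketch (assumption: the page allocator above keeps the obvious
 * signatures, rt_page_alloc(npages) returning a page-aligned block and
 * rt_page_free(addr, npages) returning it to the free list).
 */
static void page_alloc_example(void)
{
    void *pages = rt_page_alloc(4);     /* grab 4 contiguous pages          */

    if (pages != RT_NULL)
    {
        /* ... use the pages ... */
        rt_page_free(pages, 4);         /* free with the same page count    */
    }
}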
 * @param begin_addr the beginning address of the system heap
 * @param end_addr the end address of the system heap

/* in rt_system_heap_init() */
        rt_kprintf("rt_system_heap_init, wrong address[0x%x - 0x%x]\n",
    limsize = heap_end - heap_start;
    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("heap[0x%x - 0x%x], size 0x%x, 0x%x pages\n",

/* in zoneindex() */
        return (n / 8 - 1);
 * - If nbytes is less than zero, or
 * - If there is no nbytes-sized memory available in the system,
 *
 * @param size the size of memory to be allocated
/* in rt_malloc() */
    /*
     * Handle large allocations directly.  There should not be very many of
     */
    kup->type = PAGE_TYPE_LARGE;
    kup->size = size >> RT_MM_PAGE_BITS;
                 ((rt_uint32_t)chunk - heap_start) >> RT_MM_PAGE_BITS));

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    RT_ASSERT(z->z_nfree > 0);

    if (--z->z_nfree == 0)
        zone_array[zi] = z->z_next;
        z->z_next = RT_NULL;

    /* it must be available in the never-before-used-memory area */
    if (z->z_uindex + 1 != z->z_nmax)
        z->z_uindex = z->z_uindex + 1;
    chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);

    chunk = z->z_freechunk;
    z->z_freechunk = z->z_freechunk->c_next;

    used_mem += z->z_chunksize;

    /*
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     */
    zone_free = z->z_next;
    --zone_free_cnt;

    kup->type = PAGE_TYPE_SMALL;
    kup->size = off;

    /* offset of slab zone struct in zone */

    /*
     * Guarantee power-of-2 alignment for power-of-2-sized chunks.
     * Otherwise just 8-byte align the data.
     */
    if ((size | (size - 1)) + 1 == (size << 1))
        off = (off + size - 1) & ~(size - 1);
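    /*
     * Added note (not in the original): the test above is a standard
     * power-of-two check -- e.g. for size = 64, (64 | 63) + 1 == 128 == 64 << 1,
     * so 'off' is rounded up to the next 64-byte boundary; for size = 48 the
     * equality fails and the chunk array is only 8-byte aligned.
     */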
    z->z_magic = ZALLOC_SLAB_MAGIC;
    z->z_zoneindex = zi;
    z->z_nmax = (zone_size - off) / size;
    z->z_nfree = z->z_nmax - 1;
    z->z_baseptr = (rt_uint8_t *)z + off;
    z->z_uindex = 0;
    z->z_chunksize = size;

    chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);

    z->z_next = zone_array[zi];

    used_mem += z->z_chunksize;
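
/*
 * Usage sketch: the allocator is used through the rt_malloc()/rt_free() pair
 * defined in this file; the RT_NULL check matches the return rules documented
 * above.  A 100-byte request falls well under ZALLOC_ZONE_LIMIT, so it is
 * served from a slab zone rather than whole pages.
 */
static void malloc_example(void)
{
    char *buf = rt_malloc(100);

    if (buf != RT_NULL)
    {
        rt_memset(buf, 0, 100);
        rt_free(buf);
    }
}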
 * This function will change the size of a previously allocated memory block.
 *
 * @param size the new size of the memory block

/* in rt_realloc() */
    if (kup->type == PAGE_TYPE_LARGE)
        osize = kup->size << RT_MM_PAGE_BITS;
    else if (kup->type == PAGE_TYPE_SMALL)
        z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) -
                          kup->size * RT_MM_PAGE_SIZE);
        RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

        if (z->z_chunksize == size)

        rt_memcpy(nptr, ptr, size > z->z_chunksize ? z->z_chunksize : size);
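
/*
 * Usage sketch (assumption: rt_realloc() returns RT_NULL on failure and
 * leaves the original block intact, as a conventional realloc does), showing
 * the temporary-pointer idiom that avoids leaking the old block.
 */
static void realloc_example(void)
{
    char *buf = rt_malloc(64);
    char *tmp;

    if (buf == RT_NULL)
        return;

    tmp = rt_realloc(buf, 256);     /* grow the block                       */
    if (tmp != RT_NULL)
        buf = tmp;                  /* on failure, keep using the old block */

    rt_free(buf);
}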
 * that are size bytes of memory each and returns a pointer to the allocated
 * memory.  The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate

/* in rt_calloc() */
    /* allocate 'count' objects of size 'size' */
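
/*
 * Minimal sketch of what the comment above describes (not necessarily the
 * exact body of this file's rt_calloc): allocate count * size bytes and
 * zero-fill them.
 */
void *calloc_sketch(rt_size_t count, rt_size_t size)
{
    void *p = rt_malloc(count * size);

    if (p != RT_NULL)
        rt_memset(p, 0, count * size);

    return p;
}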
 * @param ptr the address of memory which will be released

/* in rt_free() */
                 ((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS));

    if (kup->type == PAGE_TYPE_LARGE)
        size = kup->size;
        kup->size = 0;

        used_mem -= size * RT_MM_PAGE_SIZE;

    /* zone case: find the zone that owns this chunk */
    z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) -
                      kup->size * RT_MM_PAGE_SIZE);
    RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

    chunk->c_next = z->z_freechunk;
    z->z_freechunk = chunk;

    used_mem -= z->z_chunksize;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     */
    if (z->z_nfree++ == 0)
        z->z_next = zone_array[z->z_zoneindex];
        zone_array[z->z_zoneindex] = z;

    if (z->z_nfree == z->z_nmax &&
        (z->z_next || zone_array[z->z_zoneindex] != z))
                      (rt_uint32_t)z, z->z_zoneindex));

        for (pz = &zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next)
        *pz = z->z_next;
        z->z_magic = -1;

        z->z_next = zone_free;

        zone_free = z->z_next;
        --zone_free_cnt;

        kup->type = PAGE_TYPE_FREE;
        kup->size = 0;
/* in rt_memory_info() */
    *total = heap_end - heap_start;

/* in list_mem() */
    rt_kprintf("total memory: %d\n", heap_end - heap_start);
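
/*
 * Usage sketch (assumption: rt_memory_info() fills three rt_uint32_t out
 * parameters -- total, used and maximum used -- consistent with the 'total'
 * assignment above).
 */
static void memory_info_example(void)
{
    rt_uint32_t total, used, max_used;

    rt_memory_info(&total, &used, &max_used);
    rt_kprintf("heap: %d bytes total, %d in use (peak %d)\n",
               total, used, max_used);
}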