Index: subr_kmem.c
===================================================================
RCS file: /cvsroot/src/sys/kern/subr_kmem.c,v
retrieving revision 1.19
diff -u -p -r1.19 subr_kmem.c
--- subr_kmem.c	9 Feb 2008 12:56:20 -0000	1.19
+++ subr_kmem.c	11 Jan 2009 13:25:22 -0000
@@ -1,4 +1,33 @@
-/*	$NetBSD: subr_kmem.c,v 1.19 2008/02/09 12:56:20 yamt Exp $	*/
+/*	$NetBSD: subr_kmem.c,v 1.21 2008/12/15 11:33:13 ad Exp $	*/
+
+/*-
+ * Copyright (c) 2008 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software developed for The NetBSD Foundation
+ * by Andrew Doran.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
 
 /*-
  * Copyright (c)2006 YAMAMOTO Takashi,
@@ -42,6 +71,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,
 #include <sys/param.h>
 #include <sys/callback.h>
 #include <sys/kmem.h>
+#include <sys/pool.h>
 #include <sys/debug.h>
 #include <sys/lockdebug.h>
 
@@ -49,14 +79,29 @@ __KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,
 #include <uvm/uvm_map.h>
 #include <lib/libkern/libkern.h>
 #define	KMEM_QUANTUM_SIZE	(ALIGNBYTES + 1)
+#define	KMEM_QCACHE_MAX		(KMEM_QUANTUM_SIZE * 32)
+#define	KMEM_CACHE_COUNT	16
+
+typedef struct kmem_cache {
+	pool_cache_t	kc_cache;
+	struct pool_allocator kc_pa;
+	char		kc_name[12];
+} kmem_cache_t;
 
 static vmem_t *kmem_arena;
 static struct callback_entry kmem_kva_reclaim_entry;
 
+static kmem_cache_t kmem_cache[KMEM_CACHE_COUNT + 1];
+static size_t kmem_cache_max;
+static size_t kmem_cache_min;
+static size_t kmem_cache_mask;
+static int kmem_cache_shift;
+
 #if defined(DEBUG)
 static void *kmem_freecheck;
 #define	KMEM_POISON
 #define	KMEM_REDZONE
+#define	KMEM_SIZE
 #endif /* defined(DEBUG) */
 
 #if defined(KMEM_POISON)
@@ -73,6 +118,16 @@ static void kmem_poison_check(void *, si
 #define	REDZONE_SIZE	0
 #endif /* defined(KMEM_REDZONE) */
 
+#if defined(KMEM_SIZE)
+#define	SIZE_SIZE	(min(KMEM_QUANTUM_SIZE, sizeof(size_t)))
+static void kmem_size_set(void *, size_t);
+static void kmem_size_check(void *, size_t);
+#else
+#define	SIZE_SIZE	0
+#define	kmem_size_set(p, sz)	/* nothing */
+#define	kmem_size_check(p, sz)	/* nothing */
+#endif
+
 static vmem_addr_t kmem_backend_alloc(vmem_t *, vmem_size_t, vmem_size_t *,
     vm_flag_t);
 static void kmem_backend_free(vmem_t *, vmem_addr_t, vmem_size_t);
@@ -97,6 +152,25 @@ kmf_to_vmf(km_flag_t kmflags)
 	return vmflags;
 }
 
+static void *
+kmem_poolpage_alloc(struct pool *pool, int prflags)
+{
+
+	KASSERT(KM_SLEEP == PR_WAITOK);
+	KASSERT(KM_NOSLEEP == PR_NOWAIT);
+
+	return (void *)vmem_alloc(kmem_arena, pool->pr_alloc->pa_pagesz,
+	    kmf_to_vmf(prflags) | VM_INSTANTFIT);
+
+}
+
+static void
+kmem_poolpage_free(struct pool *pool, void *addr)
+{
+
+	vmem_free(kmem_arena, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
+}
+
 /* ---- kmem API */
 
 /*
@@ -108,14 +182,27 @@ kmf_to_vmf(km_flag_t kmflags)
 void *
 kmem_alloc(size_t size, km_flag_t kmflags)
 {
-	void *p;
+	kmem_cache_t *kc;
+	uint8_t *p;
 
-	size += REDZONE_SIZE;
+	KASSERT(!cpu_intr_p());
+	KASSERT((curlwp->l_pflag & LP_INTR) == 0);
+
+	size += REDZONE_SIZE + SIZE_SIZE;
+	if (size >= kmem_cache_min && size <= kmem_cache_max) {
+		kc = &kmem_cache[(size + kmem_cache_mask) >> kmem_cache_shift];
+		KASSERT(size <= kc->kc_pa.pa_pagesz);
+		KASSERT(KM_SLEEP == PR_WAITOK);
+		KASSERT(KM_NOSLEEP == PR_NOWAIT);
+		kmflags &= (KM_SLEEP | KM_NOSLEEP);
+		return pool_cache_get(kc->kc_cache, kmflags);
+	}
 	p = (void *)vmem_alloc(kmem_arena, size,
 	    kmf_to_vmf(kmflags) | VM_INSTANTFIT);
-	if (p != NULL) {
+	if (__predict_true(p != NULL)) {
 		kmem_poison_check(p, kmem_roundup_size(size));
 		FREECHECK_OUT(&kmem_freecheck, p);
+		kmem_size_set(p, size);
 	}
 	return p;
 }
@@ -147,24 +234,70 @@ kmem_zalloc(size_t size, km_flag_t kmfla
 void
 kmem_free(void *p, size_t size)
 {
+	kmem_cache_t *kc;
+
+	KASSERT(!cpu_intr_p());
+	KASSERT((curlwp->l_pflag & LP_INTR) == 0);
+
+	size += SIZE_SIZE;
+	p = (uint8_t *)p - SIZE_SIZE;
+	kmem_size_check(p, size + REDZONE_SIZE);
 	FREECHECK_IN(&kmem_freecheck, p);
 	LOCKDEBUG_MEM_CHECK(p, size);
 	kmem_poison_check((char *)p + size,
 	    kmem_roundup_size(size + REDZONE_SIZE) - size);
 	kmem_poison_fill(p, size);
-	vmem_free(kmem_arena, (vmem_addr_t)p, size + REDZONE_SIZE);
+	if (size >= kmem_cache_min && size <= kmem_cache_max) {
+		kc = &kmem_cache[(size + kmem_cache_mask) >> kmem_cache_shift];
+		KASSERT(size <= kc->kc_pa.pa_pagesz);
+		pool_cache_put(kc->kc_cache, p);
+	} else {
+		vmem_free(kmem_arena, (vmem_addr_t)p, size + REDZONE_SIZE);
+	}
 }
 
+
 void
 kmem_init(void)
 {
+	kmem_cache_t *kc;
+	size_t sz;
+	int i;
 
 	kmem_arena = vmem_create("kmem", 0, 0, KMEM_QUANTUM_SIZE,
-	    kmem_backend_alloc, kmem_backend_free, NULL,
-	    KMEM_QUANTUM_SIZE * 32, VM_SLEEP, IPL_NONE);
+	    kmem_backend_alloc, kmem_backend_free, NULL, KMEM_QCACHE_MAX,
+	    VM_SLEEP, IPL_NONE);
 	callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback,
 	    &kmem_kva_reclaim_entry, kmem_arena, kmem_kva_reclaim_callback);
+
+	/*
+	 * kmem caches start at twice the size of the largest vmem qcache
+	 * and end at PAGE_SIZE or earlier.  assert that KMEM_QCACHE_MAX
+	 * is a power of two.
+	 */
+	KASSERT(ffs(KMEM_QCACHE_MAX) != 0);
+	KASSERT(KMEM_QCACHE_MAX - (1 << (ffs(KMEM_QCACHE_MAX) - 1)) == 0);
+	kmem_cache_shift = ffs(KMEM_QCACHE_MAX);
+	kmem_cache_min = 1 << kmem_cache_shift;
+	kmem_cache_mask = kmem_cache_min - 1;
+	for (i = 1; i <= KMEM_CACHE_COUNT; i++) {
+		sz = i << kmem_cache_shift;
+		if (sz > PAGE_SIZE) {
+			break;
+		}
+		kmem_cache_max = sz;
+		kc = &kmem_cache[i];
+		kc->kc_pa.pa_pagesz = sz;
+		kc->kc_pa.pa_alloc = kmem_poolpage_alloc;
+		kc->kc_pa.pa_free = kmem_poolpage_free;
+		sprintf(kc->kc_name, "kmem-%zd", sz);
+		kc->kc_cache = pool_cache_init(sz,
+		    KMEM_QUANTUM_SIZE, 0, PR_NOALIGN | PR_NOTOUCH,
+		    kc->kc_name, &kc->kc_pa, IPL_NONE,
+		    NULL, NULL, NULL);
+		KASSERT(kc->kc_cache != NULL);
+	}
 }
 
 size_t
@@ -276,3 +409,24 @@ kmem_poison_check(void *p, size_t sz)
 }
 
 #endif /* defined(KMEM_POISON) */
+
+#if defined(KMEM_SIZE)
+static void
+kmem_size_set(void *p, size_t sz)
+{
+
+	memcpy(p, &sz, sizeof(sz));
+}
+
+static void
+kmem_size_check(void *p, size_t sz)
+{
+	size_t psz;
+
+	memcpy(&psz, p, sizeof(psz));
+	if (psz != sz) {
+		panic("kmem_free(%p, %zu) != allocated size %zu",
+		    (uint8_t*)p + SIZE_SIZE, sz - SIZE_SIZE, psz);
+	}
+}
+#endif /* defined(KMEM_SIZE) */
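For readers following the arithmetic in kmem_alloc() and kmem_init() above: the patch rounds each request up to the next multiple of kmem_cache_min (twice the largest vmem qcache size, since ffs() of a power of two 2^n returns n + 1) and uses that multiple directly as the index into kmem_cache[]. Below is a minimal standalone sketch of that bucket math, not kernel code; QCACHE_MAX, PAGE_SZ, and the variable names are illustrative stand-ins (it assumes an 8-byte quantum, so KMEM_QUANTUM_SIZE * 32 == 256).

/*
 * Userland model of the cache-index arithmetic in the patch above.
 * With QCACHE_MAX == 256, ffs(256) == 9, so the caches serve sizes
 * 512, 1024, ... up to PAGE_SZ, and cache i serves sizes in
 * ((i - 1) * 512, i * 512].
 */
#include <assert.h>
#include <stdio.h>
#include <strings.h>		/* ffs() */

#define	QCACHE_MAX	256	/* stand-in for KMEM_QCACHE_MAX */
#define	PAGE_SZ		4096	/* stand-in for PAGE_SIZE */

int
main(void)
{
	int shift = ffs(QCACHE_MAX);		/* log2(QCACHE_MAX) + 1 */
	size_t min = (size_t)1 << shift;	/* smallest cached size */
	size_t mask = min - 1;
	size_t size;

	/* QCACHE_MAX must be a power of two, as kmem_init() asserts. */
	assert((QCACHE_MAX & (QCACHE_MAX - 1)) == 0);

	for (size = min; size <= PAGE_SZ; size += 300) {
		/* Same expression as kmem_alloc(): round up to a bucket. */
		size_t i = (size + mask) >> shift;

		printf("size %4zu -> kmem_cache[%zu] (object size %zu)\n",
		    size, i, i << shift);
		assert(size <= (i << shift));		/* bucket is large enough */
		assert(size > ((i - 1) << shift));	/* and is the tightest fit */
	}
	return 0;
}

The same index expression appears in kmem_free(), which is why the size header stored by kmem_size_set() matters: the freed size (plus SIZE_SIZE and REDZONE_SIZE) must reproduce the allocation-time size, or the object would be returned to the wrong pool cache; with KMEM_SIZE enabled, kmem_size_check() panics on such a mismatch instead.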