Home | History | Annotate | Line # | Download | only in linux
slab.h revision 1.9
      1 /*	$NetBSD: slab.h,v 1.9 2021/12/19 12:12:23 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2013 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Taylor R. Campbell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #ifndef _LINUX_SLAB_H_
     33 #define _LINUX_SLAB_H_
     34 
     35 #include <sys/kmem.h>
     36 
     37 #include <machine/limits.h>
     38 
     39 #include <uvm/uvm_extern.h>	/* For PAGE_SIZE.  */
     40 
     41 #include <linux/gfp.h>
     42 #include <linux/rcupdate.h>
     43 
     44 #define	ARCH_KMALLOC_MINALIGN	4 /* XXX ??? */
     45 
     46 struct linux_malloc {
     47 	size_t	lm_size;
     48 } __aligned(ALIGNBYTES + 1);
     49 
     50 static inline int
     51 linux_gfp_to_kmem(gfp_t gfp)
     52 {
     53 	int flags = 0;
     54 
     55 	/* This has no meaning to us.  */
     56 	gfp &= ~__GFP_NOWARN;
     57 	gfp &= ~__GFP_RECLAIMABLE;
     58 
     59 	/* Pretend this was the same as not passing __GFP_WAIT.  */
     60 	if (ISSET(gfp, __GFP_NORETRY)) {
     61 		gfp &= ~__GFP_NORETRY;
     62 		gfp &= ~__GFP_WAIT;
     63 	}
     64 
     65 	if (ISSET(gfp, __GFP_ZERO)) {
     66 		gfp &= ~__GFP_ZERO;
     67 	}
     68 
     69 	/*
     70 	 * XXX Handle other cases as they arise -- prefer to fail early
     71 	 * rather than allocate memory without respecting parameters we
     72 	 * don't understand.
     73 	 */
     74 	KASSERT((gfp == GFP_ATOMIC) || (gfp == GFP_NOWAIT) ||
     75 	    ((gfp & ~__GFP_WAIT) == (GFP_KERNEL & ~__GFP_WAIT)));
     76 
     77 	if (ISSET(gfp, __GFP_WAIT)) {
     78 		flags |= KM_SLEEP;
     79 		gfp &= ~__GFP_WAIT;
     80 	} else {
     81 		flags |= KM_NOSLEEP;
     82 	}
     83 
     84 	return flags;
     85 }
     86 
     87 /*
     88  * XXX vmalloc and kmalloc both use this.  If you change that, be sure
     89  * to update vmalloc in <linux/vmalloc.h> and kvfree in <linux/mm.h>.
     90  */
     91 
     92 static inline void *
     93 kmalloc(size_t size, gfp_t gfp)
     94 {
     95 	struct linux_malloc *lm;
     96 	int kmflags = linux_gfp_to_kmem(gfp);
     97 
     98 	KASSERTMSG(size < SIZE_MAX - sizeof(*lm), "size=%zu", size);
     99 
    100 	if (gfp & __GFP_ZERO)
    101 		lm = kmem_intr_zalloc(sizeof(*lm) + size, kmflags);
    102 	else
    103 		lm = kmem_intr_alloc(sizeof(*lm) + size, kmflags);
    104 	if (lm == NULL)
    105 		return NULL;
    106 
    107 	lm->lm_size = size;
    108 	return lm + 1;
    109 }
    110 
    111 static inline void *
    112 kzalloc(size_t size, gfp_t gfp)
    113 {
    114 	return kmalloc(size, gfp | __GFP_ZERO);
    115 }
    116 
    117 static inline void *
    118 kmalloc_array(size_t n, size_t size, gfp_t gfp)
    119 {
    120 	if ((size != 0) && (n > (SIZE_MAX / size)))
    121 		return NULL;
    122 	return kmalloc(n * size, gfp);
    123 }
    124 
    125 static inline void *
    126 kcalloc(size_t n, size_t size, gfp_t gfp)
    127 {
    128 	return kmalloc_array(n, size, (gfp | __GFP_ZERO));
    129 }
    130 
    131 static inline void *
    132 krealloc(void *ptr, size_t size, gfp_t gfp)
    133 {
    134 	struct linux_malloc *olm, *nlm;
    135 	int kmflags = linux_gfp_to_kmem(gfp);
    136 
    137 	if (gfp & __GFP_ZERO)
    138 		nlm = kmem_intr_zalloc(sizeof(*nlm) + size, kmflags);
    139 	else
    140 		nlm = kmem_intr_alloc(sizeof(*nlm) + size, kmflags);
    141 	if (nlm == NULL)
    142 		return NULL;
    143 
    144 	nlm->lm_size = size;
    145 	if (ptr) {
    146 		olm = (struct linux_malloc *)ptr - 1;
    147 		memcpy(nlm + 1, olm + 1, MIN(nlm->lm_size, olm->lm_size));
    148 		kmem_intr_free(olm, sizeof(*olm) + olm->lm_size);
    149 	}
    150 	return nlm + 1;
    151 }
    152 
    153 static inline void
    154 kfree(void *ptr)
    155 {
    156 	struct linux_malloc *lm;
    157 
    158 	if (ptr == NULL)
    159 		return;
    160 
    161 	lm = (struct linux_malloc *)ptr - 1;
    162 	kmem_intr_free(lm, sizeof(*lm) + lm->lm_size);
    163 }
    164 
    165 #define	SLAB_HWCACHE_ALIGN	__BIT(0)
    166 #define	SLAB_RECLAIM_ACCOUNT	__BIT(1)
    167 #define	SLAB_TYPESAFE_BY_RCU	__BIT(2)
    168 
    169 struct kmem_cache {
    170 	pool_cache_t	kc_pool_cache;
    171 	size_t		kc_size;
    172 	void		(*kc_ctor)(void *);
    173 	void		(*kc_dtor)(void *);
    174 };
    175 
    176 /* XXX These should be in <sys/pool.h>.  */
    177 void *	pool_page_alloc(struct pool *, int);
    178 void	pool_page_free(struct pool *, void *);
    179 
    180 static void
    181 pool_page_free_rcu(struct pool *pp, void *v)
    182 {
    183 
    184 	synchronize_rcu();
    185 	pool_page_free(pp, v);
    186 }
    187 
    188 static struct pool_allocator pool_allocator_kmem_rcu = {
    189 	.pa_alloc = pool_page_alloc,
    190 	.pa_free = pool_page_free_rcu,
    191 	.pa_pagesz = 0,
    192 };
    193 
    194 static int
    195 kmem_cache_ctor(void *cookie, void *ptr, int flags __unused)
    196 {
    197 	struct kmem_cache *const kc = cookie;
    198 
    199 	if (kc->kc_ctor)
    200 		(*kc->kc_ctor)(ptr);
    201 
    202 	return 0;
    203 }
    204 
    205 static void
    206 kmem_cache_dtor(void *cookie, void *ptr)
    207 {
    208 	struct kmem_cache *const kc = cookie;
    209 
    210 	if (kc->kc_dtor)
    211 		(*kc->kc_dtor)(ptr);
    212 }
    213 
    214 static inline struct kmem_cache *
    215 kmem_cache_create(const char *name, size_t size, size_t align,
    216     unsigned long flags, void (*ctor)(void *))
    217 {
    218 	struct pool_allocator *palloc = NULL;
    219 	struct kmem_cache *kc;
    220 
    221 	if (ISSET(flags, SLAB_HWCACHE_ALIGN))
    222 		align = roundup(MAX(1, align), CACHE_LINE_SIZE);
    223 	if (ISSET(flags, SLAB_TYPESAFE_BY_RCU))
    224 		palloc = &pool_allocator_kmem_rcu;
    225 
    226 	kc = kmem_alloc(sizeof(*kc), KM_SLEEP);
    227 	kc->kc_pool_cache = pool_cache_init(size, align, 0, 0, name, palloc,
    228 	    IPL_VM, &kmem_cache_ctor, NULL, kc);
    229 	kc->kc_size = size;
    230 	kc->kc_ctor = ctor;
    231 
    232 	return kc;
    233 }
    234 
    235 /* XXX extension */
    236 static inline struct kmem_cache *
    237 kmem_cache_create_dtor(const char *name, size_t size, size_t align,
    238     unsigned long flags, void (*ctor)(void *), void (*dtor)(void *))
    239 {
    240 	struct pool_allocator *palloc = NULL;
    241 	struct kmem_cache *kc;
    242 
    243 	if (ISSET(flags, SLAB_HWCACHE_ALIGN))
    244 		align = roundup(MAX(1, align), CACHE_LINE_SIZE);
    245 	if (ISSET(flags, SLAB_TYPESAFE_BY_RCU))
    246 		palloc = &pool_allocator_kmem_rcu;
    247 
    248 	kc = kmem_alloc(sizeof(*kc), KM_SLEEP);
    249 	kc->kc_pool_cache = pool_cache_init(size, align, 0, 0, name, palloc,
    250 	    IPL_VM, &kmem_cache_ctor, &kmem_cache_dtor, kc);
    251 	kc->kc_size = size;
    252 	kc->kc_ctor = ctor;
    253 	kc->kc_dtor = dtor;
    254 
    255 	return kc;
    256 }
    257 
    258 #define	KMEM_CACHE(T, F)						      \
    259 	kmem_cache_create(#T, sizeof(struct T), __alignof__(struct T),	      \
    260 	    (F), NULL)
    261 
    262 static inline void
    263 kmem_cache_destroy(struct kmem_cache *kc)
    264 {
    265 
    266 	pool_cache_destroy(kc->kc_pool_cache);
    267 	kmem_free(kc, sizeof(*kc));
    268 }
    269 
    270 static inline void *
    271 kmem_cache_alloc(struct kmem_cache *kc, gfp_t gfp)
    272 {
    273 	int flags = 0;
    274 	void *ptr;
    275 
    276 	if (gfp & __GFP_WAIT)
    277 		flags |= PR_WAITOK;
    278 	else
    279 		flags |= PR_NOWAIT;
    280 
    281 	ptr = pool_cache_get(kc->kc_pool_cache, flags);
    282 	if (ptr == NULL)
    283 		return NULL;
    284 
    285 	if (ISSET(gfp, __GFP_ZERO))
    286 		(void)memset(ptr, 0, kc->kc_size);
    287 
    288 	return ptr;
    289 }
    290 
    291 static inline void *
    292 kmem_cache_zalloc(struct kmem_cache *kc, gfp_t gfp)
    293 {
    294 
    295 	return kmem_cache_alloc(kc, (gfp | __GFP_ZERO));
    296 }
    297 
    298 static inline void
    299 kmem_cache_free(struct kmem_cache *kc, void *ptr)
    300 {
    301 
    302 	pool_cache_put(kc->kc_pool_cache, ptr);
    303 }
    304 
    305 static inline void
    306 kmem_cache_shrink(struct kmem_cache *kc)
    307 {
    308 
    309 	pool_cache_reclaim(kc->kc_pool_cache);
    310 }
    311 
    312 #endif  /* _LINUX_SLAB_H_ */
    313