/*	$NetBSD: slab.h,v 1.11 2021/12/21 19:07:09 thorpej Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SLAB_H_
#define _LINUX_SLAB_H_

#include <sys/kmem.h>

#include <machine/limits.h>

#include <uvm/uvm_extern.h>	/* For PAGE_SIZE.  */

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/rcupdate.h>

#define	ARCH_KMALLOC_MINALIGN	4 /* XXX ??? */

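/*
 * Size header prepended to each kmalloc allocation; the alignment
 * ensures the payload that follows keeps the machine's strictest
 * alignment (ALIGNBYTES + 1 bytes).
 */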
struct linux_malloc {
	size_t	lm_size;
} __aligned(ALIGNBYTES + 1);

static inline int
linux_gfp_to_kmem(gfp_t gfp)
{
	int flags = 0;

	/* This has no meaning to us.  */
	gfp &= ~__GFP_NOWARN;
	gfp &= ~__GFP_RECLAIMABLE;

	/* Pretend this was the same as not passing __GFP_WAIT.  */
	if (ISSET(gfp, __GFP_NORETRY)) {
		gfp &= ~__GFP_NORETRY;
		gfp &= ~__GFP_WAIT;
	}

	if (ISSET(gfp, __GFP_ZERO)) {
		gfp &= ~__GFP_ZERO;
	}

	/*
	 * XXX Handle other cases as they arise -- prefer to fail early
	 * rather than allocate memory without respecting parameters we
	 * don't understand.
	 */
	KASSERT((gfp == GFP_ATOMIC) || (gfp == GFP_NOWAIT) ||
	    ((gfp & ~__GFP_WAIT) == (GFP_KERNEL & ~__GFP_WAIT)));

	if (ISSET(gfp, __GFP_WAIT)) {
		flags |= KM_SLEEP;
		gfp &= ~__GFP_WAIT;
	} else {
		flags |= KM_NOSLEEP;
	}

	return flags;
}
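
/*
 * Illustrative sketch (not part of the API this header provides): how
 * the common gfp constants map through linux_gfp_to_kmem.  GFP_KERNEL
 * may sleep, so it becomes KM_SLEEP; GFP_ATOMIC and GFP_NOWAIT must
 * not sleep, so both become KM_NOSLEEP.
 */
static inline void
linux_gfp_to_kmem_example(void)
{

	KASSERT(linux_gfp_to_kmem(GFP_KERNEL) == KM_SLEEP);
	KASSERT(linux_gfp_to_kmem(GFP_ATOMIC) == KM_NOSLEEP);
	KASSERT(linux_gfp_to_kmem(GFP_NOWAIT) == KM_NOSLEEP);
}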

/*
 * XXX vmalloc and kmalloc both use this.  If you change that, be sure
 * to update vmalloc in <linux/vmalloc.h> and kvfree in <linux/mm.h>.
 */

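/*
 * kmalloc records the allocation size in the linux_malloc header just
 * before the pointer it hands out, because kmem_intr_free requires
 * the size at free time; kfree and krealloc read it back from there.
 */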
static inline void *
kmalloc(size_t size, gfp_t gfp)
{
	struct linux_malloc *lm;
	int kmflags = linux_gfp_to_kmem(gfp);

	KASSERTMSG(size < SIZE_MAX - sizeof(*lm), "size=%zu", size);

	if (gfp & __GFP_ZERO)
		lm = kmem_intr_zalloc(sizeof(*lm) + size, kmflags);
	else
		lm = kmem_intr_alloc(sizeof(*lm) + size, kmflags);
	if (lm == NULL)
		return NULL;

	lm->lm_size = size;
	return lm + 1;
}

static inline void *
kzalloc(size_t size, gfp_t gfp)
{
	return kmalloc(size, gfp | __GFP_ZERO);
}

static inline void *
kmalloc_array(size_t n, size_t size, gfp_t gfp)
{
	if ((size != 0) && (n > (SIZE_MAX / size)))
		return NULL;
	return kmalloc(n * size, gfp);
}

static inline void *
kcalloc(size_t n, size_t size, gfp_t gfp)
{
	return kmalloc_array(n, size, (gfp | __GFP_ZERO));
}

static inline void *
krealloc(void *ptr, size_t size, gfp_t gfp)
{
	struct linux_malloc *olm, *nlm;
	int kmflags = linux_gfp_to_kmem(gfp);

	KASSERTMSG(size < SIZE_MAX - sizeof(*nlm), "size=%zu", size);

	if (gfp & __GFP_ZERO)
		nlm = kmem_intr_zalloc(sizeof(*nlm) + size, kmflags);
	else
		nlm = kmem_intr_alloc(sizeof(*nlm) + size, kmflags);
	if (nlm == NULL)
		return NULL;

	nlm->lm_size = size;
	if (ptr) {
		olm = (struct linux_malloc *)ptr - 1;
		memcpy(nlm + 1, olm + 1, MIN(nlm->lm_size, olm->lm_size));
		kmem_intr_free(olm, sizeof(*olm) + olm->lm_size);
	}
	return nlm + 1;
}

static inline void
kfree(void *ptr)
{
	struct linux_malloc *lm;

	if (ptr == NULL)
		return;

	lm = (struct linux_malloc *)ptr - 1;
	kmem_intr_free(lm, sizeof(*lm) + lm->lm_size);
}
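
/*
 * Illustrative usage sketch (the function and variable names here are
 * hypothetical, not part of the API this header provides): allocate a
 * zeroed array, grow it, and free it.  As in Linux, krealloc frees
 * the old buffer on success but leaves it to the caller on failure.
 */
static inline void
linux_kmalloc_example(void)
{
	uint32_t *p, *q;

	/* Zeroed array of four entries; GFP_KERNEL may sleep.  */
	p = kcalloc(4, sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return;
	p[0] = 42;

	/* Grow to eight entries; the first four are copied over.  */
	q = krealloc(p, 8 * sizeof(*q), GFP_KERNEL);
	if (q == NULL) {
		kfree(p);	/* old buffer is still valid on failure */
		return;
	}

	kfree(q);
}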

#define	SLAB_HWCACHE_ALIGN	__BIT(0)
#define	SLAB_RECLAIM_ACCOUNT	__BIT(1)
#define	SLAB_TYPESAFE_BY_RCU	__BIT(2)
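
/*
 * SLAB_HWCACHE_ALIGN rounds the object alignment up to the CPU cache
 * line size.  SLAB_TYPESAFE_BY_RCU defers returning a cache's pages
 * to the system until an RCU grace period has elapsed, so RCU readers
 * may safely dereference an object of the cache's type even after it
 * has been freed.  SLAB_RECLAIM_ACCOUNT is accepted but has no effect
 * in this implementation.
 */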

struct kmem_cache {
	pool_cache_t	kc_pool_cache;
	size_t		kc_size;
	void		(*kc_ctor)(void *);
	void		(*kc_dtor)(void *);
};

/* XXX These should be in <sys/pool.h>.  */
void *	pool_page_alloc(struct pool *, int);
void	pool_page_free(struct pool *, void *);

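/*
 * Page allocator for SLAB_TYPESAFE_BY_RCU caches: identical to the
 * default, except pages go back to the system only after
 * synchronize_rcu, keeping freed objects type-stable for concurrent
 * RCU readers.
 */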
static void
pool_page_free_rcu(struct pool *pp, void *v)
{

	synchronize_rcu();
	pool_page_free(pp, v);
}

static struct pool_allocator pool_allocator_kmem_rcu = {
	.pa_alloc = pool_page_alloc,
	.pa_free = pool_page_free_rcu,
	.pa_pagesz = 0,
};

static int
kmem_cache_ctor(void *cookie, void *ptr, int flags __unused)
{
	struct kmem_cache *const kc = cookie;

	if (kc->kc_ctor)
		(*kc->kc_ctor)(ptr);

	return 0;
}

static void
kmem_cache_dtor(void *cookie, void *ptr)
{
	struct kmem_cache *const kc = cookie;

	if (kc->kc_dtor)
		(*kc->kc_dtor)(ptr);
}

static void
kmem_cache_pre_dtor(void *cookie __unused)
{
	synchronize_rcu();
}

static inline struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
    unsigned long flags, void (*ctor)(void *))
{
	struct pool_allocator *palloc = NULL;
	struct kmem_cache *kc;

	if (ISSET(flags, SLAB_HWCACHE_ALIGN))
		align = roundup(MAX(1, align), CACHE_LINE_SIZE);
	if (ISSET(flags, SLAB_TYPESAFE_BY_RCU))
		palloc = &pool_allocator_kmem_rcu;

	kc = kmem_alloc(sizeof(*kc), KM_SLEEP);
	kc->kc_pool_cache = pool_cache_init(size, align, 0, 0, name, palloc,
	    IPL_VM, &kmem_cache_ctor, NULL, kc);
	kc->kc_size = size;
	kc->kc_ctor = ctor;
	kc->kc_dtor = NULL;	/* kc comes from kmem_alloc; don't leave stale */

	return kc;
}

/* XXX extension */
static inline struct kmem_cache *
kmem_cache_create_dtor(const char *name, size_t size, size_t align,
    unsigned long flags, void (*ctor)(void *), void (*dtor)(void *))
{
	struct pool_allocator *palloc = NULL;
	struct kmem_cache *kc;

	if (ISSET(flags, SLAB_HWCACHE_ALIGN))
		align = roundup(MAX(1, align), CACHE_LINE_SIZE);
	/*
	 * No need to use pool_allocator_kmem_rcu here; RCU synchronization
	 * will be handled by the pre-destructor hook.
	 */

	kc = kmem_alloc(sizeof(*kc), KM_SLEEP);
	kc->kc_pool_cache = pool_cache_init(size, align, 0, 0, name, palloc,
	    IPL_VM, &kmem_cache_ctor, &kmem_cache_dtor, kc);
	kc->kc_size = size;
	kc->kc_ctor = ctor;
	kc->kc_dtor = dtor;
	if (ISSET(flags, SLAB_TYPESAFE_BY_RCU)) {
		pool_cache_setpredestruct(kc->kc_pool_cache,
		    kmem_cache_pre_dtor);
	}

	return kc;
}

#define	KMEM_CACHE(T, F)						      \
	kmem_cache_create(#T, sizeof(struct T), __alignof__(struct T),	      \
	    (F), NULL)
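
/*
 * For example, KMEM_CACHE(foo, 0) creates a cache named "foo" for
 * objects of (a hypothetical) struct foo, using the struct's natural
 * alignment and no constructor.
 */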

static inline void
kmem_cache_destroy(struct kmem_cache *kc)
{

	pool_cache_destroy(kc->kc_pool_cache);
	kmem_free(kc, sizeof(*kc));
}

static inline void *
kmem_cache_alloc(struct kmem_cache *kc, gfp_t gfp)
{
	int flags = 0;
	void *ptr;

	if (gfp & __GFP_WAIT)
		flags |= PR_WAITOK;
	else
		flags |= PR_NOWAIT;

	ptr = pool_cache_get(kc->kc_pool_cache, flags);
	if (ptr == NULL)
		return NULL;

	if (ISSET(gfp, __GFP_ZERO))
		(void)memset(ptr, 0, kc->kc_size);

	return ptr;
}

static inline void *
kmem_cache_zalloc(struct kmem_cache *kc, gfp_t gfp)
{

	return kmem_cache_alloc(kc, (gfp | __GFP_ZERO));
}

static inline void
kmem_cache_free(struct kmem_cache *kc, void *ptr)
{

	pool_cache_put(kc->kc_pool_cache, ptr);
}

static inline void
kmem_cache_shrink(struct kmem_cache *kc)
{

	pool_cache_reclaim(kc->kc_pool_cache);
}
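
/*
 * Illustrative lifecycle sketch (the struct and cache name are
 * hypothetical): create a cache, allocate a zeroed object from it,
 * and tear everything down again.
 */
struct linux_slab_example {
	int	lse_refcnt;
};

static inline void
linux_kmem_cache_example(void)
{
	struct kmem_cache *kc;
	struct linux_slab_example *obj;

	kc = kmem_cache_create("example", sizeof(struct linux_slab_example),
	    __alignof__(struct linux_slab_example), SLAB_HWCACHE_ALIGN, NULL);

	obj = kmem_cache_zalloc(kc, GFP_KERNEL);
	if (obj != NULL) {
		obj->lse_refcnt = 1;
		kmem_cache_free(kc, obj);
	}

	kmem_cache_destroy(kc);
}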

#endif  /* _LINUX_SLAB_H_ */