Home | History | Annotate | Line # | Download | only in linux
slab.h revision 1.5
      1 /*	$NetBSD: slab.h,v 1.5 2021/12/19 11:58:02 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2013 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Taylor R. Campbell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #ifndef _LINUX_SLAB_H_
     33 #define _LINUX_SLAB_H_
     34 
     35 #include <sys/kmem.h>
     36 #include <sys/malloc.h>
     37 
     38 #include <machine/limits.h>
     39 
     40 #include <uvm/uvm_extern.h>	/* For PAGE_SIZE.  */
     41 
     42 #include <linux/gfp.h>
     43 #include <linux/rcupdate.h>
     44 
     45 #define	ARCH_KMALLOC_MINALIGN	4 /* XXX ??? */
     46 
     47 /* XXX Should use kmem, but Linux kfree doesn't take the size.  */
     48 
/*
 * linux_gfp_to_malloc(gfp)
 *
 *	Translate a Linux gfp_t allocation-flag mask into malloc(9)
 *	M_* flags.  Flags we cannot honour but that are safe to ignore
 *	are masked off; anything else that we do not understand trips
 *	the KASSERT below rather than silently misbehaving.
 *
 *	NOTE: the masking steps must happen before the KASSERT, since
 *	the assertion checks the *residual* flag set.
 */
static inline int
linux_gfp_to_malloc(gfp_t gfp)
{
	int flags = 0;

	/* This has no meaning to us.  */
	gfp &= ~__GFP_NOWARN;
	gfp &= ~__GFP_RECLAIMABLE;

	/* Pretend this was the same as not passing __GFP_WAIT.  */
	if (ISSET(gfp, __GFP_NORETRY)) {
		gfp &= ~__GFP_NORETRY;
		gfp &= ~__GFP_WAIT;
	}

	/* Linux __GFP_ZERO maps directly onto malloc(9) M_ZERO.  */
	if (ISSET(gfp, __GFP_ZERO)) {
		flags |= M_ZERO;
		gfp &= ~__GFP_ZERO;
	}

	/*
	 * XXX Handle other cases as they arise -- prefer to fail early
	 * rather than allocate memory without respecting parameters we
	 * don't understand.
	 */
	KASSERT((gfp == GFP_ATOMIC) ||
	    ((gfp & ~__GFP_WAIT) == (GFP_KERNEL & ~__GFP_WAIT)));

	/* __GFP_WAIT selects between sleeping and non-sleeping allocation. */
	if (ISSET(gfp, __GFP_WAIT)) {
		flags |= M_WAITOK;
		gfp &= ~__GFP_WAIT;
	} else {
		flags |= M_NOWAIT;
	}

	return flags;
}
     86 
     87 /*
     88  * XXX vmalloc and kmalloc both use malloc(9).  If you change this, be
     89  * sure to update vmalloc in <linux/vmalloc.h> and kvfree in
     90  * <linux/mm.h>.
     91  */
     92 
     93 static inline void *
     94 kmalloc(size_t size, gfp_t gfp)
     95 {
     96 	return malloc(size, M_TEMP, linux_gfp_to_malloc(gfp));
     97 }
     98 
     99 static inline void *
    100 kzalloc(size_t size, gfp_t gfp)
    101 {
    102 	return malloc(size, M_TEMP, (linux_gfp_to_malloc(gfp) | M_ZERO));
    103 }
    104 
    105 static inline void *
    106 kmalloc_array(size_t n, size_t size, gfp_t gfp)
    107 {
    108 	if ((size != 0) && (n > (SIZE_MAX / size)))
    109 		return NULL;
    110 	return malloc((n * size), M_TEMP, linux_gfp_to_malloc(gfp));
    111 }
    112 
    113 static inline void *
    114 kcalloc(size_t n, size_t size, gfp_t gfp)
    115 {
    116 	return kmalloc_array(n, size, (gfp | __GFP_ZERO));
    117 }
    118 
    119 static inline void *
    120 krealloc(void *ptr, size_t size, gfp_t gfp)
    121 {
    122 	return realloc(ptr, size, M_TEMP, linux_gfp_to_malloc(gfp));
    123 }
    124 
    125 static inline void
    126 kfree(void *ptr)
    127 {
    128 	if (ptr != NULL)
    129 		free(ptr, M_TEMP);
    130 }
    131 
    132 #define	SLAB_HWCACHE_ALIGN	__BIT(0)
    133 #define	SLAB_RECLAIM_ACCOUNT	__BIT(1)
    134 #define	SLAB_TYPESAFE_BY_RCU	__BIT(2)
    135 
/*
 * Linux kmem_cache emulation, backed by pool_cache(9).
 */
struct kmem_cache {
	pool_cache_t	kc_pool_cache;	/* underlying pool cache */
	size_t		kc_size;	/* object size; used for __GFP_ZERO */
	void		(*kc_ctor)(void *);	/* Linux per-object constructor */
};
    141 
/* XXX These should be in <sys/pool.h>.  */
/* Default pool backend page allocator/free, referenced below.  */
void *	pool_page_alloc(struct pool *, int);
void	pool_page_free(struct pool *, void *);
    145 
/*
 * pool_page_free_rcu(pp, v)
 *
 *	Backend page free for SLAB_TYPESAFE_BY_RCU caches: wait for an
 *	RCU grace period to elapse before actually releasing the page,
 *	so RCU readers never see an object's page recycled under them.
 *	The order here -- synchronize first, free second -- is the
 *	whole point; do not reorder.
 */
static void
pool_page_free_rcu(struct pool *pp, void *v)
{

	synchronize_rcu();
	pool_page_free(pp, v);
}
    153 
/*
 * Pool allocator used for SLAB_TYPESAFE_BY_RCU caches: normal page
 * allocation, RCU-deferred page free.  pa_pagesz = 0 selects the
 * default page size.
 */
static struct pool_allocator pool_allocator_kmem_rcu = {
	.pa_alloc = pool_page_alloc,
	.pa_free = pool_page_free_rcu,
	.pa_pagesz = 0,
};
    159 
    160 static int
    161 kmem_cache_ctor(void *cookie, void *ptr, int flags __unused)
    162 {
    163 	struct kmem_cache *const kc = cookie;
    164 
    165 	if (kc->kc_ctor)
    166 		(*kc->kc_ctor)(ptr);
    167 
    168 	return 0;
    169 }
    170 
    171 static inline struct kmem_cache *
    172 kmem_cache_create(const char *name, size_t size, size_t align,
    173     unsigned long flags, void (*ctor)(void *))
    174 {
    175 	struct pool_allocator *palloc = NULL;
    176 	struct kmem_cache *kc;
    177 
    178 	if (ISSET(flags, SLAB_HWCACHE_ALIGN))
    179 		align = roundup(MAX(1, align), CACHE_LINE_SIZE);
    180 	if (ISSET(flags, SLAB_TYPESAFE_BY_RCU))
    181 		palloc = &pool_allocator_kmem_rcu;
    182 
    183 	kc = kmem_alloc(sizeof(*kc), KM_SLEEP);
    184 	kc->kc_pool_cache = pool_cache_init(size, align, 0, 0, name, palloc,
    185 	    IPL_NONE, &kmem_cache_ctor, NULL, kc);
    186 	kc->kc_size = size;
    187 	kc->kc_ctor = ctor;
    188 
    189 	return kc;
    190 }
    191 
/*
 * KMEM_CACHE(T, F): create a cache named after struct T, using struct
 * T's size and natural alignment, flags F, and no constructor.
 */
#define	KMEM_CACHE(T, F)						      \
	kmem_cache_create(#T, sizeof(struct T), __alignof__(struct T),	      \
	    (F), NULL)
    195 
    196 static inline void
    197 kmem_cache_destroy(struct kmem_cache *kc)
    198 {
    199 
    200 	pool_cache_destroy(kc->kc_pool_cache);
    201 	kmem_free(kc, sizeof(*kc));
    202 }
    203 
    204 static inline void *
    205 kmem_cache_alloc(struct kmem_cache *kc, gfp_t gfp)
    206 {
    207 	int flags = 0;
    208 	void *ptr;
    209 
    210 	if (gfp & __GFP_WAIT)
    211 		flags |= PR_WAITOK;
    212 	else
    213 		flags |= PR_NOWAIT;
    214 
    215 	ptr = pool_cache_get(kc->kc_pool_cache, flags);
    216 	if (ptr == NULL)
    217 		return NULL;
    218 
    219 	if (ISSET(gfp, __GFP_ZERO))
    220 		(void)memset(ptr, 0, kc->kc_size);
    221 
    222 	return ptr;
    223 }
    224 
    225 static inline void *
    226 kmem_cache_zalloc(struct kmem_cache *kc, gfp_t gfp)
    227 {
    228 
    229 	return kmem_cache_alloc(kc, (gfp | __GFP_ZERO));
    230 }
    231 
    232 static inline void
    233 kmem_cache_free(struct kmem_cache *kc, void *ptr)
    234 {
    235 
    236 	pool_cache_put(kc->kc_pool_cache, ptr);
    237 }
    238 
    239 static inline void
    240 kmem_cache_shrink(struct kmem_cache *kc)
    241 {
    242 
    243 	pool_cache_reclaim(kc->kc_pool_cache);
    244 }
    245 
    246 #endif  /* _LINUX_SLAB_H_ */
    247