/*	$NetBSD: slab.h,v 1.13 2021/12/22 18:04:53 thorpej Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SLAB_H_
#define _LINUX_SLAB_H_

#include <sys/kmem.h>

#include <machine/limits.h>

#include <uvm/uvm_extern.h>	/* For PAGE_SIZE.  */

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/rcupdate.h>

#define	ARCH_KMALLOC_MINALIGN	4 /* XXX ??? */

/*
 * Size header prepended to every kmalloc/krealloc allocation, so that
 * kfree can recover the allocation size that kmem_intr_free requires.
 */
struct linux_malloc {
	size_t	lm_size;
} __aligned(ALIGNBYTES + 1);

/*
 * Map Linux gfp_t allocation flags onto NetBSD kmem(9) KM_* flags.
 * Flags we cannot honour trigger a KASSERT rather than a silent
 * misallocation.
 */
static inline int
linux_gfp_to_kmem(gfp_t gfp)
{
	int flags = 0;

	/* This has no meaning to us.  */
	gfp &= ~__GFP_NOWARN;
	gfp &= ~__GFP_RECLAIMABLE;

	/* Pretend this was the same as not passing __GFP_WAIT.  */
	if (ISSET(gfp, __GFP_NORETRY)) {
		gfp &= ~__GFP_NORETRY;
		gfp &= ~__GFP_WAIT;
	}

	/* Zeroing is handled by the caller, not by a kmem(9) flag.  */
	if (ISSET(gfp, __GFP_ZERO)) {
		gfp &= ~__GFP_ZERO;
	}

	/*
	 * XXX Handle other cases as they arise -- prefer to fail early
	 * rather than allocate memory without respecting parameters we
	 * don't understand.
	 */
	KASSERT((gfp == GFP_ATOMIC) || (gfp == GFP_NOWAIT) ||
	    ((gfp & ~__GFP_WAIT) == (GFP_KERNEL & ~__GFP_WAIT)));

	if (ISSET(gfp, __GFP_WAIT)) {
		flags |= KM_SLEEP;
		gfp &= ~__GFP_WAIT;
	} else {
		flags |= KM_NOSLEEP;
	}

	return flags;
}
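
/*
 * Example (illustrative only, not compiled): the intended mapping is
 *
 *	linux_gfp_to_kmem(GFP_KERNEL) == KM_SLEEP
 *	linux_gfp_to_kmem(GFP_ATOMIC) == KM_NOSLEEP
 *	linux_gfp_to_kmem(GFP_KERNEL | __GFP_ZERO) == KM_SLEEP
 *
 * i.e. only the sleep/nosleep distinction survives the translation;
 * zeroing is the caller's job.
 */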

/*
 * XXX vmalloc and kmalloc both use this.  If you change that, be sure
 * to update vmalloc in <linux/vmalloc.h> and kvfree in <linux/mm.h>.
 */

/*
 * Allocate size bytes with a hidden struct linux_malloc header in
 * front recording the size, so kfree can find it later.
 */
static inline void *
kmalloc(size_t size, gfp_t gfp)
{
	struct linux_malloc *lm;
	int kmflags = linux_gfp_to_kmem(gfp);

	/* Guard against overflow in sizeof(*lm) + size.  */
	KASSERTMSG(size < SIZE_MAX - sizeof(*lm), "size=%zu", size);

	if (gfp & __GFP_ZERO)
		lm = kmem_intr_zalloc(sizeof(*lm) + size, kmflags);
	else
		lm = kmem_intr_alloc(sizeof(*lm) + size, kmflags);
	if (lm == NULL)
		return NULL;

	lm->lm_size = size;
	return lm + 1;
}

static inline void *
kzalloc(size_t size, gfp_t gfp)
{
	return kmalloc(size, gfp | __GFP_ZERO);
}

static inline void *
kmalloc_array(size_t n, size_t size, gfp_t gfp)
{
	/* Fail rather than let n * size wrap around.  */
	if ((size != 0) && (n > (SIZE_MAX / size)))
		return NULL;
	return kmalloc(n * size, gfp);
}

static inline void *
kcalloc(size_t n, size_t size, gfp_t gfp)
{
	return kmalloc_array(n, size, (gfp | __GFP_ZERO));
}
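
/*
 * Example (illustrative only, not compiled): on a 64-bit machine,
 *
 *	kmalloc_array((SIZE_MAX / 8) + 1, 8, GFP_KERNEL)
 *
 * returns NULL instead of wrapping around to a tiny allocation,
 * while
 *
 *	kcalloc(16, sizeof(struct foo), GFP_KERNEL)
 *
 * behaves like calloc(3): an overflow-checked, zero-initialized
 * array.  (struct foo is a hypothetical caller-defined type.)
 */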

static inline void *
krealloc(void *ptr, size_t size, gfp_t gfp)
{
	struct linux_malloc *olm, *nlm;
	int kmflags = linux_gfp_to_kmem(gfp);

	/* Same overflow guard as kmalloc.  */
	KASSERTMSG(size < SIZE_MAX - sizeof(*nlm), "size=%zu", size);

	if (gfp & __GFP_ZERO)
		nlm = kmem_intr_zalloc(sizeof(*nlm) + size, kmflags);
	else
		nlm = kmem_intr_alloc(sizeof(*nlm) + size, kmflags);
	if (nlm == NULL)
		return NULL;

	/* Copy the old contents and free the old buffer, if any.  */
	nlm->lm_size = size;
	if (ptr) {
		olm = (struct linux_malloc *)ptr - 1;
		memcpy(nlm + 1, olm + 1, MIN(nlm->lm_size, olm->lm_size));
		kmem_intr_free(olm, sizeof(*olm) + olm->lm_size);
	}
	return nlm + 1;
}

static inline void
kfree(void *ptr)
{
	struct linux_malloc *lm;

	if (ptr == NULL)
		return;

	lm = (struct linux_malloc *)ptr - 1;
	kmem_intr_free(lm, sizeof(*lm) + lm->lm_size);
}
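
/*
 * Example (illustrative only, not compiled; struct foo is a
 * hypothetical caller-defined type):
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	if (f == NULL)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 *
 * kfree needs no size argument because the hidden linux_malloc
 * header records it; krealloc and kvfree rely on the same layout.
 */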

#define	SLAB_HWCACHE_ALIGN	__BIT(0)
#define	SLAB_RECLAIM_ACCOUNT	__BIT(1)
#define	SLAB_TYPESAFE_BY_RCU	__BIT(2)

/* Linux kmem_cache, backed by a NetBSD pool_cache(9).  */
struct kmem_cache {
	pool_cache_t	kc_pool_cache;
	size_t		kc_size;
	void		(*kc_ctor)(void *);
	void		(*kc_dtor)(void *);
};

/* pool_cache(9) constructor: trampoline to the Linux-style ctor.  */
static int
kmem_cache_ctor(void *cookie, void *ptr, int flags __unused)
{
	struct kmem_cache *const kc = cookie;

	if (kc->kc_ctor)
		(*kc->kc_ctor)(ptr);

	return 0;
}

/* pool_cache(9) destructor: trampoline to the Linux-style dtor.  */
static void
kmem_cache_dtor(void *cookie, void *ptr)
{
	struct kmem_cache *const kc = cookie;

	if (kc->kc_dtor)
		(*kc->kc_dtor)(ptr);
}

/*
 * XXX extension: Linux's kmem_cache_create takes no destructor, so
 * this variant is NetBSD-specific.
 */
static inline struct kmem_cache *
kmem_cache_create_dtor(const char *name, size_t size, size_t align,
    unsigned long flags, void (*ctor)(void *), void (*dtor)(void *))
{
	struct kmem_cache *kc;
	int pcflags = 0;

	if (ISSET(flags, SLAB_HWCACHE_ALIGN))
		align = roundup(MAX(1, align), CACHE_LINE_SIZE);
	if (ISSET(flags, SLAB_TYPESAFE_BY_RCU))
		pcflags |= PR_PSERIALIZE;

	kc = kmem_alloc(sizeof(*kc), KM_SLEEP);
	kc->kc_pool_cache = pool_cache_init(size, align, 0, pcflags, name, NULL,
	    IPL_VM, &kmem_cache_ctor, dtor != NULL ? &kmem_cache_dtor : NULL,
	    kc);
	kc->kc_size = size;
	kc->kc_ctor = ctor;
	kc->kc_dtor = dtor;

	return kc;
}
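
/*
 * Example (illustrative only, not compiled): a cache whose objects
 * keep their mutex initialized across free/alloc cycles, the usual
 * pattern for type-stable SLAB_TYPESAFE_BY_RCU caches.  struct foo,
 * foo_ctor, and foo_dtor are hypothetical.
 *
 *	static void foo_ctor(void *p) { struct foo *f = p;
 *		mutex_init(&f->f_lock, MUTEX_DEFAULT, IPL_VM); }
 *	static void foo_dtor(void *p) { struct foo *f = p;
 *		mutex_destroy(&f->f_lock); }
 *
 *	cache = kmem_cache_create_dtor("foo", sizeof(struct foo), 0,
 *	    SLAB_TYPESAFE_BY_RCU, foo_ctor, foo_dtor);
 */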

static inline struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
    unsigned long flags, void (*ctor)(void *))
{
	return kmem_cache_create_dtor(name, size, align, flags, ctor, NULL);
}

#define	KMEM_CACHE(T, F)						      \
	kmem_cache_create(#T, sizeof(struct T), __alignof__(struct T),	      \
	    (F), NULL)
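
/*
 * Example (illustrative only, not compiled; struct foo is
 * hypothetical):
 *
 *	cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
 *
 * expands to
 *
 *	cache = kmem_cache_create("foo", sizeof(struct foo),
 *	    __alignof__(struct foo), (SLAB_HWCACHE_ALIGN), NULL);
 */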

static inline void
kmem_cache_destroy(struct kmem_cache *kc)
{

	pool_cache_destroy(kc->kc_pool_cache);
	kmem_free(kc, sizeof(*kc));
}

static inline void *
kmem_cache_alloc(struct kmem_cache *kc, gfp_t gfp)
{
	int flags = 0;
	void *ptr;

	if (gfp & __GFP_WAIT)
		flags |= PR_WAITOK;
	else
		flags |= PR_NOWAIT;

	ptr = pool_cache_get(kc->kc_pool_cache, flags);
	if (ptr == NULL)
		return NULL;

	/* pool_cache(9) has no zeroing flag, so zero here if asked.  */
	if (ISSET(gfp, __GFP_ZERO))
		(void)memset(ptr, 0, kc->kc_size);

	return ptr;
}

static inline void *
kmem_cache_zalloc(struct kmem_cache *kc, gfp_t gfp)
{

	return kmem_cache_alloc(kc, (gfp | __GFP_ZERO));
}

static inline void
kmem_cache_free(struct kmem_cache *kc, void *ptr)
{

	pool_cache_put(kc->kc_pool_cache, ptr);
}

static inline void
kmem_cache_shrink(struct kmem_cache *kc)
{

	pool_cache_reclaim(kc->kc_pool_cache);
}
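
/*
 * Example (illustrative only, not compiled; struct foo and the error
 * path are hypothetical): the full cache lifecycle.
 *
 *	struct kmem_cache *cache = KMEM_CACHE(foo, 0);
 *	struct foo *f = kmem_cache_zalloc(cache, GFP_KERNEL);
 *	if (f == NULL)
 *		goto fail;
 *	...
 *	kmem_cache_free(cache, f);
 *	kmem_cache_destroy(cache);
 */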

#endif  /* _LINUX_SLAB_H_ */