/*	$NetBSD: slab.h,v 1.1.2.2 2018/09/06 06:56:08 pgoyette Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SLAB_H_
#define _LINUX_SLAB_H_

#include <sys/kmem.h>
#include <sys/malloc.h>

#include <machine/limits.h>

#include <uvm/uvm_extern.h>	/* For PAGE_SIZE.  */

#include <linux/gfp.h>

/* XXX Should use kmem, but Linux kfree doesn't take the size.  */

static inline int
linux_gfp_to_malloc(gfp_t gfp)
{
	int flags = 0;

	/* This has no meaning to us.  */
	gfp &= ~__GFP_NOWARN;
	gfp &= ~__GFP_RECLAIMABLE;

	/* Pretend this was the same as not passing __GFP_WAIT.  */
	if (ISSET(gfp, __GFP_NORETRY)) {
		gfp &= ~__GFP_NORETRY;
		gfp &= ~__GFP_WAIT;
	}

	if (ISSET(gfp, __GFP_ZERO)) {
		flags |= M_ZERO;
		gfp &= ~__GFP_ZERO;
	}

	/*
	 * XXX Handle other cases as they arise -- prefer to fail early
	 * rather than allocate memory without respecting parameters we
	 * don't understand.
	 */
	KASSERT((gfp == GFP_ATOMIC) ||
	    ((gfp & ~__GFP_WAIT) == (GFP_KERNEL & ~__GFP_WAIT)));

	if (ISSET(gfp, __GFP_WAIT)) {
		flags |= M_WAITOK;
		gfp &= ~__GFP_WAIT;
	} else {
		flags |= M_NOWAIT;
	}

	return flags;
}
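
/*
 * Illustrative mapping, derived from the logic above rather than from
 * Linux: GFP_KERNEL includes __GFP_WAIT and so yields M_WAITOK;
 * GFP_ATOMIC yields M_NOWAIT; or'ing in __GFP_ZERO adds M_ZERO.
 */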

/*
 * XXX vmalloc and kmalloc both use malloc(9).  If you change this, be
 * sure to update vmalloc in <linux/vmalloc.h> and kvfree in
 * <linux/mm.h>.
 */

static inline void *
kmalloc(size_t size, gfp_t gfp)
{
	return malloc(size, M_TEMP, linux_gfp_to_malloc(gfp));
}

static inline void *
kzalloc(size_t size, gfp_t gfp)
{
	return malloc(size, M_TEMP, (linux_gfp_to_malloc(gfp) | M_ZERO));
}

static inline void *
kmalloc_array(size_t n, size_t size, gfp_t gfp)
{
	if ((size != 0) && (n > (SIZE_MAX / size)))
		return NULL;
	return malloc((n * size), M_TEMP, linux_gfp_to_malloc(gfp));
}
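
/*
 * The check above means, for example, that a request for SIZE_MAX/2
 * elements of 4 bytes each returns NULL rather than letting n * size
 * silently wrap around to a small allocation.
 */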

static inline void *
kcalloc(size_t n, size_t size, gfp_t gfp)
{
	return kmalloc_array(n, size, (gfp | __GFP_ZERO));
}

static inline void *
krealloc(void *ptr, size_t size, gfp_t gfp)
{
	return realloc(ptr, size, M_TEMP, linux_gfp_to_malloc(gfp));
}

static inline void
kfree(void *ptr)
{
	if (ptr != NULL)
		free(ptr, M_TEMP);
}
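
/*
 * Usage sketch, illustrative only (struct foo is made up):
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	if (f == NULL)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 */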

#define	SLAB_HWCACHE_ALIGN	1

struct kmem_cache {
	pool_cache_t	kc_pool_cache;	/* backing pool_cache(9) */
	size_t		kc_size;	/* object size, needed for __GFP_ZERO */
	void		(*kc_ctor)(void *);	/* Linux-style constructor */
};

/* pool_cache(9) constructor hook: forward to the Linux-style ctor, if any.  */
static int
kmem_cache_ctor(void *cookie, void *ptr, int flags __unused)
{
	struct kmem_cache *const kc = cookie;

	if (kc->kc_ctor)
		(*kc->kc_ctor)(ptr);

	return 0;
}

static inline struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
    unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *kc;

	if (ISSET(flags, SLAB_HWCACHE_ALIGN))
		align = roundup(MAX(1, align), CACHE_LINE_SIZE);

	kc = kmem_alloc(sizeof(*kc), KM_SLEEP);
	kc->kc_pool_cache = pool_cache_init(size, align, 0, 0, name, NULL,
	    IPL_NONE, &kmem_cache_ctor, NULL, kc);
	kc->kc_size = size;
	kc->kc_ctor = ctor;

	return kc;
}
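
/*
 * Note: with SLAB_HWCACHE_ALIGN, an align of 0 or 1 is rounded up to
 * CACHE_LINE_SIZE (often 64 bytes, but machine-dependent).
 */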

static inline void
kmem_cache_destroy(struct kmem_cache *kc)
{

	pool_cache_destroy(kc->kc_pool_cache);
	kmem_free(kc, sizeof(*kc));
}

static inline void *
kmem_cache_alloc(struct kmem_cache *kc, gfp_t gfp)
{
	int flags = 0;
	void *ptr;

	if (gfp & __GFP_WAIT)
		flags |= PR_WAITOK;
	else
		flags |= PR_NOWAIT;

	ptr = pool_cache_get(kc->kc_pool_cache, flags);
	if (ptr == NULL)
		return NULL;

	/* pool_cache_get doesn't zero for us, so honour __GFP_ZERO by hand.  */
	if (ISSET(gfp, __GFP_ZERO))
		(void)memset(ptr, 0, kc->kc_size);

	return ptr;
}

static inline void *
kmem_cache_zalloc(struct kmem_cache *kc, gfp_t gfp)
{

	return kmem_cache_alloc(kc, (gfp | __GFP_ZERO));
}

static inline void
kmem_cache_free(struct kmem_cache *kc, void *ptr)
{

	pool_cache_put(kc->kc_pool_cache, ptr);
}
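
/*
 * Lifecycle sketch, illustrative only ("foo" and struct foo are made up):
 *
 *	struct kmem_cache *cache;
 *	struct foo *p;
 *
 *	cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *	    SLAB_HWCACHE_ALIGN, NULL);
 *	p = kmem_cache_zalloc(cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(cache, p);
 *	kmem_cache_destroy(cache);
 */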

#endif  /* _LINUX_SLAB_H_ */