slab.h revision 1.3 1 1.3 riastrad /* $NetBSD: slab.h,v 1.3 2021/12/19 01:33:44 riastradh Exp $ */
2 1.1 riastrad
3 1.1 riastrad /*-
4 1.1 riastrad * Copyright (c) 2013 The NetBSD Foundation, Inc.
5 1.1 riastrad * All rights reserved.
6 1.1 riastrad *
7 1.1 riastrad * This code is derived from software contributed to The NetBSD Foundation
8 1.1 riastrad * by Taylor R. Campbell.
9 1.1 riastrad *
10 1.1 riastrad * Redistribution and use in source and binary forms, with or without
11 1.1 riastrad * modification, are permitted provided that the following conditions
12 1.1 riastrad * are met:
13 1.1 riastrad * 1. Redistributions of source code must retain the above copyright
14 1.1 riastrad * notice, this list of conditions and the following disclaimer.
15 1.1 riastrad * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 riastrad * notice, this list of conditions and the following disclaimer in the
17 1.1 riastrad * documentation and/or other materials provided with the distribution.
18 1.1 riastrad *
19 1.1 riastrad * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 riastrad * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 riastrad * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 riastrad * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 riastrad * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 riastrad * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 riastrad * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 riastrad * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 riastrad * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 riastrad * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 riastrad * POSSIBILITY OF SUCH DAMAGE.
30 1.1 riastrad */
31 1.1 riastrad
32 1.1 riastrad #ifndef _LINUX_SLAB_H_
33 1.1 riastrad #define _LINUX_SLAB_H_
34 1.1 riastrad
35 1.1 riastrad #include <sys/kmem.h>
36 1.1 riastrad #include <sys/malloc.h>
37 1.1 riastrad
38 1.1 riastrad #include <machine/limits.h>
39 1.1 riastrad
40 1.1 riastrad #include <uvm/uvm_extern.h> /* For PAGE_SIZE. */
41 1.1 riastrad
42 1.1 riastrad #include <linux/gfp.h>
43 1.3 riastrad #include <linux/rcupdate.h>
44 1.1 riastrad
45 1.1 riastrad /* XXX Should use kmem, but Linux kfree doesn't take the size. */
46 1.1 riastrad
47 1.1 riastrad static inline int
48 1.1 riastrad linux_gfp_to_malloc(gfp_t gfp)
49 1.1 riastrad {
50 1.1 riastrad int flags = 0;
51 1.1 riastrad
52 1.1 riastrad /* This has no meaning to us. */
53 1.1 riastrad gfp &= ~__GFP_NOWARN;
54 1.1 riastrad gfp &= ~__GFP_RECLAIMABLE;
55 1.1 riastrad
56 1.1 riastrad /* Pretend this was the same as not passing __GFP_WAIT. */
57 1.1 riastrad if (ISSET(gfp, __GFP_NORETRY)) {
58 1.1 riastrad gfp &= ~__GFP_NORETRY;
59 1.1 riastrad gfp &= ~__GFP_WAIT;
60 1.1 riastrad }
61 1.1 riastrad
62 1.1 riastrad if (ISSET(gfp, __GFP_ZERO)) {
63 1.1 riastrad flags |= M_ZERO;
64 1.1 riastrad gfp &= ~__GFP_ZERO;
65 1.1 riastrad }
66 1.1 riastrad
67 1.1 riastrad /*
68 1.1 riastrad * XXX Handle other cases as they arise -- prefer to fail early
69 1.1 riastrad * rather than allocate memory without respecting parameters we
70 1.1 riastrad * don't understand.
71 1.1 riastrad */
72 1.1 riastrad KASSERT((gfp == GFP_ATOMIC) ||
73 1.1 riastrad ((gfp & ~__GFP_WAIT) == (GFP_KERNEL & ~__GFP_WAIT)));
74 1.1 riastrad
75 1.1 riastrad if (ISSET(gfp, __GFP_WAIT)) {
76 1.1 riastrad flags |= M_WAITOK;
77 1.1 riastrad gfp &= ~__GFP_WAIT;
78 1.1 riastrad } else {
79 1.1 riastrad flags |= M_NOWAIT;
80 1.1 riastrad }
81 1.1 riastrad
82 1.1 riastrad return flags;
83 1.1 riastrad }
84 1.1 riastrad
85 1.1 riastrad /*
86 1.1 riastrad * XXX vmalloc and kmalloc both use malloc(9). If you change this, be
87 1.1 riastrad * sure to update vmalloc in <linux/vmalloc.h> and kvfree in
88 1.1 riastrad * <linux/mm.h>.
89 1.1 riastrad */
90 1.1 riastrad
91 1.1 riastrad static inline void *
92 1.1 riastrad kmalloc(size_t size, gfp_t gfp)
93 1.1 riastrad {
94 1.1 riastrad return malloc(size, M_TEMP, linux_gfp_to_malloc(gfp));
95 1.1 riastrad }
96 1.1 riastrad
97 1.1 riastrad static inline void *
98 1.1 riastrad kzalloc(size_t size, gfp_t gfp)
99 1.1 riastrad {
100 1.1 riastrad return malloc(size, M_TEMP, (linux_gfp_to_malloc(gfp) | M_ZERO));
101 1.1 riastrad }
102 1.1 riastrad
103 1.1 riastrad static inline void *
104 1.1 riastrad kmalloc_array(size_t n, size_t size, gfp_t gfp)
105 1.1 riastrad {
106 1.1 riastrad if ((size != 0) && (n > (SIZE_MAX / size)))
107 1.1 riastrad return NULL;
108 1.1 riastrad return malloc((n * size), M_TEMP, linux_gfp_to_malloc(gfp));
109 1.1 riastrad }
110 1.1 riastrad
111 1.1 riastrad static inline void *
112 1.1 riastrad kcalloc(size_t n, size_t size, gfp_t gfp)
113 1.1 riastrad {
114 1.1 riastrad return kmalloc_array(n, size, (gfp | __GFP_ZERO));
115 1.1 riastrad }
116 1.1 riastrad
117 1.1 riastrad static inline void *
118 1.1 riastrad krealloc(void *ptr, size_t size, gfp_t gfp)
119 1.1 riastrad {
120 1.1 riastrad return realloc(ptr, size, M_TEMP, linux_gfp_to_malloc(gfp));
121 1.1 riastrad }
122 1.1 riastrad
123 1.1 riastrad static inline void
124 1.1 riastrad kfree(void *ptr)
125 1.1 riastrad {
126 1.1 riastrad if (ptr != NULL)
127 1.1 riastrad free(ptr, M_TEMP);
128 1.1 riastrad }
129 1.1 riastrad
/* Linux slab-creation flags, represented as distinct bits. */
#define SLAB_HWCACHE_ALIGN	__BIT(0)	/* align objects to cache lines */
#define SLAB_RECLAIM_ACCOUNT	__BIT(1)	/* not honoured by kmem_cache_create here */
#define SLAB_TYPESAFE_BY_RCU	__BIT(2)	/* delay page reuse until RCU grace period */
133 1.1 riastrad
/* NetBSD implementation of the Linux kmem_cache, backed by pool_cache(9). */
struct kmem_cache {
	pool_cache_t kc_pool_cache;	/* underlying pool cache */
	size_t kc_size;			/* object size; used for __GFP_ZERO memset */
	void (*kc_ctor)(void *);	/* Linux-style constructor, may be NULL */
};
139 1.1 riastrad
140 1.3 riastrad /* XXX These should be in <sys/pool.h>. */
141 1.3 riastrad void * pool_page_alloc(struct pool *, int);
142 1.3 riastrad void pool_page_free(struct pool *, void *);
143 1.3 riastrad
/*
 * Free a pool page only after an RCU grace period has elapsed, so
 * RCU readers still traversing SLAB_TYPESAFE_BY_RCU objects never
 * see the backing page recycled out from under them.  The
 * synchronize_rcu call must precede the actual free.
 */
static void
pool_page_free_rcu(struct pool *pp, void *v)
{

	synchronize_rcu();
	pool_page_free(pp, v);
}
151 1.3 riastrad
/*
 * Pool allocator whose frees are deferred past an RCU grace period;
 * used for caches created with SLAB_TYPESAFE_BY_RCU.
 */
static struct pool_allocator pool_allocator_kmem_rcu = {
	.pa_alloc = pool_page_alloc,
	.pa_free = pool_page_free_rcu,
	.pa_pagesz = 0,		/* 0 selects the default page size */
};
157 1.3 riastrad
158 1.1 riastrad static int
159 1.1 riastrad kmem_cache_ctor(void *cookie, void *ptr, int flags __unused)
160 1.1 riastrad {
161 1.1 riastrad struct kmem_cache *const kc = cookie;
162 1.1 riastrad
163 1.1 riastrad if (kc->kc_ctor)
164 1.1 riastrad (*kc->kc_ctor)(ptr);
165 1.1 riastrad
166 1.1 riastrad return 0;
167 1.1 riastrad }
168 1.1 riastrad
169 1.1 riastrad static inline struct kmem_cache *
170 1.1 riastrad kmem_cache_create(const char *name, size_t size, size_t align,
171 1.1 riastrad unsigned long flags, void (*ctor)(void *))
172 1.1 riastrad {
173 1.3 riastrad struct pool_allocator *palloc = NULL;
174 1.1 riastrad struct kmem_cache *kc;
175 1.1 riastrad
176 1.1 riastrad if (ISSET(flags, SLAB_HWCACHE_ALIGN))
177 1.1 riastrad align = roundup(MAX(1, align), CACHE_LINE_SIZE);
178 1.3 riastrad if (ISSET(flags, SLAB_TYPESAFE_BY_RCU))
179 1.3 riastrad palloc = &pool_allocator_kmem_rcu;
180 1.1 riastrad
181 1.1 riastrad kc = kmem_alloc(sizeof(*kc), KM_SLEEP);
182 1.3 riastrad kc->kc_pool_cache = pool_cache_init(size, align, 0, 0, name, palloc,
183 1.1 riastrad IPL_NONE, &kmem_cache_ctor, NULL, kc);
184 1.1 riastrad kc->kc_size = size;
185 1.1 riastrad kc->kc_ctor = ctor;
186 1.1 riastrad
187 1.1 riastrad return kc;
188 1.1 riastrad }
189 1.1 riastrad
/*
 * Linux convenience macro: create a cache for objects of type
 * struct T, named after the type, with flags F and no constructor.
 */
#define KMEM_CACHE(T, F)						      \
	kmem_cache_create(#T, sizeof(struct T), __alignof__(struct T),	      \
	    (F), NULL)
193 1.3 riastrad
/*
 * Linux kmem_cache_destroy: tear down a cache created by
 * kmem_cache_create.  The pool cache is destroyed before the
 * descriptor it references is freed.
 */
static inline void
kmem_cache_destroy(struct kmem_cache *kc)
{

	pool_cache_destroy(kc->kc_pool_cache);
	kmem_free(kc, sizeof(*kc));
}
201 1.1 riastrad
202 1.1 riastrad static inline void *
203 1.1 riastrad kmem_cache_alloc(struct kmem_cache *kc, gfp_t gfp)
204 1.1 riastrad {
205 1.1 riastrad int flags = 0;
206 1.1 riastrad void *ptr;
207 1.1 riastrad
208 1.1 riastrad if (gfp & __GFP_WAIT)
209 1.1 riastrad flags |= PR_NOWAIT;
210 1.1 riastrad else
211 1.1 riastrad flags |= PR_WAITOK;
212 1.1 riastrad
213 1.1 riastrad ptr = pool_cache_get(kc->kc_pool_cache, flags);
214 1.1 riastrad if (ptr == NULL)
215 1.1 riastrad return NULL;
216 1.1 riastrad
217 1.1 riastrad if (ISSET(gfp, __GFP_ZERO))
218 1.1 riastrad (void)memset(ptr, 0, kc->kc_size);
219 1.1 riastrad
220 1.1 riastrad return ptr;
221 1.1 riastrad }
222 1.1 riastrad
223 1.1 riastrad static inline void *
224 1.1 riastrad kmem_cache_zalloc(struct kmem_cache *kc, gfp_t gfp)
225 1.1 riastrad {
226 1.1 riastrad
227 1.1 riastrad return kmem_cache_alloc(kc, (gfp | __GFP_ZERO));
228 1.1 riastrad }
229 1.1 riastrad
/*
 * Linux kmem_cache_free: return an object to the cache it was
 * allocated from.
 */
static inline void
kmem_cache_free(struct kmem_cache *kc, void *ptr)
{

	pool_cache_put(kc->kc_pool_cache, ptr);
}
236 1.1 riastrad
/*
 * Linux kmem_cache_shrink: release idle cached objects back to the
 * system via pool_cache_reclaim.
 */
static inline void
kmem_cache_shrink(struct kmem_cache *kc)
{

	pool_cache_reclaim(kc->kc_pool_cache);
}
243 1.2 riastrad
244 1.1 riastrad #endif /* _LINUX_SLAB_H_ */
245