/*	$NetBSD: slab.h,v 1.10 2021/12/19 12:20:46 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SLAB_H_
#define _LINUX_SLAB_H_

#include <sys/kmem.h>

#include <machine/limits.h>

#include <uvm/uvm_extern.h>	/* For PAGE_SIZE.  */

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/rcupdate.h>

#define	ARCH_KMALLOC_MINALIGN	4	/* XXX ??? */

struct linux_malloc {
	size_t	lm_size;
} __aligned(ALIGNBYTES + 1);

static inline int
linux_gfp_to_kmem(gfp_t gfp)
{
	int flags = 0;

	/* This has no meaning to us.  */
	gfp &= ~__GFP_NOWARN;
	gfp &= ~__GFP_RECLAIMABLE;

	/* Pretend this was the same as not passing __GFP_WAIT.  */
	if (ISSET(gfp, __GFP_NORETRY)) {
		gfp &= ~__GFP_NORETRY;
		gfp &= ~__GFP_WAIT;
	}

	if (ISSET(gfp, __GFP_ZERO)) {
		gfp &= ~__GFP_ZERO;
	}

	/*
	 * XXX Handle other cases as they arise -- prefer to fail early
	 * rather than allocate memory without respecting parameters we
	 * don't understand.
	 */
	KASSERT((gfp == GFP_ATOMIC) || (gfp == GFP_NOWAIT) ||
	    ((gfp & ~__GFP_WAIT) == (GFP_KERNEL & ~__GFP_WAIT)));

	if (ISSET(gfp, __GFP_WAIT)) {
		flags |= KM_SLEEP;
		gfp &= ~__GFP_WAIT;
	} else {
		flags |= KM_NOSLEEP;
	}

	return flags;
}
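
/*
 * Illustrative sketch of the translation above, assuming the GFP_*
 * definitions in <linux/gfp.h> set __GFP_WAIT in GFP_KERNEL but not in
 * GFP_ATOMIC or GFP_NOWAIT:
 *
 *	linux_gfp_to_kmem(GFP_KERNEL) == KM_SLEEP	(may block)
 *	linux_gfp_to_kmem(GFP_ATOMIC) == KM_NOSLEEP	(never blocks)
 */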

/*
 * XXX vmalloc and kmalloc both use this.  If you change that, be sure
 * to update vmalloc in <linux/vmalloc.h> and kvfree in <linux/mm.h>.
 */

static inline void *
kmalloc(size_t size, gfp_t gfp)
{
	struct linux_malloc *lm;
	int kmflags = linux_gfp_to_kmem(gfp);

	KASSERTMSG(size < SIZE_MAX - sizeof(*lm), "size=%zu", size);

	if (gfp & __GFP_ZERO)
		lm = kmem_intr_zalloc(sizeof(*lm) + size, kmflags);
	else
		lm = kmem_intr_alloc(sizeof(*lm) + size, kmflags);
	if (lm == NULL)
		return NULL;

	lm->lm_size = size;
	return lm + 1;
}

static inline void *
kzalloc(size_t size, gfp_t gfp)
{
	return kmalloc(size, gfp | __GFP_ZERO);
}

static inline void *
kmalloc_array(size_t n, size_t size, gfp_t gfp)
{
	if ((size != 0) && (n > (SIZE_MAX / size)))
		return NULL;
	return kmalloc(n * size, gfp);
}

static inline void *
kcalloc(size_t n, size_t size, gfp_t gfp)
{
	return kmalloc_array(n, size, (gfp | __GFP_ZERO));
}

static inline void *
krealloc(void *ptr, size_t size, gfp_t gfp)
{
	struct linux_malloc *olm, *nlm;
	int kmflags = linux_gfp_to_kmem(gfp);

	if (gfp & __GFP_ZERO)
		nlm = kmem_intr_zalloc(sizeof(*nlm) + size, kmflags);
	else
		nlm = kmem_intr_alloc(sizeof(*nlm) + size, kmflags);
	if (nlm == NULL)
		return NULL;

	nlm->lm_size = size;
	if (ptr) {
		olm = (struct linux_malloc *)ptr - 1;
		memcpy(nlm + 1, olm + 1, MIN(nlm->lm_size, olm->lm_size));
		kmem_intr_free(olm, sizeof(*olm) + olm->lm_size);
	}
	return nlm + 1;
}

static inline void
kfree(void *ptr)
{
	struct linux_malloc *lm;

	if (ptr == NULL)
		return;

	lm = (struct linux_malloc *)ptr - 1;
	kmem_intr_free(lm, sizeof(*lm) + lm->lm_size);
}
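
/*
 * Example usage (a minimal sketch; `struct foo' and the error path are
 * hypothetical caller code, not part of this header):
 *
 *	struct foo *f;
 *
 *	f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	if (f == NULL)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 */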

#define	SLAB_HWCACHE_ALIGN	__BIT(0)
#define	SLAB_RECLAIM_ACCOUNT	__BIT(1)
#define	SLAB_TYPESAFE_BY_RCU	__BIT(2)

struct kmem_cache {
	pool_cache_t	kc_pool_cache;
	size_t		kc_size;
	void		(*kc_ctor)(void *);
	void		(*kc_dtor)(void *);
};

/* XXX These should be in <sys/pool.h>.  */
void *	pool_page_alloc(struct pool *, int);
void	pool_page_free(struct pool *, void *);

static void
pool_page_free_rcu(struct pool *pp, void *v)
{

	synchronize_rcu();
	pool_page_free(pp, v);
}

static struct pool_allocator pool_allocator_kmem_rcu = {
	.pa_alloc = pool_page_alloc,
	.pa_free = pool_page_free_rcu,
	.pa_pagesz = 0,
};

static int
kmem_cache_ctor(void *cookie, void *ptr, int flags __unused)
{
	struct kmem_cache *const kc = cookie;

	if (kc->kc_ctor)
		(*kc->kc_ctor)(ptr);

	return 0;
}

static void
kmem_cache_dtor(void *cookie, void *ptr)
{
	struct kmem_cache *const kc = cookie;

	if (kc->kc_dtor)
		(*kc->kc_dtor)(ptr);
}

static inline struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
    unsigned long flags, void (*ctor)(void *))
{
	struct pool_allocator *palloc = NULL;
	struct kmem_cache *kc;

	if (ISSET(flags, SLAB_HWCACHE_ALIGN))
		align = roundup(MAX(1, align), CACHE_LINE_SIZE);
	if (ISSET(flags, SLAB_TYPESAFE_BY_RCU))
		palloc = &pool_allocator_kmem_rcu;

	kc = kmem_alloc(sizeof(*kc), KM_SLEEP);
	kc->kc_pool_cache = pool_cache_init(size, align, 0, 0, name, palloc,
	    IPL_VM, &kmem_cache_ctor, NULL, kc);
	kc->kc_size = size;
	kc->kc_ctor = ctor;

	return kc;
}

/* XXX extension */
static inline struct kmem_cache *
kmem_cache_create_dtor(const char *name, size_t size, size_t align,
    unsigned long flags, void (*ctor)(void *), void (*dtor)(void *))
{
	struct pool_allocator *palloc = NULL;
	struct kmem_cache *kc;

	if (ISSET(flags, SLAB_HWCACHE_ALIGN))
		align = roundup(MAX(1, align), CACHE_LINE_SIZE);
	if (ISSET(flags, SLAB_TYPESAFE_BY_RCU))
		palloc = &pool_allocator_kmem_rcu;

	kc = kmem_alloc(sizeof(*kc), KM_SLEEP);
	kc->kc_pool_cache = pool_cache_init(size, align, 0, 0, name, palloc,
	    IPL_VM, &kmem_cache_ctor, &kmem_cache_dtor, kc);
	kc->kc_size = size;
	kc->kc_ctor = ctor;
	kc->kc_dtor = dtor;

	return kc;
}

#define	KMEM_CACHE(T, F)						\
	kmem_cache_create(#T, sizeof(struct T), __alignof__(struct T),	\
	    (F), NULL)
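
/*
 * For example, KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN) expands to
 *
 *	kmem_cache_create("foo", sizeof(struct foo),
 *	    __alignof__(struct foo), SLAB_HWCACHE_ALIGN, NULL)
 *
 * where `struct foo' is a caller-defined type, used here only for
 * illustration.
 */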

static inline void
kmem_cache_destroy(struct kmem_cache *kc)
{

	pool_cache_destroy(kc->kc_pool_cache);
	kmem_free(kc, sizeof(*kc));
}

static inline void *
kmem_cache_alloc(struct kmem_cache *kc, gfp_t gfp)
{
	int flags = 0;
	void *ptr;

	if (gfp & __GFP_WAIT)
		flags |= PR_WAITOK;
	else
		flags |= PR_NOWAIT;

	ptr = pool_cache_get(kc->kc_pool_cache, flags);
	if (ptr == NULL)
		return NULL;

	if (ISSET(gfp, __GFP_ZERO))
		(void)memset(ptr, 0, kc->kc_size);

	return ptr;
}

static inline void *
kmem_cache_zalloc(struct kmem_cache *kc, gfp_t gfp)
{

	return kmem_cache_alloc(kc, (gfp | __GFP_ZERO));
}

static inline void
kmem_cache_free(struct kmem_cache *kc, void *ptr)
{

	pool_cache_put(kc->kc_pool_cache, ptr);
}
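
/*
 * Cache lifecycle sketch (illustrative only; `struct foo' and the caller
 * code are hypothetical):
 *
 *	struct kmem_cache *foo_cache;
 *	struct foo *f;
 *
 *	foo_cache = KMEM_CACHE(foo, 0);
 *	f = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */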

static inline void
kmem_cache_shrink(struct kmem_cache *kc)
{

	pool_cache_reclaim(kc->kc_pool_cache);
}

#endif	/* _LINUX_SLAB_H_ */