/*	$NetBSD: subr_kmem.c,v 1.30 2009/10/12 23:36:02 yamt Exp $	*/

/*-
 * Copyright (c) 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Allocator of kernel wired memory.
 *
 * TODO:
 * - is an "intrsafe" version worth having?  maybe..
 */
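
/*
 * A typical use of the interface, with a hypothetical caller-defined
 * structure (the allocator itself imposes no type):
 *
 *	struct example *e;
 *
 *	e = kmem_alloc(sizeof(*e), KM_SLEEP);
 *	...use e...
 *	kmem_free(e, sizeof(*e));
 *
 * KM_SLEEP allows the allocation to wait for memory; KM_NOSLEEP makes
 * it return NULL instead.  kmem_zalloc() behaves like kmem_alloc() but
 * returns zeroed memory.  Unlike the traditional malloc(9)/free(9)
 * interface, kmem_free() requires the caller to supply the size of the
 * original allocation.
 */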

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.30 2009/10/12 23:36:02 yamt Exp $");

#include <sys/param.h>
#include <sys/callback.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>
#include <uvm/uvm_kmguard.h>

#include <lib/libkern/libkern.h>

#define	KMEM_QUANTUM_SIZE	(ALIGNBYTES + 1)
#define	KMEM_QCACHE_MAX		(KMEM_QUANTUM_SIZE * 32)
#define	KMEM_CACHE_COUNT	16

typedef struct kmem_cache {
	pool_cache_t		kc_cache;
	struct pool_allocator	kc_pa;
	char			kc_name[12];
} kmem_cache_t;

static vmem_t *kmem_arena;
static struct callback_entry kmem_kva_reclaim_entry;

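/*
 * Requests whose padded size falls between kmem_cache_min and
 * kmem_cache_max are rounded up to a multiple of kmem_cache_min
 * (via kmem_cache_mask/kmem_cache_shift) and served from the matching
 * pool_cache; anything else goes directly to the kmem_arena, whose
 * quantum caches serve the smallest sizes.  See kmem_init() below.
 */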
static kmem_cache_t kmem_cache[KMEM_CACHE_COUNT + 1];
static size_t kmem_cache_max;
static size_t kmem_cache_min;
static size_t kmem_cache_mask;
static int kmem_cache_shift;

#if defined(DEBUG)
int kmem_guard_depth;
size_t kmem_guard_size;
static struct uvm_kmguard kmem_guard;
static void *kmem_freecheck;
#define	KMEM_POISON
#define	KMEM_REDZONE
#define	KMEM_SIZE
#define	KMEM_GUARD
#endif /* defined(DEBUG) */

#if defined(KMEM_POISON)
static void kmem_poison_fill(void *, size_t);
static void kmem_poison_check(void *, size_t);
#else /* defined(KMEM_POISON) */
#define	kmem_poison_fill(p, sz)		/* nothing */
#define	kmem_poison_check(p, sz)	/* nothing */
#endif /* defined(KMEM_POISON) */

#if defined(KMEM_REDZONE)
#define	REDZONE_SIZE	1
#else /* defined(KMEM_REDZONE) */
#define	REDZONE_SIZE	0
#endif /* defined(KMEM_REDZONE) */

#if defined(KMEM_SIZE)
#define	SIZE_SIZE	(max(KMEM_QUANTUM_SIZE, sizeof(size_t)))
static void kmem_size_set(void *, size_t);
static void kmem_size_check(const void *, size_t);
#else
#define	SIZE_SIZE	0
#define	kmem_size_set(p, sz)	/* nothing */
#define	kmem_size_check(p, sz)	/* nothing */
#endif
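
/*
 * Debug features enabled by the DEBUG option above:
 *
 * KMEM_POISON: free memory is filled with an address-derived pattern
 *	that is verified when the memory is allocated again.
 * KMEM_REDZONE: a guard byte is placed past each allocation; it must
 *	still hold the poison pattern when the item is freed.
 * KMEM_SIZE: the padded allocation size is stored in a header before
 *	each item and checked against the size passed to kmem_free().
 * KMEM_GUARD: allocations of up to kmem_guard_size bytes are diverted
 *	to uvm_kmguard for use-after-free detection.
 */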

static vmem_addr_t kmem_backend_alloc(vmem_t *, vmem_size_t, vmem_size_t *,
    vm_flag_t);
static void kmem_backend_free(vmem_t *, vmem_addr_t, vmem_size_t);
static int kmem_kva_reclaim_callback(struct callback_entry *, void *, void *);

static inline vm_flag_t
kmf_to_vmf(km_flag_t kmflags)
{
	vm_flag_t vmflags;

	KASSERT((kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);
	KASSERT((~kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);

	vmflags = 0;
	if ((kmflags & KM_SLEEP) != 0) {
		vmflags |= VM_SLEEP;
	}
	if ((kmflags & KM_NOSLEEP) != 0) {
		vmflags |= VM_NOSLEEP;
	}

	return vmflags;
}

static void *
kmem_poolpage_alloc(struct pool *pool, int prflags)
{

	KASSERT(KM_SLEEP == PR_WAITOK);
	KASSERT(KM_NOSLEEP == PR_NOWAIT);

	return (void *)vmem_alloc(kmem_arena, pool->pr_alloc->pa_pagesz,
	    kmf_to_vmf(prflags) | VM_INSTANTFIT);
}

static void
kmem_poolpage_free(struct pool *pool, void *addr)
{

	vmem_free(kmem_arena, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

/* ---- kmem API */

/*
 * kmem_alloc: allocate wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_alloc(size_t size, km_flag_t kmflags)
{
	kmem_cache_t *kc;
	uint8_t *p;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	KASSERT(size > 0);

#ifdef KMEM_GUARD
	if (size <= kmem_guard_size) {
		return uvm_kmguard_alloc(&kmem_guard, size,
		    (kmflags & KM_SLEEP) != 0);
	}
#endif

	size += REDZONE_SIZE + SIZE_SIZE;
	if (size >= kmem_cache_min && size <= kmem_cache_max) {
		kc = &kmem_cache[(size + kmem_cache_mask) >> kmem_cache_shift];
		KASSERT(size <= kc->kc_pa.pa_pagesz);
		KASSERT(KM_SLEEP == PR_WAITOK);
		KASSERT(KM_NOSLEEP == PR_NOWAIT);
		kmflags &= (KM_SLEEP | KM_NOSLEEP);
		p = pool_cache_get(kc->kc_cache, kmflags);
	} else {
		p = (void *)vmem_alloc(kmem_arena, size,
		    kmf_to_vmf(kmflags) | VM_INSTANTFIT);
	}
	if (__predict_true(p != NULL)) {
		kmem_poison_check(p, kmem_roundup_size(size));
		FREECHECK_OUT(&kmem_freecheck, p);
		kmem_size_set(p, size);
		p = (uint8_t *)p + SIZE_SIZE;
	}
	return p;
}
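
/*
 * With the DEBUG features enabled, an item returned by kmem_alloc()
 * is laid out as follows; the caller sees only the middle part:
 *
 *	base           base + SIZE_SIZE
 *	|              |
 *	v              v
 *	+--------------+----------------+----------+------------------+
 *	| padded size  | caller's data  | redzone  | poison fill up   |
 *	| (SIZE_SIZE)  |                | (1 byte) | to rounded size  |
 *	+--------------+----------------+----------+------------------+
 *
 * kmem_free() steps the pointer back by SIZE_SIZE, verifies the stored
 * size, and checks that the bytes past the caller's area still hold
 * the poison pattern before poisoning the item again.
 */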

/*
 * kmem_zalloc: allocate zeroed wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_zalloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = kmem_alloc(size, kmflags);
	if (p != NULL) {
		memset(p, 0, size);
	}
	return p;
}

/*
 * kmem_free: free wired memory allocated by kmem_alloc.
 *
 * => must not be called from interrupt context.
 */

void
kmem_free(void *p, size_t size)
{
	kmem_cache_t *kc;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());
	KASSERT(p != NULL);
	KASSERT(size > 0);

#ifdef KMEM_GUARD
	if (size <= kmem_guard_size) {
		uvm_kmguard_free(&kmem_guard, size, p);
		return;
	}
#endif
	size += SIZE_SIZE;
	p = (uint8_t *)p - SIZE_SIZE;
	kmem_size_check(p, size + REDZONE_SIZE);
	FREECHECK_IN(&kmem_freecheck, p);
	LOCKDEBUG_MEM_CHECK(p, size);
	kmem_poison_check((char *)p + size,
	    kmem_roundup_size(size + REDZONE_SIZE) - size);
	kmem_poison_fill(p, size);
	size += REDZONE_SIZE;
	if (size >= kmem_cache_min && size <= kmem_cache_max) {
		kc = &kmem_cache[(size + kmem_cache_mask) >> kmem_cache_shift];
		KASSERT(size <= kc->kc_pa.pa_pagesz);
		pool_cache_put(kc->kc_cache, p);
	} else {
		vmem_free(kmem_arena, (vmem_addr_t)p, size);
	}
}

void
kmem_init(void)
{
	kmem_cache_t *kc;
	size_t sz;
	int i;

#ifdef KMEM_GUARD
	uvm_kmguard_init(&kmem_guard, &kmem_guard_depth, &kmem_guard_size,
	    kernel_map);
#endif

	kmem_arena = vmem_create("kmem", 0, 0, KMEM_QUANTUM_SIZE,
	    kmem_backend_alloc, kmem_backend_free, NULL, KMEM_QCACHE_MAX,
	    VM_SLEEP, IPL_NONE);
	callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback,
	    &kmem_kva_reclaim_entry, kmem_arena, kmem_kva_reclaim_callback);

	/*
	 * kmem caches start at twice the size of the largest vmem qcache
	 * and end at PAGE_SIZE or earlier.  assert that KMEM_QCACHE_MAX
	 * is a power of two.
	 */
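
	/*
	 * For example, if KMEM_QUANTUM_SIZE is 8 (ALIGNBYTES == 7) and
	 * PAGE_SIZE is 4096: KMEM_QCACHE_MAX is 256, so the shift is
	 * ffs(256) == 9 and the caches serve 512, 1024, ..., 4096 byte
	 * items.  A 700 byte request then maps to cache index
	 * (700 + 511) >> 9 == 2, i.e. the 1024 byte cache.
	 */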
	KASSERT(ffs(KMEM_QCACHE_MAX) != 0);
	KASSERT(KMEM_QCACHE_MAX - (1 << (ffs(KMEM_QCACHE_MAX) - 1)) == 0);
	kmem_cache_shift = ffs(KMEM_QCACHE_MAX);
	kmem_cache_min = 1 << kmem_cache_shift;
	kmem_cache_mask = kmem_cache_min - 1;
	for (i = 1; i <= KMEM_CACHE_COUNT; i++) {
		sz = i << kmem_cache_shift;
		if (sz > PAGE_SIZE) {
			break;
		}
		kmem_cache_max = sz;
		kc = &kmem_cache[i];
		kc->kc_pa.pa_pagesz = sz;
		kc->kc_pa.pa_alloc = kmem_poolpage_alloc;
		kc->kc_pa.pa_free = kmem_poolpage_free;
		snprintf(kc->kc_name, sizeof(kc->kc_name), "kmem-%zu", sz);
		kc->kc_cache = pool_cache_init(sz,
		    KMEM_QUANTUM_SIZE, 0, PR_NOALIGN | PR_NOTOUCH,
		    kc->kc_name, &kc->kc_pa, IPL_NONE,
		    NULL, NULL, NULL);
		KASSERT(kc->kc_cache != NULL);
	}
}

size_t
kmem_roundup_size(size_t size)
{

	return vmem_roundup_size(kmem_arena, size);
}

/* ---- uvm glue */

static vmem_addr_t
kmem_backend_alloc(vmem_t *dummy, vmem_size_t size, vmem_size_t *resultsize,
    vm_flag_t vmflags)
{
	uvm_flag_t uflags;
	vaddr_t va;

	KASSERT(dummy == NULL);
	KASSERT(size != 0);
	KASSERT((vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	if ((vmflags & VM_NOSLEEP) != 0) {
		uflags = UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT;
	} else {
		uflags = UVM_KMF_WAITVA;
	}
	*resultsize = size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0,
	    uflags | UVM_KMF_WIRED | UVM_KMF_CANFAIL);
	if (va != 0) {
		kmem_poison_fill((void *)va, size);
	}
	return (vmem_addr_t)va;
}

static void
kmem_backend_free(vmem_t *dummy, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(dummy == NULL);
	KASSERT(addr != 0);
	KASSERT(size != 0);
	KASSERT(size == round_page(size));

	kmem_poison_check((void *)addr, size);
	uvm_km_free(kernel_map, (vaddr_t)addr, size, UVM_KMF_WIRED);
}

static int
kmem_kva_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
{
	vmem_t *vm = obj;

	vmem_reap(vm);
	return CALLBACK_CHAIN_CONTINUE;
}

/* ---- debug */

#if defined(KMEM_POISON)

#if defined(_LP64)
#define	PRIME	0x9e37fffffffc0001UL
#else /* defined(_LP64) */
#define	PRIME	0x9e3779b1
#endif /* defined(_LP64) */

static inline uint8_t
kmem_poison_pattern(const void *p)
{

	return (uint8_t)((((uintptr_t)p) * PRIME)
	    >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
}
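
/*
 * Each poisoned byte is expected to hold a pattern derived from its
 * own address: the address is multiplied by a large prime and the top
 * byte of the product is kept.  Because the expected value differs
 * from byte to byte, a stray write is unlikely to happen to store
 * exactly the right pattern at the location it corrupts.
 */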

static void
kmem_poison_fill(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		*cp = kmem_poison_pattern(cp);
		cp++;
	}
}

static void
kmem_poison_check(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		const uint8_t expected = kmem_poison_pattern(cp);

		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			    __func__, cp, *cp, expected);
		}
		cp++;
	}
}

#endif /* defined(KMEM_POISON) */

#if defined(KMEM_SIZE)
static void
kmem_size_set(void *p, size_t sz)
{

	memcpy(p, &sz, sizeof(sz));
}

static void
kmem_size_check(const void *p, size_t sz)
{
	size_t psz;

	memcpy(&psz, p, sizeof(psz));
	if (psz != sz) {
		panic("kmem_free(%p, %zu) != allocated size %zu",
		    (const uint8_t *)p + SIZE_SIZE, sz - SIZE_SIZE, psz);
	}
}
#endif /* defined(KMEM_SIZE) */