/*	$NetBSD: subr_kmem.c,v 1.11.4.6 2008/01/21 09:46:18 yamt Exp $	*/

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Allocator of kernel wired memory.
 *
 * TODO:
 * - is it worth having an "intrsafe" version?  maybe.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.11.4.6 2008/01/21 09:46:18 yamt Exp $");

#include <sys/param.h>
#include <sys/callback.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>

#include <lib/libkern/libkern.h>

#define	KMEM_QUANTUM_SIZE	(ALIGNBYTES + 1)

static vmem_t *kmem_arena;
static struct callback_entry kmem_kva_reclaim_entry;

#if defined(DEBUG)
static void *kmem_freecheck;
static void kmem_poison_fill(void *, size_t);
static void kmem_poison_check(void *, size_t);
#else /* defined(DEBUG) */
#define	kmem_poison_fill(p, sz)		/* nothing */
#define	kmem_poison_check(p, sz)	/* nothing */
#endif /* defined(DEBUG) */

static vmem_addr_t kmem_backend_alloc(vmem_t *, vmem_size_t, vmem_size_t *,
    vm_flag_t);
static void kmem_backend_free(vmem_t *, vmem_addr_t, vmem_size_t);
static int kmem_kva_reclaim_callback(struct callback_entry *, void *, void *);

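/*
 * kmf_to_vmf: convert kmem allocation flags (KM_SLEEP/KM_NOSLEEP) into the
 * corresponding vmem flags (VM_SLEEP/VM_NOSLEEP).  Exactly one of the two
 * flags must be set; the KASSERTs below enforce this.
 */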
static inline vm_flag_t
kmf_to_vmf(km_flag_t kmflags)
{
	vm_flag_t vmflags;

	KASSERT((kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);
	KASSERT((~kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);

	vmflags = 0;
	if ((kmflags & KM_SLEEP) != 0) {
		vmflags |= VM_SLEEP;
	}
	if ((kmflags & KM_NOSLEEP) != 0) {
		vmflags |= VM_NOSLEEP;
	}

	return vmflags;
}

/* ---- kmem API */

/*
 * kmem_alloc: allocate wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_alloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = (void *)vmem_alloc(kmem_arena, size,
	    kmf_to_vmf(kmflags) | VM_INSTANTFIT);
	if (p != NULL) {
		kmem_poison_check(p, kmem_roundup_size(size));
		FREECHECK_OUT(&kmem_freecheck, p);
	}
	return p;
}

/*
 * kmem_zalloc: allocate zeroed wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_zalloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = kmem_alloc(size, kmflags);
	if (p != NULL) {
		memset(p, 0, size);
	}
	return p;
}

/*
 * kmem_free: free wired memory allocated by kmem_alloc or kmem_zalloc.
 *
 * => must not be called from interrupt context.
 * => "size" must match the size passed to the allocator.
 */

void
kmem_free(void *p, size_t size)
{

	FREECHECK_IN(&kmem_freecheck, p);
	LOCKDEBUG_MEM_CHECK(p, size);
	kmem_poison_check((char *)p + size, kmem_roundup_size(size) - size);
	kmem_poison_fill(p, size);
	vmem_free(kmem_arena, (vmem_addr_t)p, size);
}
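
/*
 * Example usage (a minimal sketch, not taken from this file): a typical
 * kmem_zalloc()/kmem_free() pairing.  The struct and function names below
 * are hypothetical.  The essential points are that the caller may not be in
 * interrupt context, that even KM_SLEEP allocations can fail here (the
 * backend passes UVM_KMF_CANFAIL), and that kmem_free() must be given the
 * same size that was passed to the allocator.
 */
#if 0
struct example_softc {
	int	sc_state;
};

static struct example_softc *
example_create(void)
{
	struct example_softc *sc;

	/* may sleep waiting for memory; can still return NULL */
	sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
	return sc;
}

static void
example_destroy(struct example_softc *sc)
{

	/* size must match the original allocation */
	kmem_free(sc, sizeof(*sc));
}
#endif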
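/*
 * kmem_init: create the "kmem" vmem arena, backed by wired pages imported
 * from kernel_map, and register a callback so that the arena's cached KVA
 * can be reaped when the kernel map needs virtual space back.
 */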
void
kmem_init(void)
{

	kmem_arena = vmem_create("kmem", 0, 0, KMEM_QUANTUM_SIZE,
	    kmem_backend_alloc, kmem_backend_free, NULL,
	    KMEM_QUANTUM_SIZE * 32, VM_SLEEP, IPL_NONE);
	callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback,
	    &kmem_kva_reclaim_entry, kmem_arena, kmem_kva_reclaim_callback);
}

size_t
kmem_roundup_size(size_t size)
{

	return vmem_roundup_size(kmem_arena, size);
}

/* ---- uvm glue */

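/*
 * kmem_backend_alloc: import backing store for the arena by allocating
 * page-rounded, wired KVA from kernel_map.  In the DEBUG case the fresh
 * pages are filled with the poison pattern before being handed to vmem.
 */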
static vmem_addr_t
kmem_backend_alloc(vmem_t *dummy, vmem_size_t size, vmem_size_t *resultsize,
    vm_flag_t vmflags)
{
	uvm_flag_t uflags;
	vaddr_t va;

	KASSERT(dummy == NULL);
	KASSERT(size != 0);
	KASSERT((vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	if ((vmflags & VM_NOSLEEP) != 0) {
		uflags = UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT;
	} else {
		uflags = UVM_KMF_WAITVA;
	}
	*resultsize = size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0,
	    uflags | UVM_KMF_WIRED | UVM_KMF_CANFAIL);
	if (va != 0) {
		kmem_poison_fill((void *)va, size);
	}
	return (vmem_addr_t)va;
}

static void
kmem_backend_free(vmem_t *dummy, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(dummy == NULL);
	KASSERT(addr != 0);
	KASSERT(size != 0);
	KASSERT(size == round_page(size));

	kmem_poison_check((void *)addr, size);
	uvm_km_free(kernel_map, (vaddr_t)addr, size, UVM_KMF_WIRED);
}

static int
kmem_kva_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
{
	vmem_t *vm = obj;

	vmem_reap(vm);
	return CALLBACK_CHAIN_CONTINUE;
}

/* ---- debug */

#if defined(DEBUG)

#if defined(_LP64)
#define	PRIME	0x9e37fffffffc0001UL
#else /* defined(_LP64) */
#define	PRIME	0x9e3779b1
#endif /* defined(_LP64) */

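/*
 * kmem_poison_pattern: derive the expected poison byte for a given address.
 * Each byte is filled with a value computed from its own address by a
 * multiplicative hash (the top byte of addr * PRIME), so corruption can be
 * detected regardless of where within the allocation it occurs.
 */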
static inline uint8_t
kmem_poison_pattern(const void *p)
{

	return (uint8_t)((((uintptr_t)p) * PRIME) >>
	    ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
}

static void
kmem_poison_fill(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		*cp = kmem_poison_pattern(cp);
		cp++;
	}
}

static void
kmem_poison_check(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		const uint8_t expected = kmem_poison_pattern(cp);

		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			    __func__, cp, *cp, expected);
		}
		cp++;
	}
}

#endif /* defined(DEBUG) */