/*	$NetBSD: subr_kmem.c,v 1.17.4.1 2007/12/10 12:56:10 yamt Exp $	*/

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * allocator of kernel wired memory.
 *
 * TODO:
 * - is an "intrsafe" version worth having?  maybe..
 */
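
/*
 * typical use (a minimal sketch; "struct foo" and "sc" below are
 * hypothetical names, not part of this file):
 *
 *	struct foo *sc;
 *
 *	sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
 *	...
 *	kmem_free(sc, sizeof(*sc));
 *
 * note that the caller remembers the allocation size; kmem_free
 * needs it because no header is stored with the allocation.
 */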

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.17.4.1 2007/12/10 12:56:10 yamt Exp $");

#include <sys/param.h>
#include <sys/callback.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>

#include <lib/libkern/libkern.h>

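/*
 * the arena quantum.  ALIGNBYTES is the machine's strictest required
 * alignment minus one, so ALIGNBYTES + 1 makes every allocation a
 * multiple of that alignment.
 */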
#define	KMEM_QUANTUM_SIZE	(ALIGNBYTES + 1)

static vmem_t *kmem_arena;
static struct callback_entry kmem_kva_reclaim_entry;

#if defined(DEBUG)
static void *kmem_freecheck;
static void kmem_poison_fill(void *, size_t);
static void kmem_poison_check(void *, size_t);
#else /* defined(DEBUG) */
#define	kmem_poison_fill(p, sz)		/* nothing */
#define	kmem_poison_check(p, sz)	/* nothing */
#endif /* defined(DEBUG) */

static vmem_addr_t kmem_backend_alloc(vmem_t *, vmem_size_t, vmem_size_t *,
    vm_flag_t);
static void kmem_backend_free(vmem_t *, vmem_addr_t, vmem_size_t);
static int kmem_kva_reclaim_callback(struct callback_entry *, void *, void *);

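/*
 * kmf_to_vmf: translate KM_* flags into their VM_* counterparts for
 * vmem.  exactly one of KM_SLEEP and KM_NOSLEEP must be set, which is
 * what the two KASSERTs below check.
 */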
static inline vm_flag_t
kmf_to_vmf(km_flag_t kmflags)
{
	vm_flag_t vmflags;

	KASSERT((kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);
	KASSERT((~kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);

	vmflags = 0;
	if ((kmflags & KM_SLEEP) != 0) {
		vmflags |= VM_SLEEP;
	}
	if ((kmflags & KM_NOSLEEP) != 0) {
		vmflags |= VM_NOSLEEP;
	}

	return vmflags;
}

/* ---- kmem API */

/*
 * kmem_alloc: allocate wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_alloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = (void *)vmem_alloc(kmem_arena, size,
	    kmf_to_vmf(kmflags) | VM_INSTANTFIT);
	if (p != NULL) {
		kmem_poison_check(p, size);
		FREECHECK_OUT(&kmem_freecheck, p);
	}
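	/*
	 * KM_SLEEP allocations are expected to succeed; panic here
	 * rather than hand NULL back to a caller that won't check.
	 */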
#if 1
	if (p == NULL && (kmflags & KM_SLEEP) != 0)
		panic("kmem_alloc");
#endif
	return p;
}

/*
 * kmem_zalloc: allocate zeroed wired memory.
 *
 * => must not be called from interrupt context.
 */

void *
kmem_zalloc(size_t size, km_flag_t kmflags)
{
	void *p;

	p = kmem_alloc(size, kmflags);
	if (p != NULL) {
		memset(p, 0, size);
	}
	return p;
}

/*
 * kmem_free: free wired memory allocated by kmem_alloc.
 *
 * => must not be called from interrupt context.
 * => size must equal the size passed to kmem_alloc.
 */

void
kmem_free(void *p, size_t size)
{

	FREECHECK_IN(&kmem_freecheck, p);
	LOCKDEBUG_MEM_CHECK(p, size);
	kmem_poison_fill(p, size);
	vmem_free(kmem_arena, (vmem_addr_t)p, size);
}

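/*
 * kmem_init: create the kmem vmem arena on top of kernel_map, and
 * register a callback so the arena's cached KVA is reaped when the
 * kernel map runs short of space.
 */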
void
kmem_init(void)
{

	kmem_arena = vmem_create("kmem", 0, 0, KMEM_QUANTUM_SIZE,
	    kmem_backend_alloc, kmem_backend_free, NULL,
	    KMEM_QUANTUM_SIZE * 32, VM_SLEEP|VMC_KMEM, IPL_VM);
	callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback,
	    &kmem_kva_reclaim_entry, kmem_arena, kmem_kva_reclaim_callback);
}

size_t
kmem_roundup_size(size_t size)
{

	return vmem_roundup_size(kmem_arena, size);
}

bool
kmem_running_p(void)
{

	return kmem_arena != NULL;
}

/* ---- uvm glue */

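/*
 * kmem_backend_alloc: import wired, page-granularity KVA from
 * kernel_map into the arena; the arena then carves these pages into
 * quantum-sized allocations.
 */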
static vmem_addr_t
kmem_backend_alloc(vmem_t *dummy, vmem_size_t size, vmem_size_t *resultsize,
    vm_flag_t vmflags)
{
	uvm_flag_t uflags;
	vaddr_t va;

	KASSERT(dummy == NULL);
	KASSERT(size != 0);
	KASSERT((vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	if ((vmflags & VM_NOSLEEP) != 0) {
		uflags = UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT;
	} else {
		uflags = UVM_KMF_WAITVA;
	}
	*resultsize = size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0,
	    uflags | UVM_KMF_WIRED | UVM_KMF_CANFAIL);
	if (va != 0) {
		kmem_poison_fill((void *)va, size);
	}
	return (vmem_addr_t)va;
}

static void
kmem_backend_free(vmem_t *dummy, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(dummy == NULL);
	KASSERT(addr != 0);
	KASSERT(size != 0);
	KASSERT(size == round_page(size));

	kmem_poison_check((void *)addr, size);
	uvm_km_free(kernel_map, (vaddr_t)addr, size, UVM_KMF_WIRED);
}

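/*
 * kmem_kva_reclaim_callback: invoked via the kernel map's reclaim
 * callback chain; ask the arena to release its cached resources back
 * to the backend.
 */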
static int
kmem_kva_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
{
	vmem_t *vm = obj;

	vmem_reap(vm);
	return CALLBACK_CHAIN_CONTINUE;
}

/* ---- debug */

#if defined(DEBUG)

#if defined(_LP64)
#define	PRIME	0x9e37fffffffc0001UL
#else /* defined(_LP64) */
#define	PRIME	0x9e3779b1
#endif /* defined(_LP64) */

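/*
 * the poison pattern for an address is the high byte of that address
 * multiplied by PRIME.  multiplying by a large odd prime spreads the
 * address bits into the top byte (the 32-bit constant is close to
 * 2^32 divided by the golden ratio, as in Fibonacci hashing), so
 * neighbouring bytes get different poison values and shifted or
 * off-by-one corruption is caught, which a constant fill pattern
 * would miss.
 */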
static inline uint8_t
kmem_poison_pattern(const void *p)
{

	return (uint8_t)((((uintptr_t)p) * PRIME)
	    >> ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
}

static void
kmem_poison_fill(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		*cp = kmem_poison_pattern(cp);
		cp++;
	}
}

static void
kmem_poison_check(void *p, size_t sz)
{
	uint8_t *cp;
	const uint8_t *ep;

	cp = p;
	ep = cp + sz;
	while (cp < ep) {
		const uint8_t expected = kmem_poison_pattern(cp);

		if (*cp != expected) {
			panic("%s: %p: 0x%02x != 0x%02x\n",
			    __func__, cp, *cp, expected);
		}
		cp++;
	}
}

#endif /* defined(DEBUG) */