/*	$NetBSD: subr_vmem.c,v 1.92.6.2 2016/03/19 11:30:31 skrll Exp $	*/

/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * - Magazines and Vmem: Extending the Slab Allocator
 *   to Many CPUs and Arbitrary Resources
 *   http://www.usenix.org/event/usenix01/bonwick.html
 *
 * locking & the boundary tag pool:
 * - A pool(9) is used for vmem boundary tags
 * - During a pool get call the global vmem_btag_refill_lock is taken,
 *   to serialize access to the allocation reserve, but no other
 *   vmem arena locks.
 * - During pool_put calls no vmem mutexes are locked.
 * - pool_drain doesn't hold the pool's mutex while releasing memory to
 *   its backing, therefore there is no interference with any vmem mutexes.
 * - The boundary tag pool is forced to put page headers into pool pages
 *   (PR_PHINPAGE) and not off page to avoid pool recursion.
 *   (due to sizeof(bt_t) it should be the case anyway)
 */
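
/*
 * Example of typical vmem API usage (illustrative sketch only, not part
 * of the build; the arena name, base, and sizes are made up for the
 * example):
 *
 *	vmem_t *arena;
 *	vmem_addr_t va;
 *
 *	arena = vmem_create("example", 0x1000, 0x10000, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
 *	if (vmem_alloc(arena, 0x2000, VM_SLEEP | VM_INSTANTFIT, &va) == 0) {
 *		... use [va, va + 0x2000) ...
 *		vmem_free(arena, va, 0x2000);
 *	}
 *	vmem_destroy(arena);
 *
 * With a NULL import function the arena only hands out the spans given
 * at creation time or added later with vmem_add().
 */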

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.92.6.2 2016/03/19 11:30:31 skrll Exp $");

#if defined(_KERNEL) && defined(_KERNEL_OPT)
#include "opt_ddb.h"
#endif /* defined(_KERNEL) && defined(_KERNEL_OPT) */

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>
#include <sys/bitops.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/kernel.h>	/* hz */
#include <sys/callout.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/vmem_impl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_km.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_pdaemon.h>
#else /* defined(_KERNEL) */
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "../sys/vmem.h"
#include "../sys/vmem_impl.h"
#endif /* defined(_KERNEL) */

#if defined(_KERNEL)
#include <sys/evcnt.h>
#define VMEM_EVCNT_DEFINE(name) \
struct evcnt vmem_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "vmem", #name); \
EVCNT_ATTACH_STATIC(vmem_evcnt_##name);
#define VMEM_EVCNT_INCR(ev)	vmem_evcnt_##ev.ev_count++
#define VMEM_EVCNT_DECR(ev)	vmem_evcnt_##ev.ev_count--

VMEM_EVCNT_DEFINE(static_bt_count)
VMEM_EVCNT_DEFINE(static_bt_inuse)

#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)

#else /* defined(_KERNEL) */

#define VMEM_EVCNT_INCR(ev)	/* nothing */
#define VMEM_EVCNT_DECR(ev)	/* nothing */

#define	VMEM_CONDVAR_INIT(vm, wchan)	/* nothing */
#define	VMEM_CONDVAR_DESTROY(vm)	/* nothing */
#define	VMEM_CONDVAR_WAIT(vm)		/* nothing */
#define	VMEM_CONDVAR_BROADCAST(vm)	/* nothing */

#define	UNITTEST
#define	KASSERT(a)		assert(a)
#define	mutex_init(a, b, c)	/* nothing */
#define	mutex_destroy(a)	/* nothing */
#define	mutex_enter(a)		/* nothing */
#define	mutex_tryenter(a)	true
#define	mutex_exit(a)		/* nothing */
#define	mutex_owned(a)		/* nothing */
#define	ASSERT_SLEEPABLE()	/* nothing */
/* wrapped in do/while so the macro expands to a single statement */
#define	panic(...)		do { printf(__VA_ARGS__); abort(); } while (0)
#endif /* defined(_KERNEL) */

#if defined(VMEM_SANITY)
static void vmem_check(vmem_t *);
#else /* defined(VMEM_SANITY) */
#define vmem_check(vm)	/* nothing */
#endif /* defined(VMEM_SANITY) */

#define	VMEM_HASHSIZE_MIN	1	/* XXX */
#define	VMEM_HASHSIZE_MAX	65536	/* XXX */
#define	VMEM_HASHSIZE_INIT	1

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

#if defined(_KERNEL)
static bool vmem_bootstrapped = false;
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

/* ---- misc */

#define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
#define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))

#define	VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)
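
/*
 * A worked example of the two bit tricks above (illustrative comment,
 * assuming a power-of-two "align"/"boundary" and two's complement
 * arithmetic, which the macros rely on):
 *
 *	VMEM_ALIGNUP(13, 8) == 16	(-13 & -8 == -16; -(-16) == 16)
 *	VMEM_ALIGNUP(16, 8) == 16	(already aligned, unchanged)
 *
 *	VMEM_CROSS_P(0x0ff0, 0x1000, 0x1000) != 0  (different 4KB blocks)
 *	VMEM_CROSS_P(0x1000, 0x1fff, 0x1000) == 0  (same 4KB block)
 */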

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
#define	SIZE2ORDER(size)	((int)ilog2(size))

#if !defined(_KERNEL)
#define	xmalloc(sz, flags)	malloc(sz)
#define	xfree(p, sz)		free(p)
#define	bt_alloc(vm, flags)	malloc(sizeof(bt_t))
#define	bt_free(vm, bt)		free(bt)
#else /* defined(_KERNEL) */

#define	xmalloc(sz, flags) \
    kmem_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP)
#define	xfree(p, sz)		kmem_free(p, sz)

/*
 * BT_RESERVE calculation:
 * we allocate memory for boundary tags with vmem; therefore we have
 * to keep a reserve of bts used to allocate memory for bts.
 * This reserve is 4 for each arena involved in allocating vmem's memory.
 * BT_MAXFREE: don't cache excessive counts of bts in arenas
 */
#define STATIC_BT_COUNT 200
#define BT_MINRESERVE 4
#define BT_MAXFREE 64

static struct vmem_btag static_bts[STATIC_BT_COUNT];
static int static_bt_count = STATIC_BT_COUNT;

static struct vmem kmem_va_meta_arena_store;
vmem_t *kmem_va_meta_arena;
static struct vmem kmem_meta_arena_store;
vmem_t *kmem_meta_arena = NULL;

static kmutex_t vmem_btag_refill_lock;
static kmutex_t vmem_btag_lock;
static LIST_HEAD(, vmem_btag) vmem_btag_freelist;
static size_t vmem_btag_freelist_count = 0;
static struct pool vmem_btag_pool;

static void
vmem_kick_pdaemon(void)
{
#if defined(_KERNEL)
	mutex_spin_enter(&uvm_fpageqlock);
	uvm_kick_pdaemon();
	mutex_spin_exit(&uvm_fpageqlock);
#endif
}

/* ---- boundary tag */

static int bt_refill(vmem_t *vm);

static void *
pool_page_alloc_vmem_meta(struct pool *pp, int flags)
{
	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP : VM_NOSLEEP;
	vmem_addr_t va;
	int ret;

	ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
	    (vflags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va);

	return ret ? NULL : (void *)va;
}

static void
pool_page_free_vmem_meta(struct pool *pp, void *v)
{

	vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
}

/* allocator for vmem-pool metadata */
struct pool_allocator pool_allocator_vmem_meta = {
	.pa_alloc = pool_page_alloc_vmem_meta,
	.pa_free = pool_page_free_vmem_meta,
	.pa_pagesz = 0
};

static int
bt_refill(vmem_t *vm)
{
	bt_t *bt;

	VMEM_LOCK(vm);
	if (vm->vm_nfreetags > BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		return 0;
	}

	mutex_enter(&vmem_btag_lock);
	while (!LIST_EMPTY(&vmem_btag_freelist) &&
	    vm->vm_nfreetags <= BT_MINRESERVE) {
		bt = LIST_FIRST(&vmem_btag_freelist);
		LIST_REMOVE(bt, bt_freelist);
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
		vmem_btag_freelist_count--;
		VMEM_EVCNT_INCR(static_bt_inuse);
	}
	mutex_exit(&vmem_btag_lock);

	while (vm->vm_nfreetags <= BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		mutex_enter(&vmem_btag_refill_lock);
		bt = pool_get(&vmem_btag_pool, PR_NOWAIT);
		mutex_exit(&vmem_btag_refill_lock);
		VMEM_LOCK(vm);
		if (bt == NULL)
			break;
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags <= BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		return ENOMEM;
	}

	VMEM_UNLOCK(vm);

	if (kmem_meta_arena != NULL) {
		(void)bt_refill(kmem_arena);
		(void)bt_refill(kmem_va_meta_arena);
		(void)bt_refill(kmem_meta_arena);
	}

	return 0;
}

static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

	VMEM_LOCK(vm);
	while (vm->vm_nfreetags <= BT_MINRESERVE &&
	    (flags & VM_POPULATING) == 0) {
		VMEM_UNLOCK(vm);
		if (bt_refill(vm)) {
			if ((flags & VM_NOSLEEP) != 0) {
				return NULL;
			}

			/*
			 * It would be nice to wait for something specific here
			 * but there are multiple ways that a retry could
			 * succeed and we can't wait for multiple things
			 * simultaneously.  So we'll just sleep for an arbitrary
			 * short period of time and retry regardless.
			 * This should be a very rare case.
			 */

			vmem_kick_pdaemon();
			kpause("btalloc", false, 1, NULL);
		}
		VMEM_LOCK(vm);
	}
	bt = LIST_FIRST(&vm->vm_freetags);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;
	VMEM_UNLOCK(vm);

	return bt;
}

static void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_LOCK(vm);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
	VMEM_UNLOCK(vm);
}

static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	bt_t *t;
	LIST_HEAD(, vmem_btag) tofree;

	LIST_INIT(&tofree);

	VMEM_LOCK(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt_t *bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		if (bt >= static_bts
		    && bt < &static_bts[STATIC_BT_COUNT]) {
			mutex_enter(&vmem_btag_lock);
			LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
			vmem_btag_freelist_count++;
			mutex_exit(&vmem_btag_lock);
			VMEM_EVCNT_DECR(static_bt_inuse);
		} else {
			LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
		}
	}

	VMEM_UNLOCK(vm);
	while (!LIST_EMPTY(&tofree)) {
		t = LIST_FIRST(&tofree);
		LIST_REMOVE(t, bt_freelist);
		pool_put(&vmem_btag_pool, t);
	}
}
#endif /* defined(_KERNEL) */

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */
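
/*
 * A concrete example of the indexing above (illustrative comment): with
 * a 4KB quantum, a 20KB free block has qsize 5, so it sits on
 * freelist[2] (SIZE2ORDER(5) == 2, i.e. the [4, 7] list).
 */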

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * for VM_INSTANTFIT, return the list in which any block is guaranteed to be
 * large enough for the requested size.  otherwise, return the smallest list
 * which can have blocks large enough for the requested size.
 */
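
/*
 * For example (illustrative comment): for qsize 5 (not a power of two),
 * SIZE2ORDER(5) == 2.  VM_BESTFIT starts searching at freelist[2], whose
 * [4, 7] blocks may or may not fit; VM_INSTANTFIT bumps the index to
 * freelist[3], where every [8, 15] block is guaranteed to fit.
 */
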
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);

	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}
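
/*
 * For example (illustrative comment): with qcache_max == 1024, the loop
 * above runs until ORDER2SIZE(i) > 3072, so qc_poolpage_size() returns
 * 4096 -- the smallest power of two strictly greater than 3 * qcache_max.
 */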

static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;
	vmem_addr_t addr;

	if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT, &addr) != 0)
		return NULL;
	return (void *)addr;
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;
		pool_cache_t pc;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);

		pc = pool_cache_init(size,
		    ORDER2SIZE(vm->vm_quantum_shift), 0,
		    PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
		    qc->qc_name, pa, ipl, NULL, NULL, NULL);

		KASSERT(pc);

		qc->qc_cache = pc;
		KASSERT(qc->qc_cache != NULL);	/* XXX */
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			pool_cache_destroy(qc->qc_cache);
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}

static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		pool_cache_destroy(qc->qc_cache);
		prevqc = qc;
	}
}
#endif

#if defined(_KERNEL)
static void
vmem_bootstrap(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&vmem_btag_lock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&vmem_btag_refill_lock, MUTEX_DEFAULT, IPL_VM);

	while (static_bt_count-- > 0) {
		bt_t *bt = &static_bts[static_bt_count];
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		VMEM_EVCNT_INCR(static_bt_count);
		vmem_btag_freelist_count++;
	}
	vmem_bootstrapped = true;
}

void
vmem_subsystem_init(vmem_t *vm)
{

	kmem_va_meta_arena = vmem_init(&kmem_va_meta_arena_store, "vmem-va",
	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, vm,
	    0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT,
	    IPL_VM);

	kmem_meta_arena = vmem_init(&kmem_meta_arena_store, "vmem-meta",
	    0, 0, PAGE_SIZE,
	    uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);

	pool_init(&vmem_btag_pool, sizeof(bt_t), 0, 0, PR_PHINPAGE,
	    "vmembt", &pool_allocator_vmem_meta, IPL_VM);
}
#endif /* defined(_KERNEL) */

static int
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(spanbttype == BT_TYPE_SPAN ||
	    spanbttype == BT_TYPE_SPAN_STATIC);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return ENOMEM;
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return ENOMEM;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	vm->vm_size += size;
	VMEM_UNLOCK(vm);

	return 0;
}

static void
vmem_destroy1(vmem_t *vm)
{

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	if (vm->vm_hashlist != NULL) {
		int i;

		for (i = 0; i < vm->vm_hashsize; i++) {
			bt_t *bt;

			while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
				KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
				bt_free(vm, bt);
			}
		}
		if (vm->vm_hashlist != &vm->vm_hash0) {
			xfree(vm->vm_hashlist,
			    sizeof(struct vmem_hashlist *) * vm->vm_hashsize);
		}
	}

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	xfree(vm, sizeof(*vm));
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;
	int rc;

	if (vm->vm_importfn == NULL) {
		return EINVAL;
	}

	if (vm->vm_flags & VM_LARGEIMPORT) {
		size *= 16;
	}

	if (vm->vm_flags & VM_XIMPORT) {
		rc = ((vmem_ximport_t *)vm->vm_importfn)(vm->vm_arg, size,
		    &size, flags, &addr);
	} else {
		rc = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	}
	if (rc) {
		return ENOMEM;
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) != 0) {
		(*vm->vm_releasefn)(vm->vm_arg, addr, size);
		return ENOMEM;
	}

	return 0;
}

static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist *) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	if (!VMEM_TRYLOCK(vm)) {
		xfree(newhashlist,
		    sizeof(struct vmem_hashlist *) * newhashsize);
		return EBUSY;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);	/* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != &vm->vm_hash0) {
		xfree(oldhashlist,
		    sizeof(struct vmem_hashlist *) * oldhashsize);
	}

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * it's the caller's responsibility to ensure the region is big enough
 * before calling us.
 */
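
/*
 * For example (illustrative comment): a free bt covering
 * [0x10234, 0x20000) asked for size 0x1000, align 0x1000, phase 0x100
 * yields start = VMEM_ALIGNUP(0x10234 - 0x100, 0x1000) + 0x100 = 0x11100,
 * the lowest address in the region congruent to 0x100 modulo 0x1000.
 */
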
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross,
    vmem_addr_t minaddr, vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(size > 0);
	KASSERT(bt->bt_size >= size);	/* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr) {
		end = maxaddr;
	}
	if (start > end) {
		return ENOMEM;
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size - 1 <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(BT_END(bt) - start >= size - 1);
		*addrp = start;
		return 0;
	}
	return ENOMEM;
}

/* ---- vmem API */

/*
 * vmem_init: creates a vmem arena.
 */

vmem_t *
vmem_init(vmem_t *vm, const char *name,
    vmem_addr_t base, vmem_size_t size, vmem_size_t quantum,
    vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
	int i;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(quantum > 0);

#if defined(_KERNEL)
	/* XXX: SMP, we get called early... */
	if (!vmem_bootstrapped) {
		vmem_bootstrap();
	}
#endif /* defined(_KERNEL) */

	if (vm == NULL) {
		vm = xmalloc(sizeof(*vm), flags);
	}
	if (vm == NULL) {
		return NULL;
	}

	VMEM_CONDVAR_INIT(vm, "vmem");
	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_flags = flags;
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	memset(&vm->vm_hash0, 0, sizeof(struct vmem_hashlist));
	vm->vm_hashsize = 1;
	vm->vm_hashlist = &vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	if (flags & VM_BOOTSTRAP) {
		bt_refill(vm);
	}

	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_init(NULL, name, base, size, quantum,
	    importfn, releasefn, source, qcache_max, flags, ipl);
}

/*
 * vmem_xcreate: create an arena, using an alternative import function.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_xcreate(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_ximport_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_init(NULL, name, base, size, quantum,
	    (vmem_import_t *)importfn, releasefn, source,
	    qcache_max, flags | VM_XIMPORT, ipl);
}

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc: allocate resource from the arena.
 */

int
vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, vmem_addr_t *addrp)
{
	const vm_flag_t strat __diagused = flags & VM_FITMASK;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		void *p;
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags));
		if (addrp != NULL)
			*addrp = (vmem_addr_t)p;
		return (p == NULL) ? ENOMEM : 0;
	}
#endif /* defined(QCACHE) */

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, const vm_flag_t flags,
    vmem_addr_t *addrp)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	bt_t *btnew2;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;
	vmem_addr_t start;
	int rc;

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}
	KASSERT((align & vm->vm_quantum_mask) == 0);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((phase & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & (nocross - 1)) == 0);
	KASSERT((align == 0 && phase == 0) || phase < align);
	KASSERT(nocross == 0 || nocross >= size);
	KASSERT(minaddr <= maxaddr);
	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0) {
		align = vm->vm_quantum_mask + 1;
	}

	/*
	 * allocate boundary tags before acquiring the vmem lock.
	 */
	btnew = bt_alloc(vm, flags);
	if (btnew == NULL) {
		return ENOMEM;
	}
	btnew2 = bt_alloc(vm, flags);	/* XXX not necessary if no restrictions */
	if (btnew2 == NULL) {
		bt_free(vm, btnew);
		return ENOMEM;
	}

	/*
	 * choose a free block from which we allocate.
	 */
retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	VMEM_LOCK(vm);
	vmem_check(vm);
	if (strat == VM_INSTANTFIT) {
		/*
		 * just choose the first block which satisfies our restrictions.
		 *
		 * note that we don't need to check the size of the blocks
		 * because any blocks found on these lists should be larger
		 * than the given size.
		 */
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				rc = vmem_fit(bt, size, align, phase,
				    nocross, minaddr, maxaddr, &start);
				if (rc == 0) {
					goto gotit;
				}
				/*
				 * don't bother to follow the bt_freelist link
				 * here.  the list can be very long and we are
				 * told to run fast.  blocks from the later free
				 * lists are larger and have better chances to
				 * satisfy our restrictions.
				 */
			}
		}
	} else { /* VM_BESTFIT */
		/*
		 * we assume that, for space efficiency, it's better to
		 * allocate from a smaller block.  thus we will start searching
		 * from a lower-order list than VM_INSTANTFIT does.
		 * however, don't bother to find the smallest block in a free
		 * list because the list can be very long.  we can revisit it
		 * if/when it turns out to be a problem.
		 *
		 * note that the 'first' list can contain blocks smaller than
		 * the requested size.  thus we need to check bt_size.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					rc = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, &start);
					if (rc == 0) {
						goto gotit;
					}
				}
			}
		}
	}
	VMEM_UNLOCK(vm);
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 || nocross != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	/* XXX eeek, minaddr & maxaddr not respected */
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */

	if ((flags & VM_SLEEP) != 0) {
		vmem_kick_pdaemon();
		VMEM_LOCK(vm);
		VMEM_CONDVAR_WAIT(vm);
		VMEM_UNLOCK(vm);
		goto retry;
	}
fail:
	bt_free(vm, btnew);
	bt_free(vm, btnew2);
	return ENOMEM;

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	vmem_check(vm);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		btnew2 = NULL;
		vmem_check(vm);
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;

	if (addrp != NULL)
		*addrp = btnew->bt_start;
	return 0;
}

/*
 * vmem_free: free the resource to the arena.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(size > 0);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		pool_cache_put(qc->qc_cache, (void *)addr);
		return;
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;
	LIST_HEAD(, vmem_btag) tofree;

	LIST_INIT(&tofree);

	KASSERT(size > 0);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERT(bt != NULL);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) < t->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) < bt->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
	}

	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
		bt_remseg(vm, t);
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		VMEM_UNLOCK(vm);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		VMEM_UNLOCK(vm);
	}

	while (!LIST_EMPTY(&tofree)) {
		t = LIST_FIRST(&tofree);
		LIST_REMOVE(t, bt_freelist);
		bt_free(vm, t);
	}

	bt_freetrim(vm, BT_MAXFREE);
}

/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

	return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}

/*
 * vmem_size: information about an arena's size
 *
 * => return the free/allocated size in the arena
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	default:
		panic("vmem_size");
	}
}

/* ---- rehash */

#if defined(_KERNEL)
static struct callout vmem_rehash_ch;
static int vmem_rehash_interval;
static struct workqueue *vmem_rehash_wq;
static struct work vmem_rehash_wk;

static void
vmem_rehash_all(struct work *wk, void *dummy)
{
	vmem_t *vm;

	KASSERT(wk == &vmem_rehash_wk);
	mutex_enter(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		size_t desired;
		size_t current;

		if (!VMEM_TRYLOCK(vm)) {
			continue;
		}
		desired = vm->vm_nbusytag;
		current = vm->vm_hashsize;
		VMEM_UNLOCK(vm);

		if (desired > VMEM_HASHSIZE_MAX) {
			desired = VMEM_HASHSIZE_MAX;
		} else if (desired < VMEM_HASHSIZE_MIN) {
			desired = VMEM_HASHSIZE_MIN;
		}
		if (desired > current * 2 || desired * 2 < current) {
			vmem_rehash(vm, desired, VM_NOSLEEP);
		}
	}
	mutex_exit(&vmem_list_lock);

	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}

static void
vmem_rehash_all_kick(void *dummy)
{

	workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
}

void
vmem_rehash_start(void)
{
	int error;

	error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
	    vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE);
	if (error) {
		panic("%s: workqueue_create %d\n", __func__, error);
	}
	callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE);
	callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);

	vmem_rehash_interval = hz * 10;
	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}
#endif /* defined(_KERNEL) */

/* ---- debug */

#if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY)

static void bt_dump(const bt_t *, void (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{
	static const char * const table[] = {
		[BT_TYPE_BUSY] = "busy",
		[BT_TYPE_FREE] = "free",
		[BT_TYPE_SPAN] = "span",
		[BT_TYPE_SPAN_STATIC] = "static span",
	};

	if (type >= __arraycount(table)) {
		return "BOGUS";
	}
	return table[type];
}

static void
bt_dump(const bt_t *bt, void (*pr)(const char *, ...))
{

	(*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n",
	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, void (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}

void
vmem_printall(const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}
#endif /* defined(DDB) */
1571 1.37 yamt
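/*
 * vmem_printf: in the kernel this is plain printf; in userland test
 * builds it is a vprintf() wrapper, so the dump routines above can be
 * handed the same printf-like function pointer either way.
 */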
1572 1.60 dyoung #if defined(_KERNEL)
1573 1.60 dyoung #define vmem_printf printf
1574 1.60 dyoung #else
1575 1.1 yamt #include <stdio.h>
1576 1.60 dyoung #include <stdarg.h>
1577 1.60 dyoung
1578 1.60 dyoung static void
1579 1.60 dyoung vmem_printf(const char *fmt, ...)
1580 1.60 dyoung {
1581 1.60 dyoung va_list ap;
1582 1.60 dyoung va_start(ap, fmt);
1583 1.60 dyoung vprintf(fmt, ap);
1584 1.60 dyoung va_end(ap);
1585 1.60 dyoung }
1586 1.60 dyoung #endif
1587 1.1 yamt
1588 1.55 yamt #if defined(VMEM_SANITY)
1589 1.1 yamt
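/*
 * vmem_check_sanity: verify that every boundary tag is well formed
 * (start <= end) and that no two tags of the same kind (span vs.
 * non-span) overlap.  The pairwise scan is O(n^2) in the number of
 * tags, which is tolerable for a diagnostic compiled in on request.
 */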
1590 1.55 yamt static bool
1591 1.55 yamt vmem_check_sanity(vmem_t *vm)
1592 1.1 yamt {
1593 1.55 yamt const bt_t *bt, *bt2;
1594 1.1 yamt
1595 1.55 yamt KASSERT(vm != NULL);
1596 1.1 yamt
1597 1.87 christos TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1598 1.60 dyoung if (bt->bt_start > BT_END(bt)) {
1599 1.55 yamt printf("corrupted tag\n");
1600 1.60 dyoung bt_dump(bt, vmem_printf);
1601 1.55 yamt return false;
1602 1.55 yamt }
1603 1.55 yamt }
1604 1.87 christos TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1605 1.87 christos TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
1606 1.55 yamt if (bt == bt2) {
1607 1.55 yamt continue;
1608 1.55 yamt }
1609 1.55 yamt if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
1610 1.55 yamt continue;
1611 1.55 yamt }
1612 1.60 dyoung if (bt->bt_start <= BT_END(bt2) &&
1613 1.60 dyoung bt2->bt_start <= BT_END(bt)) {
1614 1.55 yamt printf("overlapping tags\n");
1615 1.60 dyoung bt_dump(bt, vmem_printf);
1616 1.60 dyoung bt_dump(bt2, vmem_printf);
1617 1.55 yamt return false;
1618 1.55 yamt }
1619 1.55 yamt }
1620 1.1 yamt }
1621 1.1 yamt
1622 1.55 yamt return true;
1623 1.55 yamt }
1624 1.1 yamt
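/*
 * vmem_check: panic if the arena fails the sanity check; intended to
 * be called from the allocation and free paths when VMEM_SANITY is
 * compiled in.
 */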
1625 1.55 yamt static void
1626 1.55 yamt vmem_check(vmem_t *vm)
1627 1.55 yamt {
1628 1.1 yamt
1629 1.55 yamt if (!vmem_check_sanity(vm)) {
1630 1.55 yamt panic("vmem sanity check failed: vmem %p", vm);
1631 1.1 yamt }
1632 1.1 yamt }
1633 1.1 yamt
1634 1.55 yamt #endif /* defined(VMEM_SANITY) */
1635 1.1 yamt
1636 1.55 yamt #if defined(UNITTEST)
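/*
 * Standalone stress test, built in userland with -DUNITTEST: create a
 * quantum-1 arena, seed it with a handful of disjoint spans, and then
 * perform randomized allocations and frees, dumping the arena after
 * every operation.
 */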
1637 1.1 yamt int
1638 1.57 cegger main(void)
1639 1.1 yamt {
1640 1.61 dyoung int rc;
1641 1.1 yamt vmem_t *vm;
1642 1.1 yamt vmem_addr_t p;
1643 1.1 yamt struct reg {
1644 1.1 yamt vmem_addr_t p;
1645 1.1 yamt vmem_size_t sz;
1646 1.25 thorpej bool x;
1647 1.1 yamt } *reg = NULL;
1648 1.1 yamt int nreg = 0;
1649 1.1 yamt int nalloc = 0;
1650 1.1 yamt int nfree = 0;
1651 1.1 yamt vmem_size_t total = 0;
1652 1.1 yamt #if 1
1653 1.1 yamt vm_flag_t strat = VM_INSTANTFIT;
1654 1.1 yamt #else
1655 1.1 yamt vm_flag_t strat = VM_BESTFIT;
1656 1.1 yamt #endif
1657 1.1 yamt
1658 1.61 dyoung vm = vmem_create("test", 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP,
1659 1.61 dyoung #ifdef _KERNEL
1660 1.61 dyoung IPL_NONE
1661 1.61 dyoung #else
1662 1.61 dyoung 0
1663 1.61 dyoung #endif
1664 1.61 dyoung );
1665 1.1 yamt if (vm == NULL) {
1666 1.1 yamt printf("vmem_create\n");
1667 1.1 yamt exit(EXIT_FAILURE);
1668 1.1 yamt }
1669 1.60 dyoung vmem_dump(vm, vmem_printf);
1670 1.1 yamt
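	/*
	 * Seed the arena with disjoint spans, then probe the topmost
	 * span [0xffffff00, 0xffffffff] with vmem_xalloc(): requests
	 * larger than the span, or constrained to miss it, must fail,
	 * while exact fits must succeed.
	 */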
1671 1.61 dyoung rc = vmem_add(vm, 0, 50, VM_SLEEP);
1672 1.61 dyoung assert(rc == 0);
1673 1.61 dyoung rc = vmem_add(vm, 100, 200, VM_SLEEP);
1674 1.61 dyoung assert(rc == 0);
1675 1.61 dyoung rc = vmem_add(vm, 2000, 1, VM_SLEEP);
1676 1.61 dyoung assert(rc == 0);
1677 1.61 dyoung rc = vmem_add(vm, 40000, 65536, VM_SLEEP);
1678 1.61 dyoung assert(rc == 0);
1679 1.61 dyoung rc = vmem_add(vm, 10000, 10000, VM_SLEEP);
1680 1.61 dyoung assert(rc == 0);
1681 1.61 dyoung rc = vmem_add(vm, 500, 1000, VM_SLEEP);
1682 1.61 dyoung assert(rc == 0);
1683 1.61 dyoung rc = vmem_add(vm, 0xffffff00, 0x100, VM_SLEEP);
1684 1.61 dyoung assert(rc == 0);
1685 1.61 dyoung rc = vmem_xalloc(vm, 0x101, 0, 0, 0,
1686 1.61 dyoung 0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
1687 1.61 dyoung assert(rc != 0);
1688 1.61 dyoung rc = vmem_xalloc(vm, 50, 0, 0, 0, 0, 49, strat|VM_SLEEP, &p);
1689 1.61 dyoung assert(rc == 0 && p == 0);
1690 1.61 dyoung vmem_xfree(vm, p, 50);
1691 1.61 dyoung rc = vmem_xalloc(vm, 25, 0, 0, 0, 0, 24, strat|VM_SLEEP, &p);
1692 1.61 dyoung assert(rc == 0 && p == 0);
1693 1.61 dyoung rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
1694 1.61 dyoung 0xffffff01, 0xffffffff, strat|VM_SLEEP, &p);
1695 1.61 dyoung assert(rc != 0);
1696 1.61 dyoung rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
1697 1.61 dyoung 0xffffff00, 0xfffffffe, strat|VM_SLEEP, &p);
1698 1.61 dyoung assert(rc != 0);
1699 1.61 dyoung rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
1700 1.61 dyoung 0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
1701 1.61 dyoung assert(rc == 0);
1702 1.60 dyoung vmem_dump(vm, vmem_printf);
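	/*
	 * Randomized stress loop: on each iteration, allocate with
	 * probability just over one half (sometimes via the constrained
	 * vmem_xalloc() path), otherwise free a randomly chosen live
	 * allocation.  The loop ends when an unconstrained vmem_alloc()
	 * fails.
	 */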
1703 1.1 yamt for (;;) {
1704 1.1 yamt struct reg *r;
1705 1.10 yamt int t = rand() % 100;
1706 1.1 yamt
1707 1.10 yamt if (t > 45) {
1708 1.10 yamt /* alloc */
1709 1.1 yamt vmem_size_t sz = rand() % 500 + 1;
1710 1.25 thorpej bool x;
1711 1.10 yamt vmem_size_t align, phase, nocross;
1712 1.10 yamt vmem_addr_t minaddr, maxaddr;
1713 1.10 yamt
1714 1.10 yamt if (t > 70) {
1715 1.26 thorpej x = true;
1716 1.10 yamt /* XXX */
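				/*
				 * Generate random constraints, then drop
				 * combinations vmem_xalloc() can never
				 * satisfy: phase must be smaller than the
				 * alignment, and a span of sz units must not
				 * be forced to cross a nocross boundary;
				 * min/max are resampled until they form a
				 * valid range.
				 */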
1717 1.10 yamt align = 1 << (rand() % 15);
1718 1.10 yamt phase = rand() % 65536;
1719 1.10 yamt nocross = 1 << (rand() % 15);
1720 1.10 yamt if (align <= phase) {
1721 1.10 yamt phase = 0;
1722 1.10 yamt }
1723 1.19 yamt if (VMEM_CROSS_P(phase, phase + sz - 1,
1724 1.19 yamt nocross)) {
1725 1.10 yamt nocross = 0;
1726 1.10 yamt }
1727 1.60 dyoung do {
1728 1.60 dyoung minaddr = rand() % 50000;
1729 1.60 dyoung maxaddr = rand() % 70000;
1730 1.60 dyoung } while (minaddr > maxaddr);
1731 1.10 yamt printf("=== xalloc %" PRIu64
1732 1.10 yamt " align=%" PRIu64 ", phase=%" PRIu64
1733 1.10 yamt ", nocross=%" PRIu64 ", min=%" PRIu64
1734 1.10 yamt ", max=%" PRIu64 "\n",
1735 1.10 yamt (uint64_t)sz,
1736 1.10 yamt (uint64_t)align,
1737 1.10 yamt (uint64_t)phase,
1738 1.10 yamt (uint64_t)nocross,
1739 1.10 yamt (uint64_t)minaddr,
1740 1.10 yamt (uint64_t)maxaddr);
1741 1.61 dyoung rc = vmem_xalloc(vm, sz, align, phase, nocross,
1742 1.61 dyoung minaddr, maxaddr, strat|VM_SLEEP, &p);
1743 1.10 yamt } else {
1744 1.26 thorpej x = false;
1745 1.10 yamt printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
1746 1.61 dyoung rc = vmem_alloc(vm, sz, strat|VM_SLEEP, &p);
1747 1.10 yamt }
1748 1.1 yamt printf("-> %" PRIu64 "\n", (uint64_t)p);
1749 1.60 dyoung vmem_dump(vm, vmem_printf);
1750 1.61 dyoung if (rc != 0) {
1751 1.10 yamt if (x) {
1752 1.10 yamt continue;
1753 1.10 yamt }
1754 1.1 yamt break;
1755 1.1 yamt }
1756 1.1 yamt nreg++;
1757 1.1 yamt reg = realloc(reg, sizeof(*reg) * nreg);
1758 1.1 yamt r = &reg[nreg - 1];
1759 1.1 yamt r->p = p;
1760 1.1 yamt r->sz = sz;
1761 1.10 yamt r->x = x;
1762 1.1 yamt total += sz;
1763 1.1 yamt nalloc++;
1764 1.1 yamt } else if (nreg != 0) {
1765 1.10 yamt /* free */
1766 1.1 yamt r = &reg[rand() % nreg];
1767 1.1 yamt printf("=== free %" PRIu64 ", %" PRIu64 "\n",
1768 1.1 yamt (uint64_t)r->p, (uint64_t)r->sz);
1769 1.10 yamt if (r->x) {
1770 1.10 yamt vmem_xfree(vm, r->p, r->sz);
1771 1.10 yamt } else {
1772 1.10 yamt vmem_free(vm, r->p, r->sz);
1773 1.10 yamt }
1774 1.1 yamt total -= r->sz;
1775 1.60 dyoung vmem_dump(vm, vmem_printf);
1776 1.1 yamt *r = reg[nreg - 1];
1777 1.1 yamt nreg--;
1778 1.1 yamt nfree++;
1779 1.1 yamt }
1780 1.1 yamt printf("total=%" PRIu64 "\n", (uint64_t)total);
1781 1.1 yamt }
1782 1.1 yamt fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
1783 1.1 yamt (uint64_t)total, nalloc, nfree);
1784 1.1 yamt exit(EXIT_SUCCESS);
1785 1.1 yamt }
1786 1.55 yamt #endif /* defined(UNITTEST) */
1787