/*	$NetBSD: subr_vmem.c,v 1.89 2014/03/11 20:32:05 pooka Exp $	*/

/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * - Magazines and Vmem: Extending the Slab Allocator
 *   to Many CPUs and Arbitrary Resources
 *   http://www.usenix.org/event/usenix01/bonwick.html
 *
 * locking & the boundary tag pool:
 * - A pool(9) is used for vmem boundary tags.
 * - During a pool get call the global vmem_btag_refill_lock is taken,
 *   to serialize access to the allocation reserve, but no other
 *   vmem arena locks are held.
 * - During pool_put calls no vmem mutexes are locked.
 * - pool_drain doesn't hold the pool's mutex while releasing memory to
 *   its backing allocator, so it does not interfere with any vmem mutexes.
 * - The boundary tag pool is forced to put page headers into pool pages
 *   (PR_PHINPAGE) and not off page to avoid pool recursion.
 *   (due to sizeof(bt_t) it should be the case anyway)
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.89 2014/03/11 20:32:05 pooka Exp $");

#if defined(_KERNEL)
#include "opt_ddb.h"
#endif /* defined(_KERNEL) */

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>
#include <sys/bitops.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/kernel.h>	/* hz */
#include <sys/callout.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/vmem_impl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_km.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_pdaemon.h>
#else /* defined(_KERNEL) */
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "../sys/vmem.h"
#include "../sys/vmem_impl.h"
#endif /* defined(_KERNEL) */

#if defined(_KERNEL)
#include <sys/evcnt.h>
#define	VMEM_EVCNT_DEFINE(name) \
struct evcnt vmem_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "vmem", #name); \
EVCNT_ATTACH_STATIC(vmem_evcnt_##name);
#define	VMEM_EVCNT_INCR(ev)	vmem_evcnt_##ev.ev_count++
#define	VMEM_EVCNT_DECR(ev)	vmem_evcnt_##ev.ev_count--

VMEM_EVCNT_DEFINE(static_bt_count)
VMEM_EVCNT_DEFINE(static_bt_inuse)

#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)

#else /* defined(_KERNEL) */

#define	VMEM_EVCNT_INCR(ev)	/* nothing */
#define	VMEM_EVCNT_DECR(ev)	/* nothing */

#define	VMEM_CONDVAR_INIT(vm, wchan)	/* nothing */
#define	VMEM_CONDVAR_DESTROY(vm)	/* nothing */
#define	VMEM_CONDVAR_WAIT(vm)		/* nothing */
#define	VMEM_CONDVAR_BROADCAST(vm)	/* nothing */

#define	UNITTEST
#define	KASSERT(a)		assert(a)
#define	mutex_init(a, b, c)	/* nothing */
#define	mutex_destroy(a)	/* nothing */
#define	mutex_enter(a)		/* nothing */
#define	mutex_tryenter(a)	true
#define	mutex_exit(a)		/* nothing */
#define	mutex_owned(a)		/* nothing */
#define	ASSERT_SLEEPABLE()	/* nothing */
#define	panic(...)		(printf(__VA_ARGS__), abort())
#endif /* defined(_KERNEL) */

#if defined(VMEM_SANITY)
static void vmem_check(vmem_t *);
#else /* defined(VMEM_SANITY) */
#define	vmem_check(vm)	/* nothing */
#endif /* defined(VMEM_SANITY) */

#define	VMEM_HASHSIZE_MIN	1	/* XXX */
#define	VMEM_HASHSIZE_MAX	65536	/* XXX */
#define	VMEM_HASHSIZE_INIT	1

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

#if defined(_KERNEL)
static bool vmem_bootstrapped = false;
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

/* ---- misc */

#define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
#define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))

#define	VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

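/*
 * Illustrative examples (not from the original source): assuming the
 * usual two's-complement identities, VMEM_ALIGNUP rounds an address up
 * to a power-of-two alignment and VMEM_CROSS_P tests whether a range
 * crosses a power-of-two boundary.  For instance:
 *
 *	VMEM_ALIGNUP(0x1234, 0x1000) == 0x2000    (-(-0x1234 & -0x1000))
 *	VMEM_ALIGNUP(0x1000, 0x1000) == 0x1000    (already aligned)
 *	VMEM_CROSS_P(0x0fff, 0x1000, 0x1000) != 0 (crosses a 4KB boundary)
 *	VMEM_CROSS_P(0x1000, 0x1fff, 0x1000) == 0 (stays in one 4KB block)
 */
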
#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
#define	SIZE2ORDER(size)	((int)ilog2(size))

#if !defined(_KERNEL)
#define	xmalloc(sz, flags)	malloc(sz)
#define	xfree(p, sz)		free(p)
#define	bt_alloc(vm, flags)	malloc(sizeof(bt_t))
#define	bt_free(vm, bt)		free(bt)
#else /* defined(_KERNEL) */

#define	xmalloc(sz, flags) \
    kmem_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP)
#define	xfree(p, sz)		kmem_free(p, sz)

/*
 * BT_RESERVE calculation:
 * we allocate memory for boundary tags with vmem; therefore we have
 * to keep a reserve of bts used to allocate memory for bts.
 * This reserve is 4 for each arena involved in allocating vmem's memory.
 * BT_MAXFREE: don't cache excessive counts of bts in arenas
 */
#define	STATIC_BT_COUNT 200
#define	BT_MINRESERVE 4
#define	BT_MAXFREE 64

static struct vmem_btag static_bts[STATIC_BT_COUNT];
static int static_bt_count = STATIC_BT_COUNT;

static struct vmem kmem_va_meta_arena_store;
vmem_t *kmem_va_meta_arena;
static struct vmem kmem_meta_arena_store;
vmem_t *kmem_meta_arena = NULL;

static kmutex_t vmem_btag_refill_lock;
static kmutex_t vmem_btag_lock;
static LIST_HEAD(, vmem_btag) vmem_btag_freelist;
static size_t vmem_btag_freelist_count = 0;
static struct pool vmem_btag_pool;

/* ---- boundary tag */

static int bt_refill(vmem_t *vm, vm_flag_t flags);

static void *
pool_page_alloc_vmem_meta(struct pool *pp, int flags)
{
	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP : VM_NOSLEEP;
	vmem_addr_t va;
	int ret;

	ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
	    (vflags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va);

	return ret ? NULL : (void *)va;
}

static void
pool_page_free_vmem_meta(struct pool *pp, void *v)
{

	vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
}

/* allocator for vmem-pool metadata */
struct pool_allocator pool_allocator_vmem_meta = {
	.pa_alloc = pool_page_alloc_vmem_meta,
	.pa_free = pool_page_free_vmem_meta,
	.pa_pagesz = 0
};

static int
bt_refill(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

	VMEM_LOCK(vm);
	if (vm->vm_nfreetags > BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		return 0;
	}

	mutex_enter(&vmem_btag_lock);
	while (!LIST_EMPTY(&vmem_btag_freelist) &&
	    vm->vm_nfreetags <= BT_MINRESERVE) {
		bt = LIST_FIRST(&vmem_btag_freelist);
		LIST_REMOVE(bt, bt_freelist);
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
		vmem_btag_freelist_count--;
		VMEM_EVCNT_INCR(static_bt_inuse);
	}
	mutex_exit(&vmem_btag_lock);

	while (vm->vm_nfreetags <= BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		mutex_enter(&vmem_btag_refill_lock);
		bt = pool_get(&vmem_btag_pool,
		    (flags & VM_SLEEP) ? PR_WAITOK : PR_NOWAIT);
		mutex_exit(&vmem_btag_refill_lock);
		VMEM_LOCK(vm);
		if (bt == NULL && (flags & VM_SLEEP) == 0)
			break;
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	VMEM_UNLOCK(vm);

	if (vm->vm_nfreetags == 0) {
		return ENOMEM;
	}

	if (kmem_meta_arena != NULL) {
		bt_refill(kmem_arena, (flags & ~VM_FITMASK)
		    | VM_INSTANTFIT | VM_POPULATING);
		bt_refill(kmem_va_meta_arena, (flags & ~VM_FITMASK)
		    | VM_INSTANTFIT | VM_POPULATING);
		bt_refill(kmem_meta_arena, (flags & ~VM_FITMASK)
		    | VM_INSTANTFIT | VM_POPULATING);
	}

	return 0;
}

static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;
	VMEM_LOCK(vm);
	while (vm->vm_nfreetags <= BT_MINRESERVE &&
	    (flags & VM_POPULATING) == 0) {
		VMEM_UNLOCK(vm);
		if (bt_refill(vm, VM_NOSLEEP | VM_INSTANTFIT)) {
			return NULL;
		}
		VMEM_LOCK(vm);
	}
	bt = LIST_FIRST(&vm->vm_freetags);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;
	VMEM_UNLOCK(vm);

	return bt;
}

static void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_LOCK(vm);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
	VMEM_UNLOCK(vm);
}

static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	bt_t *t;
	LIST_HEAD(, vmem_btag) tofree;

	LIST_INIT(&tofree);

	VMEM_LOCK(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt_t *bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		/* is this one of the statically allocated bootstrap tags? */
		if (bt >= static_bts
		    && bt < &static_bts[STATIC_BT_COUNT]) {
			mutex_enter(&vmem_btag_lock);
			LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
			vmem_btag_freelist_count++;
			mutex_exit(&vmem_btag_lock);
			VMEM_EVCNT_DECR(static_bt_inuse);
		} else {
			LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
		}
	}

	VMEM_UNLOCK(vm);
	while (!LIST_EMPTY(&tofree)) {
		t = LIST_FIRST(&tofree);
		LIST_REMOVE(t, bt_freelist);
		pool_put(&vmem_btag_pool, t);
	}
}
#endif /* defined(_KERNEL) */

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */

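/*
 * Illustrative example (not from the original source): with a quantum of
 * 4 bytes, a 24-byte free segment has qsize == 24 >> 2 == 6 quanta, so
 * bt_freehead_tofree() files it on freelist[SIZE2ORDER(6)] == freelist[2],
 * which covers [4, 7].  Any segment found on freelist[3] or above is
 * therefore guaranteed to hold at least 8 quanta.
 */
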
static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * for VM_INSTANTFIT, return the list in which any blocks are large enough
 * for the requested size.  otherwise, return the list which can have blocks
 * large enough for the requested size.
 */

static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);

	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

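/*
 * Illustrative example (not from the original source): for a request of
 * qsize == 6 quanta, SIZE2ORDER(6) == 2.  VM_BESTFIT starts searching at
 * freelist[2] ([4, 7]), which may contain 4- or 5-quantum blocks that are
 * too small and must be size-checked.  VM_INSTANTFIT rounds up to
 * freelist[3] ([8, 15]), where the first block found is always big enough,
 * at the cost of potentially skipping a usable 6- or 7-quantum block.
 */
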
/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}
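
/*
 * Illustrative example (not from the original source): for
 * qcache_max == 1024, the loop above runs until ORDER2SIZE(i) exceeds
 * 3072, so qc_poolpage_size() returns 4096.  Each qcache pool page is
 * thus the smallest power of two larger than three times the biggest
 * cached object, which keeps per-page overhead modest.
 */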

static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;
	vmem_addr_t addr;

	if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT, &addr) != 0)
		return NULL;
	return (void *)addr;
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;
		pool_cache_t pc;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);

		pc = pool_cache_init(size,
		    ORDER2SIZE(vm->vm_quantum_shift), 0,
		    PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
		    qc->qc_name, pa, ipl, NULL, NULL, NULL);

		KASSERT(pc);

		qc->qc_cache = pc;
		KASSERT(qc->qc_cache != NULL);	/* XXX */
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			pool_cache_destroy(qc->qc_cache);
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}

static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		pool_cache_destroy(qc->qc_cache);
		prevqc = qc;
	}
}
#endif

#if defined(_KERNEL)
static void
vmem_bootstrap(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&vmem_btag_lock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&vmem_btag_refill_lock, MUTEX_DEFAULT, IPL_VM);

	while (static_bt_count-- > 0) {
		bt_t *bt = &static_bts[static_bt_count];
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		VMEM_EVCNT_INCR(static_bt_count);
		vmem_btag_freelist_count++;
	}
	vmem_bootstrapped = true;
}

void
vmem_subsystem_init(vmem_t *vm)
{

	kmem_va_meta_arena = vmem_init(&kmem_va_meta_arena_store, "vmem-va",
	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, vm,
	    0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT,
	    IPL_VM);

	kmem_meta_arena = vmem_init(&kmem_meta_arena_store, "vmem-meta",
	    0, 0, PAGE_SIZE,
	    uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);

	pool_init(&vmem_btag_pool, sizeof(bt_t), 0, 0, PR_PHINPAGE,
	    "vmembt", &pool_allocator_vmem_meta, IPL_VM);
}
#endif /* defined(_KERNEL) */

static int
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(spanbttype == BT_TYPE_SPAN ||
	    spanbttype == BT_TYPE_SPAN_STATIC);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return ENOMEM;
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return ENOMEM;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	vm->vm_size += size;
	VMEM_UNLOCK(vm);

	return 0;
}

static void
vmem_destroy1(vmem_t *vm)
{

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	if (vm->vm_hashlist != NULL) {
		int i;

		for (i = 0; i < vm->vm_hashsize; i++) {
			bt_t *bt;

			while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
				KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
				bt_free(vm, bt);
			}
		}
		if (vm->vm_hashlist != &vm->vm_hash0) {
			xfree(vm->vm_hashlist,
			    sizeof(struct vmem_hashlist *) * vm->vm_hashsize);
		}
	}

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	xfree(vm, sizeof(*vm));
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;
	int rc;

	if (vm->vm_importfn == NULL) {
		return EINVAL;
	}

	if (vm->vm_flags & VM_LARGEIMPORT) {
		size *= 16;
	}

	if (vm->vm_flags & VM_XIMPORT) {
		rc = ((vmem_ximport_t *)vm->vm_importfn)(vm->vm_arg, size,
		    &size, flags, &addr);
	} else {
		rc = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	}
	if (rc) {
		return ENOMEM;
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) != 0) {
		(*vm->vm_releasefn)(vm->vm_arg, addr, size);
		return ENOMEM;
	}

	return 0;
}

static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist *) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	if (!VMEM_TRYLOCK(vm)) {
		xfree(newhashlist,
		    sizeof(struct vmem_hashlist *) * newhashsize);
		return EBUSY;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);	/* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != &vm->vm_hash0) {
		xfree(oldhashlist,
		    sizeof(struct vmem_hashlist *) * oldhashsize);
	}

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * it's the caller's responsibility to ensure the region is big enough
 * before calling us.
 */

static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross,
    vmem_addr_t minaddr, vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(size > 0);
	KASSERT(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr) {
		end = maxaddr;
	}
	if (start > end) {
		return ENOMEM;
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size - 1 <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(BT_END(bt) - start >= size - 1);
		*addrp = start;
		return 0;
	}
	return ENOMEM;
}

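/*
 * Illustrative example (not from the original source): consider a free
 * segment [0x1050, 0x2fff] and a request for size 0x100, align 0x400,
 * phase 0x10, with minaddr/maxaddr unrestricted.  vmem_fit() computes
 * VMEM_ALIGNUP(0x1050 - 0x10, 0x400) + 0x10 == 0x1410, which lies inside
 * the segment and leaves room for 0x100 bytes, so it succeeds with
 * *addrp == 0x1410 (an address congruent to 0x10 modulo 0x400).
 */
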
/* ---- vmem API */

/*
 * vmem_init: creates a vmem arena.
 */

vmem_t *
vmem_init(vmem_t *vm, const char *name,
    vmem_addr_t base, vmem_size_t size, vmem_size_t quantum,
    vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
	int i;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(quantum > 0);

#if defined(_KERNEL)
	/* XXX: SMP, we get called early... */
	if (!vmem_bootstrapped) {
		vmem_bootstrap();
	}
#endif /* defined(_KERNEL) */

	if (vm == NULL) {
		vm = xmalloc(sizeof(*vm), flags);
	}
	if (vm == NULL) {
		return NULL;
	}

	VMEM_CONDVAR_INIT(vm, "vmem");
	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_flags = flags;
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	memset(&vm->vm_hash0, 0, sizeof(struct vmem_hashlist));
	vm->vm_hashsize = 1;
	vm->vm_hashlist = &vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	if (flags & VM_BOOTSTRAP) {
		bt_refill(vm, VM_NOSLEEP);
	}

	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_init(NULL, name, base, size, quantum,
	    importfn, releasefn, source, qcache_max, flags, ipl);
}

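/*
 * Illustrative usage sketch (not from the original source; the arena
 * name, base, and sizes are made up):
 *
 *	vmem_t *arena;
 *	vmem_addr_t addr;
 *
 *	arena = vmem_create("example", 0x1000, 0x10000, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
 *	if (vmem_alloc(arena, 128, VM_SLEEP | VM_INSTANTFIT, &addr) == 0) {
 *		... use [addr, addr + 127] ...
 *		vmem_free(arena, addr, 128);
 *	}
 *	vmem_destroy(arena);
 */
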
/*
 * vmem_xcreate: create an arena that takes an alternative import function.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_xcreate(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_ximport_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_init(NULL, name, base, size, quantum,
	    (vmem_import_t *)importfn, releasefn, source,
	    qcache_max, flags | VM_XIMPORT, ipl);
}

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

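/*
 * Illustrative example (not from the original source): with quantum 16
 * (vm_quantum_mask == 15), vmem_roundup_size(vm, 40) returns
 * (40 + 15) & ~15 == 48, and a size that is already a multiple of the
 * quantum, e.g. 64, is returned unchanged.
 */
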
/*
 * vmem_alloc: allocate resource from the arena.
 */

int
vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, vmem_addr_t *addrp)
{
	const vm_flag_t strat __diagused = flags & VM_FITMASK;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		void *p;
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags));
		if (addrp != NULL)
			*addrp = (vmem_addr_t)p;
		return (p == NULL) ? ENOMEM : 0;
	}
#endif /* defined(QCACHE) */

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, const vm_flag_t flags,
    vmem_addr_t *addrp)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	bt_t *btnew2;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;
	vmem_addr_t start;
	int rc;

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}
	KASSERT((align & vm->vm_quantum_mask) == 0);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((phase & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & (nocross - 1)) == 0);
	KASSERT((align == 0 && phase == 0) || phase < align);
	KASSERT(nocross == 0 || nocross >= size);
	KASSERT(minaddr <= maxaddr);
	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0) {
		align = vm->vm_quantum_mask + 1;
	}

	/*
	 * allocate boundary tags before acquiring the vmem lock.
	 */
	btnew = bt_alloc(vm, flags);
	if (btnew == NULL) {
		return ENOMEM;
	}
	btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
	if (btnew2 == NULL) {
		bt_free(vm, btnew);
		return ENOMEM;
	}

	/*
	 * choose a free block from which we allocate.
	 */
retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	VMEM_LOCK(vm);
	vmem_check(vm);
	if (strat == VM_INSTANTFIT) {
		/*
		 * just choose the first block which satisfies our restrictions.
		 *
		 * note that we don't need to check the size of the blocks
		 * because any blocks found on these lists should be larger
		 * than the given size.
		 */
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				rc = vmem_fit(bt, size, align, phase,
				    nocross, minaddr, maxaddr, &start);
				if (rc == 0) {
					goto gotit;
				}
				/*
				 * don't bother to follow the bt_freelist link
				 * here.  the list can be very long and we are
				 * told to run fast.  blocks from the later free
				 * lists are larger and have better chances to
				 * satisfy our restrictions.
				 */
			}
		}
	} else { /* VM_BESTFIT */
		/*
		 * we assume that, for space efficiency, it's better to
		 * allocate from a smaller block.  thus we will start searching
		 * from a lower-order list than VM_INSTANTFIT does.
		 * however, don't bother to find the smallest block in a free
		 * list because the list can be very long.  we can revisit it
		 * if/when it turns out to be a problem.
		 *
		 * note that the 'first' list can contain blocks smaller than
		 * the requested size.  thus we need to check bt_size.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					rc = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, &start);
					if (rc == 0) {
						goto gotit;
					}
				}
			}
		}
	}
	VMEM_UNLOCK(vm);
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 || nocross != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	/* XXX eeek, minaddr & maxaddr not respected */
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */

	if ((flags & VM_SLEEP) != 0) {
#if defined(_KERNEL)
		mutex_spin_enter(&uvm_fpageqlock);
		uvm_kick_pdaemon();
		mutex_spin_exit(&uvm_fpageqlock);
#endif
		VMEM_LOCK(vm);
		VMEM_CONDVAR_WAIT(vm);
		VMEM_UNLOCK(vm);
		goto retry;
	}
fail:
	bt_free(vm, btnew);
	bt_free(vm, btnew2);
	return ENOMEM;

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	vmem_check(vm);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		btnew2 = NULL;
		vmem_check(vm);
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;

	if (addrp != NULL)
		*addrp = btnew->bt_start;
	return 0;
}

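/*
 * Illustrative usage sketch for vmem_xalloc (not from the original
 * source; the arena and sizes are made up): allocate 0x2000 bytes
 * aligned to 0x1000, constrained not to cross a 0x10000 boundary and
 * to lie within [0x100000, 0x1fffff]:
 *
 *	vmem_addr_t addr;
 *	int error;
 *
 *	error = vmem_xalloc(arena, 0x2000, 0x1000, 0, 0x10000,
 *	    0x100000, 0x1fffff, VM_SLEEP | VM_BESTFIT, &addr);
 *	if (error == 0)
 *		vmem_xfree(arena, addr, 0x2000);
 */
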
/*
 * vmem_free: free the resource to the arena.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(size > 0);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		pool_cache_put(qc->qc_cache, (void *)addr);
		return;
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;
	LIST_HEAD(, vmem_btag) tofree;

	LIST_INIT(&tofree);

	KASSERT(size > 0);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERT(bt != NULL);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) < t->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) < bt->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
	}

	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
		bt_remseg(vm, t);
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		VMEM_UNLOCK(vm);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		VMEM_UNLOCK(vm);
	}

	while (!LIST_EMPTY(&tofree)) {
		t = LIST_FIRST(&tofree);
		LIST_REMOVE(t, bt_freelist);
		bt_free(vm, t);
	}

	bt_freetrim(vm, BT_MAXFREE);
}

/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

	return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}

/*
 * vmem_size: information about an arena's size
 *
 * => return the free/allocated size of the arena
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	default:
		panic("vmem_size");
	}
}

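/*
 * Illustrative example (not from the original source): for any arena the
 * two partial queries always sum to the total, i.e.
 *
 *	vmem_size(vm, VMEM_ALLOC) + vmem_size(vm, VMEM_FREE)
 *	    == vmem_size(vm, VMEM_FREE|VMEM_ALLOC)
 *
 * since vm_inuse tracks busy tags and vm_size tracks added spans.
 */
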
1375 1.30 yamt /* ---- rehash */
1376 1.30 yamt
1377 1.30 yamt #if defined(_KERNEL)
1378 1.30 yamt static struct callout vmem_rehash_ch;
1379 1.30 yamt static int vmem_rehash_interval;
1380 1.30 yamt static struct workqueue *vmem_rehash_wq;
1381 1.30 yamt static struct work vmem_rehash_wk;
1382 1.30 yamt
1383 1.30 yamt static void
1384 1.30 yamt vmem_rehash_all(struct work *wk, void *dummy)
1385 1.30 yamt {
1386 1.30 yamt vmem_t *vm;
1387 1.30 yamt
1388 1.30 yamt KASSERT(wk == &vmem_rehash_wk);
1389 1.30 yamt mutex_enter(&vmem_list_lock);
1390 1.30 yamt LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1391 1.30 yamt size_t desired;
1392 1.30 yamt size_t current;
1393 1.30 yamt
1394 1.30 yamt if (!VMEM_TRYLOCK(vm)) {
1395 1.30 yamt continue;
1396 1.30 yamt }
1397 1.30 yamt desired = vm->vm_nbusytag;
1398 1.30 yamt current = vm->vm_hashsize;
1399 1.30 yamt VMEM_UNLOCK(vm);
1400 1.30 yamt
1401 1.30 yamt if (desired > VMEM_HASHSIZE_MAX) {
1402 1.30 yamt desired = VMEM_HASHSIZE_MAX;
1403 1.30 yamt } else if (desired < VMEM_HASHSIZE_MIN) {
1404 1.30 yamt desired = VMEM_HASHSIZE_MIN;
1405 1.30 yamt }
1406 1.30 yamt if (desired > current * 2 || desired * 2 < current) {
1407 1.30 yamt vmem_rehash(vm, desired, VM_NOSLEEP);
1408 1.30 yamt }
1409 1.30 yamt }
1410 1.30 yamt mutex_exit(&vmem_list_lock);
1411 1.30 yamt
1412 1.30 yamt callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
1413 1.30 yamt }
1414 1.30 yamt
1415 1.30 yamt static void
1416 1.30 yamt vmem_rehash_all_kick(void *dummy)
1417 1.30 yamt {
1418 1.30 yamt
1419 1.32 rmind workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
1420 1.30 yamt }
1421 1.30 yamt
1422 1.30 yamt void
1423 1.30 yamt vmem_rehash_start(void)
1424 1.30 yamt {
1425 1.30 yamt int error;
1426 1.30 yamt
1427 1.30 yamt error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
1428 1.41 ad vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE);
1429 1.30 yamt if (error) {
1430 1.30 yamt panic("%s: workqueue_create %d\n", __func__, error);
1431 1.30 yamt }
1432 1.41 ad callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE);
1433 1.30 yamt callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);
1434 1.30 yamt
1435 1.30 yamt vmem_rehash_interval = hz * 10;
1436 1.30 yamt callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
1437 1.30 yamt }
1438 1.30 yamt #endif /* defined(_KERNEL) */
1439 1.30 yamt
1440 1.1 yamt /* ---- debug */
1441 1.1 yamt
1442 1.55 yamt #if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY)
1443 1.55 yamt
1444 1.82 christos static void bt_dump(const bt_t *, void (*)(const char *, ...)
1445 1.82 christos __printflike(1, 2));
1446 1.55 yamt
1447 1.55 yamt static const char *
1448 1.55 yamt bt_type_string(int type)
1449 1.55 yamt {
1450 1.55 yamt static const char * const table[] = {
1451 1.55 yamt [BT_TYPE_BUSY] = "busy",
1452 1.55 yamt [BT_TYPE_FREE] = "free",
1453 1.55 yamt [BT_TYPE_SPAN] = "span",
1454 1.55 yamt [BT_TYPE_SPAN_STATIC] = "static span",
1455 1.55 yamt };
1456 1.55 yamt
1457 1.55 yamt if (type >= __arraycount(table)) {
1458 1.55 yamt return "BOGUS";
1459 1.55 yamt }
1460 1.55 yamt return table[type];
1461 1.55 yamt }
1462 1.55 yamt
1463 1.55 yamt static void
1464 1.55 yamt bt_dump(const bt_t *bt, void (*pr)(const char *, ...))
1465 1.55 yamt {
1466 1.55 yamt
1467 1.55 yamt (*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n",
1468 1.55 yamt bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
1469 1.55 yamt bt->bt_type, bt_type_string(bt->bt_type));
1470 1.55 yamt }
1471 1.55 yamt
1472 1.55 yamt static void
1473 1.82 christos vmem_dump(const vmem_t *vm , void (*pr)(const char *, ...) __printflike(1, 2))
1474 1.55 yamt {
1475 1.55 yamt const bt_t *bt;
1476 1.55 yamt int i;
1477 1.55 yamt
1478 1.55 yamt (*pr)("vmem %p '%s'\n", vm, vm->vm_name);
1479 1.87 christos TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1480 1.55 yamt bt_dump(bt, pr);
1481 1.55 yamt }
1482 1.55 yamt
1483 1.55 yamt for (i = 0; i < VMEM_MAXORDER; i++) {
1484 1.55 yamt const struct vmem_freelist *fl = &vm->vm_freelist[i];
1485 1.55 yamt
1486 1.55 yamt if (LIST_EMPTY(fl)) {
1487 1.55 yamt continue;
1488 1.55 yamt }
1489 1.55 yamt
1490 1.55 yamt (*pr)("freelist[%d]\n", i);
1491 1.55 yamt LIST_FOREACH(bt, fl, bt_freelist) {
1492 1.55 yamt bt_dump(bt, pr);
1493 1.55 yamt }
1494 1.55 yamt }
1495 1.55 yamt }
1496 1.55 yamt
1497 1.55 yamt #endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */
1498 1.55 yamt
1499 1.37 yamt #if defined(DDB)
1500 1.37 yamt static bt_t *
1501 1.37 yamt vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
1502 1.37 yamt {
1503 1.39 yamt bt_t *bt;
1504 1.37 yamt
1505 1.87 christos TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1506 1.39 yamt if (BT_ISSPAN_P(bt)) {
1507 1.39 yamt continue;
1508 1.39 yamt }
1509 1.60 dyoung if (bt->bt_start <= addr && addr <= BT_END(bt)) {
1510 1.39 yamt return bt;
1511 1.37 yamt }
1512 1.37 yamt }
1513 1.37 yamt
1514 1.37 yamt return NULL;
1515 1.37 yamt }
1516 1.37 yamt
1517 1.37 yamt void
1518 1.37 yamt vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
1519 1.37 yamt {
1520 1.37 yamt vmem_t *vm;
1521 1.37 yamt
1522 1.37 yamt LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1523 1.37 yamt bt_t *bt;
1524 1.37 yamt
1525 1.37 yamt bt = vmem_whatis_lookup(vm, addr);
1526 1.37 yamt if (bt == NULL) {
1527 1.37 yamt continue;
1528 1.37 yamt }
1529 1.39 yamt (*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
1530 1.37 yamt (void *)addr, (void *)bt->bt_start,
1531 1.39 yamt (size_t)(addr - bt->bt_start), vm->vm_name,
1532 1.39 yamt (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
1533 1.37 yamt }
1534 1.37 yamt }
1535 1.43 cegger
1536 1.55 yamt void
1537 1.55 yamt vmem_printall(const char *modif, void (*pr)(const char *, ...))
1538 1.43 cegger {
1539 1.55 yamt const vmem_t *vm;
1540 1.43 cegger
1541 1.47 cegger LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1542 1.55 yamt vmem_dump(vm, pr);
1543 1.43 cegger }
1544 1.43 cegger }
1545 1.43 cegger
1546 1.43 cegger void
1547 1.43 cegger vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...))
1548 1.43 cegger {
1549 1.55 yamt const vmem_t *vm = (const void *)addr;
1550 1.43 cegger
1551 1.55 yamt vmem_dump(vm, pr);
1552 1.43 cegger }
1553 1.37 yamt #endif /* defined(DDB) */
1554 1.37 yamt
1555 1.60 dyoung #if defined(_KERNEL)
1556 1.60 dyoung #define vmem_printf printf
1557 1.60 dyoung #else
1558 1.1 yamt #include <stdio.h>
1559 1.60 dyoung #include <stdarg.h>
1560 1.60 dyoung
1561 1.60 dyoung static void
1562 1.60 dyoung vmem_printf(const char *fmt, ...)
1563 1.60 dyoung {
1564 1.60 dyoung va_list ap;
1565 1.60 dyoung va_start(ap, fmt);
1566 1.60 dyoung vprintf(fmt, ap);
1567 1.60 dyoung va_end(ap);
1568 1.60 dyoung }
1569 1.60 dyoung #endif
1570 1.1 yamt
1571 1.55 yamt #if defined(VMEM_SANITY)
1572 1.1 yamt
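/*
 * vmem_check_sanity: verify two invariants over the segment list:
 * every tag must be well-formed (start <= end), and no two tags of
 * the same kind may overlap.  Span tags are only compared against
 * other span tags, since a span legitimately covers the segments
 * carved out of it.  O(n^2), so debug builds only.
 */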
1573 1.55 yamt static bool
1574 1.55 yamt vmem_check_sanity(vmem_t *vm)
1575 1.1 yamt {
1576 1.55 yamt const bt_t *bt, *bt2;
1577 1.1 yamt
1578 1.55 yamt KASSERT(vm != NULL);
1579 1.1 yamt
1580 1.87 christos TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1581 1.60 dyoung if (bt->bt_start > BT_END(bt)) {
1582 1.55 yamt printf("corrupted tag\n");
1583 1.60 dyoung bt_dump(bt, vmem_printf);
1584 1.55 yamt return false;
1585 1.55 yamt }
1586 1.55 yamt }
1587 1.87 christos TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1588 1.87 christos TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
1589 1.55 yamt if (bt == bt2) {
1590 1.55 yamt continue;
1591 1.55 yamt }
1592 1.55 yamt if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
1593 1.55 yamt continue;
1594 1.55 yamt }
1595 1.60 dyoung if (bt->bt_start <= BT_END(bt2) &&
1596 1.60 dyoung bt2->bt_start <= BT_END(bt)) {
1597 1.55 yamt 				printf("overlapping tags\n");
1598 1.60 dyoung bt_dump(bt, vmem_printf);
1599 1.60 dyoung bt_dump(bt2, vmem_printf);
1600 1.55 yamt return false;
1601 1.55 yamt }
1602 1.55 yamt }
1603 1.1 yamt }
1604 1.1 yamt
1605 1.55 yamt return true;
1606 1.55 yamt }
1607 1.1 yamt
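/*
 * vmem_check: panic if the arena fails the sanity check.
 */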
1608 1.55 yamt static void
1609 1.55 yamt vmem_check(vmem_t *vm)
1610 1.55 yamt {
1611 1.1 yamt
1612 1.55 yamt if (!vmem_check_sanity(vm)) {
1613 1.55 yamt 		panic("vmem %p: sanity check failed", vm);
1614 1.1 yamt }
1615 1.1 yamt }
1616 1.1 yamt
1617 1.55 yamt #endif /* defined(VMEM_SANITY) */
1618 1.1 yamt
1619 1.55 yamt #if defined(UNITTEST)
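/*
 * Standalone randomized stress test.  Building this file in userland
 * with -DUNITTEST (and without -D_KERNEL) produces a program that
 * seeds an arena with fixed spans, probes vmem_xalloc() boundary
 * cases, then loops allocating and freeing at random until a plain
 * vmem_alloc() fails.  An illustrative (unverified) build command:
 *
 *	cc -DUNITTEST -I/path/to/headers subr_vmem.c -o vmemtest
 */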
1620 1.1 yamt int
1621 1.57 cegger main(void)
1622 1.1 yamt {
1623 1.61 dyoung int rc;
1624 1.1 yamt vmem_t *vm;
1625 1.1 yamt vmem_addr_t p;
1626 1.1 yamt struct reg {
1627 1.1 yamt vmem_addr_t p;
1628 1.1 yamt vmem_size_t sz;
1629 1.25 thorpej bool x;
1630 1.1 yamt } *reg = NULL;
1631 1.1 yamt int nreg = 0;
1632 1.1 yamt int nalloc = 0;
1633 1.1 yamt int nfree = 0;
1634 1.1 yamt vmem_size_t total = 0;
1635 1.1 yamt #if 1
1636 1.1 yamt vm_flag_t strat = VM_INSTANTFIT;
1637 1.1 yamt #else
1638 1.1 yamt vm_flag_t strat = VM_BESTFIT;
1639 1.1 yamt #endif
1640 1.1 yamt
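	/*
	 * Create an arena with quantum 1 and no backing allocator;
	 * all resource is donated explicitly via vmem_add() below.
	 */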
1641 1.61 dyoung vm = vmem_create("test", 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP,
1642 1.61 dyoung #ifdef _KERNEL
1643 1.61 dyoung IPL_NONE
1644 1.61 dyoung #else
1645 1.61 dyoung 0
1646 1.61 dyoung #endif
1647 1.61 dyoung );
1648 1.1 yamt if (vm == NULL) {
1649 1.1 yamt printf("vmem_create\n");
1650 1.1 yamt exit(EXIT_FAILURE);
1651 1.1 yamt }
1652 1.60 dyoung vmem_dump(vm, vmem_printf);
1653 1.1 yamt
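	/*
	 * Seed several disjoint spans, then probe vmem_xalloc() edge
	 * cases at the top of the 32-bit range: the over-sized request
	 * must fail, the exact-fit requests must succeed at the
	 * expected addresses, and requests whose [min, max] window is
	 * one unit too tight must fail.
	 */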
1654 1.61 dyoung rc = vmem_add(vm, 0, 50, VM_SLEEP);
1655 1.61 dyoung assert(rc == 0);
1656 1.61 dyoung rc = vmem_add(vm, 100, 200, VM_SLEEP);
1657 1.61 dyoung assert(rc == 0);
1658 1.61 dyoung rc = vmem_add(vm, 2000, 1, VM_SLEEP);
1659 1.61 dyoung assert(rc == 0);
1660 1.61 dyoung rc = vmem_add(vm, 40000, 65536, VM_SLEEP);
1661 1.61 dyoung assert(rc == 0);
1662 1.61 dyoung rc = vmem_add(vm, 10000, 10000, VM_SLEEP);
1663 1.61 dyoung assert(rc == 0);
1664 1.61 dyoung rc = vmem_add(vm, 500, 1000, VM_SLEEP);
1665 1.61 dyoung assert(rc == 0);
1666 1.61 dyoung rc = vmem_add(vm, 0xffffff00, 0x100, VM_SLEEP);
1667 1.61 dyoung assert(rc == 0);
1668 1.61 dyoung rc = vmem_xalloc(vm, 0x101, 0, 0, 0,
1669 1.61 dyoung 0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
1670 1.61 dyoung assert(rc != 0);
1671 1.61 dyoung rc = vmem_xalloc(vm, 50, 0, 0, 0, 0, 49, strat|VM_SLEEP, &p);
1672 1.61 dyoung assert(rc == 0 && p == 0);
1673 1.61 dyoung vmem_xfree(vm, p, 50);
1674 1.61 dyoung rc = vmem_xalloc(vm, 25, 0, 0, 0, 0, 24, strat|VM_SLEEP, &p);
1675 1.61 dyoung assert(rc == 0 && p == 0);
1676 1.61 dyoung rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
1677 1.61 dyoung 0xffffff01, 0xffffffff, strat|VM_SLEEP, &p);
1678 1.61 dyoung assert(rc != 0);
1679 1.61 dyoung rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
1680 1.61 dyoung 0xffffff00, 0xfffffffe, strat|VM_SLEEP, &p);
1681 1.61 dyoung assert(rc != 0);
1682 1.61 dyoung rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
1683 1.61 dyoung 0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
1684 1.61 dyoung assert(rc == 0);
1685 1.60 dyoung vmem_dump(vm, vmem_printf);
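	/*
	 * Main loop: allocate with probability 54/100 (slightly more
	 * than half of those via vmem_xalloc() with random alignment,
	 * phase, nocross and address-range constraints), otherwise
	 * free a randomly chosen live region.  Constrained xalloc
	 * failures are tolerated; the loop ends when a plain
	 * vmem_alloc() fails.
	 */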
1686 1.1 yamt for (;;) {
1687 1.1 yamt struct reg *r;
1688 1.10 yamt int t = rand() % 100;
1689 1.1 yamt
1690 1.10 yamt if (t > 45) {
1691 1.10 yamt /* alloc */
1692 1.1 yamt vmem_size_t sz = rand() % 500 + 1;
1693 1.25 thorpej bool x;
1694 1.10 yamt vmem_size_t align, phase, nocross;
1695 1.10 yamt vmem_addr_t minaddr, maxaddr;
1696 1.10 yamt
1697 1.10 yamt if (t > 70) {
1698 1.26 thorpej x = true;
1699 1.10 yamt /* XXX */
1700 1.10 yamt align = 1 << (rand() % 15);
1701 1.10 yamt phase = rand() % 65536;
1702 1.10 yamt nocross = 1 << (rand() % 15);
1703 1.10 yamt if (align <= phase) {
1704 1.10 yamt phase = 0;
1705 1.10 yamt }
1706 1.19 yamt if (VMEM_CROSS_P(phase, phase + sz - 1,
1707 1.19 yamt nocross)) {
1708 1.10 yamt nocross = 0;
1709 1.10 yamt }
1710 1.60 dyoung do {
1711 1.60 dyoung minaddr = rand() % 50000;
1712 1.60 dyoung maxaddr = rand() % 70000;
1713 1.60 dyoung } while (minaddr > maxaddr);
1714 1.10 yamt printf("=== xalloc %" PRIu64
1715 1.10 yamt " align=%" PRIu64 ", phase=%" PRIu64
1716 1.10 yamt ", nocross=%" PRIu64 ", min=%" PRIu64
1717 1.10 yamt ", max=%" PRIu64 "\n",
1718 1.10 yamt (uint64_t)sz,
1719 1.10 yamt (uint64_t)align,
1720 1.10 yamt (uint64_t)phase,
1721 1.10 yamt (uint64_t)nocross,
1722 1.10 yamt (uint64_t)minaddr,
1723 1.10 yamt (uint64_t)maxaddr);
1724 1.61 dyoung rc = vmem_xalloc(vm, sz, align, phase, nocross,
1725 1.61 dyoung minaddr, maxaddr, strat|VM_SLEEP, &p);
1726 1.10 yamt } else {
1727 1.26 thorpej x = false;
1728 1.10 yamt printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
1729 1.61 dyoung rc = vmem_alloc(vm, sz, strat|VM_SLEEP, &p);
1730 1.10 yamt }
1731 1.1 yamt printf("-> %" PRIu64 "\n", (uint64_t)p);
1732 1.60 dyoung vmem_dump(vm, vmem_printf);
1733 1.61 dyoung if (rc != 0) {
1734 1.10 yamt if (x) {
1735 1.10 yamt continue;
1736 1.10 yamt }
1737 1.1 yamt break;
1738 1.1 yamt }
1739 1.1 yamt nreg++;
1740 1.1 yamt 			reg = realloc(reg, sizeof(*reg) * nreg);
			assert(reg != NULL);	/* test rig: treat allocation failure as fatal */
1741 1.1 yamt 			r = &reg[nreg - 1];
1742 1.1 yamt r->p = p;
1743 1.1 yamt r->sz = sz;
1744 1.10 yamt r->x = x;
1745 1.1 yamt total += sz;
1746 1.1 yamt nalloc++;
1747 1.1 yamt } else if (nreg != 0) {
1748 1.10 yamt /* free */
1749 1.1 yamt 			r = &reg[rand() % nreg];
1750 1.1 yamt printf("=== free %" PRIu64 ", %" PRIu64 "\n",
1751 1.1 yamt (uint64_t)r->p, (uint64_t)r->sz);
1752 1.10 yamt if (r->x) {
1753 1.10 yamt vmem_xfree(vm, r->p, r->sz);
1754 1.10 yamt } else {
1755 1.10 yamt vmem_free(vm, r->p, r->sz);
1756 1.10 yamt }
1757 1.1 yamt total -= r->sz;
1758 1.60 dyoung vmem_dump(vm, vmem_printf);
1759 1.1 yamt *r = reg[nreg - 1];
1760 1.1 yamt nreg--;
1761 1.1 yamt nfree++;
1762 1.1 yamt }
1763 1.1 yamt printf("total=%" PRIu64 "\n", (uint64_t)total);
1764 1.1 yamt }
1765 1.1 yamt fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
1766 1.1 yamt (uint64_t)total, nalloc, nfree);
1767 1.1 yamt exit(EXIT_SUCCESS);
1768 1.1 yamt }
1769 1.55 yamt #endif /* defined(UNITTEST) */
1770