/*	$NetBSD: uvm_km.c,v 1.93.4.4 2007/09/18 15:28:13 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.
 *   mb_map => memory for large mbufs,
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for submaps' kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
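
/*
 * Illustrative sketch (not compiled): the va <-> offset relation described
 * above.  "km_va_to_offset" is a hypothetical helper invented for this
 * example; the real code below simply computes the subtraction inline.
 */
#if 0
static voff_t
km_va_to_offset(vaddr_t va)
{

	KASSERT(va >= vm_map_min(kernel_map));
	KASSERT(va < vm_map_max(kernel_map));
	/* e.g. with VM_MIN_KERNEL_ADDRESS == 0xf8000000,
	   0xf8235000 maps to offset 0x235000 */
	return (voff_t)(va - vm_map_min(kernel_map));
}
#endif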

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.93.4.4 2007/09/18 15:28:13 ad Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map_kernel	kernel_map_store;
static struct vm_map_entry	kernel_first_mapent_store;

#if !defined(PMAP_MAP_POOLPAGE)

/*
 * kva cache
 *
 * XXX maybe it's better to do this at the uvm_map layer.
 */

#define	KM_VACACHE_SIZE	(32 * PAGE_SIZE) /* XXX tune */

static void *km_vacache_alloc(struct pool *, int);
static void km_vacache_free(struct pool *, void *);
static void km_vacache_init(struct vm_map *, const char *, size_t);

/* XXX */
#define	KM_VACACHE_POOL_TO_MAP(pp) \
	((struct vm_map *)((char *)(pp) - \
	    offsetof(struct vm_map_kernel, vmk_vacache)))

static void *
km_vacache_alloc(struct pool *pp, int flags)
{
	vaddr_t va;
	size_t size;
	struct vm_map *map;
	size = pp->pr_alloc->pa_pagesz;

	map = KM_VACACHE_POOL_TO_MAP(pp);

	va = vm_map_min(map); /* hint */
	if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
	    ((flags & PR_WAITOK) ? UVM_FLAG_WAITVA : UVM_FLAG_NOWAIT))))
		return NULL;

	return (void *)va;
}

static void
km_vacache_free(struct pool *pp, void *v)
{
	vaddr_t va = (vaddr_t)v;
	size_t size = pp->pr_alloc->pa_pagesz;
	struct vm_map *map;

	map = KM_VACACHE_POOL_TO_MAP(pp);
	uvm_unmap1(map, va, va + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}

/*
 * km_vacache_init: initialize kva cache.
 */

static void
km_vacache_init(struct vm_map *map, const char *name, size_t size)
{
	struct vm_map_kernel *vmk;
	struct pool *pp;
	struct pool_allocator *pa;
	int ipl;

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */

	vmk = vm_map_to_kernel(map);
	pp = &vmk->vmk_vacache;
	pa = &vmk->vmk_vacache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = km_vacache_alloc;
	pa->pa_free = km_vacache_free;
	pa->pa_pagesz = (unsigned int)size;
	pa->pa_backingmap = map;
	pa->pa_backingmapptr = NULL;

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		ipl = IPL_VM;
	else
		ipl = IPL_NONE;

	pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa,
	    ipl);
}

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	map->flags |= VM_MAP_VACACHE;
	if (size == 0)
		size = KM_VACACHE_SIZE;
	km_vacache_init(map, name, size);
}

#else /* !defined(PMAP_MAP_POOLPAGE) */

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	/* nothing */
}

#endif /* !defined(PMAP_MAP_POOLPAGE) */
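
/*
 * Illustrative sketch (not compiled): a kernel submap opting into the kva
 * cache at creation time.  "foo_map" is a hypothetical submap invented for
 * this example; kernel_map itself does the same thing in uvm_km_init()
 * below.  passing a size of 0 selects the KM_VACACHE_SIZE default.
 */
#if 0
	uvm_km_vacache_init(foo_map, "kvafoo", 0);	/* 0 => KM_VACACHE_SIZE */
#endif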

void
uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags)
{
	struct vm_map_kernel *vmk = vm_map_to_kernel(map);

	callback_run_roundrobin(&vmk->vmk_reclaim_callback, NULL);
}

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [vmin -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(vaddr_t start, vaddr_t end)
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already
	 * have been allocated kernel space before installing.
	 */

	uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.vmk_map.pmap = pmap_kernel();
	if (start != base) {
		int error;
		struct uvm_map_args args;

		error = uvm_map_prepare(&kernel_map_store.vmk_map,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
		    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_first_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_FIRST;
			error = uvm_map_enter(&kernel_map_store.vmk_map, &args,
			    &kernel_first_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_init: could not reserve space for kernel");
	}

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store.vmk_map;
	uvm_km_vacache_init(kernel_map, "kvakernel", 0);
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *	by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    struct vm_map_kernel *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */
	size += uvm_mapent_overhead(size, flags);

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (vmin is filled in by uvm_map)
	 */

	*vmax = *vmin + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	}
	uvm_map_setup_kernel(submap, *vmin, *vmax, flags);
	submap->vmk_map.pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *vmin, *vmax, &submap->vmk_map) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(&submap->vmk_map);
}
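
/*
 * Illustrative sketch (not compiled): carving a hypothetical "foo_map"
 * submap out of kernel_map at boot, in the style of mb_map or exec_map.
 * "foo_map", "foo_map_store", "foo_map_init" and FOO_MAP_SIZE are
 * assumptions invented for this example.  uvm_km_suballoc() panics on
 * failure, so no error check is needed here.
 */
#if 0
static struct vm_map_kernel foo_map_store;
struct vm_map *foo_map;

void
foo_map_init(void)
{
	vaddr_t vmin, vmax;

	/* let uvm_km_suballoc pick the placement (fixed == false) */
	foo_map = uvm_km_suballoc(kernel_map, &vmin, &vmax, FOO_MAP_SIZE,
	    VM_MAP_PAGEABLE, false, &foo_map_store);
}
#endif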

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */

void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
	struct uvm_object * const uobj = uvm_kernel_object;
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
	KASSERT(startva < endva);
	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);

	mutex_enter(&uobj->vmobjlock);

	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "km_pgrm", 0);
			mutex_enter(&uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
		}
	}
	mutex_exit(&uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non-object-backed
 * regions.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	paddr_t pa;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= start);
	KASSERT(start < end);
	KASSERT(end <= VM_MAX_KERNEL_ADDRESS);

	for (; start < end; start += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), start, &pa)) {
			continue;
		}
		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
		uvm_pagefree(pg);
	}
}

#if defined(DEBUG)
void
uvm_km_check_empty(vaddr_t start, vaddr_t end, bool intrsafe)
{
	vaddr_t va;
	paddr_t pa;

	KDASSERT(VM_MIN_KERNEL_ADDRESS <= start);
	KDASSERT(start < end);
	KDASSERT(end <= VM_MAX_KERNEL_ADDRESS);

	for (va = start; va < end; va += PAGE_SIZE) {
		if (pmap_extract(pmap_kernel(), va, &pa)) {
			panic("uvm_km_check_empty: va %p has pa 0x%llx",
			    (void *)va, (long long)pa);
		}
		if (!intrsafe) {
			const struct vm_page *pg;

			mutex_enter(&uvm_kernel_object->vmobjlock);
			pg = uvm_pagelookup(uvm_kernel_object,
			    va - vm_map_min(kernel_map));
			mutex_exit(&uvm_kernel_object->vmobjlock);
			if (pg) {
				panic("uvm_km_check_empty: "
				    "has page hashed at %p", (const void *)va);
			}
		}
	}
}
#endif /* defined(DEBUG) */

/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	struct uvm_object *obj;
	int pgaflags;
	vm_prot_t prot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);

	/*
	 * setup for call
	 */

	kva = vm_map_min(map);	/* hint */
	size = round_page(size);
	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA))
	    | UVM_FLAG_QUANTUM)) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;

	pgaflags = UVM_PGA_USERESERVE;
	if (flags & UVM_KMF_ZERO)
		pgaflags |= UVM_PGA_ZERO;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (flags & UVM_KMF_EXEC)
		prot |= VM_PROT_EXECUTE;
	while (loopsize) {
		KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL));

		pg = uvm_pagealloc(NULL, offset, NULL, pgaflags);

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
				/* free everything! */
				uvm_km_free(map, kva, size,
				    flags & UVM_KMF_TYPEMASK);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in
		 */

		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), prot);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{

	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((addr & PAGE_MASK) == 0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	if (flags & UVM_KMF_PAGEABLE) {
		uvm_km_pgremove(addr, addr + size);
		pmap_remove(pmap_kernel(), addr, addr + size);
	} else if (flags & UVM_KMF_WIRED) {
		uvm_km_pgremove_intrsafe(addr, addr + size);
		pmap_kremove(addr, size);
	}

	uvm_unmap1(map, addr, addr + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}
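
/*
 * Illustrative sketch (not compiled): allocating and releasing a wired,
 * zeroed buffer.  "example_alloc_free" and "len" are assumptions invented
 * for this example.  UVM_KMF_CANFAIL makes uvm_km_alloc() return 0 instead
 * of sleeping forever when memory cannot be reclaimed; per the NOTE above,
 * the caller must be prepared for a 0 return in any case.  the type flag
 * passed to uvm_km_free() must match the one used for allocation.
 */
#if 0
int
example_alloc_free(vsize_t len)
{
	vaddr_t va;

	va = uvm_km_alloc(kernel_map, round_page(len), 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
	if (va == 0)
		return ENOMEM;
	/* ... use the wired, zero-filled buffer at va ... */
	uvm_km_free(kernel_map, va, round_page(len), UVM_KMF_WIRED);
	return 0;
}
#endif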
663
664 /* Sanity; must specify both or none. */
665 #if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
666 (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
667 #error Must specify MAP and UNMAP together.
668 #endif
669
670 /*
671 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
672 *
673 * => if the pmap specifies an alternate mapping method, we use it.
674 */
675
676 /* ARGSUSED */
677 vaddr_t
678 uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
679 {
680 #if defined(PMAP_MAP_POOLPAGE)
681 return uvm_km_alloc_poolpage(map, waitok);
682 #else
683 struct vm_page *pg;
684 struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache;
685 vaddr_t va;
686
687 if ((map->flags & VM_MAP_VACACHE) == 0)
688 return uvm_km_alloc_poolpage(map, waitok);
689
690 va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT);
691 if (va == 0)
692 return 0;
693 KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
694 again:
695 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
696 if (__predict_false(pg == NULL)) {
697 if (waitok) {
698 uvm_wait("plpg");
699 goto again;
700 } else {
701 pool_put(pp, (void *)va);
702 return 0;
703 }
704 }
705 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
706 pmap_update(pmap_kernel());
707
708 return va;
709 #endif /* PMAP_MAP_POOLPAGE */
710 }
711
712 vaddr_t
713 uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
714 {
715 #if defined(PMAP_MAP_POOLPAGE)
716 struct vm_page *pg;
717 vaddr_t va;
718
719 again:
720 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
721 if (__predict_false(pg == NULL)) {
722 if (waitok) {
723 uvm_wait("plpg");
724 goto again;
725 } else
726 return (0);
727 }
728 va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
729 if (__predict_false(va == 0))
730 uvm_pagefree(pg);
731 return (va);
732 #else
733 vaddr_t va;
734
735 va = uvm_km_alloc(map, PAGE_SIZE, 0,
736 (waitok ? 0 : UVM_KMF_NOWAIT) | UVM_KMF_WIRED);
737 return (va);
738 #endif /* PMAP_MAP_POOLPAGE */
739 }
740
741 /*
742 * uvm_km_free_poolpage: free a previously allocated pool page
743 *
744 * => if the pmap specifies an alternate unmapping method, we use it.
745 */
746
747 /* ARGSUSED */
748 void
749 uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t addr)
750 {
751 #if defined(PMAP_UNMAP_POOLPAGE)
752 uvm_km_free_poolpage(map, addr);
753 #else
754 struct pool *pp;
755
756 if ((map->flags & VM_MAP_VACACHE) == 0) {
757 uvm_km_free_poolpage(map, addr);
758 return;
759 }
760
761 KASSERT(pmap_extract(pmap_kernel(), addr, NULL));
762 uvm_km_pgremove_intrsafe(addr, addr + PAGE_SIZE);
763 pmap_kremove(addr, PAGE_SIZE);
764 #if defined(DEBUG)
765 pmap_update(pmap_kernel());
766 #endif
767 KASSERT(!pmap_extract(pmap_kernel(), addr, NULL));
768 pp = &vm_map_to_kernel(map)->vmk_vacache;
769 pool_put(pp, (void *)addr);
770 #endif
771 }
772
773 /* ARGSUSED */
774 void
775 uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
776 {
777 #if defined(PMAP_UNMAP_POOLPAGE)
778 paddr_t pa;
779
780 pa = PMAP_UNMAP_POOLPAGE(addr);
781 uvm_pagefree(PHYS_TO_VM_PAGE(pa));
782 #else
783 uvm_km_free(map, addr, PAGE_SIZE, UVM_KMF_WIRED);
784 #endif /* PMAP_UNMAP_POOLPAGE */
785 }
786
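
/*
 * Illustrative sketch (not compiled): a hypothetical pool_allocator backed
 * by the poolpage routines above, in the style of the kernel's standard
 * pool page allocator.  "example_allocator" and both callbacks are
 * assumptions invented for this example; they assume the global kmem_map
 * submap mentioned in the overview at the top of this file.
 */
#if 0
static void *
example_page_alloc(struct pool *pp, int flags)
{

	/* PR_WAITOK from the pool maps onto the waitok argument */
	return (void *)uvm_km_alloc_poolpage_cache(kmem_map,
	    (flags & PR_WAITOK) != 0);
}

static void
example_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t)v);
}

static struct pool_allocator example_allocator = {
	.pa_alloc = example_page_alloc,
	.pa_free = example_page_free,
	.pa_pagesz = PAGE_SIZE,
};
#endif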