/*	$NetBSD: uvm_km.c,v 1.67 2004/01/29 12:06:02 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.  *** access to kmem_map must be protected
 *		by splvm() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splvm ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.  the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splvm().  pages in these submaps are not assigned to an object.
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for a submap's kernel
 * object, the only parts of the object that can ever be populated are the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
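
/*
 * illustrative sketch (not part of the original sources; the helper
 * name is hypothetical): the offset rule above makes translating a
 * kernel virtual address into its kernel_object offset simple
 * arithmetic:
 *
 *	voff_t
 *	kva_to_kernel_object_offset(vaddr_t kva)
 *	{
 *		KASSERT(kva >= vm_map_min(kernel_map));
 *		return (voff_t)(kva - vm_map_min(kernel_map));
 *	}
 *
 * the allocators in this file perform exactly this computation inline
 * ("offset = kva - vm_map_min(kernel_map)").
 */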

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.67 2004/01/29 12:06:02 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct vm_map_entry	kernel_first_mapent_store;

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
	vaddr_t start, end;
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already
	 * have been allocated kernel space before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (start != base) {
		int error;
		struct uvm_map_args args;

		error = uvm_map_prepare(&kernel_map_store, base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
		    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			struct vm_map_entry *entry = &kernel_first_mapent_store;

			kernel_first_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_FIRST;
			error = uvm_map_enter(&kernel_map_store, &args, &entry);
			KASSERT(entry == NULL);
		}

		if (error)
			panic(
			    "uvm_km_init: could not reserve space for kernel");
	}

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *    by the submap must start
 * => if submap is non-NULL we use that as the submap, otherwise we
 *    alloc a new map
 */
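
/*
 * example (hypothetical boot-time caller; kmem_map_store, kmin, kmax
 * and nkmempages are illustrative names, not definitions made here):
 *
 *	static struct vm_map kmem_map_store;
 *	vaddr_t kmin, kmax;
 *
 *	kmem_map = uvm_km_suballoc(kernel_map, &kmin, &kmax,
 *	    (vsize_t)(nkmempages << PAGE_SHIFT), VM_MAP_INTRSAFE,
 *	    FALSE, &kmem_map_store);
 *
 * with fixed == FALSE the region is placed anywhere in the parent map;
 * on return [kmin, kmax) bounds the VA range the submap now manages.
 */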
struct vm_map *
uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
	struct vm_map *map;
	vaddr_t *min, *max;		/* IN/OUT, OUT */
	vsize_t size;
	int flags;
	boolean_t fixed;
	struct vm_map *submap;
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, flags);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *min, *max, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */

void
uvm_km_pgremove(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(uobj->pgops == &aobj_pager);
	simple_lock(&uobj->vmobjlock);

	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "km_pgrm", 0);
			simple_lock(&uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
		}
	}
	simple_unlock(&uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		simple_unlock(&uvm.swap_data_lock);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 *    maps
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(start, end)
	vaddr_t start, end;
{
	struct vm_page *pg;
	paddr_t pa;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	for (; start < end; start += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), start, &pa)) {
			continue;
		}
		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
		uvm_pagefree(pg);
	}
}


/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => align,prefer - passed on to uvm_map()
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */
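
/*
 * example (illustrative call only): a wired allocation from an
 * interrupt-safe submap might look like
 *
 *	va = uvm_km_kmemalloc1(kmem_map, NULL, size, 0,
 *	    UVM_UNKNOWN_OFFSET, UVM_KMF_NOWAIT);
 *	if (va == 0)
 *		... handle failure; NOWAIT allocations may fail ...
 *
 * passing obj == NULL makes the loop below use pmap_kenter_pa(), i.e.
 * a mapping that never faults, which is what splvm()-protected submaps
 * require.
 */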

vaddr_t
uvm_km_kmemalloc1(map, obj, size, align, prefer, flags)
	struct vm_map *map;
	struct uvm_object *obj;
	vsize_t size;
	vsize_t align;
	voff_t prefer;
	int flags;
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, prefer, align,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT))))
	    != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;
	while (loopsize) {
		if (obj) {
			simple_lock(&obj->vmobjlock);
		}
		pg = uvm_pagealloc(obj, offset, NULL, UVM_PGA_USERESERVE);
		if (__predict_true(pg != NULL)) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		if (obj) {
			simple_unlock(&obj->vmobjlock);
		}

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) && uvm_swapisfull())) {
				/* free everything! */
				uvm_unmap(map, kva, kva + size);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in
		 */

		if (obj == NULL) {
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE);
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
	struct vm_map *map;
	vaddr_t addr;
	vsize_t size;
{
	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void
uvm_km_free_wakeup(map, addr, size)
	struct vm_map *map;
	vaddr_t addr;
	vsize_t size;
{
	struct vm_map_entry *dead_entries;

	vm_map_lock(map);
	uvm_unmap_remove(map, trunc_page(addr), round_page(addr + size),
	    &dead_entries);
	wakeup(map);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */
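
/*
 * example (illustrative call only):
 *
 *	va = uvm_km_alloc1(kernel_map, len, TRUE);
 *	...
 *	uvm_km_free(kernel_map, va, len);
 *
 * uvm_km_alloc1 returns 0 only when VA space in the map is exhausted;
 * when physical pages are scarce it sleeps in uvm_wait() instead of
 * failing.
 */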

vaddr_t
uvm_km_alloc1(map, size, zeroit)
	struct vm_map *map;
	vsize_t size;
	boolean_t zeroit;
{
	vaddr_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM,
	    0)) != 0)) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		KASSERT(uvm_pagelookup(uvm.kernel_object, offset) == NULL);
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (pg == NULL) {
			uvm_wait("km_alloc1w");
			continue;
		}
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(map->pmap);

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);
	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc1: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => the align, prefer and flags parameters are passed on to uvm_map().
 *
 * Note: this function is also the backend for these macros:
 *	uvm_km_valloc
 *	uvm_km_valloc_wait
 *	uvm_km_valloc_prefer
 *	uvm_km_valloc_prefer_wait
 *	uvm_km_valloc_align
 */
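
/*
 * for reference, the compatibility wrappers at the end of this file
 * reduce the macro forms to calls like:
 *
 *	uvm_km_valloc(map, sz)       -> uvm_km_valloc1(map, sz, 0,
 *					UVM_UNKNOWN_OFFSET, UVM_KMF_NOWAIT)
 *	uvm_km_valloc_wait(map, sz)  -> uvm_km_valloc1(map, sz, 0,
 *					UVM_UNKNOWN_OFFSET, 0)
 *
 * so the "try" and "wait" flavors differ only in UVM_KMF_NOWAIT, which
 * controls the tsleep() retry loop below.
 */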

vaddr_t
uvm_km_valloc1(map, size, align, prefer, flags)
	struct vm_map *map;
	vsize_t size;
	vsize_t align;
	voff_t prefer;
	uvm_flag_t flags;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, align=0x%x, prefer=0x%x)",
	    map, size, align, prefer);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	/*
	 * Check if requested size is larger than the map, in which
	 * case we can't succeed.
	 */
	if (size > vm_map_max(map) - vm_map_min(map))
		return (0);

	for (;;) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.  will be demand filled
		 * by kernel_object.
		 */

		if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
		    prefer, align, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, flags))
		    == 0)) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return (kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */
		if ((flags & UVM_KMF_NOWAIT) != 0)
			return (0);

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}

/* Function definitions for binary compatibility */
vaddr_t
uvm_km_kmemalloc(struct vm_map *map, struct uvm_object *obj,
    vsize_t sz, int flags)
{
	return uvm_km_kmemalloc1(map, obj, sz, 0, UVM_UNKNOWN_OFFSET, flags);
}

vaddr_t
uvm_km_valloc(struct vm_map *map, vsize_t sz)
{
	return uvm_km_valloc1(map, sz, 0, UVM_UNKNOWN_OFFSET, UVM_KMF_NOWAIT);
}

vaddr_t
uvm_km_valloc_align(struct vm_map *map, vsize_t sz, vsize_t align)
{
	return uvm_km_valloc1(map, sz, align, UVM_UNKNOWN_OFFSET,
	    UVM_KMF_NOWAIT);
}

vaddr_t
uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t sz, voff_t prefer)
{
	return uvm_km_valloc1(map, sz, 0, prefer, 0);
}

vaddr_t
uvm_km_valloc_wait(struct vm_map *map, vsize_t sz)
{
	return uvm_km_valloc1(map, sz, 0, UVM_UNKNOWN_OFFSET, 0);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */
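
/*
 * example (hypothetical pool backend; the function name is illustrative
 * and not defined in this file):
 *
 *	void *
 *	pool_page_alloc_example(struct pool *pp, int flags)
 *	{
 *		boolean_t waitok = (flags & PR_WAITOK) != 0;
 *
 *		return ((void *)uvm_km_alloc_poolpage1(kmem_map, NULL,
 *		    waitok));
 *	}
 *
 * on pmaps that define PMAP_MAP_POOLPAGE the page is handed out via a
 * direct physical mapping and consumes no kernel_map/kmem_map VA.
 */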

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
	struct vm_map *map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splvm
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splvm in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splvm();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE,
	    waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
	splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
	struct vm_map *map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splvm
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splvm in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splvm();
	uvm_km_free(map, addr, PAGE_SIZE);
	splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}