/*	$NetBSD: uvm_km.c,v 1.46 2001/04/24 04:31:18 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"

/*
 * uvm_km.c: handle kernel memory allocation and management
 */
/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.  *** access to kmem_map must be protected
 *		by splvm() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splvm ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.  the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splvm().  each of these submaps has its own private kernel
 * object (e.g. kmem_object, mb_object).
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for submaps' kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * note that the offsets in kmem_object and mb_object also follow this
 * rule.  this means that the offsets for kmem_object must fall in the
 * range of [vm_map_min(kmem_map) - vm_map_min(kernel_map)] to
 * [vm_map_max(kmem_map) - vm_map_min(kernel_map)], so the offsets
 * in those objects will typically not start at zero.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
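
/*
 * Illustrative sketch (not part of the original source): the offset rule
 * described above, written out as code.  These helpers are hypothetical
 * and kept out of the build with "#if 0"; they only restate the
 * kva <-> kernel_object offset relationship from the example.
 */
#if 0
static voff_t
example_kva_to_kernel_object_offset(vaddr_t kva)
{

	/* offset = kva - start of kernel_map (VM_MIN_KERNEL_ADDRESS) */
	return ((voff_t)(kva - vm_map_min(kernel_map)));
}

static vaddr_t
example_kernel_object_offset_to_kva(voff_t offset)
{

	/* the inverse: offset 0x235000 maps back to 0xf8235000 above */
	return (vm_map_min(kernel_map) + (vaddr_t)offset);
}
#endif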

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

vm_map_t kernel_map = NULL;

struct vmi_list vmi_list;
simple_lock_data_t vmi_list_slock;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct uvm_object	kmem_object_store;
static struct uvm_object	mb_object_store;

/*
 * All pager operations here are NULL, but the object must have
 * a pager ops vector associated with it; various places assume
 * it to be so.
 */
static struct uvm_pagerops	km_pager;

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
	vaddr_t start, end;
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, initialize the interrupt-safe map list.
	 */
	LIST_INIT(&vmi_list);
	simple_lock_init(&vmi_list_slock);

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * kmem_object: for use by the kernel malloc().  Memory is always
	 * wired, and this object (and the kmem_map) can be accessed at
	 * interrupt time.
	 */
	simple_lock_init(&kmem_object_store.vmobjlock);
	kmem_object_store.pgops = &km_pager;
	TAILQ_INIT(&kmem_object_store.memq);
	kmem_object_store.uo_npages = 0;
	/* we are special.  we never die */
	kmem_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
	uvmexp.kmem_object = &kmem_object_store;

	/*
	 * mb_object: for mbuf cluster pages on platforms which use the
	 * mb_map.  Memory is always wired, and this object (and the mb_map)
	 * can be accessed at interrupt time.
	 */
	simple_lock_init(&mb_object_store.vmobjlock);
	mb_object_store.pgops = &km_pager;
	TAILQ_INIT(&mb_object_store.memq);
	mb_object_store.uo_npages = 0;
	/* we are special.  we never die */
	mb_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
	uvmexp.mb_object = &mb_object_store;

	/*
	 * init the map and reserve already allocated kernel space
	 * before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (uvm_map(&kernel_map_store, &base, start - base, NULL,
	    UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0)
		panic("uvm_km_init: could not reserve space for kernel");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *	by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */
struct vm_map *
uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
	struct vm_map *map;
	vaddr_t *min, *max;		/* OUT, OUT */
	vsize_t size;
	int flags;
	boolean_t fixed;
	struct vm_map *submap;
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, flags);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *min, *max, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}
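
/*
 * Illustrative sketch (not part of the original source): roughly how a
 * boot-time caller carves an interrupt-safe submap out of kernel_map.
 * The names "example_map"/"example_size" and the choice of the
 * VM_MAP_INTRSAFE flag are assumptions made for this example only.
 */
#if 0
static struct vm_map *example_map;

static void
example_create_submap(vsize_t example_size)
{
	vaddr_t minaddr, maxaddr;

	/*
	 * let uvm_km_suballoc pick the placement (fixed == FALSE) and
	 * allocate a fresh map structure for us (submap == NULL).
	 */
	example_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    example_size, VM_MAP_INTRSAFE, FALSE, NULL);
}
#endif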

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */

#define UKM_HASH_PENALTY 4	/* a guess */

void
uvm_km_pgremove(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	boolean_t by_list;
	struct vm_page *pp, *ppnext;
	vaddr_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(uobj->pgops == &aobj_pager);
	simple_lock(&uobj->vmobjlock);

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

		/* now do the actual work */
		if (pp->flags & PG_BUSY) {
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		} else {
			/* free the swap slot... */
			uao_dropswap(uobj, curoff >> PAGE_SHIFT);

			/*
			 * ...and free the page; note it may be on the
			 * active or inactive queues.
			 */
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = TAILQ_FIRST(&uobj->memq); pp != NULL; pp = ppnext) {
		ppnext = TAILQ_NEXT(pp, listq);
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

		if (pp->flags & PG_BUSY) {
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		} else {
			/* free the swap slot... */
			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);

			/*
			 * ...and free the page; note it may be on the
			 * active or inactive queues.
			 */
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
	}
	simple_unlock(&uobj->vmobjlock);
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 *    objects
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because these objects are
 *    never allowed to "page").
 */

void
uvm_km_pgremove_intrsafe(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	boolean_t by_list;
	struct vm_page *pp, *ppnext;
	vaddr_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	KASSERT(UVM_OBJ_IS_INTRSAFE_OBJECT(uobj));
	simple_lock(&uobj->vmobjlock);		/* lock object */

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);
		KASSERT((pp->flags & PG_BUSY) == 0);
		KASSERT((pp->pqflags & PQ_ACTIVE) == 0);
		KASSERT((pp->pqflags & PQ_INACTIVE) == 0);
		uvm_pagefree(pp);
	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = TAILQ_FIRST(&uobj->memq); pp != NULL; pp = ppnext) {
		ppnext = TAILQ_NEXT(pp, listq);
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);
		KASSERT((pp->flags & PG_BUSY) == 0);
		KASSERT((pp->pqflags & PQ_ACTIVE) == 0);
		KASSERT((pp->pqflags & PQ_INACTIVE) == 0);
		uvm_pagefree(pp);
	}
	simple_unlock(&uobj->vmobjlock);
}


/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return NULL even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */

vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
	vm_map_t map;
	struct uvm_object *obj;
	vsize_t size;
	int flags;
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
	    != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;
	while (loopsize) {
		simple_lock(&obj->vmobjlock);
		pg = uvm_pagealloc(obj, offset, NULL, 0);
		if (__predict_true(pg != NULL)) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&obj->vmobjlock);

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if (flags & UVM_KMF_NOWAIT) {
				/* free everything! */
				uvm_unmap(map, kva, kva + size);
				return(0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in: note that we call pmap_enter with the map and
		 * object unlocked in case we are kmem_map/kmem_object
		 * (because if pmap_enter wants to allocate out of kmem_object
		 * it will need to lock it itself!)
		 */

		if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) {
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_ALL);
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}
	pmap_update();
	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
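
/*
 * Illustrative sketch (not part of the original source): grabbing and
 * releasing one page of wired kernel memory from the interrupt-safe
 * kmem_map/kmem_object pair described in the overview, bracketed by
 * splvm() as that overview requires.  The helper names are hypothetical.
 */
#if 0
static vaddr_t
example_grab_kmem_page(void)
{
	vaddr_t va;
	int s;

	s = splvm();
	va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object, PAGE_SIZE,
	    UVM_KMF_NOWAIT);		/* may be 0: no VA or no RAM */
	splx(s);
	return (va);
}

static void
example_release_kmem_page(vaddr_t va)
{
	int s;

	s = splvm();
	uvm_km_free(kmem_map, va, PAGE_SIZE);
	splx(s);
}
#endif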

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{
	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void
uvm_km_free_wakeup(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{
	vm_map_entry_t dead_entries;

	vm_map_lock(map);
	uvm_unmap_remove(map, trunc_page(addr), round_page(addr + size),
	    &dead_entries);
	wakeup(map);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
	vm_map_t map;
	vsize_t size;
	boolean_t zeroit;
{
	vaddr_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM,
	    0)) != 0)) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.  we must be careful about released pages.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		pg = uvm_pagelookup(uvm.kernel_object, offset);

		/*
		 * if we found a page in an unallocated region, it must be
		 * released
		 */
		if (pg) {
			if ((pg->flags & PG_RELEASED) == 0)
				panic("uvm_km_alloc1: non-released page");
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
			    FALSE, "km_alloc", 0);
			continue;	/* retry */
		}

		/* allocate ram */
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (__predict_false(pg == NULL)) {
			uvm_wait("km_alloc1w");	/* wait for memory */
			continue;
		}

		/*
		 * map it in; note we're never called with an intrsafe
		 * object, so we always use regular old pmap_enter().
		 */
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);

		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	pmap_update();

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
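
/*
 * Illustrative sketch (not part of the original source): a sleeping
 * allocation of wired, zeroed kernel memory paired with its release.
 * "example_size" and the helper name are placeholders.
 */
#if 0
static void
example_wired_buffer(vsize_t example_size)
{
	vaddr_t va;

	va = uvm_km_alloc1(kernel_map, example_size, TRUE);	/* zeroit */
	if (va == 0)
		return;			/* no free VA in kernel_map */

	/* ... use the wired, zero-filled memory at va ... */

	uvm_km_free(kernel_map, va, example_size);
}
#endif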

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(map, size)
	vm_map_t map;
	vsize_t size;
{
	return(uvm_km_valloc_align(map, size, 0));
}

vaddr_t
uvm_km_valloc_align(map, size, align)
	vm_map_t map;
	vsize_t size;
	vsize_t align;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.  will be demand filled by kernel_object.
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM,
	    0)) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(0);
	}

	UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
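
/*
 * Illustrative sketch (not part of the original source): reserving
 * pageable, demand-faulted kernel VA backed by kernel_object.  Pages
 * are only allocated when the region is first touched.  The helper
 * name and "example_size" are placeholders.
 */
#if 0
static vaddr_t
example_pageable_region(vsize_t example_size)
{
	vaddr_t va;

	va = uvm_km_valloc(kernel_map, example_size);
	if (va == 0)
		return (0);		/* no room in kernel_map */

	/* the first store to this range faults in a zero-filled page */
	*(char *)va = 0;
	return (va);
}
#endif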

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vaddr_t
uvm_km_valloc_prefer_wait(map, size, prefer)
	vm_map_t map;
	vsize_t size;
	voff_t prefer;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc_prefer_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	while (1) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.  will be demand filled
		 * by kernel_object.
		 */

		if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
		    prefer, 0, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
		    == 0)) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}

vaddr_t
uvm_km_valloc_wait(map, size)
	vm_map_t map;
	vsize_t size;
{
	return uvm_km_valloc_prefer_wait(map, size, UVM_UNKNOWN_OFFSET);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
	vm_map_t map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splvm
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splvm in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splvm();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
	splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
	vm_map_t map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splvm
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splvm in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splvm();
	uvm_km_free(map, addr, PAGE_SIZE);
	splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}
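
/*
 * Illustrative sketch (not part of the original source): how a pool
 * backend might pair the two routines above.  Passing kmem_map and
 * uvmexp.kmem_object mirrors the overview's description of the malloc
 * submap; both arguments are ignored when PMAP_MAP_POOLPAGE exists.
 * The helper names are hypothetical.
 */
#if 0
static vaddr_t
example_pool_page_alloc(void)
{

	/* non-blocking; returns 0 when no page is available right now */
	return (uvm_km_alloc_poolpage1(kmem_map, uvmexp.kmem_object, FALSE));
}

static void
example_pool_page_free(vaddr_t va)
{

	uvm_km_free_poolpage1(kmem_map, va);
}
#endif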