/*	$NetBSD: uvm_km.c,v 1.33 1999/11/13 00:24:38 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.  *** access to kmem_map must be protected
 *		by splimp() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splimp ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.  the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splimp().  each of these submaps has its own private kernel
 * object (e.g. kmem_object, mb_object).
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for submaps' kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * note that the offsets in kmem_object and mb_object also follow this
 * rule.  this means that the offsets for kmem_object must fall in the
 * range of [vm_map_min(kmem_map) - vm_map_min(kernel_map)] to
 * [vm_map_max(kmem_map) - vm_map_min(kernel_map)], so the offsets
 * in those objects will typically not start at zero.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
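
/*
 * a minimal illustrative sketch of the offset arithmetic described
 * above (the helper name is hypothetical; real callers simply do the
 * subtraction inline, as in uvm_km_kmemalloc() below):
 */
#if 0
static __inline vaddr_t
example_kernel_object_offset(kva)
	vaddr_t kva;
{

	/* offset within kernel_object == kva - vm_map_min(kernel_map) */
	return (kva - vm_map_min(kernel_map));
}

/*
 * with VM_MIN_KERNEL_ADDRESS == 0xf8000000, the page mapped at kva
 * 0xf8235000 lives at offset 0xf8235000 - 0xf8000000 == 0x235000 in
 * kernel_object.
 */
#endif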

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

vm_map_t kernel_map = NULL;

struct vmi_list vmi_list;
simple_lock_data_t vmi_list_slock;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct uvm_object	kmem_object_store;
static struct uvm_object	mb_object_store;

/*
 * All pager operations here are NULL, but the object must have
 * a pager ops vector associated with it; various places assume
 * it to be so.
 */
static struct uvm_pagerops km_pager;

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
	vaddr_t start, end;
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, initialize the interrupt-safe map list.
	 */
	LIST_INIT(&vmi_list);
	simple_lock_init(&vmi_list_slock);

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * kmem_object: for use by the kernel malloc().  Memory is always
	 * wired, and this object (and the kmem_map) can be accessed at
	 * interrupt time.
	 */
	simple_lock_init(&kmem_object_store.vmobjlock);
	kmem_object_store.pgops = &km_pager;
	TAILQ_INIT(&kmem_object_store.memq);
	kmem_object_store.uo_npages = 0;
	/* we are special.  we never die */
	kmem_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
	uvmexp.kmem_object = &kmem_object_store;

	/*
	 * mb_object: for mbuf cluster pages on platforms which use the
	 * mb_map.  Memory is always wired, and this object (and the mb_map)
	 * can be accessed at interrupt time.
	 */
	simple_lock_init(&mb_object_store.vmobjlock);
	mb_object_store.pgops = &km_pager;
	TAILQ_INIT(&mb_object_store.memq);
	mb_object_store.uo_npages = 0;
	/* we are special.  we never die */
	mb_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
	uvmexp.mb_object = &mb_object_store;

	/*
	 * init the map and reserve already allocated kernel space
	 * before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (uvm_map(&kernel_map_store, &base, start - base, NULL,
	    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != KERN_SUCCESS)
		panic("uvm_km_init: could not reserve space for kernel");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *	by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */
struct vm_map *
uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
	struct vm_map *map;
	vaddr_t *min, *max;		/* OUT, OUT */
	vsize_t size;
	int flags;
	boolean_t fixed;
	struct vm_map *submap;
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != KERN_SUCCESS) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, flags);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *min, *max, submap) != KERN_SUCCESS)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}
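
/*
 * a minimal usage sketch (assumed, not taken from this file): roughly
 * how the kernel malloc initialization might carve its interrupt-safe
 * submap out of kernel_map.  "nkmempages" and the function name are
 * hypothetical.
 */
#if 0
static struct vm_map kmem_map_store;	/* static storage for the submap */

void
example_kmeminit()
{
	vaddr_t kmembase, kmemlimit;

	kmem_map = uvm_km_suballoc(kernel_map, &kmembase, &kmemlimit,
	    (vsize_t)(nkmempages << PAGE_SHIFT), VM_MAP_INTRSAFE,
	    FALSE, &kmem_map_store);
}
#endif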

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */

#define UKM_HASH_PENALTY 4      /* a guess */
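/*
 * the traversal heuristic below: walking the object's page list costs
 * roughly one step per resident page (uo_npages), while going "by hash"
 * costs one uvm_pagelookup() per page-sized offset in [start, end),
 * each assumed to be about UKM_HASH_PENALTY times as expensive as a
 * list step.  whichever looks cheaper wins.
 */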

void
uvm_km_pgremove(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	boolean_t by_list;
	struct vm_page *pp, *ppnext;
	vaddr_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);		/* lock object */

#ifdef DIAGNOSTIC
	if (uobj->pgops != &aobj_pager)
		panic("uvm_km_pgremove: object %p not an aobj", uobj);
#endif

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

		/* now do the actual work */
		if (pp->flags & PG_BUSY) {
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		} else {
			/* free the swap slot... */
			uao_dropswap(uobj, curoff >> PAGE_SHIFT);

			/*
			 * ...and free the page; note it may be on the
			 * active or inactive queues.
			 */
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */
	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
		ppnext = pp->listq.tqe_next;
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

		/* now do the actual work */
		if (pp->flags & PG_BUSY) {
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		} else {
			/* free the swap slot... */
			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);

			/*
			 * ...and free the page; note it may be on the
			 * active or inactive queues.
			 */
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */
	}
	simple_unlock(&uobj->vmobjlock);
	return;
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 * objects
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because these objects are
 *    never allowed to "page").
 */

void
uvm_km_pgremove_intrsafe(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	boolean_t by_list;
	struct vm_page *pp, *ppnext;
	vaddr_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);		/* lock object */

#ifdef DIAGNOSTIC
	if (UVM_OBJ_IS_INTRSAFE_OBJECT(uobj) == 0)
		panic("uvm_km_pgremove_intrsafe: object %p not intrsafe", uobj);
#endif

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);
#ifdef DIAGNOSTIC
		if (pp->flags & PG_BUSY)
			panic("uvm_km_pgremove_intrsafe: busy page");
		if (pp->pqflags & PQ_ACTIVE)
			panic("uvm_km_pgremove_intrsafe: active page");
		if (pp->pqflags & PQ_INACTIVE)
			panic("uvm_km_pgremove_intrsafe: inactive page");
#endif

		/* free the page */
		uvm_pagefree(pp);
	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
		ppnext = pp->listq.tqe_next;
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

#ifdef DIAGNOSTIC
		if (pp->flags & PG_BUSY)
			panic("uvm_km_pgremove_intrsafe: busy page");
		if (pp->pqflags & PQ_ACTIVE)
			panic("uvm_km_pgremove_intrsafe: active page");
		if (pp->pqflags & PQ_INACTIVE)
			panic("uvm_km_pgremove_intrsafe: inactive page");
#endif

		/* free the page */
		uvm_pagefree(pp);
	}
	simple_unlock(&uobj->vmobjlock);
	return;
}


/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return NULL even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */

vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
	vm_map_t map;
	struct uvm_object *obj;
	vsize_t size;
	int flags;
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);


	UVMHIST_LOG(maphist," (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);
#ifdef DIAGNOSTIC
	/* sanity check */
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_kmemalloc: invalid map");
#endif

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
	    != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}
	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, " kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&obj->vmobjlock);
		pg = uvm_pagealloc(obj, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&obj->vmobjlock);

		/*
		 * out of memory?
		 */

		if (pg == NULL) {
			if (flags & UVM_KMF_NOWAIT) {
				/* free everything! */
				uvm_unmap(map, kva, kva + size);
				return(0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in: note that we call pmap_enter with the map and
		 * object unlocked in case we are kmem_map/kmem_object
		 * (because if pmap_enter wants to allocate out of kmem_object
		 * it will need to lock it itself!)
		 */
		if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) {
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_ALL);
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
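
/*
 * a minimal sketch (assumed, not taken from this file) of how a
 * malloc-style caller might obtain wired kernel memory here, using the
 * interrupt-safe kmem_map/kmem_object pair described in the overview
 * comment.  "allocsize" and "mflags" are hypothetical.
 */
#if 0
	vaddr_t va;

	va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
	    (vsize_t)round_page(allocsize),
	    (mflags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0);
	if (va == 0)
		return (NULL);		/* out of VA or physical memory */
#endif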

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{

	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void
uvm_km_free_wakeup(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{
	vm_map_entry_t dead_entries;

	vm_map_lock(map);
	(void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size),
	    &dead_entries);
	wakeup(map);
	vm_map_unlock(map);

	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
	vm_map_t map;
	vsize_t size;
	boolean_t zeroit;
{
	vaddr_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_alloc1");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist," kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.  we must be careful about released pages.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		pg = uvm_pagelookup(uvm.kernel_object, offset);

		/*
		 * if we found a page in an unallocated region, it must be
		 * released
		 */
		if (pg) {
			if ((pg->flags & PG_RELEASED) == 0)
				panic("uvm_km_alloc1: non-released page");
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
			    FALSE, "km_alloc", 0);
			continue;   /* retry */
		}

		/* allocate ram */
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (pg == NULL) {
			uvm_wait("km_alloc1w");	/* wait for memory */
			continue;
		}

		/*
		 * map it in; note we're never called with an intrsafe
		 * object, so we always use regular old pmap_enter().
		 */
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);

		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
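
/*
 * note: callers normally reach uvm_km_alloc1() through thin wrapper
 * macros (uvm_km_alloc() and uvm_km_zalloc() in uvm_extern.h); the
 * sketch below shows the assumed shape of those wrappers, not their
 * verbatim definitions.
 */
#if 0
#define	uvm_km_alloc(MAP,SIZE)	uvm_km_alloc1(MAP,SIZE,FALSE)	/* no zeroing */
#define	uvm_km_zalloc(MAP,SIZE)	uvm_km_alloc1(MAP,SIZE,TRUE)	/* zero-filled */
#endif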

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(map, size)
	vm_map_t map;
	vsize_t size;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.  will be demand filled by kernel_object.
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(0);
	}

	UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vaddr_t
uvm_km_valloc_wait(map, size)
	vm_map_t map;
	vsize_t size;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc_wait");
#endif

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	while (1) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.  will be demand filled
		 * by kernel_object.
		 */

		if (uvm_map(map, &kva, size, uvm.kernel_object,
		    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
		    == KERN_SUCCESS) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}
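
/*
 * a small usage sketch (assumed, not taken from this file): exec
 * argument buffers are a typical uvm_km_valloc_wait() client, since the
 * caller can afford to sleep until space in the small exec_map frees up.
 * the exact calls in the exec code may differ from this sketch.
 */
#if 0
	char *argp;

	argp = (char *)uvm_km_valloc_wait(exec_map, NCARGS);
	/* ... copy the argument strings into argp ... */
	uvm_km_free_wakeup(exec_map, (vaddr_t)argp, NCARGS);
#endif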

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif
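
/*
 * for reference, a sketch of what a pmap with a direct-mapped physical
 * segment might provide (the macro bodies here are illustrative only;
 * see the individual ports' <machine/pmap.h> for the real definitions):
 */
#if 0
#define	PMAP_MAP_POOLPAGE(pa)	PHYS_TO_DIRECTMAPPED_VA(pa)	/* pa -> va */
#define	PMAP_UNMAP_POOLPAGE(va)	DIRECTMAPPED_VA_TO_PHYS(va)	/* va -> pa */
#endif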

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
	vm_map_t map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (pg == NULL) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (va == 0)
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splimp
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splimp in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splimp();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
	splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
	vm_map_t map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splimp
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splimp in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splimp();
	uvm_km_free(map, addr, PAGE_SIZE);
	splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}