/*	$NetBSD: uvm_km.c,v 1.5 1998/02/08 06:15:59 thorpej Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

vm_map_t kernel_map = NULL;

/*
 * local functions
 */

static int uvm_km_get __P((struct uvm_object *, vm_offset_t,
    vm_page_t *, int *, int, vm_prot_t, int, int));

/*
 * local data structures
 */

static struct vm_map kernel_map_store;
static struct uvm_object kmem_object_store;
static struct uvm_object mb_object_store;

static struct uvm_pagerops km_pager = {
    NULL,       /* init */
    NULL,       /* attach */
    NULL,       /* reference */
    NULL,       /* detach */
    NULL,       /* fault */
    NULL,       /* flush */
    uvm_km_get, /* get */
    /* ... rest are NULL */
};

/*
 * uvm_km_get: pager get function for kernel objects
 *
 * => currently we do not support pageout to the swap area, so this
 *    pager is very simple.  eventually we may want an anonymous
 *    object pager which will do paging.
 */
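
/*
 * note: uvm_km_get is not called directly; the fault path reaches it
 * through the pager ops table set up above.  a rough sketch of such a
 * call (the member name "pgo_get" matches the uvm pagerops layout, but
 * the exact arguments shown are illustrative assumptions, not copied
 * from the fault code):
 *
 *      npages = 1;
 *      pages[0] = NULL;
 *      result = uobj->pgops->pgo_get(uobj, offset, pages, &npages, 0,
 *          VM_PROT_READ|VM_PROT_WRITE, UVM_ADV_RANDOM, PGO_LOCKED);
 */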

static int
uvm_km_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
    struct uvm_object *uobj;
    vm_offset_t offset;
    struct vm_page **pps;
    int *npagesp;
    int centeridx, advice, flags;
    vm_prot_t access_type;
{
    vm_offset_t current_offset;
    vm_page_t ptmp;
    int lcv, gotpages, maxpages;
    boolean_t done;
    UVMHIST_FUNC("uvm_km_get"); UVMHIST_CALLED(maphist);

    UVMHIST_LOG(maphist, "flags=%d", flags, 0, 0, 0);

    /*
     * get number of pages
     */

    maxpages = *npagesp;

    /*
     * step 1: handle the case where the fault data structures are locked.
     */

    if (flags & PGO_LOCKED) {

        /*
         * step 1a: get pages that are already resident.  only do this
         * if the data structures are locked (i.e. the first time through).
         */

        done = TRUE;    /* be optimistic */
        gotpages = 0;   /* # of pages we got so far */

        for (lcv = 0, current_offset = offset;
            lcv < maxpages; lcv++, current_offset += PAGE_SIZE) {

            /* do we care about this page?  if not, skip it */
            if (pps[lcv] == PGO_DONTCARE)
                continue;

            /* lookup page */
            ptmp = uvm_pagelookup(uobj, current_offset);

            /* null?  attempt to allocate the page */
            if (ptmp == NULL) {
                ptmp = uvm_pagealloc(uobj, current_offset, NULL);
                if (ptmp) {
                    ptmp->flags &= ~(PG_BUSY|PG_FAKE);  /* new page */
                    UVM_PAGE_OWN(ptmp, NULL);
                    ptmp->wire_count = 1;   /* XXX: prevents pageout attempts */
                    uvm_pagezero(ptmp);
                }
            }

            /* to be useful must get a non-busy, non-released page */
            if (ptmp == NULL || (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
                if (lcv == centeridx || (flags & PGO_ALLPAGES) != 0)
                    done = FALSE;   /* need to do a wait or I/O! */
                continue;
            }

            /* useful page: busy/lock it and plug it in our result array */
            ptmp->flags |= PG_BUSY;     /* caller must un-busy this page */
            UVM_PAGE_OWN(ptmp, "uvm_km_get1");
            pps[lcv] = ptmp;
            gotpages++;

        }   /* "for" lcv loop */

        /*
         * step 1b: now we've either done everything we need to, or we
         * need to unlock and do some waiting or I/O.
         */

        UVMHIST_LOG(maphist, "<- done (done=%d)", done, 0, 0, 0);

        *npagesp = gotpages;
        if (done)
            return(VM_PAGER_OK);        /* bingo! */
        else
            return(VM_PAGER_UNLOCK);    /* EEK!   Need to unlock and I/O */
    }

    /*
     * step 2: get non-resident or busy pages.
     * object is locked.   data structures are unlocked.
     */

    for (lcv = 0, current_offset = offset;
        lcv < maxpages; lcv++, current_offset += PAGE_SIZE) {

        /* skip over pages we've already gotten or don't want */
        /* skip over pages we don't _have_ to get */
        if (pps[lcv] != NULL ||
            (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
            continue;

        /*
         * we have yet to locate the current page (pps[lcv]).  we first
         * look for a page that is already at the current offset.  if we
         * find a page, we check to see if it is busy or released.  if that
         * is the case, then we sleep on the page until it is no longer busy
         * or released and repeat the lookup.  if the page we found is
         * neither busy nor released, then we busy it (so we own it) and
         * plug it into pps[lcv].  this 'break's the following while loop
         * and indicates we are ready to move on to the next page in the
         * "lcv" loop above.
         *
         * if we exit the while loop with pps[lcv] still set to NULL, then
         * it means that we allocated a new busy/fake/clean page ptmp in the
         * object and we need to do I/O to fill in the data.
         */

        while (pps[lcv] == NULL) {      /* top of "pps" while loop */

            /* look for a current page */
            ptmp = uvm_pagelookup(uobj, current_offset);

            /* nope?   allocate one now (if we can) */
            if (ptmp == NULL) {

                ptmp = uvm_pagealloc(uobj, current_offset, NULL); /* alloc */

                /* out of RAM? */
                if (ptmp == NULL) {
                    simple_unlock(&uobj->vmobjlock);
                    uvm_wait("kmgetwait1");
                    simple_lock(&uobj->vmobjlock);
                    continue;   /* goto top of pps while loop */
                }

                /*
                 * got new page ready for I/O.  break pps while loop.
                 * pps[lcv] is still NULL.
                 */
                break;
            }

            /* page is there, see if we need to wait on it */
            if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
                ptmp->flags |= PG_WANTED;
                UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0, "uvn_get", 0);
                simple_lock(&uobj->vmobjlock);
                continue;   /* goto top of pps while loop */
            }

            /*
             * if we get here then the page has become resident and unbusy
             * between steps 1 and 2.  we busy it now (so we own it) and set
             * pps[lcv] (so that we exit the while loop).
             */
            ptmp->flags |= PG_BUSY;     /* we own it, caller must un-busy */
            UVM_PAGE_OWN(ptmp, "uvm_km_get2");
            pps[lcv] = ptmp;
        }

        /*
         * if we own a valid page at the correct offset, pps[lcv] will
         * point to it.   nothing more to do except go to the next page.
         */

        if (pps[lcv])
            continue;   /* next lcv */

        /*
         * we have a "fake/busy/clean" page that we just allocated.
         * do the needed "i/o" (in this case that means zero it).
         */

        uvm_pagezero(ptmp);
        ptmp->flags &= ~(PG_FAKE);
        ptmp->wire_count = 1;   /* XXX: prevents pageout attempts */
        pps[lcv] = ptmp;

    }   /* lcv loop */

    /*
     * finally, unlock object and return.
     */

    simple_unlock(&uobj->vmobjlock);
    UVMHIST_LOG(maphist, "<- done (OK)", 0, 0, 0, 0);
    return(VM_PAGER_OK);
}

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */
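
/*
 * usage sketch (illustrative only, not taken from any particular port):
 * the machine-dependent startup code calls this once, after the pmap has
 * been bootstrapped, passing the first free and last usable kernel
 * virtual addresses:
 *
 *      uvm_km_init(kernel_virtual_start, kernel_virtual_end);
 *
 * the two names above are hypothetical; each port computes these values
 * in its own way.
 */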

void
uvm_km_init(start, end)
    vm_offset_t start, end;
{
    vm_offset_t base = VM_MIN_KERNEL_ADDRESS;

    /*
     * first, init kernel memory objects.
     */

    /* kernel_object: for pageable anonymous kernel memory */
    uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
        VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

    /* kmem_object: for malloc'd memory (always wired) */
    simple_lock_init(&kmem_object_store.vmobjlock);
    kmem_object_store.pgops = &km_pager;
    TAILQ_INIT(&kmem_object_store.memq);
    kmem_object_store.uo_npages = 0;
    kmem_object_store.uo_refs = UVM_OBJ_KERN;
    /* we are special.  we never die */
    uvmexp.kmem_object = &kmem_object_store;

    /* mb_object: for mbuf memory (always wired) */
    simple_lock_init(&mb_object_store.vmobjlock);
    mb_object_store.pgops = &km_pager;
    TAILQ_INIT(&mb_object_store.memq);
    mb_object_store.uo_npages = 0;
    mb_object_store.uo_refs = UVM_OBJ_KERN;
    /* we are special.  we never die */
    uvmexp.mb_object = &mb_object_store;

    /*
     * init the map and reserve kernel space before installing.
     */

    uvm_map_setup(&kernel_map_store, base, end, FALSE);
    kernel_map_store.pmap = pmap_kernel();
    if (uvm_map(&kernel_map_store, &base, start - base, NULL,
        UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
        UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != KERN_SUCCESS)
        panic("uvm_km_init: could not reserve space for kernel");

    /*
     * install!
     */

    kernel_map = &kernel_map_store;
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *    by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *    alloc a new map
 */
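
/*
 * usage sketch (hypothetical caller; the "mb_map" variable and the
 * MB_MAP_SIZE constant are illustrative, not defined in this file):
 *
 *      vm_offset_t minaddr, maxaddr;
 *
 *      mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
 *          MB_MAP_SIZE, FALSE, FALSE, NULL);
 *
 * on return, [minaddr, maxaddr) is the kernel VA range covered by the
 * new submap.
 */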

struct vm_map *
uvm_km_suballoc(map, min, max, size, pageable, fixed, submap)
    struct vm_map *map;
    vm_offset_t *min, *max;     /* OUT, OUT */
    vm_size_t size;
    boolean_t pageable;
    boolean_t fixed;
    struct vm_map *submap;
{
    int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

    size = round_page(size);    /* round up to pagesize */

    /*
     * first allocate a blank spot in the parent map
     */

    if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET,
        UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
        UVM_ADV_RANDOM, mapflags)) != KERN_SUCCESS) {
        panic("uvm_km_suballoc: unable to allocate space in parent map");
    }

    /*
     * set VM bounds (min is filled in by uvm_map)
     */

    *max = *min + size;

    /*
     * add references to pmap and create or init the submap
     */

    pmap_reference(vm_map_pmap(map));
    if (submap == NULL) {
        submap = uvm_map_create(vm_map_pmap(map), *min, *max, pageable);
        if (submap == NULL)
            panic("uvm_km_suballoc: unable to create submap");
    } else {
        uvm_map_setup(submap, *min, *max, pageable);
        submap->pmap = vm_map_pmap(map);
    }

    /*
     * now let uvm_map_submap plug it in...
     */

    if (uvm_map_submap(map, *min, *max, submap) != KERN_SUCCESS)
        panic("uvm_km_suballoc: submap allocation failed");

    return(submap);
}

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 */

#define UKM_HASH_PENALTY 4      /* a guess */

void
uvm_km_pgremove(uobj, start, end)
    struct uvm_object *uobj;
    vm_offset_t start, end;
{
    boolean_t by_list, is_aobj;
    struct vm_page *pp, *ppnext;
    vm_offset_t curoff;
    UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

    simple_lock(&uobj->vmobjlock);      /* lock object */

    /* is uobj an aobj? */
    is_aobj = uobj->pgops == &aobj_pager;

    /* choose cheapest traversal */
    by_list = (uobj->uo_npages <=
        ((end - start) / PAGE_SIZE) * UKM_HASH_PENALTY);

    if (by_list)
        goto loop_by_list;

    /* by hash */

    for (curoff = start; curoff < end; curoff += PAGE_SIZE) {
        pp = uvm_pagelookup(uobj, curoff);
        if (pp == NULL)
            continue;

        UVMHIST_LOG(maphist, " page 0x%x, busy=%d", pp,
            pp->flags & PG_BUSY, 0, 0);
        /* now do the actual work */
        if (pp->flags & PG_BUSY)
            /* owner must check for this when done */
            pp->flags |= PG_RELEASED;
        else {
            pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);

            /*
             * if this kernel object is an aobj, free the swap slot.
             */
            if (is_aobj) {
                int slot = uao_set_swslot(uobj, curoff / PAGE_SIZE, 0);

                if (slot)
                    uvm_swap_free(slot, 1);
            }

            uvm_lock_pageq();
            uvm_pagefree(pp);
            uvm_unlock_pageq();
        }
        /* done */
    }
    simple_unlock(&uobj->vmobjlock);
    return;

loop_by_list:

    for (pp = uobj->memq.tqh_first; pp != NULL; pp = ppnext) {

        ppnext = pp->listq.tqe_next;
        if (pp->offset < start || pp->offset >= end) {
            continue;
        }

        UVMHIST_LOG(maphist, " page 0x%x, busy=%d", pp,
            pp->flags & PG_BUSY, 0, 0);
        /* now do the actual work */
        if (pp->flags & PG_BUSY)
            /* owner must check for this when done */
            pp->flags |= PG_RELEASED;
        else {
            pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);

            /*
             * if this kernel object is an aobj, free the swap slot.
             */
            if (is_aobj) {
                int slot = uao_set_swslot(uobj, pp->offset / PAGE_SIZE, 0);

                if (slot)
                    uvm_swap_free(slot, 1);
            }

            uvm_lock_pageq();
            uvm_pagefree(pp);
            uvm_unlock_pageq();
        }
        /* done */
    }
    simple_unlock(&uobj->vmobjlock);
    return;
}

/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *    free VM space in the map... caller should be prepared to handle
 *    this case.
 * => we return the KVA of the memory allocated
 * => flags: UVM_KMF_NOWAIT (fail rather than sleep), UVM_KMF_VALLOC
 *    (just allocate VA, no backing pages), UVM_KMF_TRYLOCK (fail if we
 *    can't lock the map)
 */
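
/*
 * usage sketch (hypothetical; a malloc-style caller allocating wired
 * pages out of a kmem submap, with "kmem_map" and "len" illustrative):
 *
 *      vm_offset_t va;
 *
 *      va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
 *          (vm_size_t)round_page(len), UVM_KMF_NOWAIT);
 *      if (va == 0)
 *          ... caller must handle the allocation failure ...
 */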

vm_offset_t
uvm_km_kmemalloc(map, obj, size, flags)
    vm_map_t map;
    struct uvm_object *obj;
    vm_size_t size;
    int flags;
{
    vm_offset_t kva, loopva;
    vm_offset_t offset;
    struct vm_page *pg;
    UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

    UVMHIST_LOG(maphist, " (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
        map, obj, size, flags);
#ifdef DIAGNOSTIC
    /* sanity check */
    if (vm_map_pmap(map) != pmap_kernel())
        panic("uvm_km_kmemalloc: invalid map");
#endif

    /*
     * setup for call
     */

    size = round_page(size);
    kva = vm_map_min(map);      /* hint */

    /*
     * allocate some virtual space
     */

    if (uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
        UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
        UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != KERN_SUCCESS) {
        UVMHIST_LOG(maphist, "<- done (no VM)", 0, 0, 0, 0);
        return(0);
    }

    /*
     * if all we wanted was VA, return now
     */

    if (flags & UVM_KMF_VALLOC) {
        UVMHIST_LOG(maphist, "<- done valloc (kva=0x%x)", kva, 0, 0, 0);
        return(kva);
    }

    /*
     * recover object offset from virtual address
     */

    offset = kva - vm_map_min(map);
    UVMHIST_LOG(maphist, " kva=0x%x, offset=0x%x", kva, offset, 0, 0);

    /*
     * now allocate and map in the memory... note that we are the only ones
     * who should ever get a handle on this area of VM.
     */

    loopva = kva;
    while (size) {
        simple_lock(&obj->vmobjlock);
        pg = uvm_pagealloc(obj, offset, NULL);
        if (pg) {
            pg->flags &= ~PG_BUSY;      /* new page */
            UVM_PAGE_OWN(pg, NULL);

            pg->wire_count = 1;
            uvmexp.wired++;
        }
        simple_unlock(&obj->vmobjlock);

        /*
         * out of memory?
         */

        if (pg == NULL) {
            if (flags & UVM_KMF_NOWAIT) {
                /* free everything! */
                uvm_unmap(map, kva, kva + size, 0);
                return(0);
            } else {
                uvm_wait("km_getwait2");        /* sleep here */
                continue;
            }
        }

        /*
         * map it in: note that we call pmap_enter with the map and
         * object unlocked in case we are kmem_map/kmem_object (because
         * if pmap_enter wants to allocate out of kmem_object it will
         * need to lock it itself!)
         */
#if defined(PMAP_NEW)
        pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL);
#else
        pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
            UVM_PROT_ALL, TRUE);
#endif
        loopva += PAGE_SIZE;
        offset += PAGE_SIZE;
        size -= PAGE_SIZE;
    }

    UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva, 0, 0, 0);
    return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */
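
/*
 * usage sketch (hypothetical): releases a range previously obtained from
 * uvm_km_kmemalloc or uvm_km_alloc1 on the same map; "kmem_map", "va",
 * and "len" are illustrative names:
 *
 *      uvm_km_free(kmem_map, va, len);
 */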

void
uvm_km_free(map, addr, size)
    vm_map_t map;
    vm_offset_t addr;
    vm_size_t size;
{
    uvm_unmap(map, trunc_page(addr), round_page(addr+size), 1);
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */
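
/*
 * sketch of the wait/wakeup pairing (illustrative): uvm_km_valloc_wait
 * (below) sleeps on the map address when the map is full, and this
 * function is the wakeup side of that handshake:
 *
 *      tsleep((caddr_t)map, PVM, "vallocwait", 0);     <- sleeper
 *      thread_wakeup(map);                             <- waker (here)
 */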

void
uvm_km_free_wakeup(map, addr, size)
    vm_map_t map;
    vm_offset_t addr;
    vm_size_t size;
{
    vm_map_entry_t dead_entries;

    vm_map_lock(map);
    (void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size), 1,
        &dead_entries);
    thread_wakeup(map);
    vm_map_unlock(map);

    if (dead_entries != NULL)
        uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */
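
/*
 * usage sketch (hypothetical): allocate one wired, zeroed page in the
 * kernel map, sleeping for physical memory if necessary:
 *
 *      vm_offset_t va;
 *
 *      va = uvm_km_alloc1(kernel_map, PAGE_SIZE, TRUE);
 *      if (va == 0)
 *          ... no VM space was available in the map ...
 */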

vm_offset_t
uvm_km_alloc1(map, size, zeroit)
    vm_map_t map;
    vm_size_t size;
    boolean_t zeroit;
{
    vm_offset_t kva, loopva, offset;
    struct vm_page *pg;
    UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

    UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0, 0);

#ifdef DIAGNOSTIC
    if (vm_map_pmap(map) != pmap_kernel())
        panic("uvm_km_alloc1");
#endif

    size = round_page(size);
    kva = vm_map_min(map);      /* hint */

    /*
     * allocate some virtual space
     */

    if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
        UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
        UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
        UVMHIST_LOG(maphist, "<- done (no VM)", 0, 0, 0, 0);
        return(0);
    }

    /*
     * recover object offset from virtual address
     */

    offset = kva - vm_map_min(map);
    UVMHIST_LOG(maphist, " kva=0x%x, offset=0x%x", kva, offset, 0, 0);

    /*
     * now allocate the memory.  we must be careful about released pages.
     */

    loopva = kva;
    while (size) {
        simple_lock(&uvm.kernel_object->vmobjlock);
        pg = uvm_pagelookup(uvm.kernel_object, offset);

        /*
         * if we found a page in an unallocated region, it must be
         * released
         */
        if (pg) {
            if ((pg->flags & PG_RELEASED) == 0)
                panic("uvm_km_alloc1: non-released page");
            pg->flags |= PG_WANTED;
            UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
                0, "km_alloc", 0);
            continue;   /* retry */
        }

        /* allocate ram */
        pg = uvm_pagealloc(uvm.kernel_object, offset, NULL);
        if (pg) {
            pg->flags &= ~PG_BUSY;      /* new page */
            UVM_PAGE_OWN(pg, NULL);
        }
        simple_unlock(&uvm.kernel_object->vmobjlock);
        if (pg == NULL) {
            uvm_wait("km_alloc1w");     /* wait for memory */
            continue;
        }

        /* map it in */
#if defined(PMAP_NEW)
        pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), UVM_PROT_ALL);
#else
        pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
            UVM_PROT_ALL, TRUE);
#endif
        loopva += PAGE_SIZE;
        offset += PAGE_SIZE;
        size -= PAGE_SIZE;
    }

    /*
     * zero on request (note that "size" is now zero due to the above loop
     * so we need to subtract kva from loopva to reconstruct the size).
     */

    if (zeroit)
        bzero((caddr_t)kva, loopva - kva);

    UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva, 0, 0, 0);
    return(kva);
}

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */
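
/*
 * usage sketch (hypothetical): reserve pageable, zero-fill KVA that will
 * be filled from uvm.kernel_object at fault time; "len" is illustrative:
 *
 *      vm_offset_t va;
 *
 *      va = uvm_km_valloc(kernel_map, round_page(len));
 */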

vm_offset_t
uvm_km_valloc(map, size)
    vm_map_t map;
    vm_size_t size;
{
    vm_offset_t kva;
    UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

    UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0, 0);

#ifdef DIAGNOSTIC
    if (vm_map_pmap(map) != pmap_kernel())
        panic("uvm_km_valloc");
#endif

    size = round_page(size);
    kva = vm_map_min(map);      /* hint */

    /*
     * allocate some virtual space.  will be demand filled by kernel_object.
     */

    if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
        UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
        UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
        UVMHIST_LOG(maphist, "<- done (no VM)", 0, 0, 0, 0);
        return(0);
    }

    UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva, 0, 0, 0);
    return(kva);
}

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */
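
/*
 * usage sketch (hypothetical): like uvm_km_valloc, but sleeps until KVA
 * becomes available instead of failing; "len" is illustrative:
 *
 *      vm_offset_t va;
 *
 *      va = uvm_km_valloc_wait(kernel_map, round_page(len));
 *      if (va == 0)
 *          ... only happens when the request exceeds the whole map ...
 */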

vm_offset_t
uvm_km_valloc_wait(map, size)
    vm_map_t map;
    vm_size_t size;
{
    vm_offset_t kva;
    UVMHIST_FUNC("uvm_km_valloc_wait"); UVMHIST_CALLED(maphist);

    UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0, 0);

#ifdef DIAGNOSTIC
    if (vm_map_pmap(map) != pmap_kernel())
        panic("uvm_km_valloc_wait");
#endif

    size = round_page(size);
    if (size > vm_map_max(map) - vm_map_min(map))
        return(0);

    while (1) {
        kva = vm_map_min(map);      /* hint */

        /*
         * allocate some virtual space.  will be demand filled by
         * kernel_object.
         */

        if (uvm_map(map, &kva, size, uvm.kernel_object,
            UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
            UVM_INH_NONE, UVM_ADV_RANDOM, 0)) == KERN_SUCCESS) {
            UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva, 0, 0, 0);
            return(kva);
        }

        /*
         * failed.  sleep for a while (on map)
         */

        UVMHIST_LOG(maphist, "<<<sleeping>>>", 0, 0, 0, 0);
        tsleep((caddr_t)map, PVM, "vallocwait", 0);
    }
    /*NOTREACHED*/
}