/*	$NetBSD: uvm_km.c,v 1.4 1998/02/07 11:08:47 mrg Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

vm_map_t kernel_map = NULL;

/*
 * local functions
 */

static int uvm_km_get __P((struct uvm_object *, vm_offset_t,
	vm_page_t *, int *, int, vm_prot_t, int, int));
/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct uvm_object	kmem_object_store;
static struct uvm_object	mb_object_store;

static struct uvm_pagerops km_pager = {
	NULL,		/* init */
	NULL,		/* attach */
	NULL,		/* reference */
	NULL,		/* detach */
	NULL,		/* fault */
	NULL,		/* flush */
	uvm_km_get,	/* get */
	/* ... rest are NULL */
};

/*
 * uvm_km_get: pager get function for kernel objects
 *
 * => currently we do not support pageout to the swap area, so this
 *    pager is very simple.   eventually we may want an anonymous
 *    object pager which will do paging.
 */


static int uvm_km_get(uobj, offset, pps, npagesp, centeridx, access_type,
	advice, flags)

struct uvm_object *uobj;
vm_offset_t offset;
struct vm_page **pps;
int *npagesp;
int centeridx, advice, flags;
vm_prot_t access_type;

{
	vm_offset_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages;
	boolean_t done;
	UVMHIST_FUNC("uvm_km_get"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "flags=%d", flags,0,0,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do this
		 * if the data structures are locked (i.e. the first time through).
		 */

		done = TRUE;		/* be optimistic */
		gotpages = 0;		/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ;
		    lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			/* lookup page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* null?  attempt to allocate the page */
			if (ptmp == NULL) {
				ptmp = uvm_pagealloc(uobj, current_offset, NULL);
				if (ptmp) {
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);	/* new page */
					UVM_PAGE_OWN(ptmp, NULL);
					ptmp->wire_count = 1;	/* XXX: prevents pageout attempts */
					uvm_pagezero(ptmp);
				}
			}

			/* to be useful must get a non-busy, non-released page */
			if (ptmp == NULL || (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx || (flags & PGO_ALLPAGES) != 0)
					done = FALSE;	/* need to do a wait or I/O! */
				continue;
			}

			/* useful page: busy/lock it and plug it in our result array */
			ptmp->flags |= PG_BUSY;	/* caller must un-busy this page */
			UVM_PAGE_OWN(ptmp, "uvm_km_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we need
		 * to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(maphist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			return(VM_PAGER_OK);		/* bingo! */
		else
			return(VM_PAGER_UNLOCK);	/* EEK!   Need to unlock and I/O */
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ;
	    lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

		/* skip over pages we've already gotten or don't want */
		/* skip over pages we don't _have_ to get */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we first
		 * look for a page that is already at the current offset.   if we
		 * find a page, we check to see if it is busy or released.   if that
		 * is the case, then we sleep on the page until it is no longer busy
		 * or released and repeat the lookup.   if the page we found is
		 * neither busy nor released, then we busy it (so we own it) and
		 * plug it into pps[lcv].   this 'break's the following while loop
		 * and indicates we are ready to move on to the next page in the
		 * "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL, then
		 * it means that we allocated a new busy/fake/clean page ptmp in the
		 * object and we need to do I/O to fill in the data.
		 */

		while (pps[lcv] == NULL) {	/* top of "pps" while loop */

			/* look for a current page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* nope?  allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset, NULL);	/* alloc */

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("kmgetwait1");
					simple_lock(&uobj->vmobjlock);
					continue;	/* goto top of pps while loop */
				}

				/*
				 * got new page ready for I/O.  break pps while loop.  pps[lcv] is
				 * still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0, "uvn_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident and unbusy
			 * between steps 1 and 2.  we busy it now (so we own it) and set
			 * pps[lcv] (so that we exit the while loop).
			 */
			ptmp->flags |= PG_BUSY;	/* we own it, caller must un-busy */
			UVM_PAGE_OWN(ptmp, "uvm_km_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own a valid page at the correct offset, pps[lcv] will
		 * point to it.   nothing more to do except go to the next page.
		 */

		if (pps[lcv])
			continue;	/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o" (in this case that means zero it).
		 */

		uvm_pagezero(ptmp);
		ptmp->flags &= ~(PG_FAKE);
		ptmp->wire_count = 1;	/* XXX: prevents pageout attempts */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(maphist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

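/*
 * usage sketch (illustrative only; the two argument names below are
 * hypothetical machine-dependent values, not symbols defined in this
 * file): MD startup code calls this once, passing the first free kernel
 * virtual address and the end of kernel virtual space, e.g.:
 *
 *	uvm_km_init(md_first_free_kva, md_kva_end);
 *
 * on return kernel_map is installed and the allocators below are usable.
 */
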
void uvm_km_init(start, end)

vm_offset_t start, end;

{
	vm_offset_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/* kmem_object: for malloc'd memory (always wired) */
	simple_lock_init(&kmem_object_store.vmobjlock);
	kmem_object_store.pgops = &km_pager;
	TAILQ_INIT(&kmem_object_store.memq);
	kmem_object_store.uo_npages = 0;
	kmem_object_store.uo_refs = UVM_OBJ_KERN;
	/* we are special.  we never die */
	uvmexp.kmem_object = &kmem_object_store;

	/* mb_object: for mbuf memory (always wired) */
	simple_lock_init(&mb_object_store.vmobjlock);
	mb_object_store.pgops = &km_pager;
	TAILQ_INIT(&mb_object_store.memq);
	mb_object_store.uo_npages = 0;
	mb_object_store.uo_refs = UVM_OBJ_KERN;
	/* we are special.  we never die */
	uvmexp.mb_object = &mb_object_store;

	/*
	 * init the map and reserve kernel space before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, FALSE);
	kernel_map_store.pmap = pmap_kernel();
	if (uvm_map(&kernel_map_store, &base, start - base, NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != KERN_SUCCESS)
		panic("uvm_km_init: could not reserve space for kernel");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if submap is non NULL we use that as the submap, otherwise we
 *    alloc a new map
 */

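/*
 * usage sketch (illustrative only; the map name and size below are
 * hypothetical, chosen just to show the calling convention):
 *
 *	vm_offset_t submin, submax;
 *	struct vm_map *foo_map;
 *
 *	foo_map = uvm_km_suballoc(kernel_map, &submin, &submax,
 *	    32 * PAGE_SIZE, FALSE, NULL);
 *
 * afterwards [submin, submax) is reserved in kernel_map and all
 * allocations in that range go through foo_map.
 */
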
struct vm_map *uvm_km_suballoc(map, min, max, size, pageable, submap)

struct vm_map *map;
vm_offset_t *min, *max;		/* OUT, OUT */
vm_size_t size;
boolean_t pageable;
struct vm_map *submap;

{
	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, pageable);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, pageable);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *min, *max, submap) != KERN_SUCCESS)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.   (this gets called from uvm_unmap_...).
 */

#define UKM_HASH_PENALTY 4	/* a guess */

void uvm_km_pgremove(uobj, start, end)

struct uvm_object *uobj;
vm_offset_t start, end;

{
	boolean_t by_list, is_aobj;
	struct vm_page *pp, *ppnext;
	vm_offset_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);		/* lock object */

	/* is uobj an aobj? */
	is_aobj = uobj->pgops == &aobj_pager;

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) / PAGE_SIZE) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp, pp->flags & PG_BUSY, 0, 0);
		/* now do the actual work */
		if (pp->flags & PG_BUSY)
			pp->flags |= PG_RELEASED;	/* owner must check for this when done */
		else {
			pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);

			/*
			 * if this kernel object is an aobj, free the swap slot.
			 */
			if (is_aobj) {
				int slot = uao_set_swslot(uobj, curoff / PAGE_SIZE, 0);

				if (slot)
					uvm_swap_free(slot, 1);
			}

			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */

	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {

		ppnext = pp->listq.tqe_next;
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp, pp->flags & PG_BUSY, 0, 0);
		/* now do the actual work */
		if (pp->flags & PG_BUSY)
			pp->flags |= PG_RELEASED;	/* owner must check for this when done */
		else {
			pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);

			/*
			 * if this kernel object is an aobj, free the swap slot.
			 */
			if (is_aobj) {
				int slot = uao_set_swslot(uobj, pp->offset / PAGE_SIZE, 0);

				if (slot)
					uvm_swap_free(slot, 1);
			}

			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */

	}
	simple_unlock(&uobj->vmobjlock);
	return;
}


/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return NULL even if we can wait if there is not enough
 *    free VM space in the map... caller should be prepared to handle
 *    this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *    lock the map
 */

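/*
 * usage sketch (illustrative only; kmem_map and "len" are stand-ins for
 * whatever map, object, and size the caller really uses).  a zero return
 * means either no VA was available or, with UVM_KMF_NOWAIT, no memory:
 *
 *	vm_offset_t va;
 *
 *	va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
 *	    round_page(len), UVM_KMF_NOWAIT);
 *	if (va == 0)
 *		return (ENOMEM);
 *	...
 *	uvm_km_free(kmem_map, va, round_page(len));
 */
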
vm_offset_t uvm_km_kmemalloc(map, obj, size, flags)

vm_map_t map;
struct uvm_object *obj;
vm_size_t size;
int flags;

{
	vm_offset_t kva, loopva;
	vm_offset_t offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);


	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);
#ifdef DIAGNOSTIC
	/* sanity check */
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_kmemalloc: invalid map");
#endif

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
	    != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}
	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&obj->vmobjlock);
		pg = uvm_pagealloc(obj, offset, NULL);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);

			pg->wire_count = 1;
			uvmexp.wired++;
		}
		simple_unlock(&obj->vmobjlock);

		/*
		 * out of memory?
		 */

		if (pg == NULL) {
			if (flags & UVM_KMF_NOWAIT) {
				uvm_unmap(map, kva, kva + size, 0);	/* free everything! */
				return(0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in: note that we call pmap_enter with the map and object
		 * unlocked in case we are kmem_map/kmem_object (because if pmap_enter
		 * wants to allocate out of kmem_object it will need to lock it itself!)
		 */
#if defined(PMAP_NEW)
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL);
#else
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg), UVM_PROT_ALL, TRUE);
#endif
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void uvm_km_free(map, addr, size)

vm_map_t map;
vm_offset_t addr;
vm_size_t size;

{
	uvm_unmap(map, trunc_page(addr), round_page(addr+size), 1);
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

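/*
 * note (informal): the intended sleeper on the other end is
 * uvm_km_valloc_wait() below, which tsleep()s on the map address when it
 * cannot find room; freeing through this function (rather than plain
 * uvm_km_free) issues the matching wakeup.  sketch (map name is
 * hypothetical):
 *
 *	uvm_km_free_wakeup(foo_map, va, round_page(len));
 */
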
void uvm_km_free_wakeup(map, addr, size)

vm_map_t map;
vm_offset_t addr;
vm_size_t size;

{
	vm_map_entry_t dead_entries;

	vm_map_lock(map);
	(void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size), 1,
	    &dead_entries);
	thread_wakeup(map);
	vm_map_unlock(map);

	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

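/*
 * usage sketch (illustrative only; the size and the use of kernel_map are
 * stand-ins): allocate wired, zeroed kernel memory, sleeping for pages if
 * necessary:
 *
 *	vm_offset_t va;
 *
 *	va = uvm_km_alloc1(kernel_map, 3 * PAGE_SIZE, TRUE);
 *	if (va == 0)
 *		panic("out of kernel virtual space");
 */
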
vm_offset_t uvm_km_alloc1(map, size, zeroit)

vm_map_t map;
vm_size_t size;
boolean_t zeroit;

{
	vm_offset_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_alloc1");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(map);
	UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.  we must be careful about released pages.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		pg = uvm_pagelookup(uvm.kernel_object, offset);

		/* if we found a page in an unallocated region, it must be released */
		if (pg) {
			if ((pg->flags & PG_RELEASED) == 0)
				panic("uvm_km_alloc1: non-released page");
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock, 0, "km_alloc", 0);
			continue;	/* retry */
		}

		/* allocate ram */
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (pg == NULL) {
			uvm_wait("km_alloc1w");	/* wait for memory */
			continue;
		}

		/* map it in */
#if defined(PMAP_NEW)
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), UVM_PROT_ALL);
#else
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg), UVM_PROT_ALL, TRUE);
#endif
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		bzero((caddr_t)kva, loopva - kva);

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

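/*
 * usage sketch (illustrative only; "len" is a stand-in): reserve pageable,
 * zero-fill kernel VA now and let the fault path fill in pages from
 * kernel_object on first touch:
 *
 *	vm_offset_t va;
 *
 *	va = uvm_km_valloc(kernel_map, round_page(len));
 *	if (va == 0)
 *		return (0);
 */
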
vm_offset_t uvm_km_valloc(map, size)

vm_map_t map;
vm_size_t size;

{
	vm_offset_t kva;
	UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.  will be demand filled by kernel_object.
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(0);
	}

	UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

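/*
 * usage sketch (illustrative only; the map name and "len" are
 * hypothetical): same as uvm_km_valloc() except the caller is willing to
 * sleep until virtual space becomes available; a zero return means the
 * request was larger than the whole map:
 *
 *	va = uvm_km_valloc_wait(foo_map, round_page(len));
 */
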
vm_offset_t uvm_km_valloc_wait(map, size)

vm_map_t map;
vm_size_t size;

{
	vm_offset_t kva;
	UVMHIST_FUNC("uvm_km_valloc_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc_wait");
#endif

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	while (1) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.   will be demand filled by kernel_object.
		 */

		if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
		    UVM_ADV_RANDOM, 0)) == KERN_SUCCESS) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}