/*	$NetBSD: uvm_km.c,v 1.3 1998/02/07 02:29:21 chs Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

vm_map_t kernel_map = NULL;

/*
 * local functions
 */

static int uvm_km_get __P((struct uvm_object *, vm_offset_t,
                           vm_page_t *, int *, int, vm_prot_t, int, int));
/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct uvm_object	kmem_object_store;
static struct uvm_object	mb_object_store;

static struct uvm_pagerops km_pager = {
  NULL,	/* init */
  NULL, /* attach */
  NULL, /* reference */
  NULL, /* detach */
  NULL, /* fault */
  NULL, /* flush */
  uvm_km_get, /* get */
  /* ... rest are NULL */
};

/*
 * uvm_km_get: pager get function for kernel objects
 *
 * => currently we do not support pageout to the swap area, so this
 *    pager is very simple.    eventually we may want an anonymous
 *    object pager which will do paging.
 */


static int uvm_km_get(uobj, offset, pps, npagesp, centeridx, access_type,
        	      advice, flags)

struct uvm_object *uobj;
vm_offset_t offset;
struct vm_page **pps;
int *npagesp;
int centeridx, advice, flags;
vm_prot_t access_type;

{
  vm_offset_t current_offset;
  vm_page_t ptmp;
  int lcv, gotpages, maxpages;
  boolean_t done;
  UVMHIST_FUNC("uvm_km_get"); UVMHIST_CALLED(maphist);

  UVMHIST_LOG(maphist, "flags=%d", flags,0,0,0);

  /*
   * get number of pages
   */

  maxpages = *npagesp;

  /*
   * step 1: handle the case where fault data structures are locked.
   */

  if (flags & PGO_LOCKED) {

    /*
     * step 1a: get pages that are already resident.   only do this
     * if the data structures are locked (i.e. the first time through).
     */

    done = TRUE;	/* be optimistic */
    gotpages = 0;	/* # of pages we got so far */

    for (lcv = 0, current_offset = offset ;
	 lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

      /* do we care about this page?  if not, skip it */
      if (pps[lcv] == PGO_DONTCARE)
	continue;

      /* lookup page */
      ptmp = uvm_pagelookup(uobj, current_offset);

      /* null?  attempt to allocate the page */
      if (ptmp == NULL) {
	ptmp = uvm_pagealloc(uobj, current_offset, NULL);
	if (ptmp) {
	  ptmp->flags &= ~(PG_BUSY|PG_FAKE);	/* new page */
          UVM_PAGE_OWN(ptmp, NULL);
	  ptmp->wire_count = 1;		/* XXX: prevents pageout attempts */
	  uvm_pagezero(ptmp);
	}
      }

      /* to be useful must get a non-busy, non-released page */
      if (ptmp == NULL || (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
	if (lcv == centeridx || (flags & PGO_ALLPAGES) != 0)
	  done = FALSE;		/* need to do a wait or I/O! */
	continue;
      }

      /* useful page: busy/lock it and plug it in our result array */
      ptmp->flags |= PG_BUSY;		/* caller must un-busy this page */
      UVM_PAGE_OWN(ptmp, "uvm_km_get1");
      pps[lcv] = ptmp;
      gotpages++;

    }	/* "for" lcv loop */

    /*
     * step 1b: now we've either done everything needed or we need to
     * unlock and do some waiting or I/O.
     */

    UVMHIST_LOG(maphist, "<- done (done=%d)", done, 0,0,0);

    *npagesp = gotpages;
    if (done)
      return(VM_PAGER_OK);		/* bingo! */
    else
      return(VM_PAGER_UNLOCK);		/* EEK!   Need to unlock and I/O */
  }

  /*
   * step 2: get non-resident or busy pages.
   * object is locked.   data structures are unlocked.
   */

  for (lcv = 0, current_offset = offset ;
       lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

    /* skip over pages we've already gotten or don't want */
    /* skip over pages we don't _have_ to get */
    if (pps[lcv] != NULL ||
	(lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
      continue;

    /*
     * we have yet to locate the current page (pps[lcv]).   we first
     * look for a page that is already at the current offset.   if we
     * find a page, we check to see if it is busy or released.  if that
     * is the case, then we sleep on the page until it is no longer busy
     * or released and repeat the lookup.    if the page we found is
     * neither busy nor released, then we busy it (so we own it) and
     * plug it into pps[lcv].   this 'break's the following while loop
     * and indicates we are ready to move on to the next page in the
     * "lcv" loop above.
     *
     * if we exit the while loop with pps[lcv] still set to NULL, then
     * it means that we allocated a new busy/fake/clean page ptmp in the
     * object and we need to do I/O to fill in the data.
     */

    while (pps[lcv] == NULL) {		/* top of "pps" while loop */

      /* look for a current page */
      ptmp = uvm_pagelookup(uobj, current_offset);

      /* nope?   allocate one now (if we can) */
      if (ptmp == NULL) {

	ptmp = uvm_pagealloc(uobj, current_offset, NULL);	/* alloc */

	/* out of RAM? */
	if (ptmp == NULL) {
	  simple_unlock(&uobj->vmobjlock);
	  uvm_wait("kmgetwait1");
	  simple_lock(&uobj->vmobjlock);
	  continue;		/* goto top of pps while loop */
	}

	/*
	 * got new page ready for I/O.  break pps while loop.  pps[lcv] is
	 * still NULL.
	 */
	break;
      }

      /* page is there, see if we need to wait on it */
      if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
	ptmp->flags |= PG_WANTED;
	UVM_UNLOCK_AND_WAIT(ptmp,&uobj->vmobjlock,0,"uvn_get",0);
	simple_lock(&uobj->vmobjlock);
	continue;		/* goto top of pps while loop */
      }

      /*
       * if we get here then the page has become resident and unbusy
       * between steps 1 and 2.  we busy it now (so we own it) and set
       * pps[lcv] (so that we exit the while loop).
       */
      ptmp->flags |= PG_BUSY;	/* we own it, caller must un-busy */
      UVM_PAGE_OWN(ptmp, "uvm_km_get2");
      pps[lcv] = ptmp;
    }

    /*
     * if we own a valid page at the correct offset, pps[lcv] will
     * point to it.   nothing more to do except go to the next page.
     */

    if (pps[lcv])
      continue;			/* next lcv */

    /*
     * we have a "fake/busy/clean" page that we just allocated.
     * do the needed "i/o" (in this case that means zero it).
     */

    uvm_pagezero(ptmp);
    ptmp->flags &= ~(PG_FAKE);
    ptmp->wire_count = 1;		/* XXX: prevents pageout attempts */
    pps[lcv] = ptmp;

  }	/* lcv loop */

  /*
   * finally, unlock object and return.
   */

  simple_unlock(&uobj->vmobjlock);
  UVMHIST_LOG(maphist, "<- done (OK)",0,0,0,0);
  return(VM_PAGER_OK);
}

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void uvm_km_init(start, end)

vm_offset_t start, end;

{
  vm_offset_t base = VM_MIN_KERNEL_ADDRESS;

  /*
   * first, init kernel memory objects.
   */

  /* kernel_object: for pageable anonymous kernel memory */
  uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
				 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

  /* kmem_object: for malloc'd memory (always wired) */
  simple_lock_init(&kmem_object_store.vmobjlock);
  kmem_object_store.pgops = &km_pager;
  TAILQ_INIT(&kmem_object_store.memq);
  kmem_object_store.uo_npages = 0;
  kmem_object_store.uo_refs = UVM_OBJ_KERN;
					/* we are special.  we never die */
  uvmexp.kmem_object = &kmem_object_store;

  /* mb_object: for mbuf memory (always wired) */
  simple_lock_init(&mb_object_store.vmobjlock);
  mb_object_store.pgops = &km_pager;
  TAILQ_INIT(&mb_object_store.memq);
  mb_object_store.uo_npages = 0;
  mb_object_store.uo_refs = UVM_OBJ_KERN;
					/* we are special.  we never die */
  uvmexp.mb_object = &mb_object_store;

  /*
   * init the map and reserve kernel space before installing.
   */

  uvm_map_setup(&kernel_map_store, base, end, FALSE);
  kernel_map_store.pmap = pmap_kernel();
  if (uvm_map(&kernel_map_store, &base, start - base, NULL, UVM_UNKNOWN_OFFSET,
	      UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
			  UVM_ADV_RANDOM,UVM_FLAG_FIXED)) != KERN_SUCCESS)
    panic("uvm_km_init: could not reserve space for kernel");

  /*
   * install!
   */

  kernel_map = &kernel_map_store;
}
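
/*
 * Illustrative sketch (not from the original file): uvm_km_init() is
 * meant to be called once at VM startup, after the pmap layer knows how
 * much KVM is already spoken for.   e.g., with "kvm_start" and "kvm_end"
 * as hypothetical MD values bounding the unallocated kernel VA:
 *
 *	uvm_km_init(kvm_start, kvm_end);
 *
 * after this, kernel_map is installed and further carving is done with
 * uvm_km_suballoc() below.
 */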

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */

struct vm_map *uvm_km_suballoc(map, min, max, size, pageable, submap)

struct vm_map *map;
vm_offset_t *min, *max;		/* OUT, OUT */
vm_size_t size;
boolean_t pageable;
struct vm_map *submap;

{
  size = round_page(size);	/* round up to pagesize */

  /*
   * first allocate a blank spot in the parent map
   */

  if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET,
	      UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
			  UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS) {
    panic("uvm_km_suballoc: unable to allocate space in parent map");
  }

  /*
   * set VM bounds (min is filled in by uvm_map)
   */

  *max = *min + size;

  /*
   * add references to pmap and create or init the submap
   */

  pmap_reference(vm_map_pmap(map));
  if (submap == NULL) {
    submap = uvm_map_create(vm_map_pmap(map), *min, *max, pageable);
    if (submap == NULL)
      panic("uvm_km_suballoc: unable to create submap");
  } else {
      uvm_map_setup(submap, *min, *max, pageable);
      submap->pmap = vm_map_pmap(map);
  }

  /*
   * now let uvm_map_submap plug it in...
   */

  if (uvm_map_submap(map, *min, *max, submap) != KERN_SUCCESS)
    panic("uvm_km_suballoc: submap allocation failed");

  return(submap);
}
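
/*
 * Illustrative sketch (an assumption, not from this file): a typical
 * caller carves a wired submap out of kernel_map, e.g. to back malloc():
 *
 *	vm_offset_t kmin, kmax;
 *
 *	kmem_map = uvm_km_suballoc(kernel_map, &kmin, &kmax,
 *	    (vm_size_t)round_page(kmem_size), FALSE, NULL);
 *
 * "kmem_size" is a hypothetical MD sizing value; passing NULL for the
 * submap argument makes uvm_km_suballoc() allocate the map structure.
 */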

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 */

#define UKM_HASH_PENALTY 4      /* a guess */
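
/*
 * Worked example of the heuristic below (added for clarity): removing
 * 4 pages gives a budget of 4 * UKM_HASH_PENALTY == 16 list entries,
 * so an object holding 16 or fewer resident pages is walked via its
 * memq list; a larger object is probed page-by-page through the hash
 * with uvm_pagelookup().
 */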

void uvm_km_pgremove(uobj, start, end)

struct uvm_object *uobj;
vm_offset_t start, end;

{
  boolean_t by_list, is_aobj;
  struct vm_page *pp, *ppnext;
  vm_offset_t curoff;
  UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

  simple_lock(&uobj->vmobjlock);		/* lock object */

  /* is uobj an aobj? */
  is_aobj = uobj->pgops == &aobj_pager;

  /* choose cheapest traversal */
  by_list = (uobj->uo_npages <=
	     ((end - start) / PAGE_SIZE) * UKM_HASH_PENALTY);

  if (by_list)
    goto loop_by_list;

  /* by hash */

  for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
    pp = uvm_pagelookup(uobj, curoff);
    if (pp == NULL)
      continue;

    UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,pp->flags & PG_BUSY,0,0);
    /* now do the actual work */
    if (pp->flags & PG_BUSY)
      pp->flags |= PG_RELEASED;	/* owner must check for this when done */
    else {
      pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);

      /*
       * if this kernel object is an aobj, free the swap slot.
       */
      if (is_aobj) {
	int slot = uao_set_swslot(uobj, curoff / PAGE_SIZE, 0);

	if (slot)
	  uvm_swap_free(slot, 1);
      }

      uvm_lock_pageq();
      uvm_pagefree(pp);
      uvm_unlock_pageq();
    }
    /* done */

  }
  simple_unlock(&uobj->vmobjlock);
  return;

loop_by_list:

  for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {

    ppnext = pp->listq.tqe_next;
    if (pp->offset < start || pp->offset >= end) {
      continue;
    }

    UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,pp->flags & PG_BUSY,0,0);
    /* now do the actual work */
    if (pp->flags & PG_BUSY)
      pp->flags |= PG_RELEASED;	/* owner must check for this when done */
    else {
      pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);

      /*
       * if this kernel object is an aobj, free the swap slot.
       */
      if (is_aobj) {
	int slot = uao_set_swslot(uobj, pp->offset / PAGE_SIZE, 0);

	if (slot)
	  uvm_swap_free(slot, 1);
      }

      uvm_lock_pageq();
      uvm_pagefree(pp);
      uvm_unlock_pageq();
    }
    /* done */

  }
  simple_unlock(&uobj->vmobjlock);
  return;
}


/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */

vm_offset_t uvm_km_kmemalloc(map, obj, size, flags)

vm_map_t map;
struct uvm_object *obj;
vm_size_t size;
int flags;

{
  vm_offset_t kva, loopva;
  vm_offset_t offset;
  struct vm_page *pg;
  UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);


  UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	map, obj, size, flags);
#ifdef DIAGNOSTIC
  /* sanity check */
  if (vm_map_pmap(map) != pmap_kernel())
    panic("uvm_km_kmemalloc: invalid map");
#endif

  /*
   * setup for call
   */

  size = round_page(size);
  kva = vm_map_min(map);	/* hint */

  /*
   * allocate some virtual space
   */

  if (uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	      UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
			  UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
      != KERN_SUCCESS) {
    UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
    return(0);
  }

  /*
   * if all we wanted was VA, return now
   */

  if (flags & UVM_KMF_VALLOC) {
    UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
    return(kva);
  }
  /*
   * recover object offset from virtual address
   */

  offset = kva - vm_map_min(map);
  UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

  /*
   * now allocate and map in the memory... note that we are the only ones
   * who should ever get a handle on this area of VM.
   */

  loopva = kva;
  while (size) {
    simple_lock(&obj->vmobjlock);
    pg = uvm_pagealloc(obj, offset, NULL);
    if (pg) {
      pg->flags &= ~PG_BUSY;	/* new page */
      UVM_PAGE_OWN(pg, NULL);

      pg->wire_count = 1;
      uvmexp.wired++;
    }
    simple_unlock(&obj->vmobjlock);

    /*
     * out of memory?
     */

    if (pg == NULL) {
      if (flags & UVM_KMF_NOWAIT) {
	uvm_unmap(map, kva, kva + size, 0); /* free everything! */
	return(0);
      } else {
	uvm_wait("km_getwait2");		/* sleep here */
	continue;
      }
    }

    /*
     * map it in: note that we call pmap_enter with the map and object
     * unlocked in case we are kmem_map/kmem_object (because if pmap_enter
     * wants to allocate out of kmem_object it will need to lock it itself!)
     */
#if defined(PMAP_NEW)
    pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL);
#else
    pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg), UVM_PROT_ALL, TRUE);
#endif
    loopva += PAGE_SIZE;
    offset += PAGE_SIZE;
    size -= PAGE_SIZE;
  }

  UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
  return(kva);
}
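
/*
 * Illustrative sketch (an assumption, not from this file): a
 * malloc()-style consumer might allocate wired kernel memory from
 * kmem_map and release it later with uvm_km_free() below.   a zero
 * return means either no VA, or no physical pages under UVM_KMF_NOWAIT:
 *
 *	vm_offset_t va;
 *
 *	va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
 *	    (vm_size_t)round_page(len), UVM_KMF_NOWAIT);
 *	if (va == 0)
 *		return (NULL);
 *	...
 *	uvm_km_free(kmem_map, va, (vm_size_t)round_page(len));
 *
 * "kmem_map" and "len" are assumed to exist in the caller's context.
 */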

/*
 * uvm_km_free: free an area of kernel memory
 */

void uvm_km_free(map, addr, size)

vm_map_t map;
vm_offset_t addr;
vm_size_t size;

{
  uvm_unmap(map, trunc_page(addr), round_page(addr+size), 1);
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void uvm_km_free_wakeup(map, addr, size)

vm_map_t map;
vm_offset_t addr;
vm_size_t size;

{
  vm_map_entry_t dead_entries;

  vm_map_lock(map);
  (void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size), 1,
			 &dead_entries);
  thread_wakeup(map);
  vm_map_unlock(map);

  if (dead_entries != NULL)
    uvm_unmap_detach(dead_entries, 0);
}
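
/*
 * Note (added for clarity): uvm_km_free_wakeup() pairs with
 * uvm_km_valloc_wait() below; the waiter tsleep()s on the map address
 * and the thread_wakeup(map) above is what wakes it once space is back.
 */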

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vm_offset_t uvm_km_alloc1(map, size, zeroit)

vm_map_t map;
vm_size_t size;
boolean_t zeroit;

{
  vm_offset_t kva, loopva, offset;
  struct vm_page *pg;
  UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

  UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);

#ifdef DIAGNOSTIC
  if (vm_map_pmap(map) != pmap_kernel())
    panic("uvm_km_alloc1");
#endif

  size = round_page(size);
  kva = vm_map_min(map);		/* hint */

  /*
   * allocate some virtual space
   */

  if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	      UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
			  UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
    UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
    return(0);
  }

  /*
   * recover object offset from virtual address
   */

  offset = kva - vm_map_min(map);
  UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

  /*
   * now allocate the memory.  we must be careful about released pages.
   */

  loopva = kva;
  while (size) {
    simple_lock(&uvm.kernel_object->vmobjlock);
    pg = uvm_pagelookup(uvm.kernel_object, offset);

    /* if we found a page in an unallocated region, it must be released */
    if (pg) {
      if ((pg->flags & PG_RELEASED) == 0)
	panic("uvm_km_alloc1: non-released page");
      pg->flags |= PG_WANTED;
      UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,0,"km_alloc",0);
      continue;   /* retry */
    }

    /* allocate ram */
    pg = uvm_pagealloc(uvm.kernel_object, offset, NULL);
    if (pg) {
      pg->flags &= ~PG_BUSY;	/* new page */
      UVM_PAGE_OWN(pg, NULL);
    }
    simple_unlock(&uvm.kernel_object->vmobjlock);
    if (pg == NULL) {
      uvm_wait("km_alloc1w");	/* wait for memory */
      continue;
    }

    /* map it in */
#if defined(PMAP_NEW)
    pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), UVM_PROT_ALL);
#else
    pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg), UVM_PROT_ALL, TRUE);
#endif
    loopva += PAGE_SIZE;
    offset += PAGE_SIZE;
    size -= PAGE_SIZE;
  }

  /*
   * zero on request (note that "size" is now zero due to the above loop
   * so we need to subtract kva from loopva to reconstruct the size).
   */

  if (zeroit)
    bzero((caddr_t)kva, loopva - kva);

  UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
  return(kva);
}
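
/*
 * Illustrative sketch (an assumption, not from this file): callers that
 * need wired, zeroed kernel memory and can tolerate sleeping might use:
 *
 *	vm_offset_t va;
 *
 *	va = uvm_km_alloc1(kernel_map, (vm_size_t)round_page(len), TRUE);
 *	if (va == 0)
 *		panic("out of kernel VA");
 *
 * unlike uvm_km_kmemalloc(), a zero return here can only mean the map
 * itself had no room, since the page loop above sleeps rather than fail.
 */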

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vm_offset_t uvm_km_valloc(map, size)

vm_map_t map;
vm_size_t size;

{
  vm_offset_t kva;
  UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

  UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
  if (vm_map_pmap(map) != pmap_kernel())
    panic("uvm_km_valloc");
#endif

  size = round_page(size);
  kva = vm_map_min(map);		/* hint */

  /*
   * allocate some virtual space.   will be demand filled by kernel_object.
   */

  if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	      UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
			  UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
    UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
    return(0);
  }

  UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
  return(kva);
}
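
/*
 * Illustrative sketch (an assumption, not from this file): a pageable
 * kernel buffer could be set up as:
 *
 *	vm_offset_t va = uvm_km_valloc(kernel_map, (vm_size_t)len);
 *
 * no physical pages are allocated here; touching the range later faults
 * them in (zero-filled) through uvm.kernel_object.
 */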

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vm_offset_t uvm_km_valloc_wait(map, size)

vm_map_t map;
vm_size_t size;

{
  vm_offset_t kva;
  UVMHIST_FUNC("uvm_km_valloc_wait"); UVMHIST_CALLED(maphist);

  UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
  if (vm_map_pmap(map) != pmap_kernel())
    panic("uvm_km_valloc_wait");
#endif

  size = round_page(size);
  if (size > vm_map_max(map) - vm_map_min(map))
    return(0);

  while (1) {
    kva = vm_map_min(map);		/* hint */

    /*
     * allocate some virtual space.   will be demand filled by kernel_object.
     */

    if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
		UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
			    UVM_ADV_RANDOM, 0)) == KERN_SUCCESS){
      UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
      return(kva);
    }

    /*
     * failed.  sleep for a while (on map)
     */

    UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
    tsleep((caddr_t)map, PVM, "vallocwait", 0);
  }
  /*NOTREACHED*/
}
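
/*
 * Illustrative sketch (an assumption, not from this file): callers that
 * would rather block than fail might do:
 *
 *	vm_offset_t va;
 *
 *	va = uvm_km_valloc_wait(kernel_map, (vm_size_t)round_page(len));
 *
 * the only zero return is the degenerate case where the rounded request
 * is bigger than the entire map.
 */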