/*	$NetBSD: uvm_km.c,v 1.6 1998/02/10 14:12:14 mrg Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"
#include "opt_pmap_new.h"

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

vm_map_t kernel_map = NULL;

/*
 * local functions
 */

static int uvm_km_get __P((struct uvm_object *, vm_offset_t,
                           vm_page_t *, int *, int, vm_prot_t, int, int));
/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct uvm_object	kmem_object_store;
static struct uvm_object	mb_object_store;

static struct uvm_pagerops km_pager = {
  NULL,		/* init */
  NULL,		/* attach */
  NULL,		/* reference */
  NULL,		/* detach */
  NULL,		/* fault */
  NULL,		/* flush */
  uvm_km_get,	/* get */
  /* ... rest are NULL */
};

/*
 * uvm_km_get: pager get function for kernel objects
 *
 * => currently we do not support pageout to the swap area, so this
 *    pager is very simple.   eventually we may want an anonymous
 *    object pager which will do paging.
 */

static int
uvm_km_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	vm_offset_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
  vm_offset_t current_offset;
  vm_page_t ptmp;
  int lcv, gotpages, maxpages;
  boolean_t done;
  UVMHIST_FUNC("uvm_km_get"); UVMHIST_CALLED(maphist);

  UVMHIST_LOG(maphist, "flags=%d", flags,0,0,0);

  /*
   * get number of pages
   */

  maxpages = *npagesp;

  /*
   * step 1: handle the case where fault data structures are locked.
   */

  if (flags & PGO_LOCKED) {

    /*
     * step 1a: get pages that are already resident.   only do this
     * if the data structures are locked (i.e. the first time through).
     */

    done = TRUE;	/* be optimistic */
    gotpages = 0;	/* # of pages we got so far */

    for (lcv = 0, current_offset = offset ;
	 lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

      /* do we care about this page?  if not, skip it */
      if (pps[lcv] == PGO_DONTCARE)
	continue;

      /* lookup page */
      ptmp = uvm_pagelookup(uobj, current_offset);

      /* null?  attempt to allocate the page */
      if (ptmp == NULL) {
	ptmp = uvm_pagealloc(uobj, current_offset, NULL);
	if (ptmp) {
	  ptmp->flags &= ~(PG_BUSY|PG_FAKE);	/* new page */
	  UVM_PAGE_OWN(ptmp, NULL);
	  ptmp->wire_count = 1;		/* XXX: prevents pageout attempts */
	  uvm_pagezero(ptmp);
	}
      }

      /* to be useful must get a non-busy, non-released page */
      if (ptmp == NULL || (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
	if (lcv == centeridx || (flags & PGO_ALLPAGES) != 0)
	  done = FALSE;		/* need to do a wait or I/O! */
	continue;
      }

      /* useful page: busy/lock it and plug it in our result array */
      ptmp->flags |= PG_BUSY;		/* caller must un-busy this page */
      UVM_PAGE_OWN(ptmp, "uvm_km_get1");
      pps[lcv] = ptmp;
      gotpages++;

    }	/* "for" lcv loop */

    /*
     * step 1b: now we've either done everything needed or we need to
     * unlock and do some waiting or I/O.
     */

    UVMHIST_LOG(maphist, "<- done (done=%d)", done, 0,0,0);

    *npagesp = gotpages;
    if (done)
      return(VM_PAGER_OK);		/* bingo! */
    else
      return(VM_PAGER_UNLOCK);		/* EEK!   Need to unlock and I/O */
  }

  /*
   * step 2: get non-resident or busy pages.
   * object is locked.   data structures are unlocked.
   */

  for (lcv = 0, current_offset = offset ;
       lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

    /* skip over pages we've already gotten or don't want */
    /* skip over pages we don't _have_ to get */
    if (pps[lcv] != NULL ||
	(lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
      continue;

    /*
     * we have yet to locate the current page (pps[lcv]).   we first
     * look for a page that is already at the current offset.   if we
     * find a page, we check to see if it is busy or released.  if that
     * is the case, then we sleep on the page until it is no longer busy
     * or released and repeat the lookup.    if the page we found is
     * neither busy nor released, then we busy it (so we own it) and
     * plug it into pps[lcv].   this 'break's the following while loop
     * and indicates we are ready to move on to the next page in the
     * "lcv" loop above.
     *
     * if we exit the while loop with pps[lcv] still set to NULL, then
     * it means that we allocated a new busy/fake/clean page ptmp in the
     * object and we need to do I/O to fill in the data.
     */

    while (pps[lcv] == NULL) {		/* top of "pps" while loop */

      /* look for a current page */
      ptmp = uvm_pagelookup(uobj, current_offset);

      /* nope?   allocate one now (if we can) */
      if (ptmp == NULL) {

	ptmp = uvm_pagealloc(uobj, current_offset, NULL);	/* alloc */

	/* out of RAM? */
	if (ptmp == NULL) {
	  simple_unlock(&uobj->vmobjlock);
	  uvm_wait("kmgetwait1");
	  simple_lock(&uobj->vmobjlock);
	  continue;		/* goto top of pps while loop */
	}

	/*
	 * got new page ready for I/O.  break pps while loop.  pps[lcv] is
	 * still NULL.
	 */
	break;
      }

      /* page is there, see if we need to wait on it */
      if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
	ptmp->flags |= PG_WANTED;
	UVM_UNLOCK_AND_WAIT(ptmp,&uobj->vmobjlock,0,"uvn_get",0);
	simple_lock(&uobj->vmobjlock);
	continue;		/* goto top of pps while loop */
      }

      /*
       * if we get here then the page has become resident and unbusy
       * between steps 1 and 2.  we busy it now (so we own it) and set
       * pps[lcv] (so that we exit the while loop).
       */
      ptmp->flags |= PG_BUSY;	/* we own it, caller must un-busy */
      UVM_PAGE_OWN(ptmp, "uvm_km_get2");
      pps[lcv] = ptmp;
    }

    /*
     * if we own a valid page at the correct offset, pps[lcv] will
     * point to it.   nothing more to do except go to the next page.
     */

    if (pps[lcv])
      continue;			/* next lcv */

    /*
     * we have a "fake/busy/clean" page that we just allocated.
     * do the needed "i/o" (in this case that means zero it).
     */

    uvm_pagezero(ptmp);
    ptmp->flags &= ~(PG_FAKE);
    ptmp->wire_count = 1;		/* XXX: prevents pageout attempts */
    pps[lcv] = ptmp;

  }	/* lcv loop */

  /*
   * finally, unlock object and return.
   */

  simple_unlock(&uobj->vmobjlock);
  UVMHIST_LOG(maphist, "<- done (OK)",0,0,0,0);
  return(VM_PAGER_OK);
}
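
/*
 * Illustrative sketch (NOT part of the original file, compiled out with
 * #if 0): the two-pass protocol a fault handler uses against this pager,
 * as described by the step 1/step 2 comments above.  The first call is
 * made with PGO_LOCKED while the fault data structures are held; a
 * VM_PAGER_UNLOCK result means the caller must drop those locks and call
 * again to do the waiting/zeroing.  "uobj" and "off" are hypothetical,
 * and the caller is assumed to hold uobj->vmobjlock at each call.
 */
#if 0
static void
km_get_example(uobj, off)
	struct uvm_object *uobj;
	vm_offset_t off;
{
	struct vm_page *pps[1];
	int npages, rv;

	npages = 1;
	pps[0] = NULL;

	/* pass 1: data structures locked; only resident pages come back */
	rv = uvm_km_get(uobj, off, pps, &npages, 0,
	    VM_PROT_READ, UVM_ADV_RANDOM, PGO_LOCKED);

	if (rv == VM_PAGER_UNLOCK) {
		/* pass 2: fault locks dropped; this call may sleep */
		npages = 1;
		pps[0] = NULL;
		rv = uvm_km_get(uobj, off, pps, &npages, 0,
		    VM_PROT_READ, UVM_ADV_RANDOM, 0);
	}
	/* on VM_PAGER_OK, pps[0] is busy and owned by the caller */
}
#endif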

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
	vm_offset_t start, end;
{
  vm_offset_t base = VM_MIN_KERNEL_ADDRESS;

  /*
   * first, init kernel memory objects.
   */

  /* kernel_object: for pageable anonymous kernel memory */
  uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
				 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

  /* kmem_object: for malloc'd memory (always wired) */
  simple_lock_init(&kmem_object_store.vmobjlock);
  kmem_object_store.pgops = &km_pager;
  TAILQ_INIT(&kmem_object_store.memq);
  kmem_object_store.uo_npages = 0;
  kmem_object_store.uo_refs = UVM_OBJ_KERN;
					/* we are special.  we never die */
  uvmexp.kmem_object = &kmem_object_store;

  /* mb_object: for mbuf memory (always wired) */
  simple_lock_init(&mb_object_store.vmobjlock);
  mb_object_store.pgops = &km_pager;
  TAILQ_INIT(&mb_object_store.memq);
  mb_object_store.uo_npages = 0;
  mb_object_store.uo_refs = UVM_OBJ_KERN;
					/* we are special.  we never die */
  uvmexp.mb_object = &mb_object_store;

  /*
   * init the map and reserve kernel space before installing.
   */

  uvm_map_setup(&kernel_map_store, base, end, FALSE);
  kernel_map_store.pmap = pmap_kernel();
  if (uvm_map(&kernel_map_store, &base, start - base, NULL, UVM_UNKNOWN_OFFSET,
	      UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
			  UVM_ADV_RANDOM,UVM_FLAG_FIXED)) != KERN_SUCCESS)
    panic("uvm_km_init: could not reserve space for kernel");

  /*
   * install!
   */

  kernel_map = &kernel_map_store;
}
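
/*
 * Illustrative sketch (NOT part of the original file, compiled out with
 * #if 0): how the boot path might call uvm_km_init() once the page
 * subsystem has reported the managed kernel VA range.  "kvm_start" and
 * "kvm_end" are hypothetical locals; the real call site lives in
 * uvm_init().
 */
#if 0
static void
km_init_example()
{
	vm_offset_t kvm_start, kvm_end;

	/* uvm_page_init() hands back the kernel VA range it manages */
	uvm_page_init(&kvm_start, &kvm_end);

	/* [VM_MIN_KERNEL_ADDRESS, kvm_start) is treated as already used */
	uvm_km_init(kvm_start, kvm_end);

	/* from here on, kernel_map is valid and can host submaps */
}
#endif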

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *      by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */

struct vm_map *
uvm_km_suballoc(map, min, max, size, pageable, fixed, submap)
	struct vm_map *map;
	vm_offset_t *min, *max;		/* OUT, OUT */
	vm_size_t size;
	boolean_t pageable;
	boolean_t fixed;
	struct vm_map *submap;
{
  int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

  size = round_page(size);	/* round up to pagesize */

  /*
   * first allocate a blank spot in the parent map
   */

  if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET,
	      UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
			  UVM_ADV_RANDOM, mapflags)) != KERN_SUCCESS) {
    panic("uvm_km_suballoc: unable to allocate space in parent map");
  }

  /*
   * set VM bounds (min is filled in by uvm_map)
   */

  *max = *min + size;

  /*
   * add references to pmap and create or init the submap
   */

  pmap_reference(vm_map_pmap(map));
  if (submap == NULL) {
    submap = uvm_map_create(vm_map_pmap(map), *min, *max, pageable);
    if (submap == NULL)
      panic("uvm_km_suballoc: unable to create submap");
  } else {
      uvm_map_setup(submap, *min, *max, pageable);
      submap->pmap = vm_map_pmap(map);
  }

  /*
   * now let uvm_map_submap plug it in...
   */

  if (uvm_map_submap(map, *min, *max, submap) != KERN_SUCCESS)
    panic("uvm_km_suballoc: submap allocation failed");

  return(submap);
}
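
/*
 * Illustrative sketch (NOT part of the original file, compiled out with
 * #if 0): carving a wired submap for the malloc arena out of kernel_map,
 * in the style of the kmem_map setup done at boot.  "kmem_map", "kmb",
 * "kml" and KMEM_SIZE are hypothetical names for this sketch.
 */
#if 0
static struct vm_map kmem_map_store;
struct vm_map *kmem_map;

static void
km_suballoc_example()
{
	vm_offset_t kmb, kml;

	/* not fixed: let uvm_map pick the region; bounds come back OUT */
	kmem_map = uvm_km_suballoc(kernel_map, &kmb, &kml,
	    (vm_size_t)KMEM_SIZE, FALSE, FALSE, &kmem_map_store);

	/* all wired malloc() memory is now allocated via kmem_map */
}
#endif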

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 */

#define UKM_HASH_PENALTY 4      /* a guess */

void
uvm_km_pgremove(uobj, start, end)
	struct uvm_object *uobj;
	vm_offset_t start, end;
{
  boolean_t by_list, is_aobj;
  struct vm_page *pp, *ppnext;
  vm_offset_t curoff;
  UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

  simple_lock(&uobj->vmobjlock);		/* lock object */

  /* is uobj an aobj? */
  is_aobj = uobj->pgops == &aobj_pager;

  /* choose cheapest traversal */
  by_list = (uobj->uo_npages <=
	     ((end - start) / PAGE_SIZE) * UKM_HASH_PENALTY);

  if (by_list)
    goto loop_by_list;

  /* by hash */

  for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
    pp = uvm_pagelookup(uobj, curoff);
    if (pp == NULL)
      continue;

    UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,pp->flags & PG_BUSY,0,0);
    /* now do the actual work */
    if (pp->flags & PG_BUSY)
      pp->flags |= PG_RELEASED;	/* owner must check for this when done */
    else {
      pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);

      /*
       * if this kernel object is an aobj, free the swap slot.
       */
      if (is_aobj) {
	int slot = uao_set_swslot(uobj, curoff / PAGE_SIZE, 0);

	if (slot)
	  uvm_swap_free(slot, 1);
      }

      uvm_lock_pageq();
      uvm_pagefree(pp);
      uvm_unlock_pageq();
    }
    /* done */

  }
  simple_unlock(&uobj->vmobjlock);
  return;

loop_by_list:

  for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {

    ppnext = pp->listq.tqe_next;
    if (pp->offset < start || pp->offset >= end) {
      continue;
    }

    UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,pp->flags & PG_BUSY,0,0);
    /* now do the actual work */
    if (pp->flags & PG_BUSY)
      pp->flags |= PG_RELEASED;	/* owner must check for this when done */
    else {
      pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);

      /*
       * if this kernel object is an aobj, free the swap slot.
       */
      if (is_aobj) {
	int slot = uao_set_swslot(uobj, pp->offset / PAGE_SIZE, 0);

	if (slot)
	  uvm_swap_free(slot, 1);
      }

      uvm_lock_pageq();
      uvm_pagefree(pp);
      uvm_unlock_pageq();
    }
    /* done */

  }
  simple_unlock(&uobj->vmobjlock);
  return;
}

/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */

vm_offset_t
uvm_km_kmemalloc(map, obj, size, flags)
	vm_map_t map;
	struct uvm_object *obj;
	vm_size_t size;
	int flags;
{
  vm_offset_t kva, loopva;
  vm_offset_t offset;
  struct vm_page *pg;
  UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

  UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	map, obj, size, flags);
#ifdef DIAGNOSTIC
  /* sanity check */
  if (vm_map_pmap(map) != pmap_kernel())
    panic("uvm_km_kmemalloc: invalid map");
#endif

  /*
   * setup for call
   */

  size = round_page(size);
  kva = vm_map_min(map);	/* hint */

  /*
   * allocate some virtual space
   */

  if (uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	      UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
			  UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
      != KERN_SUCCESS) {
    UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
    return(0);
  }

  /*
   * if all we wanted was VA, return now
   */

  if (flags & UVM_KMF_VALLOC) {
    UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
    return(kva);
  }

  /*
   * recover object offset from virtual address
   */

  offset = kva - vm_map_min(map);
  UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

  /*
   * now allocate and map in the memory... note that we are the only ones
   * who should ever get a handle on this area of VM.
   */

  loopva = kva;
  while (size) {
    simple_lock(&obj->vmobjlock);
    pg = uvm_pagealloc(obj, offset, NULL);
    if (pg) {
      pg->flags &= ~PG_BUSY;	/* new page */
      UVM_PAGE_OWN(pg, NULL);

      pg->wire_count = 1;
      uvmexp.wired++;
    }
    simple_unlock(&obj->vmobjlock);

    /*
     * out of memory?
     */

    if (pg == NULL) {
      if (flags & UVM_KMF_NOWAIT) {
	uvm_unmap(map, kva, kva + size, 0); /* free everything! */
	return(0);
      } else {
	uvm_wait("km_getwait2");		/* sleep here */
	continue;
      }
    }

    /*
     * map it in: note that we call pmap_enter with the map and object
     * unlocked in case we are kmem_map/kmem_object (because if pmap_enter
     * wants to allocate out of kmem_object it will need to lock it itself!)
     */
#if defined(PMAP_NEW)
    pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL);
#else
    pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg), UVM_PROT_ALL, TRUE);
#endif
    loopva += PAGE_SIZE;
    offset += PAGE_SIZE;
    size -= PAGE_SIZE;
  }

  UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
  return(kva);
}
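
/*
 * Illustrative sketch (NOT part of the original file, compiled out with
 * #if 0): how malloc() might grab wired pages through this allocator,
 * along the lines of kern_malloc.c.  "kmem_map" is the hypothetical
 * submap from the sketch above; "npg" and "canwait" are hypothetical.
 */
#if 0
static vm_offset_t
km_kmemalloc_example(npg, canwait)
	int npg;
	boolean_t canwait;
{
	vm_offset_t va;

	/* wired allocation from the kmem submap, backed by kmem_object */
	va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
	    (vm_size_t)ctob(npg), canwait ? 0 : UVM_KMF_NOWAIT);

	/* va == 0 means no VM space (or no pages with NOWAIT) */
	return(va);
}
#endif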

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
  uvm_unmap(map, trunc_page(addr), round_page(addr+size), 1);
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void
uvm_km_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
  vm_map_entry_t dead_entries;

  vm_map_lock(map);
  (void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size), 1,
			 &dead_entries);
  thread_wakeup(map);
  vm_map_unlock(map);

  if (dead_entries != NULL)
    uvm_unmap_detach(dead_entries, 0);
}
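
/*
 * Illustrative sketch (NOT part of the original file, compiled out with
 * #if 0): the free side of a map that others sleep on for space, e.g.
 * an exec-argument area.  "exec_map", "addr" and "len" are hypothetical
 * here; see uvm_km_valloc_wait() below for the sleeping side.
 */
#if 0
static void
km_free_wakeup_example(addr, len)
	vm_offset_t addr;
	vm_size_t len;
{
	/*
	 * unmap, then wake any thread sleeping in uvm_km_valloc_wait()
	 * on this map: that function tsleep()s on the map address that
	 * thread_wakeup() hits here.
	 */
	uvm_km_free_wakeup(exec_map, addr, len);
}
#endif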

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vm_offset_t
uvm_km_alloc1(map, size, zeroit)
	vm_map_t map;
	vm_size_t size;
	boolean_t zeroit;
{
  vm_offset_t kva, loopva, offset;
  struct vm_page *pg;
  UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

  UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);

#ifdef DIAGNOSTIC
  if (vm_map_pmap(map) != pmap_kernel())
    panic("uvm_km_alloc1");
#endif

  size = round_page(size);
  kva = vm_map_min(map);		/* hint */

  /*
   * allocate some virtual space
   */

  if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	      UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
			  UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
    UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
    return(0);
  }

  /*
   * recover object offset from virtual address
   */

  offset = kva - vm_map_min(map);
  UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

  /*
   * now allocate the memory.  we must be careful about released pages.
   */

  loopva = kva;
  while (size) {
    simple_lock(&uvm.kernel_object->vmobjlock);
    pg = uvm_pagelookup(uvm.kernel_object, offset);

    /* if we found a page in an unallocated region, it must be released */
    if (pg) {
      if ((pg->flags & PG_RELEASED) == 0)
	panic("uvm_km_alloc1: non-released page");
      pg->flags |= PG_WANTED;
      UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,0,"km_alloc",0);
      continue;   /* retry */
    }

    /* allocate ram */
    pg = uvm_pagealloc(uvm.kernel_object, offset, NULL);
    if (pg) {
      pg->flags &= ~PG_BUSY;	/* new page */
      UVM_PAGE_OWN(pg, NULL);
    }
    simple_unlock(&uvm.kernel_object->vmobjlock);
    if (pg == NULL) {
      uvm_wait("km_alloc1w");	/* wait for memory */
      continue;
    }

    /* map it in */
#if defined(PMAP_NEW)
    pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), UVM_PROT_ALL);
#else
    pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg), UVM_PROT_ALL, TRUE);
#endif
    loopva += PAGE_SIZE;
    offset += PAGE_SIZE;
    size -= PAGE_SIZE;
  }

  /*
   * zero on request (note that "size" is now zero due to the above loop
   * so we need to subtract kva from loopva to reconstruct the size).
   */

  if (zeroit)
    bzero((caddr_t)kva, loopva - kva);

  UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
  return(kva);
}
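
/*
 * Illustrative sketch (NOT part of the original file, compiled out with
 * #if 0): a boot-time helper that wants zeroed, wired kernel memory and
 * can afford to sleep.  "nbytes" is hypothetical.
 */
#if 0
static caddr_t
km_alloc1_example(nbytes)
	vm_size_t nbytes;
{
	vm_offset_t kva;

	/* sleeps for pages if needed; TRUE asks for the area to be zeroed */
	kva = uvm_km_alloc1(kernel_map, nbytes, TRUE);
	if (kva == 0)
		panic("km_alloc1_example: no kernel VA");
	return((caddr_t)kva);
}
#endif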

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vm_offset_t
uvm_km_valloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
  vm_offset_t kva;
  UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

  UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
  if (vm_map_pmap(map) != pmap_kernel())
    panic("uvm_km_valloc");
#endif

  size = round_page(size);
  kva = vm_map_min(map);		/* hint */

  /*
   * allocate some virtual space.   will be demand filled by kernel_object.
   */

  if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	      UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
			  UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
    UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
    return(0);
  }

  UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
  return(kva);
}
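
/*
 * Illustrative sketch (NOT part of the original file, compiled out with
 * #if 0): reserving pageable, zero-fill kernel VA whose pages arrive on
 * first touch via uvm.kernel_object.  TABLE_SIZE is hypothetical.
 */
#if 0
static vm_offset_t
km_valloc_example()
{
	vm_offset_t kva;

	kva = uvm_km_valloc(kernel_map, (vm_size_t)TABLE_SIZE);
	if (kva == 0)
		panic("km_valloc_example: no kernel VA");

	/* touching the region now faults in zeroed pages on demand */
	return(kva);
}
#endif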

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vm_offset_t
uvm_km_valloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
  vm_offset_t kva;
  UVMHIST_FUNC("uvm_km_valloc_wait"); UVMHIST_CALLED(maphist);

  UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
  if (vm_map_pmap(map) != pmap_kernel())
    panic("uvm_km_valloc_wait");
#endif

  size = round_page(size);
  if (size > vm_map_max(map) - vm_map_min(map))
    return(0);

  while (1) {
    kva = vm_map_min(map);		/* hint */

    /*
     * allocate some virtual space.   will be demand filled by kernel_object.
     */

    if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
		UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
			    UVM_ADV_RANDOM, 0)) == KERN_SUCCESS){
      UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
      return(kva);
    }

    /*
     * failed.  sleep for a while (on map)
     */

    UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
    tsleep((caddr_t)map, PVM, "vallocwait", 0);
  }
  /*NOTREACHED*/
}
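
/*
 * Illustrative sketch (NOT part of the original file, compiled out with
 * #if 0): the allocating side that pairs with uvm_km_free_wakeup() above,
 * e.g. grabbing space for exec arguments and sleeping until a competitor
 * frees some.  "exec_map" and the use of NCARGS are assumptions here.
 */
#if 0
static vm_offset_t
km_valloc_wait_example()
{
	vm_offset_t kva;

	/*
	 * sleeps in tsleep() on the map until uvm_km_free_wakeup()
	 * issues a thread_wakeup() on it; returns 0 only if the
	 * request can never fit in the map.
	 */
	kva = uvm_km_valloc_wait(exec_map, (vm_size_t)NCARGS);
	return(kva);
}
#endif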
    898