/*	$Id: uvm_device.c,v 1.1 1998/02/05 06:25:10 mrg Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *	   >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_device.c: the device pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

UVMHIST_DECL(maphist);

/*
 * private global data structure
 *
 * we keep a list of active device objects in the system.
 */

LIST_HEAD(udv_list_struct, uvm_device);
static struct udv_list_struct udv_list;
#if NCPU > 1
static simple_lock_data_t udv_lock;
#endif
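/*
 * note: udv_lock covers both udv_list and the UVM_DEVICE_HOLD/WANTED
 * handshake kept in each device object's u_flags (see udv_attach and
 * udv_detach below).
 */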

/*
 * functions
 */

static void		udv_init __P((void));
struct uvm_object 	*udv_attach __P((void *, vm_prot_t));
static void             udv_reference __P((struct uvm_object *));
static void             udv_detach __P((struct uvm_object *));
static int		udv_fault __P((struct uvm_faultinfo *, vm_offset_t,
				       vm_page_t *, int, int, vm_fault_t,
				       vm_prot_t, int));
static boolean_t        udv_flush __P((struct uvm_object *, vm_offset_t,
					 vm_offset_t, int));
static int		udv_asyncget __P((struct uvm_object *, vm_offset_t,
					    int));
static int		udv_put __P((struct uvm_object *, vm_page_t *,
					int, boolean_t));

/*
 * master pager structure
 */

struct uvm_pagerops uvm_deviceops = {
  udv_init,
  udv_attach,
  udv_reference,
  udv_detach,
  udv_fault,
  udv_flush,
  NULL,			/* no get function since we have udv_fault */
  udv_asyncget,
  udv_put,
  NULL,			/* no cluster function */
  NULL,			/* no put cluster function */
  NULL,			/* no share protect.   no share maps for us */
  NULL,			/* no AIO-DONE function since no async i/o */
  NULL,			/* no releasepg function since no normal pages */
};
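/*
 * the generic fault code dispatches device faults through the pgo_fault
 * entry (udv_fault) when one is present, which is why no pgo_get
 * routine is needed in the table above.
 */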

/*
 * the ops!
 */

/*
 * udv_init
 *
 * init pager private data structures.
 */

static void udv_init()
{
  LIST_INIT(&udv_list);
  simple_lock_init(&udv_lock);
}

/*
 * udv_attach
 *
 * get a VM object that is associated with a device.   allocate a new
 * one if needed.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 */

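/*
 * rough usage sketch (illustrative only -- the caller shown here is the
 * mmap setup path, and "dev", "prot", "map", "va", "size" and "foff"
 * are hypothetical locals of that caller):
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = udv_attach((void *) &dev, prot);
 *	if (uobj == NULL)
 *		return (EINVAL);
 *	error = uvm_map(map, &va, size, uobj, foff, ...);
 *
 * i.e. the caller hands us a pointer to the dev_t and the access
 * protection it wants; we return NULL if the device has no usable
 * d_mmap entry.
 */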
struct uvm_object *udv_attach(arg, accessprot)
	void *arg;
	vm_prot_t accessprot;
{
  dev_t device = *((dev_t *) arg);
  struct uvm_device *udv, *lcv;
  int (*mapfn) __P((dev_t, int, int));
  UVMHIST_FUNC("udv_attach"); UVMHIST_CALLED(maphist);

  UVMHIST_LOG(maphist, "(device=0x%x)", device,0,0,0);

  /*
   * before we do anything, ensure this device supports mmap
   */

  mapfn = cdevsw[major(device)].d_mmap;
  if (mapfn == NULL ||
      mapfn == (int (*) __P((dev_t, int, int))) enodev ||
      mapfn == (int (*) __P((dev_t, int, int))) nullop)
    return(NULL);

  /*
   * keep looping until we get it
   */

  while (1) {

    /*
     * first, attempt to find it on the main list
     */

    simple_lock(&udv_lock);
    for (lcv = udv_list.lh_first ; lcv != NULL ; lcv = lcv->u_list.le_next) {
      if (device == lcv->u_device)
	break;
    }

    /*
     * got it on main list.  put a hold on it and unlock udv_lock.
     */

    if (lcv) {

      /*
       * if someone else has a hold on it, sleep and start over again.
       */

      if (lcv->u_flags & UVM_DEVICE_HOLD) {
	lcv->u_flags |= UVM_DEVICE_WANTED;
	UVM_UNLOCK_AND_WAIT(lcv, &udv_lock, FALSE, "udv_attach",0);
	continue;
      }

      lcv->u_flags |= UVM_DEVICE_HOLD;	/* we are now holding it */
      simple_unlock(&udv_lock);

      /*
       * bump reference count, unhold, return.
       */

      simple_lock(&lcv->u_obj.vmobjlock);
      lcv->u_obj.uo_refs++;
      simple_unlock(&lcv->u_obj.vmobjlock);

      simple_lock(&udv_lock);
      if (lcv->u_flags & UVM_DEVICE_WANTED)
	wakeup(lcv);
      lcv->u_flags &= ~(UVM_DEVICE_WANTED|UVM_DEVICE_HOLD);
      simple_unlock(&udv_lock);
      return(&lcv->u_obj);
    }

    /*
     * did not find it on main list.   need to malloc a new one.
     */

    simple_unlock(&udv_lock);
    /* NOTE: we could sleep in the following malloc() */
    MALLOC(udv, struct uvm_device *, sizeof(*udv), M_TEMP, M_WAITOK);
    simple_lock(&udv_lock);

    /*
     * now we have to double check to make sure no one added it to the
     * list while we were sleeping...
     */

    for (lcv = udv_list.lh_first ; lcv != NULL ; lcv = lcv->u_list.le_next) {
      if (device == lcv->u_device)
	break;
    }

    /*
     * did we lose a race to someone else?   free our memory and retry.
     */

    if (lcv) {
      simple_unlock(&udv_lock);
      FREE(udv, M_TEMP);
      continue;
    }

    /*
     * we have it!   init the data structures, add to list and return.
     */

    simple_lock_init(&udv->u_obj.vmobjlock);
    udv->u_obj.pgops = &uvm_deviceops;
    TAILQ_INIT(&udv->u_obj.memq);	/* not used, but be safe */
    udv->u_obj.uo_npages = 0;
    udv->u_obj.uo_refs = 1;
    udv->u_flags = 0;
    udv->u_device = device;
    LIST_INSERT_HEAD(&udv_list, udv, u_list);
    simple_unlock(&udv_lock);

    return(&udv->u_obj);

  }  /* while(1) loop */

  /*NOTREACHED*/
}

/*
 * udv_reference
 *
 * add a reference to a VM object.   Note that the reference count must
 * already be one (the passed in reference) so there is no chance of the
 * udv being released or locked out here.
 *
 * => caller must call with object unlocked.
 */

static void udv_reference(uobj)
	struct uvm_object *uobj;
{
  UVMHIST_FUNC("udv_reference"); UVMHIST_CALLED(maphist);

  simple_lock(&uobj->vmobjlock);
  uobj->uo_refs++;
  UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	uobj, uobj->uo_refs,0,0);
  simple_unlock(&uobj->vmobjlock);
}

/*
 * udv_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void udv_detach(uobj)
	struct uvm_object *uobj;
{
  struct uvm_device *udv = (struct uvm_device *) uobj;
  UVMHIST_FUNC("udv_detach"); UVMHIST_CALLED(maphist);

  /*
   * loop until done
   */

  while (1) {
    simple_lock(&uobj->vmobjlock);

    if (uobj->uo_refs > 1) {
      uobj->uo_refs--;			/* drop ref! */
      simple_unlock(&uobj->vmobjlock);
      UVMHIST_LOG(maphist," <- done, uobj=0x%x, ref=%d",
		  uobj,uobj->uo_refs,0,0);
      return;
    }

#ifdef DIAGNOSTIC
    if (uobj->uo_npages || uobj->memq.tqh_first)
      panic("udv_detach: pages in a device object?");
#endif

    /*
     * now lock udv_lock
     */
    simple_lock(&udv_lock);

    /*
     * is it being held?   if so, wait until others are done.
     */
    if (udv->u_flags & UVM_DEVICE_HOLD) {

      /*
       * want it
       */
      udv->u_flags |= UVM_DEVICE_WANTED;
      simple_unlock(&uobj->vmobjlock);
      UVM_UNLOCK_AND_WAIT(udv, &udv_lock, FALSE, "udv_detach",0);
      continue;
    }

    /*
     * got it!   nuke it now.
     */

    LIST_REMOVE(udv, u_list);
    if (udv->u_flags & UVM_DEVICE_WANTED)
      wakeup(udv);
    simple_unlock(&udv_lock);		/* drop udv_lock before freeing */
    FREE(udv, M_TEMP);
    break;	/* DONE! */

  }	/* while (1) loop */

  UVMHIST_LOG(maphist," <- done, freed uobj=0x%x", uobj,0,0,0);
  return;
}


/*
 * udv_flush
 *
 * flush pages out of a uvm object.   a no-op for devices.
 */

static boolean_t udv_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	vm_offset_t start, stop;
	int flags;
{
  return(TRUE);
}

/*
 * udv_fault: non-standard fault routine for device "pages"
 *
 * => rather than having a "get" function, we have a fault routine
 *	since we don't return vm_pages and therefore need full control
 *	over how the mapping is pmap_enter()'d
 * => all the usual fault data structures are locked by the caller
 *	(i.e. maps(read), amap (if any), uobj)
 * => on return, we unlock all fault data structures
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 *    XXX: currently PGO_LOCKED is always required ... consider removing
 *	it as a flag
 * => NOTE: vaddr is the VA of pps[0] in ufi->entry, _NOT_ pps[centeridx]
 */

static int udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type,
		     access_type, flags)
	struct uvm_faultinfo *ufi;
	vm_offset_t vaddr;
	vm_page_t *pps;
	int npages, centeridx, flags;
	vm_fault_t fault_type;
	vm_prot_t access_type;
{
  struct uvm_object *uobj;
  struct uvm_device *udv;
  vm_offset_t curr_offset, curr_va, paddr;
  struct vm_map_entry *entry = ufi->entry;
  int lcv, retval, mdpgno;
  dev_t device;
  int (*mapfn) __P((dev_t, int, int));
  UVMHIST_FUNC("udv_fault"); UVMHIST_CALLED(maphist);
  UVMHIST_LOG(maphist,"  flags=%d", flags,0,0,0);

  /*
   * XXX: !PGO_LOCKED calls are currently not allowed (or used)
   */

  if ((flags & PGO_LOCKED) == 0)
    panic("udv_fault: !PGO_LOCKED fault");

  /*
   * get object pointers and map function first, so the error paths
   * below can unlock everything (including the object).
   */
  uobj = entry->object.uvm_obj;
  udv = (struct uvm_device *) uobj;
  device = udv->u_device;
  mapfn = cdevsw[major(device)].d_mmap;

  /*
   * we do not allow device mappings to be mapped copy-on-write
   * so we kill any attempt to do so here.
   */

  if (UVM_ET_ISCOPYONWRITE(entry)) {
    UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=0x%x)",
		entry->etype, 0,0,0);
    uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
    return(VM_PAGER_ERROR);
  }

  /*
   * now we must determine the offset in udv to use and the VA to use
   * for pmap_enter.  note that we always pmap_enter() in the
   * ufi->orig_map's pmap, but that our ufi->entry may be from some
   * other map (in the submap/sharemap case).  so we must convert the
   * VA from ufi->map to ufi->orig_map (note that in many cases these
   * maps are the same).   note that ufi->orig_rvaddr and ufi->rvaddr
   * refer to the same physical page.
   */
  /* udv offset = (offset from start of entry) + entry's offset */
  curr_offset = (vaddr - entry->start) + entry->offset;
  /* pmap va = orig_va + (offset of vaddr from translated va) */
  curr_va = ufi->orig_rvaddr + (vaddr - ufi->rvaddr);
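  /*
   * worked example with made-up numbers: if the entry starts at va
   * 0x20000 with entry->offset 0x1000, a fault at va 0x22000 gives
   * curr_offset = (0x22000 - 0x20000) + 0x1000 = 0x3000.
   */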

  /*
   * loop over the page range entering in as needed
   */

  retval = VM_PAGER_OK;
  for (lcv = 0 ; lcv < npages ;
       lcv++, curr_offset += PAGE_SIZE, curr_va += PAGE_SIZE) {

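    /*
     * unless PGO_ALLPAGES was requested, only handle the page that
     * actually faulted (the center page).
     */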
    if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
      continue;

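    /*
     * pages the caller has marked PGO_DONTCARE need not be mapped.
     */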
    if (pps[lcv] == PGO_DONTCARE)
      continue;

    /* the device's d_mmap function returns -1 for an invalid offset */
    mdpgno = (*mapfn)(device, (int)curr_offset, access_type);
    if (mdpgno == -1) {
      retval = VM_PAGER_ERROR;
      break;
    }
    paddr = pmap_phys_address(mdpgno);

    UVMHIST_LOG(maphist, "  MAPPING: device: pm=0x%x, va=0x%x, pa=0x%x, at=%d",
                ufi->orig_map->pmap, curr_va, paddr, access_type);
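    /*
     * enter the mapping directly in the faulting map's pmap; the
     * trailing 0 is the "wired" argument.
     */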
    pmap_enter(ufi->orig_map->pmap, curr_va, paddr, access_type, 0);

  }

  uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
  return(retval);
}

/*
 * udv_asyncget: start async I/O to bring pages into ram
 *
 * => caller must lock object(???XXX: see if this is best)
 * => a no-op for devices
 */

static int udv_asyncget(uobj, offset, npages)
	struct uvm_object *uobj;
	vm_offset_t offset;
	int npages;
{
  return(KERN_SUCCESS);
}

/*
 * udv_put: flush page data to backing store.
 *
 * => this function should never be called (since we never have any
 *	page structures to "put")
 */

static int udv_put(uobj, pps, npages, flags)
	struct uvm_object *uobj;
	struct vm_page **pps;
	int npages, flags;
{
  panic("udv_put: trying to page out to a device!");
}