/*	$NetBSD: uvm_device.c,v 1.19 2000/03/26 20:46:59 kleink Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_device.c,v 1.1.2.9 1998/02/06 05:11:47 chs Exp
 */

#include "opt_uvmhist.h"

/*
 * uvm_device.c: the device pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

/*
 * private global data structure
 *
 * we keep a list of active device objects in the system.
 */

LIST_HEAD(udv_list_struct, uvm_device);
static struct udv_list_struct udv_list;
static simple_lock_data_t udv_lock;

/*
 * functions
 */

static void		udv_init __P((void));
static void		udv_reference __P((struct uvm_object *));
static void		udv_detach __P((struct uvm_object *));
static int		udv_fault __P((struct uvm_faultinfo *, vaddr_t,
				       vm_page_t *, int, int, vm_fault_t,
				       vm_prot_t, int));
static boolean_t	udv_flush __P((struct uvm_object *, vaddr_t,
				       vaddr_t, int));
static int		udv_asyncget __P((struct uvm_object *, vaddr_t,
				       int));
static int		udv_put __P((struct uvm_object *, vm_page_t *,
				       int, boolean_t));

/*
 * master pager structure
 */

struct uvm_pagerops uvm_deviceops = {
	udv_init,
	udv_reference,
	udv_detach,
	udv_fault,
	udv_flush,
	NULL,		/* no get function since we have udv_fault */
	udv_asyncget,
	udv_put,
	NULL,		/* no cluster function */
	NULL,		/* no put cluster function */
	NULL,		/* no share protect.   no share maps for us */
	NULL,		/* no AIO-DONE function since no async i/o */
	NULL,		/* no releasepg function since no normal pages */
};
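
/*
 * illustrative sketch (not compiled): roughly how UVM dispatches
 * through a pager ops table like the one above.  a pager that fills
 * in pgo_fault (as we do) fields faults directly; one that leaves it
 * NULL supplies vm_page structures through pgo_get instead.  the
 * variable names here are hypothetical; see uvm_fault() for the real
 * dispatch.
 */
#if 0
	if (uobj->pgops->pgo_fault != NULL) {
		/* device-style pager: enters the mapping itself */
		result = uobj->pgops->pgo_fault(ufi, startva, pages, npages,
		    centeridx, fault_type, access_type, PGO_LOCKED);
	} else {
		/* normal pager: hands back vm_page structures */
		result = uobj->pgops->pgo_get(uobj, offset, pages, &npages,
		    centeridx, access_type, advice, PGO_LOCKED);
	}
#endif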

/*
 * the ops!
 */

/*
 * udv_init
 *
 * init pager private data structures.
 */

void
udv_init()
{

	LIST_INIT(&udv_list);
	simple_lock_init(&udv_lock);
}

/*
 * udv_attach
 *
 * get a VM object that is associated with a device.   allocate a new
 * one if needed.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 */
struct uvm_object *
udv_attach(arg, accessprot, off, size)
	void *arg;
	vm_prot_t accessprot;
	vaddr_t off;			/* used only for access check */
	vsize_t size;			/* used only for access check */
{
	dev_t device = *((dev_t *) arg);
	struct uvm_device *udv, *lcv;
	int (*mapfn) __P((dev_t, int, int));
	UVMHIST_FUNC("udv_attach"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(device=0x%x)", device,0,0,0);

	/*
	 * before we do anything, ensure this device supports mmap
	 */

	mapfn = cdevsw[major(device)].d_mmap;
	if (mapfn == NULL ||
	    mapfn == (int (*) __P((dev_t, int, int))) enodev ||
	    mapfn == (int (*) __P((dev_t, int, int))) nullop)
		return(NULL);

	/*
	 * Check that the specified range of the device allows the
	 * desired protection.
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 * XXX clobbers off and size, but nothing else here needs them.
	 */

	while (size != 0) {
		if ((*mapfn)(device, off, accessprot) == -1)
			return (NULL);
		off += PAGE_SIZE; size -= PAGE_SIZE;
	}

	/*
	 * keep looping until we get it
	 */

	while (1) {

		/*
		 * first, attempt to find it on the main list
		 */

		simple_lock(&udv_lock);
		for (lcv = udv_list.lh_first ; lcv != NULL ;
		    lcv = lcv->u_list.le_next) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * got it on main list.  put a hold on it and unlock udv_lock.
		 */

		if (lcv) {

			/*
			 * if someone else has a hold on it, sleep and start
			 * over again.
			 */

			if (lcv->u_flags & UVM_DEVICE_HOLD) {
				lcv->u_flags |= UVM_DEVICE_WANTED;
				UVM_UNLOCK_AND_WAIT(lcv, &udv_lock, FALSE,
				    "udv_attach",0);
				continue;
			}

			/* we are now holding it */
			lcv->u_flags |= UVM_DEVICE_HOLD;
			simple_unlock(&udv_lock);

			/*
			 * bump reference count, unhold, return.
			 */

			simple_lock(&lcv->u_obj.vmobjlock);
			lcv->u_obj.uo_refs++;
			simple_unlock(&lcv->u_obj.vmobjlock);

			simple_lock(&udv_lock);
			if (lcv->u_flags & UVM_DEVICE_WANTED)
				wakeup(lcv);
			lcv->u_flags &= ~(UVM_DEVICE_WANTED|UVM_DEVICE_HOLD);
			simple_unlock(&udv_lock);
			return(&lcv->u_obj);
		}

		/*
		 * did not find it on main list.   need to malloc a new one.
		 */

		simple_unlock(&udv_lock);
		/* NOTE: we could sleep in the following malloc() */
		MALLOC(udv, struct uvm_device *, sizeof(*udv), M_TEMP,
		    M_WAITOK);
		simple_lock(&udv_lock);

		/*
		 * now we have to double check to make sure no one added it
		 * to the list while we were sleeping...
		 */

		for (lcv = udv_list.lh_first ; lcv != NULL ;
		    lcv = lcv->u_list.le_next) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * did we lose a race to someone else?
		 * free our memory and retry.
		 */

		if (lcv) {
			simple_unlock(&udv_lock);
			FREE(udv, M_TEMP);
			continue;
		}

		/*
		 * we have it!   init the data structures, add to list
		 * and return.
		 */

		simple_lock_init(&udv->u_obj.vmobjlock);
		udv->u_obj.pgops = &uvm_deviceops;
		TAILQ_INIT(&udv->u_obj.memq);	/* not used, but be safe */
		udv->u_obj.uo_npages = 0;
		udv->u_obj.uo_refs = 1;
		udv->u_flags = 0;
		udv->u_device = device;
		LIST_INSERT_HEAD(&udv_list, udv, u_list);
		simple_unlock(&udv_lock);

		return(&udv->u_obj);

	}  /* while(1) loop */

	/*NOTREACHED*/
}
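
/*
 * illustrative sketch (not compiled): how a caller in the mmap path
 * (e.g. the character-device case of uvm_mmap()) might obtain the
 * backing object from udv_attach().  error handling and the "vp",
 * "prot", "foff", and "size" names are hypothetical simplifications.
 */
#if 0
	struct uvm_object *uobj;

	/* note: udv_attach takes a pointer to the dev_t, not the dev_t */
	uobj = udv_attach((void *) &vp->v_rdev, prot, foff, size);
	if (uobj == NULL)
		return (EINVAL);	/* device does not support mmap */
	/*
	 * map uobj into the target map; the reference taken here is
	 * dropped later through the pager's pgo_detach op (udv_detach).
	 */
#endif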

/*
 * udv_reference
 *
 * add a reference to a VM object.   Note that the reference count must
 * already be one (the passed-in reference) so there is no chance of the
 * udv being released or locked out here.
 *
 * => caller must call with object unlocked.
 */

static void
udv_reference(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("udv_reference"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);
	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * udv_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
udv_detach(uobj)
	struct uvm_object *uobj;
{
	struct uvm_device *udv = (struct uvm_device *) uobj;
	UVMHIST_FUNC("udv_detach"); UVMHIST_CALLED(maphist);

	/*
	 * loop until done
	 */

	while (1) {
		simple_lock(&uobj->vmobjlock);

		if (uobj->uo_refs > 1) {
			uobj->uo_refs--;			/* drop ref! */
			simple_unlock(&uobj->vmobjlock);
			UVMHIST_LOG(maphist," <- done, uobj=0x%x, ref=%d",
			    uobj,uobj->uo_refs,0,0);
			return;
		}

#ifdef DIAGNOSTIC
		if (uobj->uo_npages || uobj->memq.tqh_first)
			panic("udv_detach: pages in a device object?");
#endif

		/*
		 * now lock udv_lock
		 */
		simple_lock(&udv_lock);

		/*
		 * is it being held?   if so, wait until others are done.
		 */
		if (udv->u_flags & UVM_DEVICE_HOLD) {

			/*
			 * want it
			 */
			udv->u_flags |= UVM_DEVICE_WANTED;
			simple_unlock(&uobj->vmobjlock);
			UVM_UNLOCK_AND_WAIT(udv, &udv_lock, FALSE,
			    "udv_detach",0);
			continue;
		}

		/*
		 * got it!   nuke it now.
		 */

		LIST_REMOVE(udv, u_list);
		if (udv->u_flags & UVM_DEVICE_WANTED)
			wakeup(udv);
		simple_unlock(&udv_lock);	/* don't leak udv_lock */
		FREE(udv, M_TEMP);
		break;	/* DONE! */

	}	/* while (1) loop */

	UVMHIST_LOG(maphist," <- done, freed uobj=0x%x", uobj,0,0,0);
	return;
}
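
/*
 * illustrative sketch (not compiled): the UVM_DEVICE_HOLD/WANTED
 * handshake used by udv_attach() and udv_detach() above is the
 * classic sleep/wakeup pattern, condensed; "wmesg" is a placeholder
 * wait message.
 */
#if 0
	simple_lock(&udv_lock);
	while (udv->u_flags & UVM_DEVICE_HOLD) {	/* busy? */
		udv->u_flags |= UVM_DEVICE_WANTED;	/* ask for a wakeup */
		UVM_UNLOCK_AND_WAIT(udv, &udv_lock, FALSE, "wmesg", 0);
		simple_lock(&udv_lock);	/* the wait dropped udv_lock */
	}
	udv->u_flags |= UVM_DEVICE_HOLD;	/* we hold it now */
	simple_unlock(&udv_lock);

	/* ... work on the object without udv_lock held ... */

	simple_lock(&udv_lock);
	if (udv->u_flags & UVM_DEVICE_WANTED)
		wakeup(udv);		/* release anyone who slept above */
	udv->u_flags &= ~(UVM_DEVICE_WANTED|UVM_DEVICE_HOLD);
	simple_unlock(&udv_lock);
#endif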


/*
 * udv_flush
 *
 * flush pages out of a uvm object.   a no-op for devices.
 */

static boolean_t
udv_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	vaddr_t start, stop;
	int flags;
{

	return(TRUE);
}

/*
 * udv_fault: non-standard fault routine for device "pages"
 *
 * => rather than having a "get" function, we have a fault routine:
 *	since we don't return vm_pages, we need full control over the
 *	pmap_enter() used to map the device page in
 * => all the usual fault data structures are locked by the caller
 *	(i.e. maps(read), amap (if any), uobj)
 * => on return, we unlock all fault data structures
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 *    XXX: currently PGO_LOCKED is always required ... consider removing
 *	it as a flag
 * => NOTE: vaddr is the VA of pps[0] in ufi->entry, _NOT_ pps[centeridx]
 */

static int
udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t vaddr;
	vm_page_t *pps;
	int npages, centeridx, flags;
	vm_fault_t fault_type;
	vm_prot_t access_type;
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct uvm_device *udv = (struct uvm_device *)uobj;
	vaddr_t curr_offset, curr_va;
	paddr_t paddr;
	int lcv, retval, mdpgno;
	dev_t device;
	int (*mapfn) __P((dev_t, int, int));
	vm_prot_t mapprot;
	UVMHIST_FUNC("udv_fault"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"  flags=%d", flags,0,0,0);

	/*
	 * XXX: !PGO_LOCKED calls are currently not allowed (or used)
	 */

	if ((flags & PGO_LOCKED) == 0)
		panic("udv_fault: !PGO_LOCKED fault");

	/*
	 * we do not allow device mappings to be mapped copy-on-write
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=0x%x)",
		    entry->etype, 0,0,0);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
		return(VM_PAGER_ERROR);
	}

	/*
	 * get device map function.
	 */

	device = udv->u_device;
	mapfn = cdevsw[major(device)].d_mmap;

	/*
	 * now we must determine the offset in udv to use and the VA to
	 * use for pmap_enter.  note that we always use orig_map's pmap
	 * for pmap_enter (even if we have a submap).   since virtual
	 * addresses in a submap must match the main map, this is ok.
	 */

	/* udv offset = (offset from start of entry) + entry's offset */
	curr_offset = (vaddr - entry->start) + entry->offset;
	/* pmap va = vaddr (virtual address of pps[0]) */
	curr_va = vaddr;
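
	/*
	 * worked example (hypothetical numbers): if the entry maps
	 * [0x20000, 0x28000) with entry->offset = 0x1000, and the
	 * faulting range starts at vaddr 0x23000, then
	 * curr_offset = (0x23000 - 0x20000) + 0x1000 = 0x4000 and
	 * curr_va = 0x23000; each loop iteration below advances both
	 * by PAGE_SIZE.
	 */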

	/*
	 * loop over the page range entering in as needed
	 */

	retval = VM_PAGER_OK;
	for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
	    curr_va += PAGE_SIZE) {
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;

		if (pps[lcv] == PGO_DONTCARE)
			continue;

		mdpgno = (*mapfn)(device, (int)curr_offset, access_type);
		if (mdpgno == -1) {
			retval = VM_PAGER_ERROR;
			break;
		}
		paddr = pmap_phys_address(mdpgno);
		mapprot = ufi->entry->protection;
		UVMHIST_LOG(maphist,
		    "  MAPPING: device: pm=0x%x, va=0x%x, pa=0x%x, at=%d",
		    ufi->orig_map->pmap, curr_va, (int)paddr, mapprot);
		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr,
		    mapprot, PMAP_CANFAIL | mapprot) != KERN_SUCCESS) {
			/*
			 * pmap_enter() didn't have the resources to
			 * enter this mapping.  Unlock everything,
			 * wait for the pagedaemon to free up some
			 * pages, and then tell uvm_fault() to start
			 * the fault again.
			 *
			 * XXX Needs some rethinking for the PGO_ALLPAGES
			 * XXX case.
			 */
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
			    uobj, NULL);
			uvm_wait("udv_fault");
			return (VM_PAGER_REFAULT);
		}
	}

	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
	return (retval);
}
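
/*
 * illustrative sketch (not compiled): the d_mmap entry point that
 * udv_fault() calls above has the contract "return the machine-
 * dependent page number for this byte offset, or -1 if it cannot be
 * mapped with the requested protection."  a trivial driver might
 * look like this; "foommap", "foo_softc", "sc_physbase", and
 * "sc_size" are hypothetical names.
 */
#if 0
int
foommap(dev, off, prot)
	dev_t dev;
	int off, prot;
{
	struct foo_softc *sc = &foo_softc[minor(dev)];

	if (off < 0 || off >= sc->sc_size)
		return (-1);		/* out of range: refuse the mapping */

	/*
	 * btop() turns the byte address into the page number that
	 * pmap_phys_address() in udv_fault() converts back to a paddr_t.
	 */
	return (btop(sc->sc_physbase + off));
}
#endif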

/*
 * udv_asyncget: start async I/O to bring pages into ram
 *
 * => caller must lock object (???XXX: see if this is best)
 * => a no-op for devices
 */

static int
udv_asyncget(uobj, offset, npages)
	struct uvm_object *uobj;
	vaddr_t offset;
	int npages;
{

	return(KERN_SUCCESS);
}

/*
 * udv_put: flush page data to backing store.
 *
 * => this function should never be called (since we never have any
 *	page structures to "put")
 */

static int
udv_put(uobj, pps, npages, flags)
	struct uvm_object *uobj;
	struct vm_page **pps;
	int npages, flags;
{

	panic("udv_put: trying to page out to a device!");
	/* NOTREACHED */
}