/*	$NetBSD: uvm_fault_i.h,v 1.9 1999/06/04 23:38:41 thorpej Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_fault_i.h,v 1.1.6.1 1997/12/08 16:07:12 chuck Exp
 */

#ifndef _UVM_UVM_FAULT_I_H_
#define _UVM_UVM_FAULT_I_H_

/*
 * uvm_fault_i.h: fault inline functions
 */

/*
 * uvmfault_unlockmaps: unlock the maps
 */

static __inline void
uvmfault_unlockmaps(ufi, write_locked)
	struct uvm_faultinfo *ufi;
	boolean_t write_locked;
{

	if (write_locked) {
		vm_map_unlock(ufi->map);
	} else {
		vm_map_unlock_read(ufi->map);
	}
}

/*
 * uvmfault_unlockall: unlock everything passed in.
 *
 * => maps must be read-locked (not write-locked).
 */

static __inline void
uvmfault_unlockall(ufi, amap, uobj, anon)
	struct uvm_faultinfo *ufi;
	struct vm_amap *amap;
	struct uvm_object *uobj;
	struct vm_anon *anon;
{

	if (anon)
		simple_unlock(&anon->an_lock);
	if (uobj)
		simple_unlock(&uobj->vmobjlock);
	if (amap)
		amap_unlock(amap);
	uvmfault_unlockmaps(ufi, FALSE);
}
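
/*
 * Usage sketch for uvmfault_unlockall() (a hypothetical caller, not part
 * of this interface): NULL arguments are simply skipped, so a caller that
 * only holds the amap lock on top of the read-locked maps can do:
 *
 *	uvmfault_unlockall(&ufi, amap, NULL, NULL);
 *
 * which drops the amap lock and then read-unlocks the maps through
 * uvmfault_unlockmaps().
 */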

/*
 * uvmfault_check_intrsafe: check for a virtual address managed by
 * an interrupt-safe map.
 *
 * => caller must provide a uvm_faultinfo structure with the IN
 *	params properly filled in
 * => if we find an intrsafe VA, we fill in ufi->map, and return TRUE
 */

static __inline boolean_t
uvmfault_check_intrsafe(ufi)
	struct uvm_faultinfo *ufi;
{
	struct vm_map_intrsafe *vmi;
	int s;

	s = vmi_list_lock();
	for (vmi = LIST_FIRST(&vmi_list); vmi != NULL;
	     vmi = LIST_NEXT(vmi, vmi_list)) {
		if (ufi->orig_rvaddr >= vm_map_min(&vmi->vmi_map) &&
		    ufi->orig_rvaddr < vm_map_max(&vmi->vmi_map))
			break;
	}
	vmi_list_unlock(s);

	if (vmi != NULL) {
		ufi->map = &vmi->vmi_map;
		return (TRUE);
	}

	return (FALSE);
}
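
/*
 * Usage sketch for uvmfault_check_intrsafe() (hypothetical caller; the
 * "map" and "vaddr" names are placeholders): a fault handler can use it
 * to short-circuit the normal lookup for addresses that live in an
 * interrupt-safe map:
 *
 *	ufi.orig_map = map;
 *	ufi.orig_rvaddr = trunc_page(vaddr);
 *	ufi.orig_size = PAGE_SIZE;
 *	if (uvmfault_check_intrsafe(&ufi)) {
 *		... ufi.map now points at the interrupt-safe map ...
 *	}
 */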

/*
 * uvmfault_lookup: lookup a virtual address in a map
 *
 * => caller must provide a uvm_faultinfo structure with the IN
 *	params properly filled in
 * => we will lookup the map entry (handling submaps) as we go
 * => if the lookup is a success we will return with the maps locked
 * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
 *	get a read lock.
 * => note that submaps can only appear in the kernel and they are
 *	required to use the same virtual addresses as the map they
 *	are referenced by (thus address translation between the main
 *	map and the submap is unnecessary).
 */

static __inline boolean_t
uvmfault_lookup(ufi, write_lock)
	struct uvm_faultinfo *ufi;
	boolean_t write_lock;
{
	vm_map_t tmpmap;

	/*
	 * init ufi values for lookup.
	 */

	ufi->map = ufi->orig_map;
	ufi->size = ufi->orig_size;

	/*
	 * keep going down levels until we are done.   note that there can
	 * only be two levels so we won't loop very long.
	 */

	while (1) {

		/*
		 * lock map
		 */
		if (write_lock) {
			vm_map_lock(ufi->map);
		} else {
			vm_map_lock_read(ufi->map);
		}

		/*
		 * lookup
		 */
		if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
								&ufi->entry)) {
			uvmfault_unlockmaps(ufi, write_lock);
			return(FALSE);
		}

		/*
		 * reduce size if necessary
		 */
		if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
			ufi->size = ufi->entry->end - ufi->orig_rvaddr;

		/*
		 * submap?    replace map with the submap and lookup again.
		 * note: VAs in submaps must match VAs in main map.
		 */
		if (UVM_ET_ISSUBMAP(ufi->entry)) {
			tmpmap = ufi->entry->object.sub_map;
			if (write_lock) {
				vm_map_unlock(ufi->map);
			} else {
				vm_map_unlock_read(ufi->map);
			}
			ufi->map = tmpmap;
			continue;
		}

		/*
		 * got it!
		 */

		ufi->mapv = ufi->map->timestamp;
		return(TRUE);

	}	/* while loop */

	/*NOTREACHED*/
}
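
/*
 * Usage sketch for uvmfault_lookup() (hypothetical caller; "map" and
 * "vaddr" are placeholders): fill in the IN params first; on success
 * ufi.map and ufi.entry are valid and the maps are locked:
 *
 *	ufi.orig_map = map;
 *	ufi.orig_rvaddr = trunc_page(vaddr);
 *	ufi.orig_size = PAGE_SIZE;
 *	if (uvmfault_lookup(&ufi, FALSE) == FALSE) {
 *		... no entry maps vaddr; fail the fault ...
 *	}
 *	... use ufi.entry, then drop the locks with
 *	    uvmfault_unlockmaps(&ufi, FALSE) or uvmfault_unlockall() ...
 */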

/*
 * uvmfault_relock: attempt to relock the same version of the map
 *
 * => fault data structures should be unlocked before calling.
 * => on success (TRUE) the maps will be locked after the call.
 */

static __inline boolean_t
uvmfault_relock(ufi)
	struct uvm_faultinfo *ufi;
{

	uvmexp.fltrelck++;
	/*
	 * relock map.   fail if version mismatch (in which case nothing
	 * gets locked).
	 */

	vm_map_lock_read(ufi->map);
	if (ufi->mapv != ufi->map->timestamp) {
		vm_map_unlock_read(ufi->map);
		return(FALSE);
	}

	uvmexp.fltrelckok++;
	return(TRUE);		/* got it! */
}
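
/*
 * Usage sketch for uvmfault_relock() (hypothetical caller): the usual
 * pattern is to drop every fault lock with uvmfault_unlockall(), sleep
 * (e.g. waiting for I/O), and then try to pick the maps back up:
 *
 *	uvmfault_unlockall(&ufi, amap, uobj, anon);
 *	... wait for the page, allocate memory, etc ...
 *	if (uvmfault_relock(&ufi) == FALSE) {
 *		... the map version changed while we slept;
 *		    restart the fault from the top ...
 *	}
 *
 * On success the maps are read-locked again at the same version
 * (timestamp) they had when they were dropped.
 */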

#endif /* _UVM_UVM_FAULT_I_H_ */