/*	$NetBSD: uvm_fault_i.h,v 1.9 1999/06/04 23:38:41 thorpej Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_fault_i.h,v 1.1.6.1 1997/12/08 16:07:12 chuck Exp
 */

#ifndef _UVM_UVM_FAULT_I_H_
#define _UVM_UVM_FAULT_I_H_

/*
 * uvm_fault_i.h: fault inline functions
 */

/*
 * uvmfault_unlockmaps: unlock the maps
 */

static __inline void
uvmfault_unlockmaps(ufi, write_locked)
	struct uvm_faultinfo *ufi;
	boolean_t write_locked;
{

	if (write_locked) {
		vm_map_unlock(ufi->map);
	} else {
		vm_map_unlock_read(ufi->map);
	}
}

/*
 * uvmfault_unlockall: unlock everything passed in.
 *
 * => maps must be read-locked (not write-locked).
 */

static __inline void
uvmfault_unlockall(ufi, amap, uobj, anon)
	struct uvm_faultinfo *ufi;
	struct vm_amap *amap;
	struct uvm_object *uobj;
	struct vm_anon *anon;
{

	if (anon)
		simple_unlock(&anon->an_lock);
	if (uobj)
		simple_unlock(&uobj->vmobjlock);
	if (amap)
		amap_unlock(amap);
	uvmfault_unlockmaps(ufi, FALSE);
}

/*
 * uvmfault_check_intrsafe: check for a virtual address managed by
 * an interrupt-safe map.
 *
 * => caller must provide a uvm_faultinfo structure with the IN
 *	params properly filled in
 * => if we find an interrupt-safe VA, we fill in ufi->map and return TRUE
 */

static __inline boolean_t
uvmfault_check_intrsafe(ufi)
	struct uvm_faultinfo *ufi;
{
	struct vm_map_intrsafe *vmi;
	int s;

	s = vmi_list_lock();
	for (vmi = LIST_FIRST(&vmi_list); vmi != NULL;
	     vmi = LIST_NEXT(vmi, vmi_list)) {
		if (ufi->orig_rvaddr >= vm_map_min(&vmi->vmi_map) &&
		    ufi->orig_rvaddr < vm_map_max(&vmi->vmi_map))
			break;
	}
	vmi_list_unlock(s);

	if (vmi != NULL) {
		ufi->map = &vmi->vmi_map;
		return (TRUE);
	}

	return (FALSE);
}
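
/*
 * Illustrative sketch (not part of this file): a fault handler would
 * typically run this check before uvmfault_lookup(), since faults on
 * interrupt-safe maps are not handled by the normal lookup path.  Only
 * uvmfault_check_intrsafe() and the ufi structure come from this file;
 * the surrounding logic is an assumed example of a caller.
 *
 *	if (uvmfault_check_intrsafe(&ufi)) {
 *		// ufi.map now points at the interrupt-safe map;
 *		// handle (or reject) the fault specially here.
 *	}
 */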

/*
 * uvmfault_lookup: look up a virtual address in a map
 *
 * => caller must provide a uvm_faultinfo structure with the IN
 *	params properly filled in
 * => we will look up the map entry (handling submaps) as we go
 * => if the lookup is a success, we will return with the maps locked
 * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
 *	get a read lock.
 * => note that submaps can only appear in the kernel and they are
 *	required to use the same virtual addresses as the map they
 *	are referenced by (thus address translation between the main
 *	map and the submap is unnecessary).
 */

static __inline boolean_t
uvmfault_lookup(ufi, write_lock)
	struct uvm_faultinfo *ufi;
	boolean_t write_lock;
{
	vm_map_t tmpmap;

	/*
	 * init ufi values for lookup.
	 */

	ufi->map = ufi->orig_map;
	ufi->size = ufi->orig_size;

	/*
	 * keep going down levels until we are done.   note that there can
	 * only be two levels so we won't loop very long.
	 */

	while (1) {

		/*
		 * lock map
		 */
		if (write_lock) {
			vm_map_lock(ufi->map);
		} else {
			vm_map_lock_read(ufi->map);
		}

		/*
		 * lookup
		 */
		if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
								&ufi->entry)) {
			uvmfault_unlockmaps(ufi, write_lock);
			return(FALSE);
		}

		/*
		 * reduce size if necessary
		 */
		if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
			ufi->size = ufi->entry->end - ufi->orig_rvaddr;

		/*
		 * submap?    replace map with the submap and lookup again.
		 * note: VAs in submaps must match VAs in main map.
		 */
		if (UVM_ET_ISSUBMAP(ufi->entry)) {
			tmpmap = ufi->entry->object.sub_map;
			if (write_lock) {
				vm_map_unlock(ufi->map);
			} else {
				vm_map_unlock_read(ufi->map);
			}
			ufi->map = tmpmap;
			continue;
		}

		/*
		 * got it!
		 */

		ufi->mapv = ufi->map->timestamp;
		return(TRUE);

	}	/* while loop */

	/*NOTREACHED*/
}
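
/*
 * Illustrative sketch (not part of this file): a caller in the fault
 * path might fill in the "orig" fields of a uvm_faultinfo and then let
 * uvmfault_lookup() resolve the map entry, e.g.:
 *
 *	struct uvm_faultinfo ufi;
 *
 *	ufi.orig_map = map;
 *	ufi.orig_rvaddr = trunc_page(vaddr);
 *	ufi.orig_size = PAGE_SIZE;
 *	if (uvmfault_lookup(&ufi, FALSE) == FALSE)
 *		return (KERN_INVALID_ADDRESS);
 *	// on success the maps are read-locked and ufi.entry is valid;
 *	// drop the locks with uvmfault_unlockall()/uvmfault_unlockmaps()
 *	// before doing anything that might sleep.
 *
 * The ufi field names and uvmfault_lookup()/uvmfault_unlockmaps() come
 * from this file; trunc_page(), PAGE_SIZE and the return value are
 * assumptions about the caller's environment, not defined here.
 */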

/*
 * uvmfault_relock: attempt to relock the same version of the map
 *
 * => fault data structures should be unlocked before calling.
 * => on success (TRUE), the maps will be locked after the call.
 */

static __inline boolean_t
uvmfault_relock(ufi)
	struct uvm_faultinfo *ufi;
{

	uvmexp.fltrelck++;
	/*
	 * relock map.   fail if version mismatch (in which case nothing
	 * gets locked).
	 */

	vm_map_lock_read(ufi->map);
	if (ufi->mapv != ufi->map->timestamp) {
		vm_map_unlock_read(ufi->map);
		return(FALSE);
	}

	uvmexp.fltrelckok++;
	return(TRUE);		/* got it! */
}
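
/*
 * Illustrative sketch (not part of this file): the usual pattern is to
 * unlock everything with uvmfault_unlockall(), perform an operation that
 * may sleep (e.g. waiting for a page), and then try uvmfault_relock();
 * if the map version changed in the meantime, the fault is restarted:
 *
 *	uvmfault_unlockall(&ufi, amap, uobj, anon);
 *	// ... block here (I/O, memory wait, etc.) ...
 *	if (uvmfault_relock(&ufi) == FALSE)
 *		goto ReFault;	// map changed; redo the lookup from scratch
 *
 * "ReFault" is only a placeholder label for the caller's restart point;
 * it is not defined in this file.
 */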

#endif /* _UVM_UVM_FAULT_I_H_ */