/*	$NetBSD: uvm_fault_i.h,v 1.10 2000/01/11 06:57:50 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_fault_i.h,v 1.1.6.1 1997/12/08 16:07:12 chuck Exp
 */

#ifndef _UVM_UVM_FAULT_I_H_
#define _UVM_UVM_FAULT_I_H_

/*
 * uvm_fault_i.h: fault inline functions
 */

/*
 * uvmfault_unlockmaps: unlock the maps
 */

static __inline void
uvmfault_unlockmaps(ufi, write_locked)
	struct uvm_faultinfo *ufi;
	boolean_t write_locked;
{
	/*
	 * ufi can be NULL when this isn't really a fault,
	 * but merely paging in anon data.
	 */

	if (ufi == NULL) {
		return;
	}

	if (write_locked) {
		vm_map_unlock(ufi->map);
	} else {
		vm_map_unlock_read(ufi->map);
	}
}
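
/*
 * Example (editor's illustrative sketch, not part of the original
 * source): the write_locked argument must match how the maps were
 * locked in the first place, e.g. by uvmfault_lookup() below.  The
 * surrounding error handling is hypothetical.
 *
 *	if (uvmfault_lookup(&ufi, TRUE) == FALSE)
 *		return (FALSE);
 *	... examine or update ufi.entry with the map write-locked ...
 *	uvmfault_unlockmaps(&ufi, TRUE);
 */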

/*
 * uvmfault_unlockall: unlock everything passed in.
 *
 * => maps must be read-locked (not write-locked).
 */

static __inline void
uvmfault_unlockall(ufi, amap, uobj, anon)
	struct uvm_faultinfo *ufi;
	struct vm_amap *amap;
	struct uvm_object *uobj;
	struct vm_anon *anon;
{

	if (anon)
		simple_unlock(&anon->an_lock);
	if (uobj)
		simple_unlock(&uobj->vmobjlock);
	if (amap)
		amap_unlock(amap);
	uvmfault_unlockmaps(ufi, FALSE);
}
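
/*
 * Example (editor's illustrative sketch, not part of the original
 * source): error and sleep paths in a fault handler typically drop
 * every lock they still hold in one call before bailing out or
 * waiting.  The locals (pg, amap, uobj, anon) and the return value
 * are hypothetical.
 *
 *	if (pg == NULL) {
 *		uvmfault_unlockall(&ufi, amap, uobj, anon);
 *		return (KERN_RESOURCE_SHORTAGE);
 *	}
 */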

/*
 * uvmfault_check_intrsafe: check for a virtual address managed by
 * an interrupt-safe map.
 *
 * => caller must provide a uvm_faultinfo structure with the IN
 *	params properly filled in
 * => if we find an intrsafe VA, we fill in ufi->map, and return TRUE
 */

static __inline boolean_t
uvmfault_check_intrsafe(ufi)
	struct uvm_faultinfo *ufi;
{
	struct vm_map_intrsafe *vmi;
	int s;

	s = vmi_list_lock();
	for (vmi = LIST_FIRST(&vmi_list); vmi != NULL;
	     vmi = LIST_NEXT(vmi, vmi_list)) {
		if (ufi->orig_rvaddr >= vm_map_min(&vmi->vmi_map) &&
		    ufi->orig_rvaddr < vm_map_max(&vmi->vmi_map))
			break;
	}
	vmi_list_unlock(s);

	if (vmi != NULL) {
		ufi->map = &vmi->vmi_map;
		return (TRUE);
	}

	return (FALSE);
}
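
/*
 * Example (editor's illustrative sketch, not part of the original
 * source): a fault handler is expected to reject faults on
 * interrupt-safe maps before doing the normal lookup, since such maps
 * should never fault.  The message and return value are hypothetical.
 *
 *	if (uvmfault_check_intrsafe(&ufi)) {
 *		printf("uvm_fault: fault on intrsafe map\n");
 *		return (KERN_FAILURE);
 *	}
 */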

/*
 * uvmfault_lookup: lookup a virtual address in a map
 *
 * => caller must provide a uvm_faultinfo structure with the IN
 *	params properly filled in
 * => we will lookup the map entry (handling submaps) as we go
 * => if the lookup is a success we will return with the maps locked
 * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
 *	get a read lock.
 * => note that submaps can only appear in the kernel and they are
 *	required to use the same virtual addresses as the map they
 *	are referenced by (thus address translation between the main
 *	map and the submap is unnecessary).
 */

static __inline boolean_t
uvmfault_lookup(ufi, write_lock)
	struct uvm_faultinfo *ufi;
	boolean_t write_lock;
{
	vm_map_t tmpmap;

	/*
	 * init ufi values for lookup.
	 */

	ufi->map = ufi->orig_map;
	ufi->size = ufi->orig_size;

	/*
	 * keep going down levels until we are done.   note that there can
	 * only be two levels so we won't loop very long.
	 */

	while (1) {

		/*
		 * lock map
		 */
		if (write_lock) {
			vm_map_lock(ufi->map);
		} else {
			vm_map_lock_read(ufi->map);
		}

		/*
		 * lookup
		 */
		if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
								&ufi->entry)) {
			uvmfault_unlockmaps(ufi, write_lock);
			return(FALSE);
		}

		/*
		 * reduce size if necessary
		 */
		if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
			ufi->size = ufi->entry->end - ufi->orig_rvaddr;

		/*
		 * submap?    replace map with the submap and lookup again.
		 * note: VAs in submaps must match VAs in main map.
		 */
		if (UVM_ET_ISSUBMAP(ufi->entry)) {
			tmpmap = ufi->entry->object.sub_map;
			if (write_lock) {
				vm_map_unlock(ufi->map);
			} else {
				vm_map_unlock_read(ufi->map);
			}
			ufi->map = tmpmap;
			continue;
		}

		/*
		 * got it!
		 */

		ufi->mapv = ufi->map->timestamp;
		return(TRUE);

	}	/* while loop */

	/*NOTREACHED*/
}
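
/*
 * Example (editor's illustrative sketch, not part of the original
 * source): a caller fills in the IN members of its uvm_faultinfo and
 * then calls uvmfault_lookup() to locate and lock the map entry.  The
 * protection check and return values shown here are simplified,
 * hypothetical stand-ins for the real checks in the fault handler.
 *
 *	ufi.orig_map = map;
 *	ufi.orig_rvaddr = trunc_page(vaddr);
 *	ufi.orig_size = PAGE_SIZE;
 *
 *	if (uvmfault_lookup(&ufi, FALSE) == FALSE)
 *		return (KERN_INVALID_ADDRESS);
 *	if ((ufi.entry->protection & access_type) != access_type) {
 *		uvmfault_unlockmaps(&ufi, FALSE);
 *		return (KERN_PROTECTION_FAILURE);
 *	}
 */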

/*
 * uvmfault_relock: attempt to relock the same version of the map
 *
 * => fault data structures should be unlocked before calling.
 * => if successful (TRUE), the maps will be locked after the call.
 */

static __inline boolean_t
uvmfault_relock(ufi)
	struct uvm_faultinfo *ufi;
{
	/*
	 * ufi can be NULL when this isn't really a fault,
	 * but merely paging in anon data.
	 */

	if (ufi == NULL) {
		return TRUE;
	}

	uvmexp.fltrelck++;

	/*
	 * relock map.   fail if version mismatch (in which case nothing
	 * gets locked).
	 */

	vm_map_lock_read(ufi->map);
	if (ufi->mapv != ufi->map->timestamp) {
		vm_map_unlock_read(ufi->map);
		return(FALSE);
	}

	uvmexp.fltrelckok++;
	return(TRUE);		/* got it! */
}
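
/*
 * Example (editor's illustrative sketch, not part of the original
 * source): the usual pattern is to drop all locks with
 * uvmfault_unlockall(), sleep (here for free memory), and then try
 * uvmfault_relock(); if the map version changed in the meantime the
 * handler must restart the fault from the top.  "ReFault" is a
 * hypothetical restart label and the wait message is made up.
 *
 *	uvmfault_unlockall(&ufi, amap, uobj, anon);
 *	uvm_wait("fltexample");
 *	if (uvmfault_relock(&ufi) == FALSE)
 *		goto ReFault;
 */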

#endif /* _UVM_UVM_FAULT_I_H_ */