Home | History | Annotate | Line # | Download | only in uvm
uvm_fault_i.h revision 1.3
      1  1.2  thorpej /*	$NetBSD: uvm_fault_i.h,v 1.3 1998/02/07 11:08:30 mrg Exp $	*/
      2  1.1      mrg 
      3  1.1      mrg /*
      4  1.1      mrg  * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
      5  1.1      mrg  *	   >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
      6  1.1      mrg  */
      7  1.1      mrg /*
      8  1.1      mrg  *
      9  1.1      mrg  * Copyright (c) 1997 Charles D. Cranor and Washington University.
     10  1.1      mrg  * All rights reserved.
     11  1.1      mrg  *
     12  1.1      mrg  * Redistribution and use in source and binary forms, with or without
     13  1.1      mrg  * modification, are permitted provided that the following conditions
     14  1.1      mrg  * are met:
     15  1.1      mrg  * 1. Redistributions of source code must retain the above copyright
     16  1.1      mrg  *    notice, this list of conditions and the following disclaimer.
     17  1.1      mrg  * 2. Redistributions in binary form must reproduce the above copyright
     18  1.1      mrg  *    notice, this list of conditions and the following disclaimer in the
     19  1.1      mrg  *    documentation and/or other materials provided with the distribution.
     20  1.1      mrg  * 3. All advertising materials mentioning features or use of this software
     21  1.1      mrg  *    must display the following acknowledgement:
     22  1.1      mrg  *      This product includes software developed by Charles D. Cranor and
     23  1.1      mrg  *      Washington University.
     24  1.1      mrg  * 4. The name of the author may not be used to endorse or promote products
     25  1.1      mrg  *    derived from this software without specific prior written permission.
     26  1.1      mrg  *
     27  1.1      mrg  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     28  1.1      mrg  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     29  1.1      mrg  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     30  1.1      mrg  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     31  1.1      mrg  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     32  1.1      mrg  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     33  1.1      mrg  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     34  1.1      mrg  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     35  1.1      mrg  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     36  1.1      mrg  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     37  1.3      mrg  *
     38  1.3      mrg  * from: Id: uvm_fault_i.h,v 1.1.6.1 1997/12/08 16:07:12 chuck Exp
     39  1.1      mrg  */
     40  1.1      mrg 
     41  1.1      mrg /*
     42  1.1      mrg  * uvm_fault_i.h: fault inline functions
     43  1.1      mrg  */
     44  1.1      mrg 
     45  1.1      mrg /*
     46  1.1      mrg  * uvmfault_unlockmaps: unlock the maps
     47  1.1      mrg  */
     48  1.1      mrg 
     49  1.1      mrg static __inline void uvmfault_unlockmaps(ufi, write_locked)
     50  1.1      mrg 
     51  1.1      mrg struct uvm_faultinfo *ufi;
     52  1.1      mrg boolean_t write_locked;
     53  1.1      mrg 
     54  1.1      mrg {
     55  1.1      mrg   if (write_locked) {
     56  1.1      mrg     vm_map_unlock(ufi->map);
     57  1.1      mrg     if (ufi->parent_map) vm_map_unlock(ufi->parent_map);
     58  1.1      mrg   } else {
     59  1.1      mrg     vm_map_unlock_read(ufi->map);
     60  1.1      mrg     if (ufi->parent_map) vm_map_unlock_read(ufi->parent_map);
     61  1.1      mrg   }
     62  1.1      mrg }
     63  1.1      mrg 
     64  1.1      mrg /*
     65  1.1      mrg  * uvmfault_unlockall: unlock everything passed in.
     66  1.1      mrg  *
     67  1.1      mrg  * => maps must be read-locked (not write-locked).
     68  1.1      mrg  */
     69  1.1      mrg 
     70  1.1      mrg static __inline void uvmfault_unlockall(ufi, amap, uobj, anon)
     71  1.1      mrg 
     72  1.1      mrg struct uvm_faultinfo *ufi;
     73  1.1      mrg struct vm_amap *amap;
     74  1.1      mrg struct uvm_object *uobj;
     75  1.1      mrg struct vm_anon *anon;
     76  1.1      mrg 
     77  1.1      mrg {
     78  1.1      mrg   if (anon)
     79  1.1      mrg     simple_unlock(&anon->an_lock);
     80  1.1      mrg   if (uobj)
     81  1.1      mrg     simple_unlock(&uobj->vmobjlock);
     82  1.1      mrg   if (amap)
     83  1.1      mrg     simple_unlock(&amap->am_l);
     84  1.1      mrg   uvmfault_unlockmaps(ufi, FALSE);
     85  1.1      mrg }
     86  1.1      mrg 
     87  1.1      mrg /*
     88  1.1      mrg  * uvmfault_lookup: lookup a virtual address in a map
     89  1.1      mrg  *
     90  1.1      mrg  * => caller must provide a uvm_faultinfo structure with the (IN)
     91  1.1      mrg  *	params properly filled in
     92  1.1      mrg  * => we will lookup the map entry and fill in parent_map, etc, as we go
     93  1.1      mrg  * => if the lookup is a success we will return with the maps locked
     94  1.1      mrg  * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
     95  1.1      mrg  *	get a read lock.
     96  1.1      mrg  * => currently we require sharemaps to have the same virtual addresses
     97  1.1      mrg  *	as the main maps they are attached to.   in other words, the share
     98  1.1      mrg  *	map starts at zero and goes to the map user address.   the main
     99  1.1      mrg  *	map references it by setting its offset to be the same as the
    100  1.1      mrg  *	starting virtual address.    if we ever wanted to have sharemaps
    101  1.1      mrg  *	have different virtual addresses than main maps we would calculate
    102  1.1      mrg  *	it like:
    103  1.1      mrg  *		share_va = (rvaddr - entry->start) + entry->offset
    104  1.1      mrg  *	[i.e. offset from start of map entry plus offset of mapping]
    105  1.1      mrg  *	since (entry->start == entry->offset), share_va must equal rvaddr.
    106  1.1      mrg  *	if we need to change this we should store share_va in rvaddr
    107  1.1      mrg  *	and move rvaddr to orig_rvaddr.
    108  1.1      mrg  */
    109  1.1      mrg 
static __inline boolean_t uvmfault_lookup(ufi, write_lock)

struct uvm_faultinfo *ufi;
boolean_t write_lock;

{
  vm_map_t tmpmap;

  /*
   * init ufi values for lookup.
   * the orig_* fields are the caller-supplied (IN) params; we copy
   * them to the working fields so a retry can start fresh.
   */

  ufi->map = ufi->orig_map;
  ufi->rvaddr = ufi->orig_rvaddr;
  ufi->parent_map = NULL;
  ufi->size = ufi->orig_size;

  /*
   * keep going down levels until we are done.   note that there can
   * only be two levels so we won't loop very long.
   */

  while (1) {

    /*
     * lock map (read or write per caller's request)
     */
    if (write_lock) {
      vm_map_lock(ufi->map);
    } else {
      vm_map_lock_read(ufi->map);
    }

    /*
     * lookup.  on failure we must release the lock we just took
     * (and any parent lock) before returning FALSE.
     */
    if (!uvm_map_lookup_entry(ufi->map, ufi->rvaddr, &ufi->entry)) {
      uvmfault_unlockmaps(ufi, write_lock);
      return(FALSE);
    }

    /*
     * reduce size if necessary (clip to the end of the map entry
     * containing rvaddr)
     */
    if (ufi->entry->end - ufi->rvaddr < ufi->size)
      ufi->size = ufi->entry->end - ufi->rvaddr;

    /*
     * submap?    replace map with the submap and lookup again.
     * note: VAs in submaps must match VAs in main map, so rvaddr
     * is reused unchanged.  the current map's lock is dropped
     * BEFORE the submap is locked (at the top of the next pass).
     */
    if (UVM_ET_ISSUBMAP(ufi->entry)) {
      if (ufi->parent_map)
	panic("uvmfault_lookup: submap inside a sharemap (illegal)");
      tmpmap = ufi->entry->object.sub_map;
      if (write_lock) {
	vm_map_unlock(ufi->map);
      } else {
	vm_map_unlock_read(ufi->map);
      }
      ufi->map = tmpmap;
      continue;
    }

    /*
     * share map?  drop down a level.   already taken care of submap case.
     * unlike the submap case, the parent map's lock is KEPT (both
     * parent and share map end up locked on success) and its
     * timestamp is saved for uvmfault_relock's version check.
     */
    if (UVM_ET_ISMAP(ufi->entry)) {
      if (ufi->parent_map)
	panic("uvmfault_lookup: sharemap inside a sharemap (illegal)");
      ufi->parent_map = ufi->map;
      ufi->parentv = ufi->parent_map->timestamp;
      ufi->map = ufi->entry->object.share_map;
#ifdef DIAGNOSTIC
      /* see note above: sharemap VAs must equal mainmap VAs */
      if (ufi->entry->offset != ufi->entry->start)
	panic("uvmfault_lookup: sharemap VA != mainmap VA (not supported)");
#endif
      continue;
    }

    /*
     * got it!  record the map version so uvmfault_relock can detect
     * changes, and return with the map(s) still locked.
     */

    ufi->mapv = ufi->map->timestamp;
    return(TRUE);

  }	/* while loop */

  /*NOTREACHED*/
}
    202  1.1      mrg 
    203  1.1      mrg /*
    204  1.1      mrg  * uvmfault_relock: attempt to relock the same version of the map
    205  1.1      mrg  *
    206  1.1      mrg  * => fault data structures should be unlocked before calling.
    207  1.1      mrg  * => if a success (TRUE) maps will be locked after call.
    208  1.1      mrg  */
    209  1.1      mrg 
static __inline boolean_t uvmfault_relock(ufi)

struct uvm_faultinfo *ufi;

{
  uvmexp.fltrelck++;	/* stat: relock attempts */
  /*
   * simply relock parent (if any) then map in order.   fail if version
   * mismatch (in which case nothing gets locked).
   * the parentv/mapv timestamps were saved by uvmfault_lookup; a
   * timestamp change means the map was modified while we had it
   * unlocked, so the cached entry can no longer be trusted.
   */

  if (ufi->parent_map) {
    vm_map_lock_read(ufi->parent_map);
    if (ufi->parentv != ufi->parent_map->timestamp) {
      vm_map_unlock_read(ufi->parent_map);
      return(FALSE);
    }
  }

  vm_map_lock_read(ufi->map);
  if (ufi->mapv != ufi->map->timestamp) {
    /* drop both locks: parent first, then the map we just locked */
    if (ufi->parent_map)
      vm_map_unlock_read(ufi->parent_map);
    vm_map_unlock_read(ufi->map);
    return(FALSE);
  }

  uvmexp.fltrelckok++;	/* stat: successful relocks */
  return(TRUE);		/* got it! */
}
    240