/*	$Id: uvm_fault_i.h,v 1.1 1998/02/05 06:25:10 mrg Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_fault_i.h: fault inline functions
 */
42 1.1 mrg
43 1.1 mrg /*
44 1.1 mrg * uvmfault_unlockmaps: unlock the maps
45 1.1 mrg */
46 1.1 mrg
47 1.1 mrg static __inline void uvmfault_unlockmaps(ufi, write_locked)
48 1.1 mrg
49 1.1 mrg struct uvm_faultinfo *ufi;
50 1.1 mrg boolean_t write_locked;
51 1.1 mrg
52 1.1 mrg {
53 1.1 mrg if (write_locked) {
54 1.1 mrg vm_map_unlock(ufi->map);
55 1.1 mrg if (ufi->parent_map) vm_map_unlock(ufi->parent_map);
56 1.1 mrg } else {
57 1.1 mrg vm_map_unlock_read(ufi->map);
58 1.1 mrg if (ufi->parent_map) vm_map_unlock_read(ufi->parent_map);
59 1.1 mrg }
60 1.1 mrg }
61 1.1 mrg
62 1.1 mrg /*
63 1.1 mrg * uvmfault_unlockall: unlock everything passed in.
64 1.1 mrg *
65 1.1 mrg * => maps must be read-locked (not write-locked).
66 1.1 mrg */
67 1.1 mrg
68 1.1 mrg static __inline void uvmfault_unlockall(ufi, amap, uobj, anon)
69 1.1 mrg
70 1.1 mrg struct uvm_faultinfo *ufi;
71 1.1 mrg struct vm_amap *amap;
72 1.1 mrg struct uvm_object *uobj;
73 1.1 mrg struct vm_anon *anon;
74 1.1 mrg
75 1.1 mrg {
76 1.1 mrg if (anon)
77 1.1 mrg simple_unlock(&anon->an_lock);
78 1.1 mrg if (uobj)
79 1.1 mrg simple_unlock(&uobj->vmobjlock);
80 1.1 mrg if (amap)
81 1.1 mrg simple_unlock(&amap->am_l);
82 1.1 mrg uvmfault_unlockmaps(ufi, FALSE);
83 1.1 mrg }
84 1.1 mrg
85 1.1 mrg /*
86 1.1 mrg * uvmfault_lookup: lookup a virtual address in a map
87 1.1 mrg *
88 1.1 mrg * => caller must provide a uvm_faultinfo structure with the (IN)
89 1.1 mrg * params properly filled in
90 1.1 mrg * => we will lookup the map entry and fill in parent_map, etc, as we go
91 1.1 mrg * => if the lookup is a success we will return with the maps locked
92 1.1 mrg * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
93 1.1 mrg * get a read lock.
94 1.1 mrg * => currently we require sharemaps to have the same virtual addresses
95 1.1 mrg * as the main maps they are attached to. in other words, the share
96 1.1 mrg * map starts at zero and goes to the map user address. the main
97 1.1 mrg * map references it by setting its offset to be the same as the
98 1.1 mrg * starting virtual address. if we ever wanted to have sharemaps
99 1.1 mrg * have different virtual addresses than main maps we would calculate
100 1.1 mrg * it like:
101 1.1 mrg * share_va = (rvaddr - entry->start) + entry->offset
102 1.1 mrg * [i.e. offset from start of map entry plus offset of mapping]
103 1.1 mrg * since (entry->start == entry->offset), share_va must equal rvaddr.
104 1.1 mrg * if we need to change this we should store share_va in rvaddr
105 1.1 mrg * and move rvaddr to orig_rvaddr.
106 1.1 mrg */
107 1.1 mrg
108 1.1 mrg static __inline boolean_t uvmfault_lookup(ufi, write_lock)
109 1.1 mrg
110 1.1 mrg struct uvm_faultinfo *ufi;
111 1.1 mrg boolean_t write_lock;
112 1.1 mrg
113 1.1 mrg {
114 1.1 mrg vm_map_t tmpmap;
115 1.1 mrg
116 1.1 mrg /*
117 1.1 mrg * init ufi values for lookup.
118 1.1 mrg */
119 1.1 mrg
120 1.1 mrg ufi->map = ufi->orig_map;
121 1.1 mrg ufi->rvaddr = ufi->orig_rvaddr;
122 1.1 mrg ufi->parent_map = NULL;
123 1.1 mrg ufi->size = ufi->orig_size;
124 1.1 mrg
125 1.1 mrg /*
126 1.1 mrg * keep going down levels until we are done. note that there can
127 1.1 mrg * only be two levels so we won't loop very long.
128 1.1 mrg */
129 1.1 mrg
130 1.1 mrg while (1) {
131 1.1 mrg
132 1.1 mrg /*
133 1.1 mrg * lock map
134 1.1 mrg */
135 1.1 mrg if (write_lock) {
136 1.1 mrg vm_map_lock(ufi->map);
137 1.1 mrg } else {
138 1.1 mrg vm_map_lock_read(ufi->map);
139 1.1 mrg }
140 1.1 mrg
141 1.1 mrg /*
142 1.1 mrg * lookup
143 1.1 mrg */
144 1.1 mrg if (!uvm_map_lookup_entry(ufi->map, ufi->rvaddr, &ufi->entry)) {
145 1.1 mrg uvmfault_unlockmaps(ufi, write_lock);
146 1.1 mrg return(FALSE);
147 1.1 mrg }
148 1.1 mrg
149 1.1 mrg /*
150 1.1 mrg * reduce size if necessary
151 1.1 mrg */
152 1.1 mrg if (ufi->entry->end - ufi->rvaddr < ufi->size)
153 1.1 mrg ufi->size = ufi->entry->end - ufi->rvaddr;
154 1.1 mrg
155 1.1 mrg /*
156 1.1 mrg * submap? replace map with the submap and lookup again.
157 1.1 mrg * note: VAs in submaps must match VAs in main map.
158 1.1 mrg */
159 1.1 mrg if (UVM_ET_ISSUBMAP(ufi->entry)) {
160 1.1 mrg if (ufi->parent_map)
161 1.1 mrg panic("uvmfault_lookup: submap inside a sharemap (illegal)");
162 1.1 mrg tmpmap = ufi->entry->object.sub_map;
163 1.1 mrg if (write_lock) {
164 1.1 mrg vm_map_unlock(ufi->map);
165 1.1 mrg } else {
166 1.1 mrg vm_map_unlock_read(ufi->map);
167 1.1 mrg }
168 1.1 mrg ufi->map = tmpmap;
169 1.1 mrg continue;
170 1.1 mrg }
171 1.1 mrg
172 1.1 mrg /*
173 1.1 mrg * share map? drop down a level. already taken care of submap case.
174 1.1 mrg */
175 1.1 mrg if (UVM_ET_ISMAP(ufi->entry)) {
176 1.1 mrg if (ufi->parent_map)
177 1.1 mrg panic("uvmfault_lookup: sharemap inside a sharemap (illegal)");
178 1.1 mrg ufi->parent_map = ufi->map;
179 1.1 mrg ufi->parentv = ufi->parent_map->timestamp;
180 1.1 mrg ufi->map = ufi->entry->object.share_map;
181 1.1 mrg #ifdef DIAGNOSTIC
182 1.1 mrg /* see note above */
183 1.1 mrg if (ufi->entry->offset != ufi->entry->start)
184 1.1 mrg panic("uvmfault_lookup: sharemap VA != mainmap VA (not supported)");
185 1.1 mrg #endif
186 1.1 mrg continue;
187 1.1 mrg }
188 1.1 mrg
189 1.1 mrg /*
190 1.1 mrg * got it!
191 1.1 mrg */
192 1.1 mrg
193 1.1 mrg ufi->mapv = ufi->map->timestamp;
194 1.1 mrg return(TRUE);
195 1.1 mrg
196 1.1 mrg } /* while loop */
197 1.1 mrg
198 1.1 mrg /*NOTREACHED*/
199 1.1 mrg }
200 1.1 mrg
201 1.1 mrg /*
202 1.1 mrg * uvmfault_relock: attempt to relock the same version of the map
203 1.1 mrg *
204 1.1 mrg * => fault data structures should be unlocked before calling.
205 1.1 mrg * => if a success (TRUE) maps will be locked after call.
206 1.1 mrg */
207 1.1 mrg
208 1.1 mrg static __inline boolean_t uvmfault_relock(ufi)
209 1.1 mrg
210 1.1 mrg struct uvm_faultinfo *ufi;
211 1.1 mrg
212 1.1 mrg {
213 1.1 mrg uvmexp.fltrelck++;
214 1.1 mrg /*
215 1.1 mrg * simply relock parent (if any) then map in order. fail if version
216 1.1 mrg * mismatch (in which case nothing gets locked).
217 1.1 mrg */
218 1.1 mrg
219 1.1 mrg if (ufi->parent_map) {
220 1.1 mrg vm_map_lock_read(ufi->parent_map);
221 1.1 mrg if (ufi->parentv != ufi->parent_map->timestamp) {
222 1.1 mrg vm_map_unlock_read(ufi->parent_map);
223 1.1 mrg return(FALSE);
224 1.1 mrg }
225 1.1 mrg }
226 1.1 mrg
227 1.1 mrg vm_map_lock_read(ufi->map);
228 1.1 mrg if (ufi->mapv != ufi->map->timestamp) {
229 1.1 mrg if (ufi->parent_map)
230 1.1 mrg vm_map_unlock_read(ufi->parent_map);
231 1.1 mrg vm_map_unlock_read(ufi->map);
232 1.1 mrg return(FALSE);
233 1.1 mrg }
234 1.1 mrg
235 1.1 mrg uvmexp.fltrelckok++;
236 1.1 mrg return(TRUE); /* got it! */
237 1.1 mrg }
238