/* $NetBSD: uvm_fault_i.h,v 1.4 1998/02/10 02:34:35 perry Exp $ */

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_fault_i.h,v 1.1.6.1 1997/12/08 16:07:12 chuck Exp
 */

#ifndef _UVM_UVM_FAULT_I_H_
#define _UVM_UVM_FAULT_I_H_

/*
 * uvm_fault_i.h: fault inline functions
 */

/*
 * uvmfault_unlockmaps: unlock the maps
 */

static __inline void uvmfault_unlockmaps(ufi, write_locked)

struct uvm_faultinfo *ufi;
boolean_t write_locked;

{
  if (write_locked) {
    vm_map_unlock(ufi->map);
    if (ufi->parent_map) vm_map_unlock(ufi->parent_map);
  } else {
    vm_map_unlock_read(ufi->map);
    if (ufi->parent_map) vm_map_unlock_read(ufi->parent_map);
  }
}

/*
 * uvmfault_unlockall: unlock everything passed in.
 *
 * => maps must be read-locked (not write-locked).
 */

static __inline void uvmfault_unlockall(ufi, amap, uobj, anon)

struct uvm_faultinfo *ufi;
struct vm_amap *amap;
struct uvm_object *uobj;
struct vm_anon *anon;

{
  if (anon)
    simple_unlock(&anon->an_lock);
  if (uobj)
    simple_unlock(&uobj->vmobjlock);
  if (amap)
    simple_unlock(&amap->am_l);
  uvmfault_unlockmaps(ufi, FALSE);
}
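
/*
 * example (hypothetical caller sketch, not compiled): a fault handler
 * that has to bail out typically drops every lock it holds in one call,
 * passing NULL for any amap/object/anon it does not currently hold.
 * the example function name and the KERN_RESOURCE_SHORTAGE return code
 * are illustrative assumptions, not part of this file.
 */
#if 0
static __inline int uvmfault_bail_example(ufi, amap, uobj, anon)

struct uvm_faultinfo *ufi;
struct vm_amap *amap;
struct uvm_object *uobj;
struct vm_anon *anon;

{
  /* error path: release all locks and give up on this fault */
  uvmfault_unlockall(ufi, amap, uobj, anon);
  return(KERN_RESOURCE_SHORTAGE);
}
#endif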

/*
 * uvmfault_lookup: lookup a virtual address in a map
 *
 * => caller must provide a uvm_faultinfo structure with the (IN)
 *    params properly filled in
 * => we will lookup the map entry and fill in parent_map, etc, as we go
 * => if the lookup is a success we will return with the maps locked
 * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
 *    get a read lock.
 * => currently we require sharemaps to have the same virtual addresses
 *    as the main maps they are attached to.  in other words, the share
 *    map starts at zero and goes to the map user address.  the main
 *    map references it by setting its offset to be the same as the
 *    starting virtual address.  if we ever wanted to have sharemaps
 *    have different virtual addresses than main maps we would calculate
 *    it like:
 *      share_va = (rvaddr - entry->start) + entry->offset
 *    [i.e. offset from start of map entry plus offset of mapping]
 *    since (entry->start == entry->offset), share_va must equal rvaddr.
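 *    (worked example, for illustration only: with entry->start ==
 *    entry->offset == 0x1000 and rvaddr == 0x1400, share_va =
 *    (0x1400 - 0x1000) + 0x1000 = 0x1400, which is rvaddr again.)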
 *    if we need to change this we should store share_va in rvaddr
 *    and move rvaddr to orig_rvaddr.
 */

static __inline boolean_t uvmfault_lookup(ufi, write_lock)

struct uvm_faultinfo *ufi;
boolean_t write_lock;

{
  vm_map_t tmpmap;

  /*
   * init ufi values for lookup.
   */

  ufi->map = ufi->orig_map;
  ufi->rvaddr = ufi->orig_rvaddr;
  ufi->parent_map = NULL;
  ufi->size = ufi->orig_size;

  /*
   * keep going down levels until we are done.  note that there can
   * only be two levels so we won't loop very long.
   */

  while (1) {

    /*
     * lock map
     */
    if (write_lock) {
      vm_map_lock(ufi->map);
    } else {
      vm_map_lock_read(ufi->map);
    }

    /*
     * lookup
     */
    if (!uvm_map_lookup_entry(ufi->map, ufi->rvaddr, &ufi->entry)) {
      uvmfault_unlockmaps(ufi, write_lock);
      return(FALSE);
    }

    /*
     * reduce size if necessary
     */
    if (ufi->entry->end - ufi->rvaddr < ufi->size)
      ufi->size = ufi->entry->end - ufi->rvaddr;

    /*
     * submap?  replace map with the submap and lookup again.
     * note: VAs in submaps must match VAs in main map.
     */
    if (UVM_ET_ISSUBMAP(ufi->entry)) {
      if (ufi->parent_map)
        panic("uvmfault_lookup: submap inside a sharemap (illegal)");
      tmpmap = ufi->entry->object.sub_map;
      if (write_lock) {
        vm_map_unlock(ufi->map);
      } else {
        vm_map_unlock_read(ufi->map);
      }
      ufi->map = tmpmap;
      continue;
    }

    /*
     * share map?  drop down a level (the submap case has already been
     * handled above).
     */
    if (UVM_ET_ISMAP(ufi->entry)) {
      if (ufi->parent_map)
        panic("uvmfault_lookup: sharemap inside a sharemap (illegal)");
      ufi->parent_map = ufi->map;
      ufi->parentv = ufi->parent_map->timestamp;
      ufi->map = ufi->entry->object.share_map;
#ifdef DIAGNOSTIC
      /* see note above: sharemap VAs must match main map VAs */
      if (ufi->entry->offset != ufi->entry->start)
        panic("uvmfault_lookup: sharemap VA != mainmap VA (not supported)");
#endif
      continue;
    }

    /*
     * got it!
     */

    ufi->mapv = ufi->map->timestamp;
    return(TRUE);

  } /* while loop */

  /*NOTREACHED*/
}
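
/*
 * example (hypothetical, not compiled): how a caller might fill in the
 * (IN) fields of a uvm_faultinfo and run a lookup.  the struct fields
 * and the uvmfault_* calls are from this file; the example function
 * name, its arguments, and the use of trunc_page/PAGE_SIZE are
 * illustrative assumptions only.
 */
#if 0
static __inline boolean_t uvmfault_lookup_example(map, vaddr)

vm_map_t map;
vm_offset_t vaddr;

{
  struct uvm_faultinfo ufi;

  ufi.orig_map = map;                   /* map to start the lookup in */
  ufi.orig_rvaddr = trunc_page(vaddr);  /* rounded-down fault address */
  ufi.orig_size = PAGE_SIZE;            /* size of the region of interest */

  if (uvmfault_lookup(&ufi, FALSE) == FALSE)    /* read-lock the maps */
    return(FALSE);                      /* address not mapped */

  /*
   * ufi.map, ufi.entry, and ufi.rvaddr are now valid and the maps are
   * read-locked; when done, drop the locks again.
   */
  uvmfault_unlockmaps(&ufi, FALSE);
  return(TRUE);
}
#endif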

/*
 * uvmfault_relock: attempt to relock the same version of the map
 *
 * => fault data structures should be unlocked before calling.
 * => if a success (TRUE) maps will be locked after call.
 */

static __inline boolean_t uvmfault_relock(ufi)

struct uvm_faultinfo *ufi;

{
  uvmexp.fltrelck++;
  /*
   * simply relock parent (if any) then map in order.  fail if version
   * mismatch (in which case nothing gets locked).
   */

  if (ufi->parent_map) {
    vm_map_lock_read(ufi->parent_map);
    if (ufi->parentv != ufi->parent_map->timestamp) {
      vm_map_unlock_read(ufi->parent_map);
      return(FALSE);
    }
  }

  vm_map_lock_read(ufi->map);
  if (ufi->mapv != ufi->map->timestamp) {
    if (ufi->parent_map)
      vm_map_unlock_read(ufi->parent_map);
    vm_map_unlock_read(ufi->map);
    return(FALSE);
  }

  uvmexp.fltrelckok++;
  return(TRUE);         /* got it! */
}
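
/*
 * example (hypothetical sketch, not compiled): the intended pattern is
 * to unlock everything, block (e.g. waiting for memory), and then try
 * to relock the same version of the maps.  a FALSE return means the
 * maps changed while we slept and the fault must be restarted.  the
 * example function name and the uvm_wait() call are illustrative
 * assumptions about the caller, not part of this file.
 */
#if 0
static __inline boolean_t uvmfault_relock_example(ufi, amap)

struct uvm_faultinfo *ufi;
struct vm_amap *amap;

{
  /* drop every lock before blocking (anon/uobj assumed not held here) */
  uvmfault_unlockall(ufi, amap, NULL, NULL);
  uvm_wait("fltexample");               /* block until free memory appears */

  /* try to get the same version of the maps back */
  if (uvmfault_relock(ufi) == FALSE)
    return(FALSE);                      /* maps changed: restart the fault */

  /* maps are read-locked again; the caller may continue the fault */
  return(TRUE);
}
#endif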

#endif /* _UVM_UVM_FAULT_I_H_ */