/*	$NetBSD: uvm_fault_i.h,v 1.13 2001/06/02 18:09:26 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_fault_i.h,v 1.1.6.1 1997/12/08 16:07:12 chuck Exp
 */

#ifndef _UVM_UVM_FAULT_I_H_
#define _UVM_UVM_FAULT_I_H_

/*
 * uvm_fault_i.h: fault inline functions
 */
static boolean_t uvmfault_check_intrsafe __P((struct uvm_faultinfo *));
static boolean_t uvmfault_lookup __P((struct uvm_faultinfo *, boolean_t));
static boolean_t uvmfault_relock __P((struct uvm_faultinfo *));
static void uvmfault_unlockall __P((struct uvm_faultinfo *, struct vm_amap *,
		struct uvm_object *, struct vm_anon *));
static void uvmfault_unlockmaps __P((struct uvm_faultinfo *, boolean_t));

/*
 * uvmfault_unlockmaps: unlock the maps
 */

static __inline void
uvmfault_unlockmaps(ufi, write_locked)
	struct uvm_faultinfo *ufi;
	boolean_t write_locked;
{
	/*
	 * ufi can be NULL when this isn't really a fault,
	 * but merely paging in anon data.
	 */

	if (ufi == NULL) {
		return;
	}

	if (write_locked) {
		vm_map_unlock(ufi->map);
	} else {
		vm_map_unlock_read(ufi->map);
	}
}

/*
 * uvmfault_unlockall: unlock everything passed in.
 *
 * => maps must be read-locked (not write-locked).
 */

static __inline void
uvmfault_unlockall(ufi, amap, uobj, anon)
	struct uvm_faultinfo *ufi;
	struct vm_amap *amap;
	struct uvm_object *uobj;
	struct vm_anon *anon;
{

	if (anon)
		simple_unlock(&anon->an_lock);
	if (uobj)
		simple_unlock(&uobj->vmobjlock);
	if (amap)
		amap_unlock(amap);
	uvmfault_unlockmaps(ufi, FALSE);
}
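
/*
 * Example (illustrative sketch, not code taken from the fault path):
 * because uvmfault_unlockall() skips NULL arguments, a caller simply
 * passes NULL for any lock it does not currently hold, e.g. when only
 * the amap and the maps are locked:
 *
 *	uvmfault_unlockall(ufi, amap, NULL, NULL);
 */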

/*
 * uvmfault_check_intrsafe: check for a virtual address managed by
 * an interrupt-safe map.
 *
 * => caller must provide a uvm_faultinfo structure with the IN
 *	params properly filled in
 * => if we find an intrsafe VA, we fill in ufi->map, and return TRUE
 */

static __inline boolean_t
uvmfault_check_intrsafe(ufi)
	struct uvm_faultinfo *ufi;
{
	struct vm_map_intrsafe *vmi;
	int s;

	s = vmi_list_lock();
	for (vmi = LIST_FIRST(&vmi_list); vmi != NULL;
	     vmi = LIST_NEXT(vmi, vmi_list)) {
		if (ufi->orig_rvaddr >= vm_map_min(&vmi->vmi_map) &&
		    ufi->orig_rvaddr < vm_map_max(&vmi->vmi_map))
			break;
	}
	vmi_list_unlock(s);

	if (vmi != NULL) {
		ufi->map = &vmi->vmi_map;
		return (TRUE);
	}

	return (FALSE);
}
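
/*
 * Example (rough sketch only, not the actual uvm_fault() code): the
 * top-level fault handler can use this check before the normal map
 * lookup.  Interrupt-safe maps are expected to contain only wired
 * mappings, so a hit here is typically treated as an error rather
 * than run through the normal amap/object fault path:
 *
 *	if (uvmfault_check_intrsafe(&ufi)) {
 *		(fault on an interrupt-safe map: handle as a
 *		 fatal/kernel fault, do not fall through)
 *	}
 */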

/*
 * uvmfault_lookup: lookup a virtual address in a map
 *
 * => caller must provide a uvm_faultinfo structure with the IN
 *	params properly filled in
 * => we will lookup the map entry (handling submaps) as we go
 * => if the lookup is a success we will return with the maps locked
 * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
 *	get a read lock.
 * => note that submaps can only appear in the kernel and they are
 *	required to use the same virtual addresses as the map they
 *	are referenced by (thus address translation between the main
 *	map and the submap is unnecessary).
 */

static __inline boolean_t
uvmfault_lookup(ufi, write_lock)
	struct uvm_faultinfo *ufi;
	boolean_t write_lock;
{
	struct vm_map *tmpmap;

	/*
	 * init ufi values for lookup.
	 */

	ufi->map = ufi->orig_map;
	ufi->size = ufi->orig_size;

	/*
	 * keep going down levels until we are done.  note that there can
	 * only be two levels so we won't loop very long.
	 */

	while (1) {

		/*
		 * lock map
		 */
		if (write_lock) {
			vm_map_lock(ufi->map);
		} else {
			vm_map_lock_read(ufi->map);
		}

		/*
		 * lookup
		 */
		if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
		    &ufi->entry)) {
			uvmfault_unlockmaps(ufi, write_lock);
			return(FALSE);
		}

		/*
		 * reduce size if necessary
		 */
		if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
			ufi->size = ufi->entry->end - ufi->orig_rvaddr;

		/*
		 * submap?  replace map with the submap and lookup again.
		 * note: VAs in submaps must match VAs in main map.
		 */
		if (UVM_ET_ISSUBMAP(ufi->entry)) {
			tmpmap = ufi->entry->object.sub_map;
			if (write_lock) {
				vm_map_unlock(ufi->map);
			} else {
				vm_map_unlock_read(ufi->map);
			}
			ufi->map = tmpmap;
			continue;
		}

		/*
		 * got it!
		 */

		ufi->mapv = ufi->map->timestamp;
		return(TRUE);

	}	/* while loop */

	/*NOTREACHED*/
}
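
/*
 * Example (illustrative sketch; "map", "vaddr" and the error handling
 * are hypothetical names, not code from uvm_fault.c): a caller fills
 * in the "orig" IN parameters, performs the lookup, works with ufi.map
 * and ufi.entry while the map is locked, and unlocks when done:
 *
 *	struct uvm_faultinfo ufi;
 *
 *	ufi.orig_map = map;
 *	ufi.orig_rvaddr = trunc_page(vaddr);
 *	ufi.orig_size = PAGE_SIZE;
 *
 *	if (uvmfault_lookup(&ufi, FALSE) == FALSE)
 *		(address not mapped; fail the fault)
 *
 *	(ufi.map is now read-locked and ufi.entry is valid)
 *	...
 *	uvmfault_unlockmaps(&ufi, FALSE);
 */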

/*
 * uvmfault_relock: attempt to relock the same version of the map
 *
 * => fault data structures should be unlocked before calling.
 * => if successful (returns TRUE), the maps will be locked after the call.
 */

static __inline boolean_t
uvmfault_relock(ufi)
	struct uvm_faultinfo *ufi;
{
	/*
	 * ufi can be NULL when this isn't really a fault,
	 * but merely paging in anon data.
	 */

	if (ufi == NULL) {
		return TRUE;
	}

	uvmexp.fltrelck++;

	/*
	 * relock map.   fail if version mismatch (in which case nothing
	 * gets locked).
	 */

	vm_map_lock_read(ufi->map);
	if (ufi->mapv != ufi->map->timestamp) {
		vm_map_unlock_read(ufi->map);
		return(FALSE);
	}

	uvmexp.fltrelckok++;
	return(TRUE);		/* got it! */
}
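
/*
 * Example (rough sketch of the usual pattern, not literal code from
 * uvm_fault.c): the fault code drops all of its locks before any
 * operation that may sleep (page allocation, pagein I/O, ...) and then
 * uses uvmfault_relock() to learn whether the map changed while it was
 * unlocked; on failure it typically restarts the whole fault:
 *
 *	uvmfault_unlockall(ufi, amap, uobj, anon);
 *	(sleep: allocate pages, start I/O, etc.)
 *	if (uvmfault_relock(ufi) == FALSE)
 *		(map version changed; restart the fault from the top)
 *	(re-lock the amap/object/anon as needed and continue)
 */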

#endif /* _UVM_UVM_FAULT_I_H_ */