/*	$NetBSD: uvm_fault_i.h,v 1.5 1998/03/09 00:58:56 mrg Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_fault_i.h,v 1.1.6.1 1997/12/08 16:07:12 chuck Exp
 */

#ifndef _UVM_UVM_FAULT_I_H_
#define _UVM_UVM_FAULT_I_H_

/*
 * uvm_fault_i.h: fault inline functions
 */

/*
 * uvmfault_unlockmaps: unlock the maps
 */

static __inline void
uvmfault_unlockmaps(ufi, write_locked)
        struct uvm_faultinfo *ufi;
        boolean_t write_locked;
{

        if (write_locked) {
                vm_map_unlock(ufi->map);
                if (ufi->parent_map)
                        vm_map_unlock(ufi->parent_map);
        } else {
                vm_map_unlock_read(ufi->map);
                if (ufi->parent_map)
                        vm_map_unlock_read(ufi->parent_map);
        }
}
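
/*
 * usage sketch (hypothetical caller, for illustration only): the
 * "write_locked" argument must match the mode in which the maps were
 * locked, normally by uvmfault_lookup() below:
 *
 *	struct uvm_faultinfo ufi;
 *
 *	(fill in ufi.orig_map, ufi.orig_rvaddr, ufi.orig_size)
 *	if (uvmfault_lookup(&ufi, FALSE) == FALSE)
 *		return;				(nothing is locked)
 *	(... fault processing, maps read-locked ...)
 *	uvmfault_unlockmaps(&ufi, FALSE);	(FALSE matches read lock)
 */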

/*
 * uvmfault_unlockall: unlock everything passed in.
 *
 * => maps must be read-locked (not write-locked).
 */

static __inline void
uvmfault_unlockall(ufi, amap, uobj, anon)
        struct uvm_faultinfo *ufi;
        struct vm_amap *amap;
        struct uvm_object *uobj;
        struct vm_anon *anon;
{

        if (anon)
                simple_unlock(&anon->an_lock);
        if (uobj)
                simple_unlock(&uobj->vmobjlock);
        if (amap)
                simple_unlock(&amap->am_l);
        uvmfault_unlockmaps(ufi, FALSE);
}
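
/*
 * usage sketch (hypothetical, for illustration only; "amap", "uobj",
 * and "anon" stand for whatever the caller actually holds, with NULL
 * passed for anything it does not):  before sleeping for page I/O the
 * fault code drops all of its locks in one call, e.g.:
 *
 *	uvmfault_unlockall(&ufi, amap, NULL, anon);
 *
 * note the unlock order: anon, then object, then amap, then the maps,
 * i.e. the reverse of the typical acquisition order.
 */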

/*
 * uvmfault_lookup: lookup a virtual address in a map
 *
 * => caller must provide a uvm_faultinfo structure with the (IN)
 *    params properly filled in
 * => we will lookup the map entry and fill in parent_map, etc, as we go
 * => if the lookup is a success we will return with the maps locked
 * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
 *    get a read lock.
 * => currently we require sharemaps to have the same virtual addresses
 *    as the main maps they are attached to.  in other words, the share
 *    map starts at zero and goes to the max user address.  the main
 *    map references it by setting its offset to be the same as the
 *    starting virtual address.  if we ever wanted to have sharemaps
 *    have different virtual addresses than main maps we would calculate
 *    it like:
 *      share_va = (rvaddr - entry->start) + entry->offset
 *    [i.e. offset from start of map entry plus offset of mapping]
 *    since (entry->start == entry->offset), share_va must equal rvaddr.
 *    if we need to change this we should store share_va in rvaddr
 *    and move rvaddr to orig_rvaddr.
 */

static __inline boolean_t
uvmfault_lookup(ufi, write_lock)
        struct uvm_faultinfo *ufi;
        boolean_t write_lock;
{
        vm_map_t tmpmap;

        /*
         * init ufi values for lookup.
         */

        ufi->map = ufi->orig_map;
        ufi->rvaddr = ufi->orig_rvaddr;
        ufi->parent_map = NULL;
        ufi->size = ufi->orig_size;

        /*
         * keep going down levels until we are done.  note that there can
         * only be two levels so we won't loop very long.
         */

        while (1) {

                /*
                 * lock map
                 */
                if (write_lock) {
                        vm_map_lock(ufi->map);
                } else {
                        vm_map_lock_read(ufi->map);
                }

                /*
                 * lookup
                 */
                if (!uvm_map_lookup_entry(ufi->map, ufi->rvaddr,
                    &ufi->entry)) {
                        uvmfault_unlockmaps(ufi, write_lock);
                        return(FALSE);
                }

                /*
                 * reduce size if necessary
                 */
                if (ufi->entry->end - ufi->rvaddr < ufi->size)
                        ufi->size = ufi->entry->end - ufi->rvaddr;

                /*
                 * submap?  replace map with the submap and lookup again.
                 * note: VAs in submaps must match VAs in main map.
                 */
                if (UVM_ET_ISSUBMAP(ufi->entry)) {
                        if (ufi->parent_map)
                                panic("uvmfault_lookup: submap inside a "
                                    "sharemap (illegal)");
                        tmpmap = ufi->entry->object.sub_map;
                        if (write_lock) {
                                vm_map_unlock(ufi->map);
                        } else {
                                vm_map_unlock_read(ufi->map);
                        }
                        ufi->map = tmpmap;
                        continue;
                }

                /*
                 * share map?  drop down a level.  the submap case was
                 * already taken care of above.
                 */
                if (UVM_ET_ISMAP(ufi->entry)) {
                        if (ufi->parent_map)
                                panic("uvmfault_lookup: sharemap inside a "
                                    "sharemap (illegal)");
                        ufi->parent_map = ufi->map;
                        ufi->parentv = ufi->parent_map->timestamp;
                        ufi->map = ufi->entry->object.share_map;
#ifdef DIAGNOSTIC
                        /* see note above */
                        if (ufi->entry->offset != ufi->entry->start)
                                panic("uvmfault_lookup: sharemap VA != "
                                    "mainmap VA (not supported)");
#endif
                        continue;
                }

                /*
                 * got it!
                 */

                ufi->mapv = ufi->map->timestamp;
                return(TRUE);

        }       /* while loop */

        /*NOTREACHED*/
}
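
/*
 * worked example of the share_va note above (the numbers are made up
 * for illustration): suppose a main map entry has start == offset ==
 * 0x1000 and the faulting address is rvaddr == 0x1234.  then:
 *
 *	share_va = (rvaddr - entry->start) + entry->offset
 *	         = (0x1234 - 0x1000) + 0x1000
 *	         = 0x1234 == rvaddr
 *
 * which is why the code can keep using rvaddr unchanged when it drops
 * down into the share map (and why the DIAGNOSTIC check panics if
 * start != offset).
 */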

/*
 * uvmfault_relock: attempt to relock the same version of the map
 *
 * => fault data structures should be unlocked before calling.
 * => on success (TRUE), maps will be locked after the call.
 */

static __inline boolean_t
uvmfault_relock(ufi)
        struct uvm_faultinfo *ufi;
{

        uvmexp.fltrelck++;
        /*
         * simply relock the parent (if any), then the map, in that order.
         * fail if the versions mismatch (in which case nothing gets
         * locked).
         */

        if (ufi->parent_map) {
                vm_map_lock_read(ufi->parent_map);
                if (ufi->parentv != ufi->parent_map->timestamp) {
                        vm_map_unlock_read(ufi->parent_map);
                        return(FALSE);
                }
        }

        vm_map_lock_read(ufi->map);
        if (ufi->mapv != ufi->map->timestamp) {
                if (ufi->parent_map)
                        vm_map_unlock_read(ufi->parent_map);
                vm_map_unlock_read(ufi->map);
                return(FALSE);
        }

        uvmexp.fltrelckok++;
        return(TRUE);           /* got it! */
}
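
/*
 * usage sketch (hypothetical caller, for illustration only): the saved
 * timestamps ("mapv", "parentv") are what make the classic unlock /
 * sleep / relock dance safe.  a fault handler that had to drop its
 * locks for I/O does roughly:
 *
 *	uvmfault_unlockall(&ufi, amap, uobj, NULL);
 *	(start pager I/O and sleep with no locks held)
 *	if (uvmfault_relock(&ufi) == FALSE) {
 *		(a map was modified while we slept: the cached
 *		 entry/size values are stale, so restart the fault
 *		 from uvmfault_lookup())
 *	}
 */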

#endif /* _UVM_UVM_FAULT_I_H_ */