/*	$NetBSD: uvm_fault_i.h,v 1.4 1998/02/10 02:34:35 perry Exp $	*/
2 1.1 mrg
3 1.1 mrg /*
4 1.1 mrg * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
5 1.1 mrg * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
6 1.1 mrg */
7 1.1 mrg /*
8 1.1 mrg *
9 1.1 mrg * Copyright (c) 1997 Charles D. Cranor and Washington University.
10 1.1 mrg * All rights reserved.
11 1.1 mrg *
12 1.1 mrg * Redistribution and use in source and binary forms, with or without
13 1.1 mrg * modification, are permitted provided that the following conditions
14 1.1 mrg * are met:
15 1.1 mrg * 1. Redistributions of source code must retain the above copyright
16 1.1 mrg * notice, this list of conditions and the following disclaimer.
17 1.1 mrg * 2. Redistributions in binary form must reproduce the above copyright
18 1.1 mrg * notice, this list of conditions and the following disclaimer in the
19 1.1 mrg * documentation and/or other materials provided with the distribution.
20 1.1 mrg * 3. All advertising materials mentioning features or use of this software
21 1.1 mrg * must display the following acknowledgement:
22 1.1 mrg * This product includes software developed by Charles D. Cranor and
23 1.1 mrg * Washington University.
24 1.1 mrg * 4. The name of the author may not be used to endorse or promote products
25 1.1 mrg * derived from this software without specific prior written permission.
26 1.1 mrg *
27 1.1 mrg * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 1.1 mrg * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 1.1 mrg * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 1.1 mrg * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 1.1 mrg * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
32 1.1 mrg * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 1.1 mrg * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
34 1.1 mrg * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 1.1 mrg * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
36 1.1 mrg * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 1.3 mrg *
38 1.3 mrg * from: Id: uvm_fault_i.h,v 1.1.6.1 1997/12/08 16:07:12 chuck Exp
39 1.1 mrg */
40 1.1 mrg
41 1.4 perry #ifndef _UVM_UVM_FAULT_I_H_
42 1.4 perry #define _UVM_UVM_FAULT_I_H_
43 1.4 perry
44 1.1 mrg /*
45 1.1 mrg * uvm_fault_i.h: fault inline functions
46 1.1 mrg */
47 1.1 mrg
48 1.1 mrg /*
49 1.1 mrg * uvmfault_unlockmaps: unlock the maps
50 1.1 mrg */
51 1.1 mrg
52 1.1 mrg static __inline void uvmfault_unlockmaps(ufi, write_locked)
53 1.1 mrg
54 1.1 mrg struct uvm_faultinfo *ufi;
55 1.1 mrg boolean_t write_locked;
56 1.1 mrg
57 1.1 mrg {
58 1.1 mrg if (write_locked) {
59 1.1 mrg vm_map_unlock(ufi->map);
60 1.1 mrg if (ufi->parent_map) vm_map_unlock(ufi->parent_map);
61 1.1 mrg } else {
62 1.1 mrg vm_map_unlock_read(ufi->map);
63 1.1 mrg if (ufi->parent_map) vm_map_unlock_read(ufi->parent_map);
64 1.1 mrg }
65 1.1 mrg }
66 1.1 mrg
67 1.1 mrg /*
68 1.1 mrg * uvmfault_unlockall: unlock everything passed in.
69 1.1 mrg *
70 1.1 mrg * => maps must be read-locked (not write-locked).
71 1.1 mrg */
72 1.1 mrg
73 1.1 mrg static __inline void uvmfault_unlockall(ufi, amap, uobj, anon)
74 1.1 mrg
75 1.1 mrg struct uvm_faultinfo *ufi;
76 1.1 mrg struct vm_amap *amap;
77 1.1 mrg struct uvm_object *uobj;
78 1.1 mrg struct vm_anon *anon;
79 1.1 mrg
80 1.1 mrg {
81 1.1 mrg if (anon)
82 1.1 mrg simple_unlock(&anon->an_lock);
83 1.1 mrg if (uobj)
84 1.1 mrg simple_unlock(&uobj->vmobjlock);
85 1.1 mrg if (amap)
86 1.1 mrg simple_unlock(&amap->am_l);
87 1.1 mrg uvmfault_unlockmaps(ufi, FALSE);
88 1.1 mrg }
89 1.1 mrg
90 1.1 mrg /*
91 1.1 mrg * uvmfault_lookup: lookup a virtual address in a map
92 1.1 mrg *
93 1.1 mrg * => caller must provide a uvm_faultinfo structure with the (IN)
94 1.1 mrg * params properly filled in
95 1.1 mrg * => we will lookup the map entry and fill in parent_map, etc, as we go
96 1.1 mrg * => if the lookup is a success we will return with the maps locked
97 1.1 mrg * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
98 1.1 mrg * get a read lock.
99 1.1 mrg * => currently we require sharemaps to have the same virtual addresses
100 1.1 mrg * as the main maps they are attached to. in other words, the share
101 1.1 mrg * map starts at zero and goes to the map user address. the main
102 1.1 mrg * map references it by setting its offset to be the same as the
103 1.1 mrg * starting virtual address. if we ever wanted to have sharemaps
104 1.1 mrg * have different virtual addresses than main maps we would calculate
105 1.1 mrg * it like:
106 1.1 mrg * share_va = (rvaddr - entry->start) + entry->offset
107 1.1 mrg * [i.e. offset from start of map entry plus offset of mapping]
108 1.1 mrg * since (entry->start == entry->offset), share_va must equal rvaddr.
109 1.1 mrg * if we need to change this we should store share_va in rvaddr
110 1.1 mrg * and move rvaddr to orig_rvaddr.
111 1.1 mrg */
112 1.1 mrg
113 1.1 mrg static __inline boolean_t uvmfault_lookup(ufi, write_lock)
114 1.1 mrg
115 1.1 mrg struct uvm_faultinfo *ufi;
116 1.1 mrg boolean_t write_lock;
117 1.1 mrg
118 1.1 mrg {
119 1.1 mrg vm_map_t tmpmap;
120 1.1 mrg
121 1.1 mrg /*
122 1.1 mrg * init ufi values for lookup.
123 1.1 mrg */
124 1.1 mrg
125 1.1 mrg ufi->map = ufi->orig_map;
126 1.1 mrg ufi->rvaddr = ufi->orig_rvaddr;
127 1.1 mrg ufi->parent_map = NULL;
128 1.1 mrg ufi->size = ufi->orig_size;
129 1.1 mrg
130 1.1 mrg /*
131 1.1 mrg * keep going down levels until we are done. note that there can
132 1.1 mrg * only be two levels so we won't loop very long.
133 1.1 mrg */
134 1.1 mrg
135 1.1 mrg while (1) {
136 1.1 mrg
137 1.1 mrg /*
138 1.1 mrg * lock map
139 1.1 mrg */
140 1.1 mrg if (write_lock) {
141 1.1 mrg vm_map_lock(ufi->map);
142 1.1 mrg } else {
143 1.1 mrg vm_map_lock_read(ufi->map);
144 1.1 mrg }
145 1.1 mrg
146 1.1 mrg /*
147 1.1 mrg * lookup
148 1.1 mrg */
149 1.1 mrg if (!uvm_map_lookup_entry(ufi->map, ufi->rvaddr, &ufi->entry)) {
150 1.1 mrg uvmfault_unlockmaps(ufi, write_lock);
151 1.1 mrg return(FALSE);
152 1.1 mrg }
153 1.1 mrg
154 1.1 mrg /*
155 1.1 mrg * reduce size if necessary
156 1.1 mrg */
157 1.1 mrg if (ufi->entry->end - ufi->rvaddr < ufi->size)
158 1.1 mrg ufi->size = ufi->entry->end - ufi->rvaddr;
159 1.1 mrg
160 1.1 mrg /*
161 1.1 mrg * submap? replace map with the submap and lookup again.
162 1.1 mrg * note: VAs in submaps must match VAs in main map.
163 1.1 mrg */
164 1.1 mrg if (UVM_ET_ISSUBMAP(ufi->entry)) {
165 1.1 mrg if (ufi->parent_map)
166 1.1 mrg panic("uvmfault_lookup: submap inside a sharemap (illegal)");
167 1.1 mrg tmpmap = ufi->entry->object.sub_map;
168 1.1 mrg if (write_lock) {
169 1.1 mrg vm_map_unlock(ufi->map);
170 1.1 mrg } else {
171 1.1 mrg vm_map_unlock_read(ufi->map);
172 1.1 mrg }
173 1.1 mrg ufi->map = tmpmap;
174 1.1 mrg continue;
175 1.1 mrg }
176 1.1 mrg
177 1.1 mrg /*
178 1.1 mrg * share map? drop down a level. already taken care of submap case.
179 1.1 mrg */
180 1.1 mrg if (UVM_ET_ISMAP(ufi->entry)) {
181 1.1 mrg if (ufi->parent_map)
182 1.1 mrg panic("uvmfault_lookup: sharemap inside a sharemap (illegal)");
183 1.1 mrg ufi->parent_map = ufi->map;
184 1.1 mrg ufi->parentv = ufi->parent_map->timestamp;
185 1.1 mrg ufi->map = ufi->entry->object.share_map;
186 1.1 mrg #ifdef DIAGNOSTIC
187 1.1 mrg /* see note above */
188 1.1 mrg if (ufi->entry->offset != ufi->entry->start)
189 1.1 mrg panic("uvmfault_lookup: sharemap VA != mainmap VA (not supported)");
190 1.1 mrg #endif
191 1.1 mrg continue;
192 1.1 mrg }
193 1.1 mrg
194 1.1 mrg /*
195 1.1 mrg * got it!
196 1.1 mrg */
197 1.1 mrg
198 1.1 mrg ufi->mapv = ufi->map->timestamp;
199 1.1 mrg return(TRUE);
200 1.1 mrg
201 1.1 mrg } /* while loop */
202 1.1 mrg
203 1.1 mrg /*NOTREACHED*/
204 1.1 mrg }
205 1.1 mrg
206 1.1 mrg /*
207 1.1 mrg * uvmfault_relock: attempt to relock the same version of the map
208 1.1 mrg *
209 1.1 mrg * => fault data structures should be unlocked before calling.
210 1.1 mrg * => if a success (TRUE) maps will be locked after call.
211 1.1 mrg */
212 1.1 mrg
213 1.1 mrg static __inline boolean_t uvmfault_relock(ufi)
214 1.1 mrg
215 1.1 mrg struct uvm_faultinfo *ufi;
216 1.1 mrg
217 1.1 mrg {
218 1.1 mrg uvmexp.fltrelck++;
219 1.1 mrg /*
220 1.1 mrg * simply relock parent (if any) then map in order. fail if version
221 1.1 mrg * mismatch (in which case nothing gets locked).
222 1.1 mrg */
223 1.1 mrg
224 1.1 mrg if (ufi->parent_map) {
225 1.1 mrg vm_map_lock_read(ufi->parent_map);
226 1.1 mrg if (ufi->parentv != ufi->parent_map->timestamp) {
227 1.1 mrg vm_map_unlock_read(ufi->parent_map);
228 1.1 mrg return(FALSE);
229 1.1 mrg }
230 1.1 mrg }
231 1.1 mrg
232 1.1 mrg vm_map_lock_read(ufi->map);
233 1.1 mrg if (ufi->mapv != ufi->map->timestamp) {
234 1.1 mrg if (ufi->parent_map)
235 1.1 mrg vm_map_unlock_read(ufi->parent_map);
236 1.1 mrg vm_map_unlock_read(ufi->map);
237 1.1 mrg return(FALSE);
238 1.1 mrg }
239 1.1 mrg
240 1.1 mrg uvmexp.fltrelckok++;
241 1.1 mrg return(TRUE); /* got it! */
242 1.1 mrg }
243 1.4 perry
244 1.4 perry #endif /* _UVM_UVM_FAULT_I_H_ */
245