/*	$NetBSD: uvm_fault_i.h,v 1.32 2019/12/16 22:47:55 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_fault_i.h,v 1.1.6.1 1997/12/08 16:07:12 chuck Exp
 */

#ifndef _UVM_UVM_FAULT_I_H_
#define _UVM_UVM_FAULT_I_H_

/*
 * uvm_fault_i.h: fault inline functions
 */

void uvmfault_update_stats(struct uvm_faultinfo *);

/*
 * uvmfault_unlockmaps: unlock the maps
 */

static __inline void
uvmfault_unlockmaps(struct uvm_faultinfo *ufi, bool write_locked)
{
	/*
	 * ufi can be NULL when this isn't really a fault,
	 * but merely paging in anon data.
	 */

	if (ufi == NULL) {
		return;
	}

#ifndef __HAVE_NO_PMAP_STATS
	uvmfault_update_stats(ufi);
#endif
	if (write_locked) {
		vm_map_unlock(ufi->map);
	} else {
		vm_map_unlock_read(ufi->map);
	}
}
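
/*
 * Usage note (editorial addition, not from the original source): the
 * write_locked argument must match the kind of lock the caller took,
 * e.g. whatever was passed to uvmfault_lookup().  A minimal sketch,
 * assuming the fault info was set up elsewhere:
 *
 *	if (uvmfault_lookup(ufi, false)) {
 *		...examine ufi->entry...
 *		uvmfault_unlockmaps(ufi, false);  (matches the read lock above)
 *	}
 */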

/*
 * uvmfault_unlockall: unlock everything passed in.
 *
 * => maps must be read-locked (not write-locked).
 */

static __inline void
uvmfault_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct uvm_object *uobj)
{

	if (uobj)
		mutex_exit(uobj->vmobjlock);
	if (amap)
		amap_unlock(amap);
	uvmfault_unlockmaps(ufi, false);
}
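
/*
 * Usage note (editorial addition): a NULL amap and/or uobj is
 * tolerated, so error paths can call this unconditionally with
 * whatever subset of locks they hold.  A hedged sketch, where
 * do_something_locked() is a hypothetical helper:
 *
 *	error = do_something_locked(ufi, amap, uobj);
 *	if (error) {
 *		uvmfault_unlockall(ufi, amap, uobj);  (any may be NULL)
 *		return error;
 *	}
 */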

/*
 * uvmfault_lookup: lookup a virtual address in a map
 *
 * => caller must provide a uvm_faultinfo structure with the IN
 *	params properly filled in
 * => we will lookup the map entry (handling submaps) as we go
 * => if the lookup is a success we will return with the maps locked
 * => if "write_lock" is true, we write_lock the map, otherwise we only
 *	get a read lock.
 * => note that submaps can only appear in the kernel and they are
 *	required to use the same virtual addresses as the map they
 *	are referenced by (thus address translation between the main
 *	map and the submap is unnecessary).
 */

static __inline bool
uvmfault_lookup(struct uvm_faultinfo *ufi, bool write_lock)
{
	struct vm_map *tmpmap;

	/*
	 * init ufi values for lookup.
	 */

	ufi->map = ufi->orig_map;
	ufi->size = ufi->orig_size;

	/*
	 * keep going down levels until we are done.  note that there can
	 * only be two levels so we won't loop very long.
	 */

	for (;;) {
		/*
		 * lock map
		 */
		if (write_lock) {
			vm_map_lock(ufi->map);
		} else {
			vm_map_lock_read(ufi->map);
		}

		/*
		 * lookup
		 */
		if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
		    &ufi->entry)) {
			uvmfault_unlockmaps(ufi, write_lock);
			return(false);
		}

		/*
		 * reduce size if necessary
		 */
		if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
			ufi->size = ufi->entry->end - ufi->orig_rvaddr;

		/*
		 * submap?  replace map with the submap and lookup again.
		 * note: VAs in submaps must match VAs in main map.
		 */
		if (UVM_ET_ISSUBMAP(ufi->entry)) {
			tmpmap = ufi->entry->object.sub_map;
			if (write_lock) {
				vm_map_unlock(ufi->map);
			} else {
				vm_map_unlock_read(ufi->map);
			}
			ufi->map = tmpmap;
			continue;
		}

		/*
		 * got it!
		 */

		ufi->mapv = ufi->map->timestamp;
		return(true);

	}	/* for (;;) loop */

	/*NOTREACHED*/
}
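
/*
 * Usage sketch (editorial addition; the field names come from the
 * uses in this file, the rest is illustrative only and assumes a map
 * pointer and fault address provided by the caller):
 *
 *	struct uvm_faultinfo ufi;
 *
 *	ufi.orig_map = map;			(map to look up in)
 *	ufi.orig_rvaddr = trunc_page(vaddr);	(page-rounded fault address)
 *	ufi.orig_size = PAGE_SIZE;		(size of region of interest)
 *	if (uvmfault_lookup(&ufi, false) == false)
 *		return EFAULT;			(no entry maps the address)
 *
 * On success ufi.map, ufi.entry and ufi.size are valid and the map is
 * read-locked; release it with uvmfault_unlockmaps(&ufi, false).
 */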

/*
 * uvmfault_relock: attempt to relock the same version of the map
 *
 * => fault data structures should be unlocked before calling.
 * => if a success (true) maps will be locked after call.
 */

static __inline bool
uvmfault_relock(struct uvm_faultinfo *ufi)
{
	/*
	 * ufi can be NULL when this isn't really a fault,
	 * but merely paging in anon data.
	 */

	if (ufi == NULL) {
		return true;
	}

	cpu_count(CPU_COUNT_FLTRELCK, 1);

	/*
	 * relock map.  fail if version mismatch (in which case nothing
	 * gets locked).
	 */

	vm_map_lock_read(ufi->map);
	if (ufi->mapv != ufi->map->timestamp) {
		vm_map_unlock_read(ufi->map);
		return(false);
	}

	cpu_count(CPU_COUNT_FLTRELCKOK, 1);
	return(true);
}
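
/*
 * Usage note (editorial addition): the intended pattern is to record
 * the map version via uvmfault_lookup(), drop all locks around a
 * sleep or I/O, then try uvmfault_relock(); on failure the fault must
 * be restarted from the top.  A hedged sketch, where the blocking
 * step and restart label are hypothetical:
 *
 *	uvmfault_unlockall(ufi, amap, uobj);
 *	error = wait_for_page_io();
 *	if (error || uvmfault_relock(ufi) == false)
 *		goto restart_fault;
 */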

#endif /* _UVM_UVM_FAULT_I_H_ */