/*	$NetBSD: uvm_fault.c,v 1.120.10.2 2007/07/21 19:21:55 ad Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp
 */

/*
 * uvm_fault.c: fault handler
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.120.10.2 2007/07/21 19:21:55 ad Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/user.h>

#include <uvm/uvm.h>

/*
 *
 * a word on page faults:
 *
 * types of page faults we handle:
 *
 * CASE 1: upper layer faults                   CASE 2: lower layer faults
 *
 *    CASE 1A         CASE 1B                  CASE 2A        CASE 2B
 *    read/write1     write>1                  read/write   +-cow_write/zero
 *         |             |                         |        |
 *      +--|--+       +--|--+     +-----+       +  |  +     | +-----+
 * amap |  V  |       |  ----------->new |         |        | |  ^  |
 *      +-----+       +-----+     +-----+       +  |  +     | +--|--+
 *                                                 |        |    |
 *      +-----+       +-----+                   +--|--+     | +--|--+
 * uobj | d/c |       | d/c |                   |  V  |     +----|  |
 *      +-----+       +-----+                   +-----+       +-----+
 *
 * d/c = don't care
 *
 *   case [0]: layerless fault
 *	no amap or uobj is present.   this is an error.
 *
 *   case [1]: upper layer fault [anon active]
 *     1A: [read] or [write with anon->an_ref == 1]
 *		I/O takes place in top level anon and uobj is not touched.
 *     1B: [write with anon->an_ref > 1]
 *		new anon is alloc'd and data is copied off ["COW"]
 *
 *   case [2]: lower layer fault [uobj]
 *     2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
 *		I/O takes place directly in object.
 *     2B: [write to copy_on_write] or [read on NULL uobj]
 *		data is "promoted" from uobj to a new anon.
 *		if uobj is null, then we zero fill.
 *
 * we follow the standard UVM locking protocol ordering:
 *
 * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
 * we hold a PG_BUSY page if we unlock for I/O
 *
 *
 * the code is structured as follows:
 *
 *     - init the "IN" params in the ufi structure
 *   ReFault:
 *     - do lookups [locks maps], check protection, handle needs_copy
 *     - check for case 0 fault (error)
 *     - establish "range" of fault
 *     - if we have an amap lock it and extract the anons
 *     - if sequential advice deactivate pages behind us
 *     - at the same time check pmap for unmapped areas and anon for pages
 *	 that we could map in (and do map it if found)
 *     - check object for resident pages that we could map in
 *     - if (case 2) goto Case2
 *     - >>> handle case 1
 *           - ensure source anon is resident in RAM
 *           - if case 1B alloc new anon and copy from source
 *           - map the correct page in
 *   Case2:
 *     - >>> handle case 2
 *           - ensure source page is resident (if uobj)
 *           - if case 2B alloc new anon and copy from source (could be zero
 *		fill if uobj == NULL)
 *           - map the correct page in
 *     - done!
 *
 * note on paging:
 *   if we have to do I/O we place a PG_BUSY page in the correct object,
 * unlock everything, and do the I/O.   when I/O is done we must reverify
 * the state of the world before assuming that our data structures are
 * valid.   [because mappings could change while the map is unlocked]
 *
 * alternative 1: unbusy the page in question and restart the page fault
 *    from the top (ReFault).   this is easy but does not take advantage
 *    of the information that we already have from our previous lookup,
 *    although it is possible that the "hints" in the vm_map will help here.
 *
 * alternative 2: the system already keeps track of a "version" number of
 *    a map.   [i.e. every time you write-lock a map (e.g. to change a
 *    mapping) you bump the version number up by one...]   so, we can save
 *    the version number of the map before we release the lock and start I/O.
 *    then when I/O is done we can relock and check the version numbers
 *    to see if anything changed.   this might save us some over
 *    alternative 1 because we don't have to unbusy the page and there
 *    may be fewer compares(?).
 *
 * alternative 3: put in backpointers or a way to "hold" part of a map
 *    in place while I/O is in progress.   this could be complex to
 *    implement (especially with structures like amap that can be referenced
 *    by multiple map entries, and figuring out what should wait could be
 *    complex as well...).
 *
 * given that we are not currently multiprocessor or multithreaded we might
 * as well choose alternative 2 now.   maybe alternative 3 would be useful
 * in the future.    XXX keep in mind for future consideration//rechecking.
 */

/*
 * local data structures
 */

struct uvm_advice {
	int advice;
	int nback;
	int nforw;
};

/*
 * page range array:
 * note: index in array must match "advice" value
 * XXX: borrowed numbers from freebsd.   do they work well for us?
 */

static const struct uvm_advice uvmadvice[] = {
	{ MADV_NORMAL, 3, 4 },
	{ MADV_RANDOM, 0, 0 },
	{ MADV_SEQUENTIAL, 8, 7},
};

#define UVM_MAXRANGE 16	/* must be MAX() of nback+nforw+1 */
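
/*
 * note: with the table above the largest window is MADV_SEQUENTIAL,
 * i.e. 8 back + 7 forward + 1 faulting page = 16 == UVM_MAXRANGE.
 */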

/*
 * private prototypes
 */

/*
 * inline functions
 */

/*
 * uvmfault_anonflush: try and deactivate pages in specified anons
 *
 * => does not have to deactivate page if it is busy
 */

static inline void
uvmfault_anonflush(struct vm_anon **anons, int n)
{
	int lcv;
	struct vm_page *pg;

	for (lcv = 0 ; lcv < n ; lcv++) {
		if (anons[lcv] == NULL)
			continue;
		simple_lock(&anons[lcv]->an_lock);
		pg = anons[lcv]->an_page;
		if (pg && (pg->flags & PG_BUSY) == 0) {
			uvm_lock_pageq();
			if (pg->wire_count == 0) {
				pmap_clear_reference(pg);
				uvm_pagedeactivate(pg);
			}
			uvm_unlock_pageq();
		}
		simple_unlock(&anons[lcv]->an_lock);
	}
}

/*
 * normal functions
 */

/*
 * uvmfault_amapcopy: clear "needs_copy" in a map.
 *
 * => called with VM data structures unlocked (usually, see below)
 * => we get a write lock on the maps and clear needs_copy for a VA
 * => if we are out of RAM we sleep (waiting for more)
 */

static void
uvmfault_amapcopy(struct uvm_faultinfo *ufi)
{
	for (;;) {

		/*
		 * no mapping?  give up.
		 */

		if (uvmfault_lookup(ufi, true) == false)
			return;

		/*
		 * copy if needed.
		 */

		if (UVM_ET_ISNEEDSCOPY(ufi->entry))
			amap_copy(ufi->map, ufi->entry, AMAP_COPY_NOWAIT,
			    ufi->orig_rvaddr, ufi->orig_rvaddr + 1);

		/*
		 * didn't work?  must be out of RAM.   unlock and sleep.
		 */

		if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
			uvmfault_unlockmaps(ufi, true);
			uvm_wait("fltamapcopy");
			continue;
		}

		/*
		 * got it!   unlock and return.
		 */

		uvmfault_unlockmaps(ufi, true);
		return;
	}
	/*NOTREACHED*/
}

/*
 * uvmfault_anonget: get data in an anon into a non-busy, non-released
 * page in that anon.
 *
 * => maps, amap, and anon locked by caller.
 * => if we fail (result != 0) we unlock everything.
 * => if we are successful, we return with everything still locked.
 * => we don't move the page on the queues [gets moved later]
 * => if we allocate a new page [we_own], it gets put on the queues.
 *    either way, the result is that the page is on the queues at return time
 * => for pages which are on loan from a uvm_object (and thus are not
 *    owned by the anon): if successful, we return with the owning object
 *    locked.   the caller must unlock this object when it unlocks everything
 *    else.
 */

int
uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct vm_anon *anon)
{
	bool we_own;	/* we own anon's page? */
	bool locked;	/* did we relock? */
	struct vm_page *pg;
	int error;
	UVMHIST_FUNC("uvmfault_anonget"); UVMHIST_CALLED(maphist);

	LOCK_ASSERT(simple_lock_held(&anon->an_lock));

	error = 0;
	uvmexp.fltanget++;
	/* bump rusage counters */
	if (anon->an_page)
		curproc->p_stats->p_ru.ru_minflt++;
	else
		curproc->p_stats->p_ru.ru_majflt++;

	/*
	 * loop until we get it, or fail.
	 */

	for (;;) {
		we_own = false;	/* true if we set PG_BUSY on a page */
		pg = anon->an_page;

		/*
		 * if there is a resident page and it is loaned, then anon
		 * may not own it.   call out to uvm_anon_lockloanpg() to ensure
		 * the real owner of the page has been identified and locked.
		 */

		if (pg && pg->loan_count)
			pg = uvm_anon_lockloanpg(anon);

		/*
		 * page there?   make sure it is not busy/released.
		 */

		if (pg) {

			/*
			 * at this point, if the page has a uobject [meaning
			 * we have it on loan], then that uobject is locked
			 * by us!   if the page is busy, we drop all the
			 * locks (including uobject) and try again.
			 */

			if ((pg->flags & PG_BUSY) == 0) {
				UVMHIST_LOG(maphist, "<- OK",0,0,0,0);
				return (0);
			}
			pg->flags |= PG_WANTED;
			uvmexp.fltpgwait++;

			/*
			 * the last unlock must be an atomic unlock+wait on
			 * the owner of page
			 */

			if (pg->uobject) {	/* owner is uobject ? */
				uvmfault_unlockall(ufi, amap, NULL, anon);
				UVMHIST_LOG(maphist, " unlock+wait on uobj",0,
				    0,0,0);
				UVM_UNLOCK_AND_WAIT(pg,
				    &pg->uobject->vmobjlock,
				    false, "anonget1",0);
			} else {
				/* anon owns page */
				uvmfault_unlockall(ufi, amap, NULL, NULL);
				UVMHIST_LOG(maphist, " unlock+wait on anon",0,
				    0,0,0);
				UVM_UNLOCK_AND_WAIT(pg,&anon->an_lock,0,
				    "anonget2",0);
			}
		} else {
#if defined(VMSWAP)

			/*
			 * no page, we must try and bring it in.
			 */

			pg = uvm_pagealloc(NULL, 0, anon, 0);
			if (pg == NULL) {		/* out of RAM.  */
				uvmfault_unlockall(ufi, amap, NULL, anon);
				uvmexp.fltnoram++;
				UVMHIST_LOG(maphist, "  noram -- UVM_WAIT",0,
				    0,0,0);
				if (!uvm_reclaimable()) {
					return ENOMEM;
				}
				uvm_wait("flt_noram1");
			} else {
				/* we set the PG_BUSY bit */
				we_own = true;
				uvmfault_unlockall(ufi, amap, NULL, anon);

				/*
				 * we are passing a PG_BUSY+PG_FAKE+PG_CLEAN
				 * page into the uvm_swap_get function with
				 * all data structures unlocked.  note that
				 * it is ok to read an_swslot here because
				 * we hold PG_BUSY on the page.
				 */
				uvmexp.pageins++;
				error = uvm_swap_get(pg, anon->an_swslot,
				    PGO_SYNCIO);

				/*
				 * we clean up after the i/o below in the
				 * "we_own" case
				 */
			}
#else /* defined(VMSWAP) */
			panic("%s: no page", __func__);
#endif /* defined(VMSWAP) */
		}

		/*
		 * now relock and try again
		 */

		locked = uvmfault_relock(ufi);
		if (locked && amap != NULL) {
			amap_lock(amap);
		}
		if (locked || we_own)
			simple_lock(&anon->an_lock);

		/*
		 * if we own the page (i.e. we set PG_BUSY), then we need
		 * to clean up after the I/O. there are three cases to
		 * consider:
		 *   [1] page released during I/O: free anon and ReFault.
		 *   [2] I/O not OK.   free the page and cause the fault
		 *       to fail.
		 *   [3] I/O OK!   activate the page and sync with the
		 *       non-we_own case (i.e. drop anon lock if not locked).
		 */

		if (we_own) {
#if defined(VMSWAP)
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (error) {

				/*
				 * remove the swap slot from the anon
				 * and mark the anon as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				if (anon->an_swslot > 0)
					uvm_swap_markbad(anon->an_swslot, 1);
				anon->an_swslot = SWSLOT_BAD;

				if ((pg->flags & PG_RELEASED) != 0)
					goto released;

				/*
				 * note: page was never !PG_BUSY, so it
				 * can't be mapped and thus no need to
				 * pmap_page_protect it...
				 */

				uvm_lock_pageq();
				uvm_pagefree(pg);
				uvm_unlock_pageq();

				if (locked)
					uvmfault_unlockall(ufi, amap, NULL,
					    anon);
				else
					simple_unlock(&anon->an_lock);
				UVMHIST_LOG(maphist, "<- ERROR", 0,0,0,0);
				return error;
			}

			if ((pg->flags & PG_RELEASED) != 0) {
released:
				KASSERT(anon->an_ref == 0);

				/*
				 * released while we unlocked amap.
				 */

				if (locked)
					uvmfault_unlockall(ufi, amap, NULL,
					    NULL);

				uvm_anon_release(anon);

				if (error) {
					UVMHIST_LOG(maphist,
					    "<- ERROR/RELEASED", 0,0,0,0);
					return error;
				}

				UVMHIST_LOG(maphist, "<- RELEASED", 0,0,0,0);
				return ERESTART;
			}

			/*
			 * we've successfully read the page, activate it.
			 */

			uvm_lock_pageq();
			uvm_pageactivate(pg);
			uvm_unlock_pageq();
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
			if (!locked)
				simple_unlock(&anon->an_lock);
#else /* defined(VMSWAP) */
			panic("%s: we_own", __func__);
#endif /* defined(VMSWAP) */
		}

		/*
		 * we were not able to relock.   restart fault.
		 */

		if (!locked) {
			UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
			return (ERESTART);
		}

		/*
		 * verify no one has touched the amap and moved the anon on us.
		 */

		if (ufi != NULL &&
		    amap_lookup(&ufi->entry->aref,
			ufi->orig_rvaddr - ufi->entry->start) != anon) {

			uvmfault_unlockall(ufi, amap, NULL, anon);
			UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
			return (ERESTART);
		}

		/*
		 * try it again!
		 */

		uvmexp.fltanretry++;
		continue;
	}
	/*NOTREACHED*/
}

/*
 * uvmfault_promote: promote data to a new anon.  used for 1B and 2B.
 *
 *	1. allocate an anon and a page.
 *	2. fill its contents.
 *	3. put it into amap.
 *
 * => if we fail (result != 0) we unlock everything.
 * => on success, return a new locked anon via 'nanon'.
 *    (*nanon)->an_page will be a resident, locked, dirty page.
 */

static int
uvmfault_promote(struct uvm_faultinfo *ufi,
    struct vm_anon *oanon,
    struct vm_page *uobjpage,
    struct vm_anon **nanon, /* OUT: allocated anon */
    struct vm_anon **spare)
{
	struct vm_amap *amap = ufi->entry->aref.ar_amap;
	struct uvm_object *uobj;
	struct vm_anon *anon;
	struct vm_page *pg;
	struct vm_page *opg;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	if (oanon) {
		/* anon COW */
		opg = oanon->an_page;
		KASSERT(opg != NULL);
		KASSERT(opg->uobject == NULL || opg->loan_count > 0);
	} else if (uobjpage != PGO_DONTCARE) {
		/* object-backed COW */
		opg = uobjpage;
	} else {
		/* ZFOD */
		opg = NULL;
	}
	if (opg != NULL) {
		uobj = opg->uobject;
	} else {
		uobj = NULL;
	}

	KASSERT(amap != NULL);
	KASSERT(uobjpage != NULL);
	KASSERT(uobjpage == PGO_DONTCARE || (uobjpage->flags & PG_BUSY) != 0);
	KASSERT(mutex_owned(&amap->am_l));
	LOCK_ASSERT(oanon == NULL || simple_lock_held(&oanon->an_lock));
	LOCK_ASSERT(uobj == NULL || simple_lock_held(&uobj->vmobjlock));
	LOCK_ASSERT(*spare == NULL || !simple_lock_held(&(*spare)->an_lock));

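	/*
	 * get an anon to promote into: reuse the spare left over from an
	 * earlier attempt if we have one, otherwise allocate a fresh one.
	 * we can't allocate anons with kernel_map locked, so in that case
	 * we unlock everything, set up a spare for the retry, and return
	 * ERESTART (see below).
	 */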
	if (*spare != NULL) {
		anon = *spare;
		*spare = NULL;
		simple_lock(&anon->an_lock);
	} else if (ufi->map != kernel_map) {
		anon = uvm_analloc();
	} else {
		UVMHIST_LOG(maphist, "kernel_map, unlock and retry", 0,0,0,0);

		/*
		 * we can't allocate anons with kernel_map locked.
		 */

		uvm_page_unbusy(&uobjpage, 1);
		uvmfault_unlockall(ufi, amap, uobj, oanon);

		*spare = uvm_analloc();
		if (*spare == NULL) {
			goto nomem;
		}
		simple_unlock(&(*spare)->an_lock);
		error = ERESTART;
		goto done;
	}
	if (anon) {

		/*
		 * The new anon is locked.
		 *
		 * if opg == NULL, we want a zero'd, dirty page,
		 * so have uvm_pagealloc() do that for us.
		 */

		pg = uvm_pagealloc(NULL, 0, anon,
		    (opg == NULL) ? UVM_PGA_ZERO : 0);
	} else {
		pg = NULL;
	}

	/*
	 * out of memory resources?
	 */

	if (pg == NULL) {
		/* save anon for the next try. */
		if (anon != NULL) {
			simple_unlock(&anon->an_lock);
			*spare = anon;
		}

		/* unlock and fail ... */
		uvm_page_unbusy(&uobjpage, 1);
		uvmfault_unlockall(ufi, amap, uobj, oanon);
nomem:
		if (!uvm_reclaimable()) {
			UVMHIST_LOG(maphist, "out of VM", 0,0,0,0);
			uvmexp.fltnoanon++;
			error = ENOMEM;
			goto done;
		}

		UVMHIST_LOG(maphist, "out of RAM, waiting for more", 0,0,0,0);
		uvmexp.fltnoram++;
		uvm_wait("flt_noram5");
		error = ERESTART;
		goto done;
	}

	/* copy page [pg now dirty] */
	if (opg) {
		uvm_pagecopy(opg, pg);
	}

	amap_add(&ufi->entry->aref, ufi->orig_rvaddr - ufi->entry->start, anon,
	    oanon != NULL);

	*nanon = anon;
	error = 0;
done:
	return error;
}


/*
 * F A U L T   -   m a i n   e n t r y   p o i n t
 */

/*
 * uvm_fault: page fault handler
 *
 * => called from MD code to resolve a page fault
 * => VM data structures usually should be unlocked.   however, it is
 *	possible to call here with the main map locked if the caller
 *	gets a write lock, sets it recursive, and then calls us (c.f.
 *	uvm_map_pageable).   this should be avoided because it keeps
 *	the map locked off during I/O.
 * => MUST NEVER BE CALLED IN INTERRUPT CONTEXT
 */

#define MASK(entry)     (UVM_ET_ISCOPYONWRITE(entry) ? \
			 ~VM_PROT_WRITE : VM_PROT_ALL)
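
/*
 * note: MASK(entry) strips VM_PROT_WRITE for copy-on-write entries, so
 * pages entered with "... & MASK(entry)" are never mapped writable on a
 * COW entry; a later write to them still faults and is handled as a
 * copy-on-write promotion.
 */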

/* fault_flag values passed from uvm_fault_wire to uvm_fault_internal */
#define UVM_FAULT_WIRE		1
#define UVM_FAULT_WIREMAX	2
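
/*
 * both values ask uvm_fault_internal() to treat the fault as a wiring
 * fault; UVM_FAULT_WIREMAX additionally checks access_type against the
 * entry's max_protection rather than its current protection.
 */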

int
uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
    vm_prot_t access_type, int fault_flag)
{
	struct uvm_faultinfo ufi;
	vm_prot_t enter_prot, check_prot;
	bool wired, narrow, promote, locked, shadowed, wire_fault, cow_now;
	int npages, nback, nforw, centeridx, error, lcv, gotpages;
	vaddr_t startva, currva;
	voff_t uoff;
	struct vm_amap *amap;
	struct uvm_object *uobj;
	struct vm_anon *anons_store[UVM_MAXRANGE], **anons, *anon, *oanon;
	struct vm_anon *anon_spare;
	struct vm_page *pages[UVM_MAXRANGE], *pg, *uobjpage;
	UVMHIST_FUNC("uvm_fault"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, vaddr=0x%x, at=%d, ff=%d)",
	    orig_map, vaddr, access_type, fault_flag);

	anon = anon_spare = NULL;
	pg = NULL;

	uvmexp.faults++;	/* XXX: locking? */

	/*
	 * init the IN parameters in the ufi
	 */

	ufi.orig_map = orig_map;
	ufi.orig_rvaddr = trunc_page(vaddr);
	ufi.orig_size = PAGE_SIZE;	/* can't get any smaller than this */
	wire_fault = (fault_flag > 0);
	if (wire_fault)
		narrow = true;		/* don't look for neighborhood
					 * pages on wire */
	else
		narrow = false;		/* normal fault */

	/*
	 * "goto ReFault" means restart the page fault from ground zero.
	 */
ReFault:

	/*
	 * lookup and lock the maps
	 */

	if (uvmfault_lookup(&ufi, false) == false) {
		UVMHIST_LOG(maphist, "<- no mapping @ 0x%x", vaddr, 0,0,0);
		error = EFAULT;
		goto done;
	}
	/* locked: maps(read) */

#ifdef DIAGNOSTIC
	if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0) {
		printf("Page fault on non-pageable map:\n");
		printf("ufi.map = %p\n", ufi.map);
		printf("ufi.orig_map = %p\n", ufi.orig_map);
		printf("ufi.orig_rvaddr = 0x%lx\n", (u_long) ufi.orig_rvaddr);
		panic("uvm_fault: (ufi.map->flags & VM_MAP_PAGEABLE) == 0");
	}
#endif

	/*
	 * check protection
	 */

	check_prot = fault_flag == UVM_FAULT_WIREMAX ?
	    ufi.entry->max_protection : ufi.entry->protection;
	if ((check_prot & access_type) != access_type) {
		UVMHIST_LOG(maphist,
		    "<- protection failure (prot=0x%x, access=0x%x)",
		    ufi.entry->protection, access_type, 0, 0);
		uvmfault_unlockmaps(&ufi, false);
		error = EACCES;
		goto done;
	}

	/*
	 * "enter_prot" is the protection we want to enter the page in at.
	 * for certain pages (e.g. copy-on-write pages) this protection can
	 * be more strict than ufi.entry->protection.  "wired" means either
	 * the entry is wired or we are fault-wiring the pg.
	 */

	enter_prot = ufi.entry->protection;
	wired = VM_MAPENT_ISWIRED(ufi.entry) || wire_fault;
	if (wired) {
		access_type = enter_prot; /* full access for wired */
		cow_now = (check_prot & VM_PROT_WRITE) != 0;
	} else {
		cow_now = (access_type & VM_PROT_WRITE) != 0;
	}

	/*
	 * handle "needs_copy" case.   if we need to copy the amap we will
	 * have to drop our readlock and relock it with a write lock.  (we
	 * need a write lock to change anything in a map entry [e.g.
	 * needs_copy]).
	 */

	if (UVM_ET_ISNEEDSCOPY(ufi.entry)) {
		if (cow_now || (ufi.entry->object.uvm_obj == NULL)) {
			KASSERT(fault_flag != UVM_FAULT_WIREMAX);
			/* need to clear */
			UVMHIST_LOG(maphist,
			    "  need to clear needs_copy and refault",0,0,0,0);
			uvmfault_unlockmaps(&ufi, false);
			uvmfault_amapcopy(&ufi);
			uvmexp.fltamcopy++;
			goto ReFault;

		} else {

			/*
			 * ensure that we pmap_enter page R/O since
			 * needs_copy is still true
			 */

			enter_prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * identify the players
	 */

	amap = ufi.entry->aref.ar_amap;		/* top layer */
	uobj = ufi.entry->object.uvm_obj;	/* bottom layer */

	/*
	 * check for a case 0 fault.   if nothing backing the entry then
	 * error now.
	 */

	if (amap == NULL && uobj == NULL) {
		uvmfault_unlockmaps(&ufi, false);
		UVMHIST_LOG(maphist,"<- no backing store, no overlay",0,0,0,0);
		error = EFAULT;
		goto done;
	}

	/*
	 * establish range of interest based on advice from mapper
	 * and then clip to fit map entry.   note that we only want
	 * to do this the first time through the fault.   if we
	 * ReFault we will disable this by setting "narrow" to true.
	 */

	if (narrow == false) {

		/* wide fault (!narrow) */
		KASSERT(uvmadvice[ufi.entry->advice].advice ==
		    ufi.entry->advice);
		nback = MIN(uvmadvice[ufi.entry->advice].nback,
		    (ufi.orig_rvaddr - ufi.entry->start) >> PAGE_SHIFT);
		startva = ufi.orig_rvaddr - (nback << PAGE_SHIFT);
		nforw = MIN(uvmadvice[ufi.entry->advice].nforw,
		    ((ufi.entry->end - ufi.orig_rvaddr) >>
		    PAGE_SHIFT) - 1);
		/*
		 * note: "-1" because we don't want to count the
		 * faulting page as forw
		 */
		npages = nback + nforw + 1;
		centeridx = nback;

		narrow = true;	/* ensure only once per-fault */

	} else {

		/* narrow fault! */
		nback = nforw = 0;
		startva = ufi.orig_rvaddr;
		npages = 1;
		centeridx = 0;

	}

	/* locked: maps(read) */
	UVMHIST_LOG(maphist, "  narrow=%d, back=%d, forw=%d, startva=0x%x",
	    narrow, nback, nforw, startva);
	UVMHIST_LOG(maphist, "  entry=0x%x, amap=0x%x, obj=0x%x", ufi.entry,
	    amap, uobj, 0);

	/*
	 * if we've got an amap, lock it and extract current anons.
	 */

	if (amap) {
		amap_lock(amap);
		anons = anons_store;
		amap_lookups(&ufi.entry->aref, startva - ufi.entry->start,
		    anons, npages);
	} else {
		anons = NULL;	/* to be safe */
	}

	/* locked: maps(read), amap(if there) */
	KASSERT(amap == NULL || mutex_owned(&amap->am_l));

	/*
	 * for MADV_SEQUENTIAL mappings we want to deactivate the back pages
	 * now and then forget about them (for the rest of the fault).
	 */

	if (ufi.entry->advice == MADV_SEQUENTIAL && nback != 0) {

		UVMHIST_LOG(maphist, "  MADV_SEQUENTIAL: flushing backpages",
		    0,0,0,0);
		/* flush back-page anons? */
		if (amap)
			uvmfault_anonflush(anons, nback);

		/* flush object? */
		if (uobj) {
			uoff = (startva - ufi.entry->start) + ufi.entry->offset;
			simple_lock(&uobj->vmobjlock);
			(void) (uobj->pgops->pgo_put)(uobj, uoff, uoff +
			    (nback << PAGE_SHIFT), PGO_DEACTIVATE);
		}

		/* now forget about the backpages */
		if (amap)
			anons += nback;
		startva += (nback << PAGE_SHIFT);
		npages -= nback;
		nback = centeridx = 0;
	}

	/* locked: maps(read), amap(if there) */
	KASSERT(amap == NULL || mutex_owned(&amap->am_l));

	/*
	 * map in the backpages and frontpages we found in the amap in hopes
	 * of preventing future faults.    we also init the pages[] array as
	 * we go.
	 */

	currva = startva;
	shadowed = false;
	for (lcv = 0 ; lcv < npages ; lcv++, currva += PAGE_SIZE) {

		/*
		 * don't play with VAs that are already mapped
		 * (except for center)
944 1.120.10.2 ad */
945 1.120.10.2 ad if (lcv != centeridx &&
946 1.120.10.2 ad pmap_extract(ufi.orig_map->pmap, currva, NULL)) {
947 1.120.10.2 ad pages[lcv] = PGO_DONTCARE;
948 1.120.10.2 ad continue;
949 1.120.10.2 ad }
950 1.120.10.2 ad
951 1.120.10.2 ad /*
952 1.120.10.2 ad * unmapped or center page. check if any anon at this level.
953 1.120.10.2 ad */
954 1.120.10.2 ad if (amap == NULL || anons[lcv] == NULL) {
955 1.120.10.2 ad pages[lcv] = NULL;
956 1.120.10.2 ad continue;
957 1.120.10.2 ad }
958 1.120.10.2 ad
959 1.120.10.2 ad /*
960 1.120.10.2 ad * check for present page and map if possible. re-activate it.
961 1.120.10.2 ad */
962 1.120.10.2 ad
963 1.120.10.2 ad pages[lcv] = PGO_DONTCARE;
964 1.120.10.2 ad if (lcv == centeridx) { /* save center for later! */
965 1.120.10.2 ad shadowed = true;
966 1.120.10.2 ad continue;
967 1.120.10.2 ad }
968 1.120.10.2 ad anon = anons[lcv];
969 1.120.10.2 ad simple_lock(&anon->an_lock);
970 1.120.10.2 ad /* ignore loaned pages */
971 1.120.10.2 ad if (anon->an_page && anon->an_page->loan_count == 0 &&
972 1.120.10.2 ad (anon->an_page->flags & PG_BUSY) == 0) {
973 1.120.10.2 ad uvm_lock_pageq();
974 1.120.10.2 ad uvm_pageenqueue(anon->an_page);
975 1.120.10.2 ad uvm_unlock_pageq();
976 1.120.10.2 ad UVMHIST_LOG(maphist,
977 1.120.10.2 ad " MAPPING: n anon: pm=0x%x, va=0x%x, pg=0x%x",
978 1.120.10.2 ad ufi.orig_map->pmap, currva, anon->an_page, 0);
979 1.120.10.2 ad uvmexp.fltnamap++;
980 1.120.10.2 ad
981 1.120.10.2 ad /*
982 1.120.10.2 ad * Since this isn't the page that's actually faulting,
983 1.120.10.2 ad * ignore pmap_enter() failures; it's not critical
984 1.120.10.2 ad * that we enter these right now.
985 1.120.10.2 ad */
986 1.120.10.2 ad
987 1.120.10.2 ad (void) pmap_enter(ufi.orig_map->pmap, currva,
988 1.120.10.2 ad VM_PAGE_TO_PHYS(anon->an_page),
989 1.120.10.2 ad (anon->an_ref > 1) ? (enter_prot & ~VM_PROT_WRITE) :
990 1.120.10.2 ad enter_prot,
991 1.120.10.2 ad PMAP_CANFAIL |
992 1.120.10.2 ad (VM_MAPENT_ISWIRED(ufi.entry) ? PMAP_WIRED : 0));
993 1.120.10.2 ad }
994 1.120.10.2 ad simple_unlock(&anon->an_lock);
995 1.120.10.2 ad pmap_update(ufi.orig_map->pmap);
996 1.120.10.2 ad }
997 1.120.10.2 ad
998 1.120.10.2 ad /* locked: maps(read), amap(if there) */
999 1.120.10.2 ad KASSERT(amap == NULL || mutex_owned(&amap->am_l));
1000 1.120.10.2 ad /* (shadowed == true) if there is an anon at the faulting address */
1001 1.120.10.2 ad UVMHIST_LOG(maphist, " shadowed=%d, will_get=%d", shadowed,
1002 1.120.10.2 ad (uobj && shadowed == false),0,0);
1003 1.120.10.2 ad
1004 1.120.10.2 ad /*
1005 1.120.10.2 ad * note that if we are really short of RAM we could sleep in the above
1006 1.120.10.2 ad * call to pmap_enter with everything locked. bad?
1007 1.120.10.2 ad *
1008 1.120.10.2 ad * XXX Actually, that is bad; pmap_enter() should just fail in that
1009 1.120.10.2 ad * XXX case. --thorpej
1010 1.120.10.2 ad */
1011 1.120.10.2 ad
1012 1.120.10.2 ad /*
1013 1.120.10.2 ad * if the desired page is not shadowed by the amap and we have a
1014 1.120.10.2 ad * backing object, then we check to see if the backing object would
1015 1.120.10.2 ad * prefer to handle the fault itself (rather than letting us do it
1016 1.120.10.2 ad * with the usual pgo_get hook). the backing object signals this by
1017 1.120.10.2 ad * providing a pgo_fault routine.
1018 1.120.10.2 ad */
1019 1.120.10.2 ad
1020 1.120.10.2 ad if (uobj && shadowed == false && uobj->pgops->pgo_fault != NULL) {
1021 1.120.10.2 ad simple_lock(&uobj->vmobjlock);
1022 1.120.10.2 ad
1023 1.120.10.2 ad /* locked: maps(read), amap (if there), uobj */
1024 1.120.10.2 ad error = uobj->pgops->pgo_fault(&ufi, startva, pages, npages,
1025 1.120.10.2 ad centeridx, access_type, PGO_LOCKED|PGO_SYNCIO);
1026 1.120.10.2 ad
1027 1.120.10.2 ad /* locked: nothing, pgo_fault has unlocked everything */
1028 1.120.10.2 ad
1029 1.120.10.2 ad if (error == ERESTART)
1030 1.120.10.2 ad goto ReFault; /* try again! */
1031 1.120.10.2 ad /*
1032 1.120.10.2 ad * object fault routine responsible for pmap_update().
1033 1.120.10.2 ad */
1034 1.120.10.2 ad goto done;
1035 1.120.10.2 ad }
1036 1.120.10.2 ad
1037 1.120.10.2 ad /*
1038 1.120.10.2 ad * now, if the desired page is not shadowed by the amap and we have
1039 1.120.10.2 ad * a backing object that does not have a special fault routine, then
1040 1.120.10.2 ad * we ask (with pgo_get) the object for resident pages that we care
1041 1.120.10.2 ad * about and attempt to map them in. we do not let pgo_get block
1042 1.120.10.2 ad * (PGO_LOCKED).
1043 1.120.10.2 ad */
1044 1.120.10.2 ad
1045 1.120.10.2 ad if (uobj && shadowed == false) {
1046 1.120.10.2 ad simple_lock(&uobj->vmobjlock);
1047 1.120.10.2 ad
1048 1.120.10.2 ad /* locked (!shadowed): maps(read), amap (if there), uobj */
1049 1.120.10.2 ad /*
1050 1.120.10.2 ad * the following call to pgo_get does _not_ change locking state
1051 1.120.10.2 ad */
1052 1.120.10.2 ad
1053 1.120.10.2 ad uvmexp.fltlget++;
1054 1.120.10.2 ad gotpages = npages;
1055 1.120.10.2 ad (void) uobj->pgops->pgo_get(uobj, ufi.entry->offset +
1056 1.120.10.2 ad (startva - ufi.entry->start),
1057 1.120.10.2 ad pages, &gotpages, centeridx,
1058 1.120.10.2 ad access_type & MASK(ufi.entry),
1059 1.120.10.2 ad ufi.entry->advice, PGO_LOCKED);
1060 1.120.10.2 ad
1061 1.120.10.2 ad /*
1062 1.120.10.2 ad * check for pages to map, if we got any
1063 1.120.10.2 ad */
1064 1.120.10.2 ad
1065 1.120.10.2 ad uobjpage = NULL;
1066 1.120.10.2 ad
1067 1.120.10.2 ad if (gotpages) {
1068 1.120.10.2 ad currva = startva;
1069 1.120.10.2 ad for (lcv = 0; lcv < npages;
1070 1.120.10.2 ad lcv++, currva += PAGE_SIZE) {
1071 1.120.10.2 ad struct vm_page *curpg;
1072 1.120.10.2 ad bool readonly;
1073 1.120.10.2 ad
1074 1.120.10.2 ad curpg = pages[lcv];
1075 1.120.10.2 ad if (curpg == NULL || curpg == PGO_DONTCARE) {
1076 1.120.10.2 ad continue;
1077 1.120.10.2 ad }
1078 1.120.10.2 ad KASSERT(curpg->uobject == uobj);
1079 1.120.10.2 ad
1080 1.120.10.2 ad /*
1081 1.120.10.2 ad * if center page is resident and not
1082 1.120.10.2 ad * PG_BUSY|PG_RELEASED then pgo_get
1083 1.120.10.2 ad * made it PG_BUSY for us and gave
1084 1.120.10.2 ad * us a handle to it. remember this
1085 1.120.10.2 ad * page as "uobjpage." (for later use).
1086 1.120.10.2 ad */
1087 1.120.10.2 ad
1088 1.120.10.2 ad if (lcv == centeridx) {
1089 1.120.10.2 ad uobjpage = curpg;
1090 1.120.10.2 ad UVMHIST_LOG(maphist, " got uobjpage "
1091 1.120.10.2 ad "(0x%x) with locked get",
1092 1.120.10.2 ad uobjpage, 0,0,0);
1093 1.120.10.2 ad continue;
1094 1.120.10.2 ad }
1095 1.120.10.2 ad
1096 1.120.10.2 ad /*
1097 1.120.10.2 ad * calling pgo_get with PGO_LOCKED returns us
1098 1.120.10.2 ad * pages which are neither busy nor released,
1099 1.120.10.2 ad * so we don't need to check for this.
1100 1.120.10.2 ad * we can just directly enter the pages.
1101 1.120.10.2 ad */
1102 1.120.10.2 ad
1103 1.120.10.2 ad uvm_lock_pageq();
1104 1.120.10.2 ad uvm_pageenqueue(curpg);
1105 1.120.10.2 ad uvm_unlock_pageq();
1106 1.120.10.2 ad UVMHIST_LOG(maphist,
1107 1.120.10.2 ad " MAPPING: n obj: pm=0x%x, va=0x%x, pg=0x%x",
1108 1.120.10.2 ad ufi.orig_map->pmap, currva, curpg, 0);
1109 1.120.10.2 ad uvmexp.fltnomap++;
1110 1.120.10.2 ad
1111 1.120.10.2 ad /*
1112 1.120.10.2 ad * Since this page isn't the page that's
1113 1.120.10.2 ad * actually faulting, ignore pmap_enter()
1114 1.120.10.2 ad * failures; it's not critical that we
1115 1.120.10.2 ad * enter these right now.
1116 1.120.10.2 ad */
1117 1.120.10.2 ad KASSERT((curpg->flags & PG_PAGEOUT) == 0);
1118 1.120.10.2 ad KASSERT((curpg->flags & PG_RELEASED) == 0);
1119 1.120.10.2 ad KASSERT(!UVM_OBJ_IS_CLEAN(curpg->uobject) ||
1120 1.120.10.2 ad (curpg->flags & PG_CLEAN) != 0);
1121 1.120.10.2 ad readonly = (curpg->flags & PG_RDONLY)
1122 1.120.10.2 ad || (curpg->loan_count > 0)
1123 1.120.10.2 ad || UVM_OBJ_NEEDS_WRITEFAULT(curpg->uobject);
1124 1.120.10.2 ad
1125 1.120.10.2 ad (void) pmap_enter(ufi.orig_map->pmap, currva,
1126 1.120.10.2 ad VM_PAGE_TO_PHYS(curpg),
1127 1.120.10.2 ad readonly ?
1128 1.120.10.2 ad enter_prot & ~VM_PROT_WRITE :
1129 1.120.10.2 ad enter_prot & MASK(ufi.entry),
1130 1.120.10.2 ad PMAP_CANFAIL |
1131 1.120.10.2 ad (wired ? PMAP_WIRED : 0));
1132 1.120.10.2 ad
1133 1.120.10.2 ad /*
1134 1.120.10.2 ad * NOTE: page can't be PG_WANTED or PG_RELEASED
1135 1.120.10.2 ad * because we've held the lock the whole time
1136 1.120.10.2 ad * we've had the handle.
1137 1.120.10.2 ad */
1138 1.120.10.2 ad KASSERT((curpg->flags & PG_WANTED) == 0);
1139 1.120.10.2 ad KASSERT((curpg->flags & PG_RELEASED) == 0);
1140 1.120.10.2 ad
1141 1.120.10.2 ad curpg->flags &= ~(PG_BUSY);
1142 1.120.10.2 ad UVM_PAGE_OWN(curpg, NULL);
1143 1.120.10.2 ad }
1144 1.120.10.2 ad pmap_update(ufi.orig_map->pmap);
1145 1.120.10.2 ad }
1146 1.120.10.2 ad } else {
1147 1.120.10.2 ad uobjpage = NULL;
1148 1.120.10.2 ad }
1149 1.120.10.2 ad
1150 1.120.10.2 ad /* locked (shadowed): maps(read), amap */
1151 1.120.10.2 ad /* locked (!shadowed): maps(read), amap(if there),
1152 1.120.10.2 ad uobj(if !null), uobjpage(if !null) */
1153 1.120.10.2 ad if (shadowed) {
1154 1.120.10.2 ad KASSERT(mutex_owned(&amap->am_l));
1155 1.120.10.2 ad } else {
1156 1.120.10.2 ad KASSERT(amap == NULL || mutex_owned(&amap->am_l));
1157 1.120.10.2 ad LOCK_ASSERT(uobj == NULL || simple_lock_held(&uobj->vmobjlock));
1158 1.120.10.2 ad KASSERT(uobjpage == NULL || (uobjpage->flags & PG_BUSY) != 0);
1159 1.120.10.2 ad }
1160 1.120.10.2 ad
1161 1.120.10.2 ad /*
1162 1.120.10.2 ad * note that at this point we are done with any front or back pages.
1163 1.120.10.2 ad * we are now going to focus on the center page (i.e. the one we've
1164 1.120.10.2 ad * faulted on). if we have faulted on the top (anon) layer
1165 1.120.10.2 ad * [i.e. case 1], then the anon we want is anons[centeridx] (we have
1166 1.120.10.2 ad * not touched it yet). if we have faulted on the bottom (uobj)
1167 1.120.10.2 ad * layer [i.e. case 2] and the page was both present and available,
1168 1.120.10.2 ad * then we've got a pointer to it as "uobjpage" and we've already
1169 1.120.10.2 ad * made it BUSY.
1170 1.120.10.2 ad */
1171 1.120.10.2 ad
1172 1.120.10.2 ad /*
1173 1.120.10.2 ad * there are four possible cases we must address: 1A, 1B, 2A, and 2B
1174 1.120.10.2 ad */
1175 1.120.10.2 ad
1176 1.120.10.2 ad /*
1177 1.120.10.2 ad * redirect case 2: if we are not shadowed, go to case 2.
1178 1.120.10.2 ad */
1179 1.120.10.2 ad
1180 1.120.10.2 ad if (shadowed == false)
1181 1.120.10.2 ad goto Case2;
1182 1.120.10.2 ad
1183 1.120.10.2 ad /* locked: maps(read), amap */
1184 1.120.10.2 ad
1185 1.120.10.2 ad /*
1186 1.120.10.2 ad * handle case 1: fault on an anon in our amap
1187 1.120.10.2 ad */
1188 1.120.10.2 ad
1189 1.120.10.2 ad anon = anons[centeridx];
1190 1.120.10.2 ad UVMHIST_LOG(maphist, " case 1 fault: anon=0x%x", anon, 0,0,0);
1191 1.120.10.2 ad simple_lock(&anon->an_lock);
1192 1.120.10.2 ad
1193 1.120.10.2 ad /* locked: maps(read), amap, anon */
1194 1.120.10.2 ad KASSERT(mutex_owned(&amap->am_l));
1195 1.120.10.2 ad LOCK_ASSERT(simple_lock_held(&anon->an_lock));
1196 1.120.10.2 ad
1197 1.120.10.2 ad /*
1198 1.120.10.2 ad * no matter if we have case 1A or case 1B we are going to need to
1199 1.120.10.2 ad * have the anon's memory resident. ensure that now.
1200 1.120.10.2 ad */
1201 1.120.10.2 ad
1202 1.120.10.2 ad /*
1203 1.120.10.2 ad * let uvmfault_anonget do the dirty work.
1204 1.120.10.2 ad * if it fails (!OK) it will unlock everything for us.
1205 1.120.10.2 ad * if it succeeds, locks are still valid and locked.
1206 1.120.10.2 ad * also, if it is OK, then the anon's page is on the queues.
1207 1.120.10.2 ad * if the page is on loan from a uvm_object, then anonget will
1208 1.120.10.2 ad * lock that object for us if it does not fail.
1209 1.120.10.2 ad */
1210 1.120.10.2 ad
1211 1.120.10.2 ad error = uvmfault_anonget(&ufi, amap, anon);
1212 1.120.10.2 ad switch (error) {
1213 1.120.10.2 ad case 0:
1214 1.120.10.2 ad break;
1215 1.120.10.2 ad
1216 1.120.10.2 ad case ERESTART:
1217 1.120.10.2 ad goto ReFault;
1218 1.120.10.2 ad
1219 1.120.10.2 ad case EAGAIN:
1220 1.120.10.2 ad tsleep(&lbolt, PVM, "fltagain1", 0);
1221 1.120.10.2 ad goto ReFault;
1222 1.120.10.2 ad
1223 1.120.10.2 ad default:
1224 1.120.10.2 ad goto done;
1225 1.120.10.2 ad }
1226 1.120.10.2 ad
1227 1.120.10.2 ad /*
1228 1.120.10.2 ad * uobj is non null if the page is on loan from an object (i.e. uobj)
1229 1.120.10.2 ad */
1230 1.120.10.2 ad
1231 1.120.10.2 ad uobj = anon->an_page->uobject; /* locked by anonget if !NULL */
1232 1.120.10.2 ad
1233 1.120.10.2 ad /* locked: maps(read), amap, anon, uobj(if one) */
1234 1.120.10.2 ad KASSERT(mutex_owned(&amap->am_l));
1235 1.120.10.2 ad LOCK_ASSERT(simple_lock_held(&anon->an_lock));
1236 1.120.10.2 ad LOCK_ASSERT(uobj == NULL || simple_lock_held(&uobj->vmobjlock));
1237 1.120.10.2 ad
1238 1.120.10.2 ad /*
1239 1.120.10.2 ad * special handling for loaned pages
1240 1.120.10.2 ad */
1241 1.120.10.2 ad
1242 1.120.10.2 ad if (anon->an_page->loan_count) {
1243 1.120.10.2 ad
1244 1.120.10.2 ad if (!cow_now) {
1245 1.120.10.2 ad
1246 1.120.10.2 ad /*
1247 1.120.10.2 ad * for read faults on loaned pages we just cap the
1248 1.120.10.2 ad * protection at read-only.
1249 1.120.10.2 ad */
1250 1.120.10.2 ad
1251 1.120.10.2 ad enter_prot = enter_prot & ~VM_PROT_WRITE;
1252 1.120.10.2 ad
1253 1.120.10.2 ad } else {
1254 1.120.10.2 ad /*
1255 1.120.10.2 ad * note that we can't allow writes into a loaned page!
1256 1.120.10.2 ad *
1257 1.120.10.2 ad * if we have a write fault on a loaned page in an
1258 1.120.10.2 ad * anon then we need to look at the anon's ref count.
1259 1.120.10.2 ad * if it is greater than one then we are going to do
1260 1.120.10.2 ad * a normal copy-on-write fault into a new anon (this
1261 1.120.10.2 ad * is not a problem). however, if the reference count
1262 1.120.10.2 ad * is one (a case where we would normally allow a
1263 1.120.10.2 ad * write directly to the page) then we need to kill
1264 1.120.10.2 ad * the loan before we continue.
1265 1.120.10.2 ad */
1266 1.120.10.2 ad
1267 1.120.10.2 ad /* >1 case is already ok */
1268 1.120.10.2 ad if (anon->an_ref == 1) {
1269 1.120.10.2 ad
1270 1.120.10.2 ad /* get new un-owned replacement page */
1271 1.120.10.2 ad pg = uvm_pagealloc(NULL, 0, NULL, 0);
1272 1.120.10.2 ad if (pg == NULL) {
1273 1.120.10.2 ad uvmfault_unlockall(&ufi, amap, uobj,
1274 1.120.10.2 ad anon);
1275 1.120.10.2 ad uvm_wait("flt_noram2");
1276 1.120.10.2 ad goto ReFault;
1277 1.120.10.2 ad }
1278 1.120.10.2 ad
1279 1.120.10.2 ad /*
1280 1.120.10.2 ad * copy data, kill loan, and drop uobj lock
1281 1.120.10.2 ad * (if any)
1282 1.120.10.2 ad */
1283 1.120.10.2 ad /* copy old -> new */
1284 1.120.10.2 ad uvm_pagecopy(anon->an_page, pg);
1285 1.120.10.2 ad
1286 1.120.10.2 ad /* force reload */
1287 1.120.10.2 ad pmap_page_protect(anon->an_page, VM_PROT_NONE);
1288 1.120.10.2 ad uvm_lock_pageq(); /* KILL loan */
1289 1.120.10.2 ad
1290 1.120.10.2 ad anon->an_page->uanon = NULL;
1291 1.120.10.2 ad /* in case we owned */
1292 1.120.10.2 ad anon->an_page->pqflags &= ~PQ_ANON;
1293 1.120.10.2 ad
1294 1.120.10.2 ad if (uobj) {
1295 1.120.10.2 ad /* if we were receiver of loan */
1296 1.120.10.2 ad anon->an_page->loan_count--;
1297 1.120.10.2 ad } else {
1298 1.120.10.2 ad /*
1299 1.120.10.2 ad * we were the lender (A->K); need
1300 1.120.10.2 ad * to remove the page from pageq's.
1301 1.120.10.2 ad */
1302 1.120.10.2 ad uvm_pagedequeue(anon->an_page);
1303 1.120.10.2 ad }
1304 1.120.10.2 ad
1305 1.120.10.2 ad if (uobj) {
1306 1.120.10.2 ad simple_unlock(&uobj->vmobjlock);
1307 1.120.10.2 ad uobj = NULL;
1308 1.120.10.2 ad }
1309 1.120.10.2 ad
1310 1.120.10.2 ad /* install new page in anon */
1311 1.120.10.2 ad anon->an_page = pg;
1312 1.120.10.2 ad pg->uanon = anon;
1313 1.120.10.2 ad pg->pqflags |= PQ_ANON;
1314 1.120.10.2 ad
1315 1.120.10.2 ad uvm_pageactivate(pg);
1316 1.120.10.2 ad uvm_unlock_pageq();
1317 1.120.10.2 ad
1318 1.120.10.2 ad pg->flags &= ~(PG_BUSY|PG_FAKE);
1319 1.120.10.2 ad UVM_PAGE_OWN(pg, NULL);
1320 1.120.10.2 ad
1321 1.120.10.2 ad /* done! */
1322 1.120.10.2 ad } /* ref == 1 */
1323 1.120.10.2 ad } /* write fault */
1324 1.120.10.2 ad } /* loan count */
1325 1.120.10.2 ad
1326 1.120.10.2 ad /*
1327 1.120.10.2 ad * if we are case 1B then we will need to allocate a new blank
1328 1.120.10.2 ad * anon to transfer the data into. note that we have a lock
1329 1.120.10.2 ad * on anon, so no one can busy or release the page until we are done.
1330 1.120.10.2 ad * also note that the ref count can't drop to zero here because
1331 1.120.10.2 ad * it is > 1 and we are only dropping one ref.
1332 1.120.10.2 ad *
1333 1.120.10.2 ad * in the (hopefully very rare) case that we are out of RAM we
1334 1.120.10.2 ad * will unlock, wait for more RAM, and refault.
1335 1.120.10.2 ad *
1336 1.120.10.2 ad * if we are out of anon VM we kill the process (XXX: could wait?).
1337 1.120.10.2 ad */
1338 1.120.10.2 ad
1339 1.120.10.2 ad if (cow_now && anon->an_ref > 1) {
1340 1.120.10.2 ad
1341 1.120.10.2 ad UVMHIST_LOG(maphist, " case 1B: COW fault",0,0,0,0);
1342 1.120.10.2 ad uvmexp.flt_acow++;
1343 1.120.10.2 ad oanon = anon; /* oanon = old, locked anon */
1344 1.120.10.2 ad
1345 1.120.10.2 ad error = uvmfault_promote(&ufi, oanon, PGO_DONTCARE,
1346 1.120.10.2 ad &anon, &anon_spare);
1347 1.120.10.2 ad switch (error) {
1348 1.120.10.2 ad case 0:
1349 1.120.10.2 ad break;
1350 1.120.10.2 ad case ERESTART:
1351 1.120.10.2 ad goto ReFault;
1352 1.120.10.2 ad default:
1353 1.120.10.2 ad goto done;
1354 1.120.10.2 ad }
1355 1.120.10.2 ad
1356 1.120.10.2 ad pg = anon->an_page;
1357 1.120.10.2 ad uvm_lock_pageq();
1358 1.120.10.2 ad uvm_pageactivate(pg);
1359 1.120.10.2 ad uvm_unlock_pageq();
1360 1.120.10.2 ad pg->flags &= ~(PG_BUSY|PG_FAKE);
1361 1.120.10.2 ad UVM_PAGE_OWN(pg, NULL);
1362 1.120.10.2 ad
1363 1.120.10.2 ad /* deref: can not drop to zero here by defn! */
1364 1.120.10.2 ad oanon->an_ref--;
1365 1.120.10.2 ad
1366 1.120.10.2 ad /*
1367 1.120.10.2 ad * note: oanon is still locked, as is the new anon. we
1368 1.120.10.2 ad * need to check for this later when we unlock oanon; if
1369 1.120.10.2 ad * oanon != anon, we'll have to unlock anon, too.
1370 1.120.10.2 ad */
1371 1.120.10.2 ad
1372 1.120.10.2 ad } else {
1373 1.120.10.2 ad
1374 1.120.10.2 ad uvmexp.flt_anon++;
1375 1.120.10.2 ad oanon = anon; /* old, locked anon is same as anon */
1376 1.120.10.2 ad pg = anon->an_page;
1377 1.120.10.2 ad if (anon->an_ref > 1) /* disallow writes to ref > 1 anons */
1378 1.120.10.2 ad enter_prot = enter_prot & ~VM_PROT_WRITE;
1379 1.120.10.2 ad
1380 1.120.10.2 ad }
1381 1.120.10.2 ad
1382 1.120.10.2 ad /* locked: maps(read), amap, oanon, anon (if different from oanon) */
1383 1.120.10.2 ad KASSERT(mutex_owned(&amap->am_l));
1384 1.120.10.2 ad LOCK_ASSERT(simple_lock_held(&anon->an_lock));
1385 1.120.10.2 ad LOCK_ASSERT(simple_lock_held(&oanon->an_lock));
1386 1.120.10.2 ad
1387 1.120.10.2 ad /*
1388 1.120.10.2 ad * now map the page in.
1389 1.120.10.2 ad */
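
	/*
	 * PMAP_CANFAIL asks the pmap to return an error instead of
	 * panicking or sleeping when it cannot allocate the resources
	 * needed for the mapping; the failure path below then waits for
	 * memory and refaults instead of losing the fault.
	 */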
1390 1.120.10.2 ad
1391 1.120.10.2 ad UVMHIST_LOG(maphist, " MAPPING: anon: pm=0x%x, va=0x%x, pg=0x%x",
1392 1.120.10.2 ad ufi.orig_map->pmap, ufi.orig_rvaddr, pg, 0);
1393 1.120.10.2 ad if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
1394 1.120.10.2 ad enter_prot, access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0))
1395 1.120.10.2 ad != 0) {
1396 1.120.10.2 ad
1397 1.120.10.2 ad /*
1398 1.120.10.2 ad * No need to undo what we did; we can simply think of
1399 1.120.10.2 ad * this as the pmap throwing away the mapping information.
1400 1.120.10.2 ad *
1401 1.120.10.2 ad * We do, however, have to go through the ReFault path,
1402 1.120.10.2 ad * as the map may change while we're asleep.
1403 1.120.10.2 ad */
1404 1.120.10.2 ad
1405 1.120.10.2 ad if (anon != oanon)
1406 1.120.10.2 ad simple_unlock(&anon->an_lock);
1407 1.120.10.2 ad uvmfault_unlockall(&ufi, amap, uobj, oanon);
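		/*
		 * if the pagedaemon cannot make progress (for example, no
		 * swap space left and nothing left to page out), sleeping
		 * in uvm_wait() could hang forever, so fail with ENOMEM
		 * instead of retrying.
		 */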
1408 1.120.10.2 ad if (!uvm_reclaimable()) {
1409 1.120.10.2 ad UVMHIST_LOG(maphist,
1410 1.120.10.2 ad "<- failed. out of VM",0,0,0,0);
1411 1.120.10.2 ad /* XXX instrumentation */
1412 1.120.10.2 ad error = ENOMEM;
1413 1.120.10.2 ad goto done;
1414 1.120.10.2 ad }
1415 1.120.10.2 ad /* XXX instrumentation */
1416 1.120.10.2 ad uvm_wait("flt_pmfail1");
1417 1.120.10.2 ad goto ReFault;
1418 1.120.10.2 ad }
1419 1.120.10.2 ad
1420 1.120.10.2 ad /*
1421 1.120.10.2 ad * ... update the page queues.
1422 1.120.10.2 ad */
1423 1.120.10.2 ad
1424 1.120.10.2 ad uvm_lock_pageq();
1425 1.120.10.2 ad if (wire_fault) {
1426 1.120.10.2 ad uvm_pagewire(pg);
1427 1.120.10.2 ad
1428 1.120.10.2 ad /*
1429 1.120.10.2 ad * since the now-wired page cannot be paged out,
1430 1.120.10.2 ad * release its swap resources for others to use.
1431 1.120.10.2 ad * since an anon with no swap cannot be PG_CLEAN,
1432 1.120.10.2 ad * clear its clean flag now.
1433 1.120.10.2 ad */
1434 1.120.10.2 ad
1435 1.120.10.2 ad pg->flags &= ~(PG_CLEAN);
1436 1.120.10.2 ad uvm_anon_dropswap(anon);
1437 1.120.10.2 ad } else {
1438 1.120.10.2 ad uvm_pageactivate(pg);
1439 1.120.10.2 ad }
1440 1.120.10.2 ad uvm_unlock_pageq();
1441 1.120.10.2 ad
1442 1.120.10.2 ad /*
1443 1.120.10.2 ad * done case 1! finish up by unlocking everything and returning success
1444 1.120.10.2 ad */
1445 1.120.10.2 ad
1446 1.120.10.2 ad if (anon != oanon)
1447 1.120.10.2 ad simple_unlock(&anon->an_lock);
1448 1.120.10.2 ad uvmfault_unlockall(&ufi, amap, uobj, oanon);
1449 1.120.10.2 ad pmap_update(ufi.orig_map->pmap);
1450 1.120.10.2 ad error = 0;
1451 1.120.10.2 ad goto done;
1452 1.120.10.2 ad
1453 1.120.10.2 ad Case2:
1454 1.120.10.2 ad /*
1455 1.120.10.2 ad * handle case 2: faulting on backing object or zero fill
1456 1.120.10.2 ad */
1457 1.120.10.2 ad
1458 1.120.10.2 ad /*
1459 1.120.10.2 ad * locked:
1460 1.120.10.2 ad * maps(read), amap(if there), uobj(if !null), uobjpage(if !null)
1461 1.120.10.2 ad */
1462 1.120.10.2 ad KASSERT(amap == NULL || mutex_owned(&amap->am_l));
1463 1.120.10.2 ad LOCK_ASSERT(uobj == NULL || simple_lock_held(&uobj->vmobjlock));
1464 1.120.10.2 ad KASSERT(uobjpage == NULL || (uobjpage->flags & PG_BUSY) != 0);
1465 1.120.10.2 ad
1466 1.120.10.2 ad /*
1467 1.120.10.2 ad * note that uobjpage can not be PGO_DONTCARE at this point. we now
1468 1.120.10.2 ad * set uobjpage to PGO_DONTCARE if we are doing a zero fill. if we
1469 1.120.10.2 ad * have a backing object, check and see if we are going to promote
1470 1.120.10.2 ad * the data up to an anon during the fault.
1471 1.120.10.2 ad */
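
	/*
	 * in other words, "promote" means the fault will be satisfied
	 * with a new anon page rather than with the object's own page:
	 * always for zero-fill (there is no object to fault on), and for
	 * a write fault on a copy-on-write mapping backed by an object.
	 */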
1472 1.120.10.2 ad
1473 1.120.10.2 ad if (uobj == NULL) {
1474 1.120.10.2 ad uobjpage = PGO_DONTCARE;
1475 1.120.10.2 ad promote = true; /* always need anon here */
1476 1.120.10.2 ad } else {
1477 1.120.10.2 ad KASSERT(uobjpage != PGO_DONTCARE);
1478 1.120.10.2 ad promote = cow_now && UVM_ET_ISCOPYONWRITE(ufi.entry);
1479 1.120.10.2 ad }
1480 1.120.10.2 ad UVMHIST_LOG(maphist, " case 2 fault: promote=%d, zfill=%d",
1481 1.120.10.2 ad promote, (uobj == NULL), 0,0);
1482 1.120.10.2 ad
1483 1.120.10.2 ad /*
1484 1.120.10.2 ad * if uobjpage is not null then we do not need to do I/O to get the
1485 1.120.10.2 ad * uobjpage.
1486 1.120.10.2 ad *
1487 1.120.10.2 ad * if uobjpage is null, then we need to unlock and ask the pager to
1488 1.120.10.2 ad * get the data for us. once we have the data, we need to reverify
1489 1.120.10.2 ad 	 * the state of the world.  we are currently not holding any resources.
1490 1.120.10.2 ad */
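
	/*
	 * a note on the pager call below: pgo_get is entered with the
	 * object locked; with PGO_SYNCIO it may sleep for I/O, dropping
	 * the object lock while it does, and on success it returns the
	 * requested page PG_BUSY.  on return we therefore hold nothing
	 * but the busy page and must relock everything ourselves.
	 */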
1491 1.120.10.2 ad
1492 1.120.10.2 ad if (uobjpage) {
1493 1.120.10.2 ad /* update rusage counters */
1494 1.120.10.2 ad curproc->p_stats->p_ru.ru_minflt++;
1495 1.120.10.2 ad } else {
1496 1.120.10.2 ad /* update rusage counters */
1497 1.120.10.2 ad curproc->p_stats->p_ru.ru_majflt++;
1498 1.120.10.2 ad
1499 1.120.10.2 ad /* locked: maps(read), amap(if there), uobj */
1500 1.120.10.2 ad uvmfault_unlockall(&ufi, amap, NULL, NULL);
1501 1.120.10.2 ad /* locked: uobj */
1502 1.120.10.2 ad
1503 1.120.10.2 ad uvmexp.fltget++;
1504 1.120.10.2 ad gotpages = 1;
1505 1.120.10.2 ad uoff = (ufi.orig_rvaddr - ufi.entry->start) + ufi.entry->offset;
1506 1.120.10.2 ad error = uobj->pgops->pgo_get(uobj, uoff, &uobjpage, &gotpages,
1507 1.120.10.2 ad 0, access_type & MASK(ufi.entry), ufi.entry->advice,
1508 1.120.10.2 ad PGO_SYNCIO);
1509 1.120.10.2 ad /* locked: uobjpage(if no error) */
1510 1.120.10.2 ad KASSERT(error != 0 || (uobjpage->flags & PG_BUSY) != 0);
1511 1.120.10.2 ad
1512 1.120.10.2 ad /*
1513 1.120.10.2 ad * recover from I/O
1514 1.120.10.2 ad */
1515 1.120.10.2 ad
1516 1.120.10.2 ad if (error) {
1517 1.120.10.2 ad if (error == EAGAIN) {
1518 1.120.10.2 ad UVMHIST_LOG(maphist,
1519 1.120.10.2 ad " pgo_get says TRY AGAIN!",0,0,0,0);
1520 1.120.10.2 ad tsleep(&lbolt, PVM, "fltagain2", 0);
1521 1.120.10.2 ad goto ReFault;
1522 1.120.10.2 ad }
1523 1.120.10.2 ad
1524 1.120.10.2 ad UVMHIST_LOG(maphist, "<- pgo_get failed (code %d)",
1525 1.120.10.2 ad error, 0,0,0);
1526 1.120.10.2 ad goto done;
1527 1.120.10.2 ad }
1528 1.120.10.2 ad
1529 1.120.10.2 ad /* locked: uobjpage */
1530 1.120.10.2 ad
1531 1.120.10.2 ad uvm_lock_pageq();
1532 1.120.10.2 ad uvm_pageactivate(uobjpage);
1533 1.120.10.2 ad uvm_unlock_pageq();
1534 1.120.10.2 ad
1535 1.120.10.2 ad /*
1536 1.120.10.2 ad * re-verify the state of the world by first trying to relock
1537 1.120.10.2 ad * the maps. always relock the object.
1538 1.120.10.2 ad */
1539 1.120.10.2 ad
1540 1.120.10.2 ad locked = uvmfault_relock(&ufi);
1541 1.120.10.2 ad if (locked && amap)
1542 1.120.10.2 ad amap_lock(amap);
1543 1.120.10.2 ad uobj = uobjpage->uobject;
1544 1.120.10.2 ad simple_lock(&uobj->vmobjlock);
1545 1.120.10.2 ad
1546 1.120.10.2 ad /* locked(locked): maps(read), amap(if !null), uobj, uobjpage */
1547 1.120.10.2 ad /* locked(!locked): uobj, uobjpage */
1548 1.120.10.2 ad
1549 1.120.10.2 ad /*
1550 1.120.10.2 ad 			 * verify that the page has not been released and re-verify
1551 1.120.10.2 ad 			 * that the amap slot is still free.  if there is a problem,
1552 1.120.10.2 ad * we unlock and clean up.
1553 1.120.10.2 ad */
1554 1.120.10.2 ad
1555 1.120.10.2 ad if ((uobjpage->flags & PG_RELEASED) != 0 ||
1556 1.120.10.2 ad (locked && amap &&
1557 1.120.10.2 ad amap_lookup(&ufi.entry->aref,
1558 1.120.10.2 ad ufi.orig_rvaddr - ufi.entry->start))) {
1559 1.120.10.2 ad if (locked)
1560 1.120.10.2 ad uvmfault_unlockall(&ufi, amap, NULL, NULL);
1561 1.120.10.2 ad locked = false;
1562 1.120.10.2 ad }
1563 1.120.10.2 ad
1564 1.120.10.2 ad /*
1565 1.120.10.2 ad * didn't get the lock? release the page and retry.
1566 1.120.10.2 ad */
1567 1.120.10.2 ad
1568 1.120.10.2 ad if (locked == false) {
1569 1.120.10.2 ad UVMHIST_LOG(maphist,
1570 1.120.10.2 ad " wasn't able to relock after fault: retry",
1571 1.120.10.2 ad 0,0,0,0);
1572 1.120.10.2 ad if (uobjpage->flags & PG_WANTED)
1573 1.120.10.2 ad wakeup(uobjpage);
1574 1.120.10.2 ad if (uobjpage->flags & PG_RELEASED) {
1575 1.120.10.2 ad uvmexp.fltpgrele++;
1576 1.120.10.2 ad uvm_pagefree(uobjpage);
1577 1.120.10.2 ad goto ReFault;
1578 1.120.10.2 ad }
1579 1.120.10.2 ad uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
1580 1.120.10.2 ad UVM_PAGE_OWN(uobjpage, NULL);
1581 1.120.10.2 ad simple_unlock(&uobj->vmobjlock);
1582 1.120.10.2 ad goto ReFault;
1583 1.120.10.2 ad }
1584 1.120.10.2 ad
1585 1.120.10.2 ad /*
1586 1.120.10.2 ad * we have the data in uobjpage which is busy and
1587 1.120.10.2 ad 		 * not released.  we are holding the object lock (so the page
1588 1.120.10.2 ad * can't be released on us).
1589 1.120.10.2 ad */
1590 1.120.10.2 ad
1591 1.120.10.2 ad /* locked: maps(read), amap(if !null), uobj, uobjpage */
1592 1.120.10.2 ad }
1593 1.120.10.2 ad
1594 1.120.10.2 ad /*
1595 1.120.10.2 ad * locked:
1596 1.120.10.2 ad * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj)
1597 1.120.10.2 ad */
1598 1.120.10.2 ad KASSERT(amap == NULL || mutex_owned(&amap->am_l));
1599 1.120.10.2 ad LOCK_ASSERT(uobj == NULL || simple_lock_held(&uobj->vmobjlock));
1600 1.120.10.2 ad KASSERT(uobj == NULL || (uobjpage->flags & PG_BUSY) != 0);
1601 1.120.10.2 ad
1602 1.120.10.2 ad /*
1603 1.120.10.2 ad * notes:
1604 1.120.10.2 ad * - at this point uobjpage can not be NULL
1605 1.120.10.2 ad * - at this point uobjpage can not be PG_RELEASED (since we checked
1606 1.120.10.2 ad * for it above)
1607 1.120.10.2 ad * - at this point uobjpage could be PG_WANTED (handle later)
1608 1.120.10.2 ad */
1609 1.120.10.2 ad
1610 1.120.10.2 ad KASSERT(uobj == NULL || uobj == uobjpage->uobject);
1611 1.120.10.2 ad KASSERT(uobj == NULL || !UVM_OBJ_IS_CLEAN(uobjpage->uobject) ||
1612 1.120.10.2 ad (uobjpage->flags & PG_CLEAN) != 0);
1613 1.120.10.2 ad if (promote == false) {
1614 1.120.10.2 ad
1615 1.120.10.2 ad /*
1616 1.120.10.2 ad 		 * we are not promoting.  if the mapping is COW, ensure that we
1617 1.120.10.2 ad * don't give more access than we should (e.g. when doing a read
1618 1.120.10.2 ad * fault on a COPYONWRITE mapping we want to map the COW page in
1619 1.120.10.2 ad * R/O even though the entry protection could be R/W).
1620 1.120.10.2 ad *
1621 1.120.10.2 ad * set "pg" to the page we want to map in (uobjpage, usually)
1622 1.120.10.2 ad */
1623 1.120.10.2 ad
1624 1.120.10.2 ad /* no anon in this case. */
1625 1.120.10.2 ad anon = NULL;
1626 1.120.10.2 ad
1627 1.120.10.2 ad uvmexp.flt_obj++;
1628 1.120.10.2 ad if (UVM_ET_ISCOPYONWRITE(ufi.entry) ||
1629 1.120.10.2 ad UVM_OBJ_NEEDS_WRITEFAULT(uobjpage->uobject))
1630 1.120.10.2 ad enter_prot &= ~VM_PROT_WRITE;
1631 1.120.10.2 ad pg = uobjpage; /* map in the actual object */
1632 1.120.10.2 ad
1633 1.120.10.2 ad KASSERT(uobjpage != PGO_DONTCARE);
1634 1.120.10.2 ad
1635 1.120.10.2 ad /*
1636 1.120.10.2 ad * we are faulting directly on the page. be careful
1637 1.120.10.2 ad * about writing to loaned pages...
1638 1.120.10.2 ad */
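
		/*
		 * a rough sketch of what uvm_loanbreak() does for us in
		 * the write case: it allocates a fresh page, copies the
		 * loaned page into it, and substitutes the new page for
		 * the old one in the object, so that our write cannot be
		 * seen by whoever the old page was loaned to.  it returns
		 * the new busy page, or NULL if no page could be allocated.
		 */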
1639 1.120.10.2 ad
1640 1.120.10.2 ad if (uobjpage->loan_count) {
1641 1.120.10.2 ad if (!cow_now) {
1642 1.120.10.2 ad 				/* read fault: cap the protection at read-only */
1644 1.120.10.2 ad 				enter_prot = enter_prot & ~VM_PROT_WRITE;
1645 1.120.10.2 ad } else {
1646 1.120.10.2 ad /* write fault: must break the loan here */
1647 1.120.10.2 ad
1648 1.120.10.2 ad pg = uvm_loanbreak(uobjpage);
1649 1.120.10.2 ad if (pg == NULL) {
1650 1.120.10.2 ad
1651 1.120.10.2 ad /*
1652 1.120.10.2 ad * drop ownership of page, it can't
1653 1.120.10.2 ad * be released
1654 1.120.10.2 ad */
1655 1.120.10.2 ad
1656 1.120.10.2 ad if (uobjpage->flags & PG_WANTED)
1657 1.120.10.2 ad wakeup(uobjpage);
1658 1.120.10.2 ad uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
1659 1.120.10.2 ad UVM_PAGE_OWN(uobjpage, NULL);
1660 1.120.10.2 ad
1661 1.120.10.2 ad uvmfault_unlockall(&ufi, amap, uobj,
1662 1.120.10.2 ad NULL);
1663 1.120.10.2 ad UVMHIST_LOG(maphist,
1664 1.120.10.2 ad " out of RAM breaking loan, waiting",
1665 1.120.10.2 ad 0,0,0,0);
1666 1.120.10.2 ad uvmexp.fltnoram++;
1667 1.120.10.2 ad uvm_wait("flt_noram4");
1668 1.120.10.2 ad goto ReFault;
1669 1.120.10.2 ad }
1670 1.120.10.2 ad uobjpage = pg;
1671 1.120.10.2 ad }
1672 1.120.10.2 ad }
1673 1.120.10.2 ad } else {
1674 1.120.10.2 ad
1675 1.120.10.2 ad /*
1676 1.120.10.2 ad * if we are going to promote the data to an anon we
1677 1.120.10.2 ad * allocate a blank anon here and plug it into our amap.
1678 1.120.10.2 ad */
1679 1.120.10.2 ad 		KASSERT(amap != NULL);
1683 1.120.10.2 ad error = uvmfault_promote(&ufi, NULL, uobjpage,
1684 1.120.10.2 ad &anon, &anon_spare);
1685 1.120.10.2 ad switch (error) {
1686 1.120.10.2 ad case 0:
1687 1.120.10.2 ad break;
1688 1.120.10.2 ad case ERESTART:
1689 1.120.10.2 ad goto ReFault;
1690 1.120.10.2 ad default:
1691 1.120.10.2 ad goto done;
1692 1.120.10.2 ad }
1693 1.120.10.2 ad
1694 1.120.10.2 ad pg = anon->an_page;
1695 1.120.10.2 ad
1696 1.120.10.2 ad /*
1697 1.120.10.2 ad * fill in the data
1698 1.120.10.2 ad */
1699 1.120.10.2 ad
1700 1.120.10.2 ad if (uobjpage != PGO_DONTCARE) {
1701 1.120.10.2 ad uvmexp.flt_prcopy++;
1702 1.120.10.2 ad
1703 1.120.10.2 ad /*
1704 1.120.10.2 ad * promote to shared amap? make sure all sharing
1705 1.120.10.2 ad * procs see it
1706 1.120.10.2 ad */
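
			/*
			 * other processes sharing this amap may already
			 * have uobjpage mapped.  removing every mapping
			 * of the page forces them to refault and pick up
			 * the new anon page from the shared amap instead
			 * of continuing to use the object's page.
			 */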
1707 1.120.10.2 ad
1708 1.120.10.2 ad if ((amap_flags(amap) & AMAP_SHARED) != 0) {
1709 1.120.10.2 ad pmap_page_protect(uobjpage, VM_PROT_NONE);
1710 1.120.10.2 ad /*
1711 1.120.10.2 ad * XXX: PAGE MIGHT BE WIRED!
1712 1.120.10.2 ad */
1713 1.120.10.2 ad }
1714 1.120.10.2 ad
1715 1.120.10.2 ad /*
1716 1.120.10.2 ad * dispose of uobjpage. it can't be PG_RELEASED
1717 1.120.10.2 ad * since we still hold the object lock.
1718 1.120.10.2 ad * drop handle to uobj as well.
1719 1.120.10.2 ad */
1720 1.120.10.2 ad
1721 1.120.10.2 ad if (uobjpage->flags & PG_WANTED)
1722 1.120.10.2 ad /* still have the obj lock */
1723 1.120.10.2 ad wakeup(uobjpage);
1724 1.120.10.2 ad uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
1725 1.120.10.2 ad UVM_PAGE_OWN(uobjpage, NULL);
1726 1.120.10.2 ad simple_unlock(&uobj->vmobjlock);
1727 1.120.10.2 ad uobj = NULL;
1728 1.120.10.2 ad
1729 1.120.10.2 ad UVMHIST_LOG(maphist,
1730 1.120.10.2 ad " promote uobjpage 0x%x to anon/page 0x%x/0x%x",
1731 1.120.10.2 ad uobjpage, anon, pg, 0);
1732 1.120.10.2 ad
1733 1.120.10.2 ad } else {
1734 1.120.10.2 ad uvmexp.flt_przero++;
1735 1.120.10.2 ad
1736 1.120.10.2 ad /*
1737 1.120.10.2 ad * Page is zero'd and marked dirty by
1738 1.120.10.2 ad * uvmfault_promote().
1739 1.120.10.2 ad */
1740 1.120.10.2 ad
1741 1.120.10.2 ad 			UVMHIST_LOG(maphist," zero fill anon/page 0x%x/0x%x",
1742 1.120.10.2 ad anon, pg, 0, 0);
1743 1.120.10.2 ad }
1744 1.120.10.2 ad }
1745 1.120.10.2 ad
1746 1.120.10.2 ad /*
1747 1.120.10.2 ad * locked:
1748 1.120.10.2 ad * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj),
1749 1.120.10.2 ad * anon(if !null), pg(if anon)
1750 1.120.10.2 ad *
1751 1.120.10.2 ad * note: pg is either the uobjpage or the new page in the new anon
1752 1.120.10.2 ad */
1753 1.120.10.2 ad KASSERT(amap == NULL || mutex_owned(&amap->am_l));
1754 1.120.10.2 ad LOCK_ASSERT(uobj == NULL || simple_lock_held(&uobj->vmobjlock));
1755 1.120.10.2 ad KASSERT(uobj == NULL || (uobjpage->flags & PG_BUSY) != 0);
1756 1.120.10.2 ad LOCK_ASSERT(anon == NULL || simple_lock_held(&anon->an_lock));
1757 1.120.10.2 ad KASSERT((pg->flags & PG_BUSY) != 0);
1758 1.120.10.2 ad
1759 1.120.10.2 ad /*
1760 1.120.10.2 ad * all resources are present. we can now map it in and free our
1761 1.120.10.2 ad * resources.
1762 1.120.10.2 ad */
1763 1.120.10.2 ad
1764 1.120.10.2 ad UVMHIST_LOG(maphist,
1765 1.120.10.2 ad " MAPPING: case2: pm=0x%x, va=0x%x, pg=0x%x, promote=%d",
1766 1.120.10.2 ad ufi.orig_map->pmap, ufi.orig_rvaddr, pg, promote);
1767 1.120.10.2 ad KASSERT((access_type & VM_PROT_WRITE) == 0 ||
1768 1.120.10.2 ad (pg->flags & PG_RDONLY) == 0);
1769 1.120.10.2 ad if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
1770 1.120.10.2 ad pg->flags & PG_RDONLY ? enter_prot & ~VM_PROT_WRITE : enter_prot,
1771 1.120.10.2 ad access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0)) != 0) {
1772 1.120.10.2 ad
1773 1.120.10.2 ad /*
1774 1.120.10.2 ad * No need to undo what we did; we can simply think of
1775 1.120.10.2 ad * this as the pmap throwing away the mapping information.
1776 1.120.10.2 ad *
1777 1.120.10.2 ad * We do, however, have to go through the ReFault path,
1778 1.120.10.2 ad * as the map may change while we're asleep.
1779 1.120.10.2 ad */
1780 1.120.10.2 ad
1781 1.120.10.2 ad if (pg->flags & PG_WANTED)
1782 1.120.10.2 ad wakeup(pg);
1783 1.120.10.2 ad
1784 1.120.10.2 ad /*
1785 1.120.10.2 ad * note that pg can't be PG_RELEASED since we did not drop
1786 1.120.10.2 ad * the object lock since the last time we checked.
1787 1.120.10.2 ad */
1788 1.120.10.2 ad KASSERT((pg->flags & PG_RELEASED) == 0);
1789 1.120.10.2 ad
1790 1.120.10.2 ad pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
1791 1.120.10.2 ad UVM_PAGE_OWN(pg, NULL);
1792 1.120.10.2 ad uvmfault_unlockall(&ufi, amap, uobj, anon);
1793 1.120.10.2 ad if (!uvm_reclaimable()) {
1794 1.120.10.2 ad UVMHIST_LOG(maphist,
1795 1.120.10.2 ad "<- failed. out of VM",0,0,0,0);
1796 1.120.10.2 ad /* XXX instrumentation */
1797 1.120.10.2 ad error = ENOMEM;
1798 1.120.10.2 ad goto done;
1799 1.120.10.2 ad }
1800 1.120.10.2 ad /* XXX instrumentation */
1801 1.120.10.2 ad uvm_wait("flt_pmfail2");
1802 1.120.10.2 ad goto ReFault;
1803 1.120.10.2 ad }
1804 1.120.10.2 ad
1805 1.120.10.2 ad uvm_lock_pageq();
1806 1.120.10.2 ad if (wire_fault) {
1807 1.120.10.2 ad uvm_pagewire(pg);
1808 1.120.10.2 ad if (pg->pqflags & PQ_AOBJ) {
1809 1.120.10.2 ad
1810 1.120.10.2 ad /*
1811 1.120.10.2 ad * since the now-wired page cannot be paged out,
1812 1.120.10.2 ad * release its swap resources for others to use.
1813 1.120.10.2 ad * since an aobj page with no swap cannot be PG_CLEAN,
1814 1.120.10.2 ad * clear its clean flag now.
1815 1.120.10.2 ad */
1816 1.120.10.2 ad
1817 1.120.10.2 ad KASSERT(uobj != NULL);
1818 1.120.10.2 ad pg->flags &= ~(PG_CLEAN);
1819 1.120.10.2 ad uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
1820 1.120.10.2 ad }
1821 1.120.10.2 ad } else {
1822 1.120.10.2 ad uvm_pageactivate(pg);
1823 1.120.10.2 ad }
1824 1.120.10.2 ad uvm_unlock_pageq();
1825 1.120.10.2 ad if (pg->flags & PG_WANTED)
1826 1.120.10.2 ad wakeup(pg);
1827 1.120.10.2 ad
1828 1.120.10.2 ad /*
1829 1.120.10.2 ad * note that pg can't be PG_RELEASED since we did not drop the object
1830 1.120.10.2 ad * lock since the last time we checked.
1831 1.120.10.2 ad */
1832 1.120.10.2 ad KASSERT((pg->flags & PG_RELEASED) == 0);
1833 1.120.10.2 ad
1834 1.120.10.2 ad pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
1835 1.120.10.2 ad UVM_PAGE_OWN(pg, NULL);
1836 1.120.10.2 ad uvmfault_unlockall(&ufi, amap, uobj, anon);
1837 1.120.10.2 ad pmap_update(ufi.orig_map->pmap);
1838 1.120.10.2 ad UVMHIST_LOG(maphist, "<- done (SUCCESS!)",0,0,0,0);
1839 1.120.10.2 ad error = 0;
1840 1.120.10.2 ad done:
1841 1.120.10.2 ad if (anon_spare != NULL) {
1842 1.120.10.2 ad anon_spare->an_ref--;
1843 1.120.10.2 ad uvm_anfree(anon_spare);
1844 1.120.10.2 ad }
1845 1.120.10.2 ad return error;
1846 1.120.10.2 ad }
1847 1.120.10.2 ad
1848 1.120.10.2 ad
1849 1.120.10.2 ad /*
1850 1.120.10.2 ad * uvm_fault_wire: wire down a range of virtual addresses in a map.
1851 1.120.10.2 ad *
1852 1.120.10.2 ad * => map may be read-locked by caller, but MUST NOT be write-locked.
1853 1.120.10.2 ad * => if map is read-locked, any operations which may cause map to
1854 1.120.10.2 ad * be write-locked in uvm_fault() must be taken care of by
1855 1.120.10.2 ad * the caller. See uvm_map_pageable().
1856 1.120.10.2 ad */
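
/*
 * an illustrative (hypothetical) caller, wiring down a range for the
 * duration of an operation and unwiring it afterwards:
 *
 *	error = uvm_fault_wire(map, start, end, VM_PROT_READ, 0);
 *	if (error == 0) {
 *		... operate on the wired pages ...
 *		uvm_fault_unwire(map, start, end);
 *	}
 */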
1857 1.120.10.2 ad
1858 1.120.10.2 ad int
1859 1.120.10.2 ad uvm_fault_wire(struct vm_map *map, vaddr_t start, vaddr_t end,
1860 1.120.10.2 ad vm_prot_t access_type, int wiremax)
1861 1.120.10.2 ad {
1862 1.120.10.2 ad vaddr_t va;
1863 1.120.10.2 ad int error;
1864 1.120.10.2 ad
1865 1.120.10.2 ad /*
1866 1.120.10.2 ad * now fault it in a page at a time. if the fault fails then we have
1867 1.120.10.2 ad * to undo what we have done. note that in uvm_fault VM_PROT_NONE
1868 1.120.10.2 ad * is replaced with the max protection if fault_type is VM_FAULT_WIRE.
1869 1.120.10.2 ad */
1870 1.120.10.2 ad
1871 1.120.10.2 ad /*
1872 1.120.10.2 ad * XXX work around overflowing a vaddr_t. this prevents us from
1873 1.120.10.2 ad * wiring the last page in the address space, though.
1874 1.120.10.2 ad */
1875 1.120.10.2 ad if (start > end) {
1876 1.120.10.2 ad return EFAULT;
1877 1.120.10.2 ad }
1878 1.120.10.2 ad
1879 1.120.10.2 ad for (va = start ; va < end ; va += PAGE_SIZE) {
1880 1.120.10.2 ad error = uvm_fault_internal(map, va, access_type,
1881 1.120.10.2 ad wiremax ? UVM_FAULT_WIREMAX : UVM_FAULT_WIRE);
1882 1.120.10.2 ad if (error) {
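			/* undo the wirings done so far before failing */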
1883 1.120.10.2 ad if (va != start) {
1884 1.120.10.2 ad uvm_fault_unwire(map, start, va);
1885 1.120.10.2 ad }
1886 1.120.10.2 ad return error;
1887 1.120.10.2 ad }
1888 1.120.10.2 ad }
1889 1.120.10.2 ad return 0;
1890 1.120.10.2 ad }
1891 1.120.10.2 ad
1892 1.120.10.2 ad /*
1893 1.120.10.2 ad * uvm_fault_unwire(): unwire range of virtual space.
1894 1.120.10.2 ad */
1895 1.120.10.2 ad
1896 1.120.10.2 ad void
1897 1.120.10.2 ad uvm_fault_unwire(struct vm_map *map, vaddr_t start, vaddr_t end)
1898 1.120.10.2 ad {
1899 1.120.10.2 ad vm_map_lock_read(map);
1900 1.120.10.2 ad uvm_fault_unwire_locked(map, start, end);
1901 1.120.10.2 ad vm_map_unlock_read(map);
1902 1.120.10.2 ad }
1903 1.120.10.2 ad
1904 1.120.10.2 ad /*
1905 1.120.10.2 ad * uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
1906 1.120.10.2 ad *
1907 1.120.10.2 ad * => map must be at least read-locked.
1908 1.120.10.2 ad */
1909 1.120.10.2 ad
1910 1.120.10.2 ad void
1911 1.120.10.2 ad uvm_fault_unwire_locked(struct vm_map *map, vaddr_t start, vaddr_t end)
1912 1.120.10.2 ad {
1913 1.120.10.2 ad struct vm_map_entry *entry;
1914 1.120.10.2 ad pmap_t pmap = vm_map_pmap(map);
1915 1.120.10.2 ad vaddr_t va;
1916 1.120.10.2 ad paddr_t pa;
1917 1.120.10.2 ad struct vm_page *pg;
1918 1.120.10.2 ad
1919 1.120.10.2 ad KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
1920 1.120.10.2 ad
1921 1.120.10.2 ad /*
1922 1.120.10.2 ad * we assume that the area we are unwiring has actually been wired
1923 1.120.10.2 ad * in the first place. this means that we should be able to extract
1924 1.120.10.2 ad * the PAs from the pmap. we also lock out the page daemon so that
1925 1.120.10.2 ad * we can call uvm_pageunwire.
1926 1.120.10.2 ad */
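
	/*
	 * uvm_pageunwire() moves a page whose wire count drops to zero
	 * back onto the page queues, which is why the page queues are
	 * locked here once for the whole range rather than per page.
	 */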
1927 1.120.10.2 ad
1928 1.120.10.2 ad uvm_lock_pageq();
1929 1.120.10.2 ad
1930 1.120.10.2 ad /*
1931 1.120.10.2 ad * find the beginning map entry for the region.
1932 1.120.10.2 ad */
1933 1.120.10.2 ad
1934 1.120.10.2 ad KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
1935 1.120.10.2 ad if (uvm_map_lookup_entry(map, start, &entry) == false)
1936 1.120.10.2 ad panic("uvm_fault_unwire_locked: address not in map");
1937 1.120.10.2 ad
1938 1.120.10.2 ad for (va = start; va < end; va += PAGE_SIZE) {
1939 1.120.10.2 ad if (pmap_extract(pmap, va, &pa) == false)
1940 1.120.10.2 ad continue;
1941 1.120.10.2 ad
1942 1.120.10.2 ad /*
1943 1.120.10.2 ad * find the map entry for the current address.
1944 1.120.10.2 ad */
1945 1.120.10.2 ad
1946 1.120.10.2 ad KASSERT(va >= entry->start);
1947 1.120.10.2 ad while (va >= entry->end) {
1948 1.120.10.2 ad KASSERT(entry->next != &map->header &&
1949 1.120.10.2 ad entry->next->start <= entry->end);
1950 1.120.10.2 ad entry = entry->next;
1951 1.120.10.2 ad }
1952 1.120.10.2 ad
1953 1.120.10.2 ad /*
1954 1.120.10.2 ad * if the entry is no longer wired, tell the pmap.
1955 1.120.10.2 ad */
1956 1.120.10.2 ad
1957 1.120.10.2 ad if (VM_MAPENT_ISWIRED(entry) == 0)
1958 1.120.10.2 ad pmap_unwire(pmap, va);
1959 1.120.10.2 ad
1960 1.120.10.2 ad pg = PHYS_TO_VM_PAGE(pa);
1961 1.120.10.2 ad if (pg)
1962 1.120.10.2 ad uvm_pageunwire(pg);
1963 1.120.10.2 ad }
1964 1.120.10.2 ad
1965 1.120.10.2 ad uvm_unlock_pageq();
1966 1.120.10.2 ad }