/* $NetBSD: uvm_anon.c,v 1.19 2001/10/21 00:04:42 chs Exp $ */

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

/*
 * anonblock_list: global list of anon blocks,
 * locked by swap_syscall_lock (since we never remove
 * anything from this list and we only add to it via swapctl(2)).
 */

struct uvm_anonblock {
        LIST_ENTRY(uvm_anonblock) list;
        int count;
        struct vm_anon *anons;
};
static LIST_HEAD(anonlist, uvm_anonblock) anonblock_list;
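
/*
 * Each uvm_anon_add() call that actually grows the pool records its anon
 * array on anonblock_list; anon_swap_off() below depends on this to visit
 * every anon in the system when a swap device is being removed.
 */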


static boolean_t anon_pagein __P((struct vm_anon *));


/*
 * allocate anons
 */
void
uvm_anon_init()
{
        int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */

        simple_lock_init(&uvm.afreelock);
        LIST_INIT(&anonblock_list);

        /*
         * Allocate the initial anons.
         */
        uvm_anon_add(nanon);
}
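
/*
 * Note: uvm_anon_init() is expected to be called once from uvm_init()
 * during bootstrap, before any anonymous memory can be faulted in, so the
 * "roughly 15/16 of currently free pages" sizing above is purely a
 * boot-time heuristic.
 */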

/*
 * add some more anons to the free pool.  called when we add
 * more swap space.
 *
 * => swap_syscall_lock should be held (protects anonblock_list).
 */
int
uvm_anon_add(count)
        int count;
{
        struct uvm_anonblock *anonblock;
        struct vm_anon *anon;
        int lcv, needed;

        simple_lock(&uvm.afreelock);
        uvmexp.nanonneeded += count;
        needed = uvmexp.nanonneeded - uvmexp.nanon;
        simple_unlock(&uvm.afreelock);

        if (needed <= 0) {
                return 0;
        }
        anon = (void *)uvm_km_alloc(kernel_map, sizeof(*anon) * needed);
        if (anon == NULL) {
                simple_lock(&uvm.afreelock);
                uvmexp.nanonneeded -= count;
                simple_unlock(&uvm.afreelock);
                return ENOMEM;
        }
        MALLOC(anonblock, void *, sizeof(*anonblock), M_UVMAMAP, M_WAITOK);

        anonblock->count = needed;
        anonblock->anons = anon;
        LIST_INSERT_HEAD(&anonblock_list, anonblock, list);
        memset(anon, 0, sizeof(*anon) * needed);

        simple_lock(&uvm.afreelock);
        uvmexp.nanon += needed;
        uvmexp.nfreeanon += needed;
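
        /*
         * Thread each new anon onto the singly-linked free list headed by
         * uvm.afree, using the u.an_nxt link; uvm_analloc() pops entries
         * off this list one at a time.
         */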
        for (lcv = 0; lcv < needed; lcv++) {
                anon[lcv].u.an_nxt = uvm.afree;
                uvm.afree = &anon[lcv];
                simple_lock_init(&anon[lcv].an_lock);
        }
        simple_unlock(&uvm.afreelock);
        return 0;
}

/*
 * remove anons from the free pool.
 */
void
uvm_anon_remove(count)
        int count;
{
        /*
         * we never actually free any anons, to avoid allocation overhead.
         * XXX someday we might want to try to free anons.
         */

        simple_lock(&uvm.afreelock);
        uvmexp.nanonneeded -= count;
        simple_unlock(&uvm.afreelock);
}

/*
 * allocate an anon
 *
 * => new anon is returned locked!
 */
struct vm_anon *
uvm_analloc()
{
        struct vm_anon *a;

        simple_lock(&uvm.afreelock);
        a = uvm.afree;
        if (a) {
                uvm.afree = a->u.an_nxt;
                uvmexp.nfreeanon--;
                a->an_ref = 1;
                a->an_swslot = 0;
                a->u.an_page = NULL;            /* so we can free quickly */
                LOCK_ASSERT(simple_lock_held(&a->an_lock) == 0);
                simple_lock(&a->an_lock);
        }
        simple_unlock(&uvm.afreelock);
        return(a);
}
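
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * typical user allocates an anon, sets up its page or swap state while
 * holding the lock that uvm_analloc() returns held, and then unlocks it:
 *
 *      struct vm_anon *anon;
 *
 *      anon = uvm_analloc();
 *      if (anon == NULL)
 *              return ENOMEM;          (free anon pool is exhausted)
 *      ... install the anon in an amap, set anon->u.an_page, etc ...
 *      simple_unlock(&anon->an_lock);
 */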

/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *    an amap).
 * => anon must be unlocked and have a zero reference count.
 * => we may lock the pageq's.
 */
void
uvm_anfree(anon)
        struct vm_anon *anon;
{
        struct vm_page *pg;
        UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
        UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);

        KASSERT(anon->an_ref == 0);
        LOCK_ASSERT(!simple_lock_held(&anon->an_lock));

        /*
         * get page
         */

        pg = anon->u.an_page;

        /*
         * if there is a resident page and it is loaned, then anon may not
         * own it.  call out to uvm_anon_lockloanpg() to ensure the real
         * owner of the page has been identified and locked.
         */

        if (pg && pg->loan_count)
                pg = uvm_anon_lockloanpg(anon);

        /*
         * if we have a resident page, we must dispose of it before freeing
         * the anon.
         */

        if (pg) {

                /*
                 * if the page is owned by a uobject (now locked), then we
                 * must kill the loan on the page rather than free it.
                 */

                if (pg->uobject) {
                        uvm_lock_pageq();
                        KASSERT(pg->loan_count > 0);
                        pg->loan_count--;
                        pg->uanon = NULL;
                        uvm_unlock_pageq();
                        simple_unlock(&pg->uobject->vmobjlock);
                } else {

                        /*
                         * page has no uobject, so we must be the owner of it.
                         * if page is busy then we wait until it is not busy,
                         * and then free it.
                         */

                        KASSERT((pg->flags & PG_RELEASED) == 0);
                        simple_lock(&anon->an_lock);
                        pmap_page_protect(pg, VM_PROT_NONE);
                        while ((pg = anon->u.an_page) &&
                               (pg->flags & PG_BUSY) != 0) {
                                pg->flags |= PG_WANTED;
                                UVM_UNLOCK_AND_WAIT(pg, &anon->an_lock, 0,
                                    "anfree", 0);
                                simple_lock(&anon->an_lock);
                        }
                        if (pg) {
                                uvm_lock_pageq();
                                uvm_pagefree(pg);
                                uvm_unlock_pageq();
                        }
                        simple_unlock(&anon->an_lock);
                        UVMHIST_LOG(maphist, "anon 0x%x, page 0x%x: "
                            "freed now!", anon, pg, 0, 0);
                }
        }

        /*
         * free any swap resources.
         */

        uvm_anon_dropswap(anon);

        /*
         * now that we've stripped the data areas from the anon,
         * free the anon itself.
         */

        simple_lock(&uvm.afreelock);
        anon->u.an_nxt = uvm.afree;
        uvm.afree = anon;
        uvmexp.nfreeanon++;
        simple_unlock(&uvm.afreelock);
        UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}

/*
 * uvm_anon_dropswap: release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(anon)
        struct vm_anon *anon;
{
        UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);

        if (anon->an_swslot == 0)
                return;

        UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
            anon, anon->an_swslot, 0, 0);
        uvm_swap_free(anon->an_swslot, 1);
        anon->an_swslot = 0;

        if (anon->u.an_page == NULL) {
                /*
                 * the anon had no resident page, so its data lived only in
                 * swap; that copy is now gone, so drop the swap-only page
                 * count.
                 */
                simple_lock(&uvm.swap_data_lock);
                uvmexp.swpgonly--;
                simple_unlock(&uvm.swap_data_lock);
        }
}

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *    if there is a resident page:
 *      if it has a uobject, it is locked by us
 *      if it is ownerless, we take over as owner
 *    we return the resident page (it can change during
 *    this function)
 * => note that the only time an anon has an ownerless resident page
 *    is if the page was loaned from a uvm_object and the uvm_object
 *    disowned it
 * => this only needs to be called when you want to do an operation
 *    on an anon's resident page and that page has a non-zero loan
 *    count.
 */
struct vm_page *
uvm_anon_lockloanpg(anon)
        struct vm_anon *anon;
{
        struct vm_page *pg;
        boolean_t locked = FALSE;

        LOCK_ASSERT(simple_lock_held(&anon->an_lock));

        /*
         * loop while we have a resident page that has a non-zero loan count.
         * if we successfully get our lock, we will "break" the loop.
         * note that the test for pg->loan_count is not protected -- this
         * may produce false positive results.  note that a false positive
         * result may cause us to do more work than we need to, but it will
         * not produce an incorrect result.
         */

        while (((pg = anon->u.an_page) != NULL) && pg->loan_count != 0) {

                /*
                 * quickly check to see if the page has an object before
                 * bothering to lock the page queues.  this may also produce
                 * a false positive result, but that's ok because we do a real
                 * check after that.
                 */

                if (pg->uobject) {
                        uvm_lock_pageq();
                        if (pg->uobject) {
                                locked =
                                    simple_lock_try(&pg->uobject->vmobjlock);
                        } else {
                                /* object disowned before we got PQ lock */
                                locked = TRUE;
                        }
                        uvm_unlock_pageq();

                        /*
                         * if we didn't get a lock (try lock failed), then we
                         * toggle our anon lock and try again
                         */

                        if (!locked) {
                                simple_unlock(&anon->an_lock);

                                /*
                                 * someone locking the object has a chance to
                                 * lock us right now
                                 */

                                simple_lock(&anon->an_lock);
                                continue;
                        }
                }

                /*
                 * if page is un-owned [i.e. the object dropped its ownership],
                 * then we can take over as owner!
                 */

                if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
                        uvm_lock_pageq();
                        pg->pqflags |= PQ_ANON;
                        pg->loan_count--;
                        uvm_unlock_pageq();
                }
                break;
        }
        return(pg);
}
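
/*
 * Note on the retry loop above: the object lock is taken with
 * simple_lock_try() rather than simple_lock().  The anon lock is already
 * held here, while other paths presumably take the object lock before the
 * anon lock, so blocking on the object lock could deadlock; instead we
 * drop and retake the anon lock and retry, giving the other lock holder a
 * chance to make progress.
 */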

/*
 * page in every anon that is paged out to a range of swslots.
 *
 * swap_syscall_lock should be held (protects anonblock_list).
 *
 * returns TRUE if a pagein had to be aborted (e.g. for lack of memory),
 * FALSE if every anon in the range was paged in successfully.
 */

boolean_t
anon_swap_off(startslot, endslot)
        int startslot, endslot;
{
        struct uvm_anonblock *anonblock;

        LIST_FOREACH(anonblock, &anonblock_list, list) {
                int i;

                /*
                 * loop thru all the anons in the anonblock,
                 * paging in where needed.
                 */

                for (i = 0; i < anonblock->count; i++) {
                        struct vm_anon *anon = &anonblock->anons[i];
                        int slot;

                        /*
                         * lock anon to work on it.
                         */

                        simple_lock(&anon->an_lock);

                        /*
                         * is this anon's swap slot in range?
                         */

                        slot = anon->an_swslot;
                        if (slot >= startslot && slot < endslot) {
                                boolean_t rv;

                                /*
                                 * yup, page it in.
                                 */

                                /* locked: anon */
                                rv = anon_pagein(anon);
                                /* unlocked: anon */

                                if (rv) {
                                        return rv;
                                }
                        } else {

                                /*
                                 * nope, unlock and proceed.
                                 */

                                simple_unlock(&anon->an_lock);
                        }
                }
        }
        return FALSE;
}
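
/*
 * Illustrative context (an assumption, not part of this file): the
 * swapctl(2) swap-removal path in uvm_swap.c is expected to call
 * anon_swap_off() with the slot range of the device being removed, and to
 * abort the removal if TRUE comes back, since that means some anon's data
 * could not be brought back into RAM.
 */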


/*
 * fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */

static boolean_t
anon_pagein(anon)
        struct vm_anon *anon;
{
        struct vm_page *pg;
        struct uvm_object *uobj;
        int rv;

        /* locked: anon */
        LOCK_ASSERT(simple_lock_held(&anon->an_lock));

        rv = uvmfault_anonget(NULL, NULL, anon);

        /*
         * if rv == 0, anon is still locked, else anon
         * is unlocked
         */

        switch (rv) {
        case 0:
                break;

        case EIO:
        case ERESTART:

                /*
                 * nothing more to do on errors.
                 * ERESTART can only mean that the anon was freed,
                 * so again there's nothing to do.
                 */

                return FALSE;
        }

        /*
         * ok, we've got the page now.
         * mark it as dirty, clear its swslot and un-busy it.
         */

        pg = anon->u.an_page;
        uobj = pg->uobject;
        uvm_swap_free(anon->an_swslot, 1);
        anon->an_swslot = 0;
        pg->flags &= ~(PG_CLEAN);

        /*
         * deactivate the page (to put it on a page queue)
         */

        pmap_clear_reference(pg);
        uvm_lock_pageq();
        uvm_pagedeactivate(pg);
        uvm_unlock_pageq();

        /*
         * unlock the anon and we're done.
         */

        simple_unlock(&anon->an_lock);
        if (uobj) {
                simple_unlock(&uobj->vmobjlock);
        }
        return FALSE;
}