/*	$NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

/*
 * anonblock_list: global list of anon blocks,
 * locked by swap_syscall_lock (since we never remove
 * anything from this list and we only add to it via swapctl(2)).
 */

struct uvm_anonblock {
	LIST_ENTRY(uvm_anonblock) list;
	int count;
	struct vm_anon *anons;
};
static LIST_HEAD(anonlist, uvm_anonblock) anonblock_list;
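
/*
 * free anons are kept on a singly-linked LIFO list headed by uvm.afree
 * and chained through u.an_nxt; uvm.afreelock protects the list head
 * and the anon counters in uvmexp (see uvm_analloc() and uvm_anfree()
 * below).
 */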

static boolean_t anon_pagein __P((struct vm_anon *));

/*
 * uvm_anon_init: allocate the initial pool of anons at boot time.
 */
void
uvm_anon_init()
{
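	/*
	 * the formula below appears to size the initial pool at roughly
	 * 15/16 of the pages currently free, holding the rest back; the
	 * original "XXXCDC ???" tag suggests the ratio was never firmly
	 * justified.
	 */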
	int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */

	simple_lock_init(&uvm.afreelock);
	LIST_INIT(&anonblock_list);

	/*
	 * Allocate the initial anons.
	 */
	uvm_anon_add(nanon);
}

/*
 * add some more anons to the free pool.  called when we add
 * more swap space.
 *
 * => swap_syscall_lock should be held (protects anonblock_list).
 */
void
uvm_anon_add(count)
	int count;
{
	struct uvm_anonblock *anonblock;
	struct vm_anon *anon;
	int lcv, needed;

	simple_lock(&uvm.afreelock);
	uvmexp.nanonneeded += count;
	needed = uvmexp.nanonneeded - uvmexp.nanon;
	simple_unlock(&uvm.afreelock);

	if (needed <= 0) {
		return;
	}

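	/*
	 * the block header is small and comes from malloc(9); the anon
	 * array itself is presumably too large for malloc, so it comes
	 * straight from kernel_map via uvm_km_alloc().
	 */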
	MALLOC(anonblock, void *, sizeof(*anonblock), M_UVMAMAP, M_WAITOK);
	anon = (void *)uvm_km_alloc(kernel_map, sizeof(*anon) * needed);

	/* XXX Should wait for VM to free up. */
	if (anonblock == NULL || anon == NULL) {
		printf("uvm_anon_add: cannot allocate %d anons\n", needed);
		panic("uvm_anon_add");
	}

	anonblock->count = needed;
	anonblock->anons = anon;
	LIST_INSERT_HEAD(&anonblock_list, anonblock, list);
	memset(anon, 0, sizeof(*anon) * needed);

	simple_lock(&uvm.afreelock);
	uvmexp.nanon += needed;
	uvmexp.nfreeanon += needed;
	for (lcv = 0; lcv < needed; lcv++) {
		/* initialize this anon's lock and push it on the free list */
		simple_lock_init(&anon[lcv].an_lock);
		anon[lcv].u.an_nxt = uvm.afree;
		uvm.afree = &anon[lcv];
	}
	simple_unlock(&uvm.afreelock);
}

/*
 * uvm_anon_remove: remove anons from the free pool.
 */
void
uvm_anon_remove(count)
	int count;
{
	/*
	 * we never actually free any anons, to avoid allocation overhead.
	 * XXX someday we might want to try to free anons.
	 */

	simple_lock(&uvm.afreelock);
	uvmexp.nanonneeded -= count;
	simple_unlock(&uvm.afreelock);
}

/*
 * uvm_analloc: allocate an anon from the free pool.
 *
 * => returns NULL if the pool is empty; otherwise the anon is returned
 *	with a reference count of one and no page or swap slot.
 */
struct vm_anon *
uvm_analloc()
{
	struct vm_anon *a;

	simple_lock(&uvm.afreelock);
	a = uvm.afree;
	if (a) {
		uvm.afree = a->u.an_nxt;
		uvmexp.nfreeanon--;
		a->an_ref = 1;
		a->an_swslot = 0;
		a->u.an_page = NULL;		/* so we can free quickly */
	}
	simple_unlock(&uvm.afreelock);
	return(a);
}
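
/*
 * example (hypothetical caller): the pool can be exhausted, so every
 * caller must handle a NULL return, e.g.
 *
 *	anon = uvm_analloc();
 *	if (anon == NULL) {
 *		... unlock and retry or fail (caller-specific) ...
 *	}
 */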

/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *	an amap).
 * => anon must be unlocked and have a zero reference count.
 * => we may lock the pageq's.
 */
void
uvm_anfree(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);

	/*
	 * get page
	 */

	pg = anon->u.an_page;

	/*
	 * if there is a resident page and it is loaned, then anon may not
	 * own it.  call out to uvm_anon_lockloanpg() to ensure the real
	 * owner of the page has been identified and locked.
	 */

	if (pg && pg->loan_count)
		pg = uvm_anon_lockloanpg(anon);

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */

	if (pg) {

		/*
		 * if the page is owned by a uobject (now locked), then we must
		 * kill the loan on the page rather than free it.
		 */

		if (pg->uobject) {
			uvm_lock_pageq();
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->uanon = NULL;
			uvm_unlock_pageq();
			simple_unlock(&pg->uobject->vmobjlock);
		} else {

			/*
			 * page has no uobject, so we must be the owner of it.
			 *
			 * if page is busy then we just mark it as released
			 * (whoever has it busy must check for this when they
			 * wake up).  if the page is not busy then we can
			 * free it now.
			 */

			if ((pg->flags & PG_BUSY) != 0) {
				/* tell them to dump it when done */
				pg->flags |= PG_RELEASED;
				UVMHIST_LOG(maphist,
				    " anon 0x%x, page 0x%x: BUSY (released!)",
				    anon, pg, 0, 0);
				return;
			}
			pmap_page_protect(pg, VM_PROT_NONE);
			uvm_lock_pageq();	/* lock out pagedaemon */
			uvm_pagefree(pg);	/* bye bye */
			uvm_unlock_pageq();	/* free the daemon */
			UVMHIST_LOG(maphist,"anon 0x%x, page 0x%x: freed now!",
			    anon, pg, 0, 0);
		}
	}

	/*
	 * free any swap resources.
	 */
	uvm_anon_dropswap(anon);

	/*
	 * now that we've stripped the data areas from the anon, free the anon
	 * itself!
	 */
	simple_lock(&uvm.afreelock);
	anon->u.an_nxt = uvm.afree;
	uvm.afree = anon;
	uvmexp.nfreeanon++;
	simple_unlock(&uvm.afreelock);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}

/*
 * uvm_anon_dropswap: release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(anon)
	struct vm_anon *anon;
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);
	if (anon->an_swslot == 0) {
		return;
	}

	UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
	    anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;

	if (anon->u.an_page == NULL) {
		/*
		 * this anon's data lived only in swap; the slot is gone,
		 * so there is now one fewer swap-only page.
		 */
		simple_lock(&uvm.swap_data_lock);
		uvmexp.swpgonly--;
		simple_unlock(&uvm.swap_data_lock);
	}
}

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *	if there is a resident page:
 *		if it has a uobject, it is locked by us
 *		if it is ownerless, we take over as owner
 *	we return the resident page (it can change during
 *	this function)
 * => note that the only time an anon has an ownerless resident page
 *	is if the page was loaned from a uvm_object and the uvm_object
 *	disowned it
 * => this only needs to be called when you want to do an operation
 *	on an anon's resident page and that page has a non-zero loan
 *	count.
 */
struct vm_page *
uvm_anon_lockloanpg(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	boolean_t locked = FALSE;

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.  note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->u.an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.  this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 *
		 * XXX: quick check -- worth it?  need volatile?
		 */

		if (pg->uobject) {

			uvm_lock_pageq();
			if (pg->uobject) {	/* the "real" check */
				locked =
				    simple_lock_try(&pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = TRUE;
			}
			uvm_unlock_pageq();

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				simple_unlock(&anon->an_lock);

				/*
				 * someone locking the object has a chance to
				 * lock us right now.  dropping and retaking
				 * our anon lock gives a thread that holds
				 * the object lock and wants this anon a
				 * window to take it, avoiding deadlock.
				 */

				simple_lock(&anon->an_lock);
				continue;
			}
		}

		/*
		 * if page is un-owned [i.e. the object dropped its ownership],
		 * then we can take over as owner!
		 */

		if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
			uvm_lock_pageq();
			pg->pqflags |= PQ_ANON;		/* take ownership... */
			pg->loan_count--;	/* ... and drop our loan */
			uvm_unlock_pageq();
		}

		/*
		 * we did it!  break the loop
		 */

		break;
	}
	return(pg);
}

/*
 * anon_swap_off: page in every anon that is paged out to a range of
 * swslots.
 *
 * => swap_syscall_lock should be held (protects anonblock_list).
 * => returns TRUE if a pagein failed for lack of memory, FALSE if the
 *	whole range was paged in successfully.
 */
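
/*
 * (the expected caller is the swap-device removal code in uvm_swap.c,
 * which must page everything off a device before it can be freed.)
 */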

boolean_t
anon_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_anonblock *anonblock;

	for (anonblock = LIST_FIRST(&anonblock_list);
	     anonblock != NULL;
	     anonblock = LIST_NEXT(anonblock, list)) {
		int i;

		/*
		 * loop thru all the anons in the anonblock,
		 * paging in where needed.
		 */

		for (i = 0; i < anonblock->count; i++) {
			struct vm_anon *anon = &anonblock->anons[i];
			int slot;

			/*
			 * lock anon to work on it.
			 */

			simple_lock(&anon->an_lock);

			/*
			 * is this anon's swap slot in range?
			 */

			slot = anon->an_swslot;
			if (slot >= startslot && slot < endslot) {
				boolean_t rv;

				/*
				 * yup, page it in.
				 */

				/* locked: anon */
				rv = anon_pagein(anon);
				/* unlocked: anon */

				if (rv) {
					return rv;
				}
			} else {

				/*
				 * nope, unlock and proceed.
				 */

				simple_unlock(&anon->an_lock);
			}
		}
	}
	return FALSE;
}

/*
 * anon_pagein: fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */

static boolean_t
anon_pagein(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int rv;

	/* locked: anon */
	rv = uvmfault_anonget(NULL, NULL, anon);

	/*
	 * if rv == VM_PAGER_OK, anon is still locked, else anon
	 * is unlocked
	 */

	switch (rv) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_ERROR:
	case VM_PAGER_REFAULT:

		/*
		 * nothing more to do on errors.
		 * VM_PAGER_REFAULT can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return FALSE;

	default:
#ifdef DIAGNOSTIC
		panic("anon_pagein: uvmfault_anonget -> %d", rv);
#else
		return FALSE;
#endif
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty and clear its swap slot.
	 */

	pg = anon->u.an_page;
	uobj = pg->uobject;
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
	pg->flags &= ~(PG_CLEAN);

	/*
	 * deactivate the page (to put it on a page queue)
	 */

	pmap_clear_reference(pg);
	pmap_page_protect(pg, VM_PROT_NONE);
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	/*
	 * unlock the anon and we're done.
	 */

	simple_unlock(&anon->an_lock);
	if (uobj) {
		simple_unlock(&uobj->vmobjlock);
	}
	return FALSE;
}