/*      $NetBSD: uvm_anon.c,v 1.5 2000/01/11 06:57:49 chs Exp $        */

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

/*
 * anonblock_list: global list of anon blocks,
 * locked by swap_syscall_lock (since we never remove
 * anything from this list and we only add to it via swapctl(2)).
 */

struct uvm_anonblock {
        LIST_ENTRY(uvm_anonblock) list;
        int count;
        struct vm_anon *anons;
};
static LIST_HEAD(anonlist, uvm_anonblock) anonblock_list;
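
/*
 * Layout sketch (illustrative only): each call to uvm_anon_add() below
 * contributes one uvm_anonblock, and every anon inside every block is
 * also threaded onto the global free list (uvm.afree) through u.an_nxt.
 * Assuming LIST_FOREACH from <sys/queue.h> is available (this file
 * itself walks the list with LIST_FIRST/LIST_NEXT), a pass over all
 * blocks would look like:
 *
 *      struct uvm_anonblock *ab;
 *
 *      LIST_FOREACH(ab, &anonblock_list, list)
 *              printf("block of %d anons at %p\n", ab->count, ab->anons);
 */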

static boolean_t anon_pagein __P((struct vm_anon *));


/*
 * allocate anons
 */
void
uvm_anon_init()
{
        int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */

        simple_lock_init(&uvm.afreelock);
        LIST_INIT(&anonblock_list);

        /*
         * Allocate the initial anons.
         */
        uvm_anon_add(nanon);
}
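
/*
 * Sizing example: nanon above is free memory less one sixteenth.  With
 * uvmexp.free == 16384 pages at boot, for instance, this allocates
 * 16384 - 16384/16 == 15360 anons, enough to back roughly 15/16 of the
 * currently-free pages with anonymous memory.
 */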

/*
 * add some more anons to the free pool.  called when we add
 * more swap space.
 *
 * => swap_syscall_lock should be held (protects anonblock_list).
 */
void
uvm_anon_add(count)
        int count;
{
        struct uvm_anonblock *anonblock;
        struct vm_anon *anon;
        int lcv, needed;

        simple_lock(&uvm.afreelock);
        uvmexp.nanonneeded += count;
        needed = uvmexp.nanonneeded - uvmexp.nanon;
        simple_unlock(&uvm.afreelock);

        if (needed <= 0) {
                return;
        }

        MALLOC(anonblock, void *, sizeof(*anonblock), M_UVMAMAP, M_WAITOK);
        anon = (void *)uvm_km_alloc(kernel_map, sizeof(*anon) * needed);

        /* XXX Should wait for VM to free up. */
        if (anonblock == NULL || anon == NULL) {
                printf("uvm_anon_add: can not allocate %d anons\n", needed);
                panic("uvm_anon_add");
        }

        anonblock->count = needed;
        anonblock->anons = anon;
        LIST_INSERT_HEAD(&anonblock_list, anonblock, list);
        memset(anon, 0, sizeof(*anon) * needed);

        simple_lock(&uvm.afreelock);
        uvmexp.nanon += needed;
        uvmexp.nfreeanon += needed;
        for (lcv = 0; lcv < needed; lcv++) {
                anon[lcv].u.an_nxt = uvm.afree;
                uvm.afree = &anon[lcv];
                simple_lock_init(&anon[lcv].an_lock);
        }
        simple_unlock(&uvm.afreelock);
}
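
/*
 * Caller sketch (illustrative; the one-anon-per-page-of-swap sizing is
 * an assumption based on how the swap code uses this interface): the
 * swapctl(2) paths grow and shrink the pool target as devices come and
 * go:
 *
 *      uvm_anon_add(npages);           (SWAP_ON: grow the pool)
 *      ...
 *      uvm_anon_remove(npages);        (SWAP_OFF: shrink the target)
 */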

/*
 * remove anons from the free pool.
 */
void
uvm_anon_remove(count)
        int count;
{
        /*
         * we never actually free any anons, to avoid allocation overhead.
         * XXX someday we might want to try to free anons.
         */

        simple_lock(&uvm.afreelock);
        uvmexp.nanonneeded -= count;
        simple_unlock(&uvm.afreelock);
}

/*
 * allocate an anon
 */
struct vm_anon *
uvm_analloc()
{
        struct vm_anon *a;

        simple_lock(&uvm.afreelock);
        a = uvm.afree;
        if (a) {
                uvm.afree = a->u.an_nxt;
                uvmexp.nfreeanon--;
                a->an_ref = 1;
                a->an_swslot = 0;
                a->u.an_page = NULL;            /* so we can free quickly */
        }
        simple_unlock(&uvm.afreelock);
        return(a);
}
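
/*
 * Caller sketch (illustrative; the uvm_pagealloc() signature and the
 * "anonwait" channel name are assumptions modeled on the fault-handler
 * pattern):
 *
 *      anon = uvm_analloc();
 *      if (anon == NULL) {
 *              uvm_wait("anonwait");
 *              goto retry;
 *      }
 *      pg = uvm_pagealloc(NULL, 0, anon, 0);
 *
 * Callers must handle a NULL return: the pool is static between
 * swapctl(2) calls and can run dry.
 */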

/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *      an amap).
 * => anon must be unlocked and have a zero reference count.
 * => we may lock the pageq's.
 */
void
uvm_anfree(anon)
        struct vm_anon *anon;
{
        struct vm_page *pg;
        UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
        UVMHIST_LOG(maphist, "(anon=0x%x)", anon, 0, 0, 0);

        /*
         * get page
         */

        pg = anon->u.an_page;

        /*
         * if there is a resident page and it is loaned, then anon may not
         * own it.   call out to uvm_anon_lockloanpg() to ensure the real
         * owner of the page has been identified and locked.
         */

        if (pg && pg->loan_count)
                pg = uvm_anon_lockloanpg(anon);

        /*
         * if we have a resident page, we must dispose of it before freeing
         * the anon.
         */

        if (pg) {

                /*
                 * if the page is owned by a uobject (now locked), then we must
                 * kill the loan on the page rather than free it.
                 */

                if (pg->uobject) {

                        /* kill loan */
                        uvm_lock_pageq();
#ifdef DIAGNOSTIC
                        if (pg->loan_count < 1)
                                panic("uvm_anfree: obj owned page "
                                    "with no loan count");
#endif
                        pg->loan_count--;
                        pg->uanon = NULL;
                        uvm_unlock_pageq();
                        simple_unlock(&pg->uobject->vmobjlock);

                } else {

                        /*
                         * page has no uobject, so we must be the owner of it.
                         *
                         * if page is busy then we just mark it as released
                         * (whoever has it busy must check for this when they
                         * wake up).   if the page is not busy then we can
                         * free it now.
                         */

                        if ((pg->flags & PG_BUSY) != 0) {
                                /* tell them to dump it when done */
                                pg->flags |= PG_RELEASED;
                                UVMHIST_LOG(maphist,
                                    "  anon 0x%x, page 0x%x: BUSY (released!)",
                                    anon, pg, 0, 0);
                                return;
                        }

                        pmap_page_protect(pg, VM_PROT_NONE);
                        uvm_lock_pageq();       /* lock out pagedaemon */
                        uvm_pagefree(pg);       /* bye bye */
                        uvm_unlock_pageq();     /* free the daemon */

                        UVMHIST_LOG(maphist, "  anon 0x%x, page 0x%x: freed now!",
                            anon, pg, 0, 0);
                }
        }

        /*
         * free any swap resources.
         */
        uvm_anon_dropswap(anon);

        /*
         * now that we've stripped the data areas from the anon, free the anon
         * itself!
         */
        simple_lock(&uvm.afreelock);
        anon->u.an_nxt = uvm.afree;
        uvm.afree = anon;
        uvmexp.nfreeanon++;
        simple_unlock(&uvm.afreelock);
        UVMHIST_LOG(maphist, "<- done!", 0, 0, 0, 0);
}
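
/*
 * Caller sketch (illustrative): dropping the last reference.  The anon
 * lock covers only the reference-count manipulation; the anon must be
 * unlocked again before the free, per the contract above:
 *
 *      simple_lock(&anon->an_lock);
 *      refs = --anon->an_ref;
 *      simple_unlock(&anon->an_lock);
 *      if (refs == 0)
 *              uvm_anfree(anon);
 */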

/*
 * uvm_anon_dropswap: release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(anon)
        struct vm_anon *anon;
{
        UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);

        if (anon->an_swslot == 0) {
                return;
        }

        UVMHIST_LOG(maphist, "freeing swap for anon %p, paged to swslot 0x%x",
            anon, anon->an_swslot, 0, 0);
        uvm_swap_free(anon->an_swslot, 1);
        anon->an_swslot = 0;

        if (anon->u.an_page == NULL) {
                /* this page is no longer only in swap. */
                simple_lock(&uvm.swap_data_lock);
                uvmexp.swpgonly--;
                simple_unlock(&uvm.swap_data_lock);
        }
}
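
/*
 * Accounting note: uvmexp.swpgonly counts pages whose only copy lives
 * on swap, which is why the decrement above applies only when the anon
 * has no resident page.  Assuming KASSERT is available, the invariant
 * at the point of the decrement would be:
 *
 *      KASSERT(uvmexp.swpgonly > 0);
 */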

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *               if there is a resident page:
 *                      if it has a uobject, it is locked by us
 *                      if it is ownerless, we take over as owner
 *               we return the resident page (it can change during
 *               this function)
 * => note that the only time an anon has an ownerless resident page
 *      is if the page was loaned from a uvm_object and the uvm_object
 *      disowned it
 * => this only needs to be called when you want to do an operation
 *      on an anon's resident page and that page has a non-zero loan
 *      count.
 */
struct vm_page *
uvm_anon_lockloanpg(anon)
        struct vm_anon *anon;
{
        struct vm_page *pg;
        boolean_t locked = FALSE;

        /*
         * loop while we have a resident page that has a non-zero loan count.
         * if we successfully get our lock, we will "break" the loop.
         * note that the test for pg->loan_count is not protected -- this
         * may produce false positive results.   note that a false positive
         * result may cause us to do more work than we need to, but it will
         * not produce an incorrect result.
         */

        while (((pg = anon->u.an_page) != NULL) && pg->loan_count != 0) {

                /*
                 * quickly check to see if the page has an object before
                 * bothering to lock the page queues.   this may also produce
                 * a false positive result, but that's ok because we do a real
                 * check after that.
                 *
                 * XXX: quick check -- worth it?   need volatile?
                 */

                if (pg->uobject) {

                        uvm_lock_pageq();
                        if (pg->uobject) {      /* the "real" check */
                                locked =
                                    simple_lock_try(&pg->uobject->vmobjlock);
                        } else {
                                /* object disowned before we got PQ lock */
                                locked = TRUE;
                        }
                        uvm_unlock_pageq();

                        /*
                         * if we didn't get a lock (try lock failed), then we
                         * toggle our anon lock and try again
                         */

                        if (!locked) {
                                simple_unlock(&anon->an_lock);
                                /*
                                 * someone locking the object has a chance to
                                 * lock us right now
                                 */
                                simple_lock(&anon->an_lock);
                                continue;       /* start over */
                        }
                }

                /*
                 * if page is un-owned [i.e. the object dropped its ownership],
                 * then we can take over as owner!
                 */

                if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
                        uvm_lock_pageq();
                        pg->pqflags |= PQ_ANON;         /* take ownership... */
                        pg->loan_count--;       /* ... and drop our loan */
                        uvm_unlock_pageq();
                }

                /*
                 * we did it!   break the loop
                 */
                break;
        }

        /*
         * done!
         */

        return(pg);
}
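
/*
 * Locking sketch (illustrative): loaned pages create two lock orders,
 * object-then-anon on the object side and anon-then-object here, so the
 * function above only tries the object lock and, on failure, briefly
 * drops and retakes the anon lock so an object-side locker can make
 * progress before we retry.  In skeleton form:
 *
 *      while (!simple_lock_try(&pg->uobject->vmobjlock)) {
 *              simple_unlock(&anon->an_lock);
 *              simple_lock(&anon->an_lock);
 *      }
 */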

/*
 * page in every anon that is paged out to a range of swslots.
 *
 * swap_syscall_lock should be held (protects anonblock_list).
 */
boolean_t
anon_swap_off(startslot, endslot)
        int startslot, endslot;
{
        struct uvm_anonblock *anonblock;

        for (anonblock = LIST_FIRST(&anonblock_list);
             anonblock != NULL;
             anonblock = LIST_NEXT(anonblock, list)) {
                int i;

                /*
                 * loop thru all the anons in the anonblock,
                 * paging in where needed.
                 */

                for (i = 0; i < anonblock->count; i++) {
                        struct vm_anon *anon = &anonblock->anons[i];
                        int slot;

                        /*
                         * lock anon to work on it.
                         */

                        simple_lock(&anon->an_lock);

                        /*
                         * is this anon's swap slot in range?
                         */

                        slot = anon->an_swslot;
                        if (slot >= startslot && slot < endslot) {
                                boolean_t rv;

                                /*
                                 * yup, page it in.
                                 */

                                /* locked: anon */
                                rv = anon_pagein(anon);
                                /* unlocked: anon */

                                if (rv) {
                                        return rv;
                                }
                        } else {

                                /*
                                 * nope, unlock and proceed.
                                 */

                                simple_unlock(&anon->an_lock);
                        }
                }
        }
        return FALSE;
}
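
/*
 * Caller sketch (illustrative; swd_drumoffset/swd_drumsize follow
 * uvm_swap.c's struct swapdev and are assumptions here): the swapctl(2)
 * SWAP_OFF path drains a device's slot range before tearing the device
 * down:
 *
 *      if (anon_swap_off(sdp->swd_drumoffset,
 *          sdp->swd_drumoffset + sdp->swd_drumsize))
 *              return (ENOMEM);
 */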

/*
 * fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
static boolean_t
anon_pagein(anon)
        struct vm_anon *anon;
{
        struct vm_page *pg;
        struct uvm_object *uobj;
        int rv;
        UVMHIST_FUNC("anon_pagein"); UVMHIST_CALLED(pdhist);

        /* locked: anon */
        rv = uvmfault_anonget(NULL, NULL, anon);
        /* unlocked: anon */

        switch (rv) {
        case VM_PAGER_OK:
                break;

        case VM_PAGER_ERROR:
        case VM_PAGER_REFAULT:

                /*
                 * nothing more to do on errors.
                 * VM_PAGER_REFAULT can only mean that the anon was freed,
                 * so again there's nothing to do.
                 */

                return FALSE;

#ifdef DIAGNOSTIC
        default:
                panic("anon_pagein: uvmfault_anonget -> %d", rv);
#endif
        }

        /*
         * ok, we've got the page now.
         * mark it as dirty, clear its swslot and un-busy it.
         */

        pg = anon->u.an_page;
        uobj = pg->uobject;
        uvm_swap_free(anon->an_swslot, 1);
        anon->an_swslot = 0;
        pg->flags &= ~(PG_CLEAN);

        /*
         * deactivate the page (to put it on a page queue)
         */

        pmap_clear_reference(pg);
        pmap_page_protect(pg, VM_PROT_NONE);
        uvm_lock_pageq();
        uvm_pagedeactivate(pg);
        uvm_unlock_pageq();

        /*
         * unlock the anon and we're done.
         */

        simple_unlock(&anon->an_lock);
        if (uobj) {
                simple_unlock(&uobj->vmobjlock);
        }
        return FALSE;
}