/*	$NetBSD: uvm_anon.c,v 1.54 2011/06/12 03:36:02 rmind Exp $	*/
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 /*
29 * uvm_anon.c: uvm anon ops
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.54 2011/06/12 03:36:02 rmind Exp $");
34
35 #include "opt_uvmhist.h"
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/pool.h>
40 #include <sys/kernel.h>
41
42 #include <uvm/uvm.h>
43 #include <uvm/uvm_swap.h>
44 #include <uvm/uvm_pdpolicy.h>
45
46 static struct pool_cache uvm_anon_cache;
47
48 static int uvm_anon_ctor(void *, void *, int);
49 static void uvm_anon_dtor(void *, void *);
50
/*
 * uvm_anon_init: set up the anon allocator.  called once at boot to
 * bootstrap the pool cache that all anons are allocated from.
 */
void
uvm_anon_init(void)
{

	/*
	 * PR_LARGECACHE: anons are allocated and freed in bulk, so use
	 * larger per-CPU caches.  ctor/dtor establish the "free anon"
	 * invariants that uvm_analloc() asserts.
	 */
	pool_cache_bootstrap(&uvm_anon_cache, sizeof(struct vm_anon), 0, 0,
	    PR_LARGECACHE, "anonpl", NULL, IPL_NONE, uvm_anon_ctor,
	    uvm_anon_dtor, NULL);
}
62
63 static int
64 uvm_anon_ctor(void *arg, void *object, int flags)
65 {
66 struct vm_anon *anon = object;
67
68 anon->an_ref = 0;
69 anon->an_page = NULL;
70 #if defined(VMSWAP)
71 anon->an_swslot = 0;
72 #endif /* defined(VMSWAP) */
73
74 return 0;
75 }
76
/*
 * uvm_anon_dtor: pool cache destructor.  intentionally empty: an anon
 * carries no per-object resources to tear down here.
 */
static void
uvm_anon_dtor(void *arg, void *object)
{

}
82
/*
 * uvm_analloc: allocate a single anon.
 *
 * => anon is returned with a reference count of 1 and an_lock set to
 *    NULL; the caller is responsible for assigning a lock before use.
 */
88 struct vm_anon *
89 uvm_analloc(void)
90 {
91 struct vm_anon *anon;
92
93 anon = pool_cache_get(&uvm_anon_cache, PR_NOWAIT);
94 if (anon) {
95 KASSERT(anon->an_ref == 0);
96 KASSERT(anon->an_page == NULL);
97 #if defined(VMSWAP)
98 KASSERT(anon->an_swslot == 0);
99 #endif /* defined(VMSWAP) */
100 anon->an_ref = 1;
101 anon->an_lock = NULL;
102 }
103 return anon;
104 }
105
106 /*
107 * uvm_anfree: free a linked list of anon structures
108 *
109 * => caller must remove anon from its amap before calling (if it was in
110 * an amap).
111 * => amap must be locked, or anon must not be associated with a lock
112 * or any other objects.
113 * => we may lock the pageq's.
114 * => we may drop and re-acquire amap lock
115 */
116
static void
uvm_anfree1(struct vm_anon *anon)
{
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);

	/* if the anon has a lock assigned, the caller must hold it. */
	KASSERT(anon->an_lock == NULL || mutex_owned(anon->an_lock));

	/*
	 * get the resident page, if any.
	 */

	pg = anon->an_page;

	/*
	 * if there is a resident page and it is loaned, then anon may not
	 * own it.  call out to uvm_anon_lockloanpg() to ensure the real
	 * owner of the page has been identified and locked.
	 */

	if (pg && pg->loan_count) {
		KASSERT(anon->an_lock != NULL);
		pg = uvm_anon_lockloanpg(anon);
	}

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */

	if (pg) {
		KASSERT(anon->an_lock != NULL);

		/*
		 * if the page is owned by a uobject (now locked), then we must
		 * kill the loan on the page rather than free it.
		 */

		if (pg->uobject) {
			mutex_enter(&uvm_pageqlock);
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->uanon = NULL;
			mutex_exit(&uvm_pageqlock);
			mutex_exit(pg->uobject->vmobjlock);
		} else {

			/*
			 * page has no uobject, so we must be the owner of it.
			 */

			KASSERT((pg->flags & PG_RELEASED) == 0);
			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * if the page is busy, mark it as PG_RELEASED
			 * so that uvm_anon_release() will release it later.
			 * note: in that case the anon itself is NOT freed
			 * here; uvm_anon_release() finishes the job.
			 */

			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_RELEASED;
				return;
			}
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
			UVMHIST_LOG(maphist, "anon 0x%x, page 0x%x: "
			    "freed now!", anon, pg, 0, 0);
		}
	}
#if defined(VMSWAP)
	if (pg == NULL && anon->an_swslot > 0) {
		/*
		 * no resident page but a swap slot: this anon's data lives
		 * only in swap, so drop the global swap-only page count
		 * before the slot is freed below.
		 */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly > 0);
		uvmexp.swpgonly--;
		mutex_exit(&uvm_swap_data_lock);
	}
#endif /* defined(VMSWAP) */

	/*
	 * free any swap resources.
	 */

	uvm_anon_dropswap(anon);

	/*
	 * give a page replacement hint.
	 */

	uvmpdpol_anfree(anon);

	/*
	 * now that we've stripped the data areas from the anon,
	 * free the anon itself.
	 */

	KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
	KASSERT(anon->an_swslot == 0);
#endif /* defined(VMSWAP) */

	if (anon->an_lock != NULL) {
		mutex_obj_free(anon->an_lock);
	}
	pool_cache_put(&uvm_anon_cache, anon);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}
226
227 void
228 uvm_anfree(struct vm_anon *anon)
229 {
230 struct vm_anon *next;
231
232 for (; anon != NULL; anon = next) {
233 next = anon->an_link;
234 anon->an_link = NULL; /* also clears reference count */
235 uvm_anfree1(anon);
236 }
237 }
238
239 #if defined(VMSWAP)
240
241 /*
242 * uvm_anon_dropswap: release any swap resources from this anon.
243 *
244 * => anon must be locked or have a reference count of 0.
245 */
246 void
247 uvm_anon_dropswap(struct vm_anon *anon)
248 {
249 UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);
250
251 if (anon->an_swslot == 0)
252 return;
253
254 UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
255 anon, anon->an_swslot, 0, 0);
256 uvm_swap_free(anon->an_swslot, 1);
257 anon->an_swslot = 0;
258 }
259
260 #endif /* defined(VMSWAP) */
261
262 /*
263 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
264 *
265 * => anon is locked by caller
266 * => on return: anon is locked
267 * if there is a resident page:
268 * if it has a uobject, it is locked by us
269 * if it is ownerless, we take over as owner
270 * we return the resident page (it can change during
271 * this function)
272 * => note that the only time an anon has an ownerless resident page
273 * is if the page was loaned from a uvm_object and the uvm_object
274 * disowned it
275 * => this only needs to be called when you want to do an operation
276 * on an anon's resident page and that page has a non-zero loan
277 * count.
278 */
struct vm_page *
uvm_anon_lockloanpg(struct vm_anon *anon)
{
	struct vm_page *pg;
	bool locked = false;

	KASSERT(mutex_owned(anon->an_lock));

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.  note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.  this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 */

		if (pg->uobject) {
			/*
			 * re-check pg->uobject under uvm_pageqlock before
			 * try-locking the object (disowning happens under
			 * the PQ lock -- see the else branch below).
			 */
			mutex_enter(&uvm_pageqlock);
			if (pg->uobject) {
				locked =
				    mutex_tryenter(pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = true;
			}
			mutex_exit(&uvm_pageqlock);

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 *
				 * XXX Better than yielding but inadequate.
				 */
				kpause("livelock", false, 1, anon->an_lock);
				continue;
			}
		}

		/*
		 * if page is un-owned [i.e. the object dropped its ownership],
		 * then we can take over as owner!
		 */

		if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
			mutex_enter(&uvm_pageqlock);
			pg->pqflags |= PQ_ANON;
			pg->loan_count--;	/* anon is now the owner */
			mutex_exit(&uvm_pageqlock);
		}
		break;
	}
	return(pg);
}
348
349 #if defined(VMSWAP)
350
351 /*
352 * fetch an anon's page.
353 *
354 * => anon must be locked, and is unlocked upon return.
355 * => returns true if pagein was aborted due to lack of memory.
356 */
357
bool
uvm_anon_pagein(struct vm_anon *anon)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int rv;

	/* locked: anon */
	KASSERT(mutex_owned(anon->an_lock));

	/* fetch the page; may drop and re-take locks internally. */
	rv = uvmfault_anonget(NULL, NULL, anon);

	/*
	 * if rv == 0, anon is still locked, else anon
	 * is unlocked
	 */

	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return false;

	default:
		/* out of memory: tell the caller to abort the pagein. */
		return true;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */

	pg = anon->an_page;
	uobj = pg->uobject;
	if (anon->an_swslot > 0)
		uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
	/* the swap copy is gone, so the page must be considered dirty. */
	pg->flags &= ~(PG_CLEAN);

	/*
	 * deactivate the page (to put it on a page queue)
	 */

	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count == 0)
		uvm_pagedeactivate(pg);
	mutex_exit(&uvm_pageqlock);

	/* wake up anyone sleeping on this page. */
	if (pg->flags & PG_WANTED) {
		wakeup(pg);
		pg->flags &= ~(PG_WANTED);
	}

	/*
	 * unlock the anon and we're done.
	 */

	mutex_exit(anon->an_lock);
	if (uobj) {
		mutex_exit(uobj->vmobjlock);
	}
	return false;
}
430
431 #endif /* defined(VMSWAP) */
432
433 /*
434 * uvm_anon_release: release an anon and its page.
435 *
436 * => caller must lock the anon.
437 */
438
void
uvm_anon_release(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;

	KASSERT(mutex_owned(anon->an_lock));
	/* the page must be resident, released, busy, and solely ours. */
	KASSERT(pg != NULL);
	KASSERT((pg->flags & PG_RELEASED) != 0);
	KASSERT((pg->flags & PG_BUSY) != 0);
	KASSERT(pg->uobject == NULL);
	KASSERT(pg->uanon == anon);
	KASSERT(pg->loan_count == 0);
	/* no references remain, so the anon itself can be freed too. */
	KASSERT(anon->an_ref == 0);

	mutex_enter(&uvm_pageqlock);
	uvm_pagefree(pg);
	mutex_exit(&uvm_pageqlock);
	mutex_exit(anon->an_lock);

	/* uvm_pagefree() must have disassociated the page from the anon. */
	KASSERT(anon->an_page == NULL);

	uvm_anfree(anon);
}
462