/*	$NetBSD: uvm_anon.c,v 1.70 2019/12/31 22:42:51 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.70 2019/12/31 22:42:51 ad Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>
#include <uvm/uvm_pdpolicy.h>
static struct pool_cache uvm_anon_cache;

static int uvm_anon_ctor(void *, void *, int);

void
uvm_anon_init(void)
{

	pool_cache_bootstrap(&uvm_anon_cache, sizeof(struct vm_anon), 0, 0,
	    PR_LARGECACHE, "anonpl", NULL, IPL_NONE, uvm_anon_ctor,
	    NULL, NULL);
}

static int
uvm_anon_ctor(void *arg, void *object, int flags)
{
	struct vm_anon *anon = object;

	anon->an_ref = 0;
	anon->an_lock = NULL;
	anon->an_page = NULL;
#if defined(VMSWAP)
	anon->an_swslot = 0;
#endif
	return 0;
}
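
/*
 * Note: the constructed state above (an_ref of zero, no lock, no page
 * and, with VMSWAP, no swap slot) is the invariant for an idle anon in
 * the cache: uvm_analloc() asserts it when taking an anon out, and
 * uvm_anon_free() asserts it again when putting one back.
 */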

/*
 * uvm_analloc: allocate a new anon.
 *
 * => anon will have no lock associated.
 */
struct vm_anon *
uvm_analloc(void)
{
	struct vm_anon *anon;

	anon = pool_cache_get(&uvm_anon_cache, PR_NOWAIT);
	if (anon) {
		KASSERT(anon->an_ref == 0);
		KASSERT(anon->an_lock == NULL);
		KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
		KASSERT(anon->an_swslot == 0);
#endif
		anon->an_ref = 1;
	}
	return anon;
}
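
/*
 * Typical caller pattern, a sketch only (the amap variable and the error
 * handling are illustrative, not taken from this file): the caller must
 * supply a lock, normally by sharing the owning amap's lock, since the
 * anon comes back with an_lock == NULL.
 *
 *	struct vm_anon *anon;
 *
 *	anon = uvm_analloc();
 *	if (anon == NULL)
 *		return ENOMEM;			(PR_NOWAIT may fail)
 *	anon->an_lock = amap->am_lock;		(share the amap's lock)
 */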

/*
 * uvm_anon_dispose: free any resident page or swap resources of anon.
 *
 * => anon must be removed from the amap (if anon was in an amap).
 * => amap must be locked; we may drop and re-acquire the lock here.
 */
static bool
uvm_anon_dispose(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;

	UVMHIST_FUNC("uvm_anon_dispose"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=%#jx)", (uintptr_t)anon, 0,0,0);

	KASSERT(mutex_owned(anon->an_lock));

	/*
	 * Dispose of the page, if it is resident.
	 */

	if (pg) {
		KASSERT(anon->an_lock != NULL);

		/*
		 * If there is a resident page and it is loaned, then anon
		 * may not own it.  Call out to uvm_anon_lockloanpg() to
		 * identify and lock the real owner of the page.
		 */

		if (pg->loan_count) {
			pg = uvm_anon_lockloanpg(anon);
		}

		/*
		 * If the page is owned by a UVM object (now locked),
		 * then kill the loan on the page rather than free it,
		 * and release the object lock.
		 */

		if (pg->uobject) {
			mutex_enter(&pg->interlock);
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->uanon = NULL;
			mutex_exit(&pg->interlock);
			mutex_exit(pg->uobject->vmobjlock);
		} else {

			/*
			 * If the page has no UVM object, then anon is the
			 * owner, and it is already locked.
			 */

			KASSERT((pg->flags & PG_RELEASED) == 0);
			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * If the page is busy, mark it as PG_RELEASED, so
			 * that uvm_anon_release(9) will release it later.
			 */

			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_RELEASED;
				mutex_obj_hold(anon->an_lock);
				return false;
			}
			uvm_pagefree(pg);
			UVMHIST_LOG(maphist, "anon %#jx, page %#jx: "
			    "freed now!", (uintptr_t)anon, (uintptr_t)pg,
			    0, 0);
		}
	}

#if defined(VMSWAP)
	if (pg == NULL && anon->an_swslot > 0) {
		/*
		 * The anon's data lives only in swap and is about to be
		 * freed, so adjust the swap-only page counter.
		 */
		KASSERT(uvmexp.swpgonly > 0);
		atomic_dec_uint(&uvmexp.swpgonly);
	}
#endif

	/*
	 * Free any swap resources, leave a page replacement hint.
	 */

	uvm_anon_dropswap(anon);
	uvmpdpol_anfree(anon);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
	return true;
}
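
/*
 * Note on the "return false" path above: when the page is busy, dispose
 * leaves it marked PG_RELEASED and takes an extra reference on the anon
 * lock (mutex_obj_hold).  Whoever later un-busies the page is expected
 * to notice PG_RELEASED and call uvm_anon_release(), which finishes the
 * teardown and drops that extra lock reference via mutex_obj_free().
 */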

/*
 * uvm_anon_free: free a single anon.
 *
 * => anon must be already disposed.
 */
void
uvm_anon_free(struct vm_anon *anon)
{

	KASSERT(anon->an_ref == 0);
	KASSERT(anon->an_lock == NULL);
	KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
	KASSERT(anon->an_swslot == 0);
#endif
	pool_cache_put(&uvm_anon_cache, anon);
}

/*
 * uvm_anon_freelst: free a linked list of anon structures.
 *
 * => amap must be locked, we will unlock it.
 */
void
uvm_anon_freelst(struct vm_amap *amap, struct vm_anon *anonlst)
{
	struct vm_anon *next;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(mutex_owned(amap->am_lock));

	for (; anonlst != NULL; anonlst = next) {
		next = anonlst->an_link;
		/* Note: an_link overlays an_ref, so this clears an_ref too. */
		anonlst->an_link = NULL;
		if (uvm_anon_dispose(anonlst)) {
			anonlst->an_lock = NULL;
			uvm_anon_free(anonlst);
		}
	}
	amap_unlock(amap);
}
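
/*
 * Expected calling pattern, sketched (the slot walk is illustrative):
 * an amap teardown path drops each anon's reference under am_lock,
 * chains the ones that hit zero through an_link, and frees them all in
 * one call, which also unlocks the amap:
 *
 *	struct vm_anon *tofree = NULL;
 *
 *	amap_lock(amap);
 *	(for each slot whose anon's reference count drops to zero:)
 *		anon->an_link = tofree;
 *		tofree = anon;
 *	uvm_anon_freelst(amap, tofree);	(frees the list, unlocks amap)
 */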

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page owner.
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *	if there is a resident page:
 *		if it has a uobject, it is locked by us
 *		if it is ownerless, we take over as owner
 *	we return the resident page (it can change during
 *	this function)
 * => note that the only time an anon has an ownerless resident page
 *    is if the page was loaned from a uvm_object and the uvm_object
 *    disowned it
 * => this only needs to be called when you want to do an operation
 *    on an anon's resident page and that page has a non-zero loan
 *    count.
 */
struct vm_page *
uvm_anon_lockloanpg(struct vm_anon *anon)
{
	struct vm_page *pg;

	KASSERT(mutex_owned(anon->an_lock));

	/*
	 * Loop while we have a resident page that has a non-zero loan count.
	 * If we successfully get our lock, we will "break" out of the loop.
	 * Note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.  A false positive may cause
	 * us to do more work than necessary, but it will not produce an
	 * incorrect result.
	 */

	while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {
		mutex_enter(&pg->interlock);
		if (pg->uobject) {
			/*
			 * If the try-lock on the object fails, briefly
			 * drop and re-take the anon lock (kpause does
			 * this for us) and then try again.
			 */

			if (!mutex_tryenter(pg->uobject->vmobjlock)) {
				/*
				 * Someone locking the object has a chance to
				 * lock us right now.
				 *
				 * XXX Better than yielding but inadequate.
				 */
				mutex_exit(&pg->interlock);
				kpause("livelock", false, 1, anon->an_lock);
				continue;
			}
		}

		/*
		 * If the page is un-owned, i.e. the object dropped its
		 * ownership, then we have to take over as the owner.
		 */

		if (pg->uobject == NULL && (pg->flags & PG_ANON) == 0) {
			pg->flags |= PG_ANON;
			pg->loan_count--;
		}
		mutex_exit(&pg->interlock);
		break;
	}
	return pg;
}
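
/*
 * Caller pattern, as uvm_anon_dispose() above uses it (a sketch of the
 * shape, not a new interface): test loan_count first, let
 * uvm_anon_lockloanpg() lock the real owner, operate on the page, and
 * drop the object lock afterwards if the page turned out to be
 * object-owned:
 *
 *	if (pg->loan_count != 0)
 *		pg = uvm_anon_lockloanpg(anon);
 *	(operate on pg...)
 *	if (pg->uobject != NULL)
 *		mutex_exit(pg->uobject->vmobjlock);
 */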

#if defined(VMSWAP)

/*
 * uvm_anon_pagein: fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uvm_anon_pagein(struct vm_amap *amap, struct vm_anon *anon)
{
	struct vm_page *pg;
	struct uvm_object *uobj;

	KASSERT(mutex_owned(anon->an_lock));
	KASSERT(anon->an_lock == amap->am_lock);

	/*
	 * Get the page of the anon.
	 */

	switch (uvmfault_anonget(NULL, amap, anon)) {
	case 0:
		/* Success - we have the page. */
		KASSERT(mutex_owned(anon->an_lock));
		break;
	case EIO:
	case ERESTART:
		/*
		 * Nothing more to do on errors.  ERESTART means that the
		 * anon was freed.
		 */
		return false;
	default:
		return true;
	}

	/*
	 * Mark the page as dirty and free its swap slot.
	 */

	pg = anon->an_page;
	uobj = pg->uobject;
	if (anon->an_swslot > 0) {
		uvm_swap_free(anon->an_swslot, 1);
	}
	anon->an_swslot = 0;
	pg->flags &= ~PG_CLEAN;

	/*
	 * Deactivate the page (to put it on a page queue).
	 */

	uvm_pagelock(pg);
	uvm_pagedeactivate(pg);
	uvm_pageunlock(pg);
	if (pg->flags & PG_WANTED) {
		pg->flags &= ~PG_WANTED;
		wakeup(pg);
	}

	mutex_exit(anon->an_lock);
	if (uobj) {
		mutex_exit(uobj->vmobjlock);
	}
	return false;
}
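
/*
 * Typical caller, sketched after the swap-off path (the loop details are
 * illustrative): walk an amap's anons under am_lock and page each
 * swapped-out anon back in.  uvm_anon_pagein() always drops the lock,
 * so it must be re-taken before the next slot is examined.
 *
 *	amap_lock(amap);
 *	(for each slot whose anon has an_swslot != 0:)
 *		if (uvm_anon_pagein(amap, anon)) {
 *			(no memory - back off and retry later)
 *		}
 *		amap_lock(amap);	(pagein released am_lock)
 *	amap_unlock(amap);
 */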

/*
 * uvm_anon_dropswap: release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(struct vm_anon *anon)
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);

	if (anon->an_swslot == 0)
		return;

	UVMHIST_LOG(maphist,"freeing swap for anon %#jx, paged to swslot 0x%jx",
	    (uintptr_t)anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
}

#endif

/*
 * uvm_anon_release: release an anon and its page.
 *
 * => anon should not have any references.
 * => anon must be locked.
 */

void
uvm_anon_release(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;
	bool success __diagused;

	KASSERT(mutex_owned(anon->an_lock));
	KASSERT(pg != NULL);
	KASSERT((pg->flags & PG_RELEASED) != 0);
	KASSERT((pg->flags & PG_BUSY) != 0);
	KASSERT(pg->uobject == NULL);
	KASSERT(pg->uanon == anon);
	KASSERT(pg->loan_count == 0);
	KASSERT(anon->an_ref == 0);

	uvm_pagefree(pg);
	KASSERT(anon->an_page == NULL);
	/* dispose should succeed as no one can reach this anon anymore. */
	success = uvm_anon_dispose(anon);
	KASSERT(success);
	mutex_exit(anon->an_lock);
	/* Note: extra reference is held for PG_RELEASED case. */
	mutex_obj_free(anon->an_lock);
	anon->an_lock = NULL;
	uvm_anon_free(anon);
}
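
/*
 * Who calls uvm_anon_release() (a sketch based on the PG_RELEASED
 * protocol above, not a verbatim caller): the thread that owns the busy
 * page and, once done with it, finds PG_RELEASED set.  Instead of just
 * un-busying the page, it hands the whole anon over:
 *
 *	mutex_enter(anon->an_lock);
 *	if (pg->flags & PG_RELEASED) {
 *		uvm_anon_release(anon);	(frees page, anon and lock ref)
 *		return;
 *	}
 */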