/*	$NetBSD: uvm_anon.c,v 1.3 1999/08/14 06:25:48 ross Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */
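
/*
 * Overview: an anon describes a single page of anonymous memory.  Its
 * data lives either in a resident page (u.an_page) or in a swap slot
 * (an_swslot).  Free anons sit on a singly-linked list headed by
 * uvm.afree and threaded through u.an_nxt, protected by uvm.afreelock;
 * per-anon state is protected by the anon's own an_lock.
 */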

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

/*
 * uvm_anon_init: allocate the initial pool of anons.
 */
void
uvm_anon_init()
{
	struct vm_anon *anon;
	int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */
	int lcv;

	/*
	 * Allocate the initial anons.
	 */
	anon = (struct vm_anon *)uvm_km_alloc(kernel_map,
	    sizeof(*anon) * nanon);
	if (anon == NULL) {
		printf("uvm_anon_init: can not allocate %d anons\n", nanon);
		panic("uvm_anon_init");
	}

	memset(anon, 0, sizeof(*anon) * nanon);
	uvm.afree = NULL;
	uvmexp.nanon = uvmexp.nfreeanon = nanon;
	for (lcv = 0 ; lcv < nanon ; lcv++) {
		anon[lcv].u.an_nxt = uvm.afree;
		uvm.afree = &anon[lcv];
		simple_lock_init(&uvm.afree->an_lock);
	}
	simple_lock_init(&uvm.afreelock);
}
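
/*
 * Sizing note (illustrative): nanon above is roughly 15/16 of the pages
 * free at init time.  For example, with uvmexp.free == 16384 pages:
 *
 *	nanon = 16384 - (16384 / 16) = 15360
 *
 * so 15360 anon structures are carved out of kernel_map in one chunk
 * and linked onto the uvm.afree free list.
 */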

/*
 * uvm_anon_add: add anons to the free pool.  called when we add more
 * swap space.
 */
void
uvm_anon_add(pages)
	int pages;
{
	struct vm_anon *anon;
	int lcv;

	anon = (struct vm_anon *)uvm_km_alloc(kernel_map,
	    sizeof(*anon) * pages);

	/* XXX Should wait for VM to free up. */
	if (anon == NULL) {
		printf("uvm_anon_add: can not allocate %d anons\n", pages);
		panic("uvm_anon_add");
	}

	simple_lock(&uvm.afreelock);
	memset(anon, 0, sizeof(*anon) * pages);
	uvmexp.nanon += pages;
	uvmexp.nfreeanon += pages;
	for (lcv = 0; lcv < pages; lcv++) {
		simple_lock_init(&anon[lcv].an_lock);
		anon[lcv].u.an_nxt = uvm.afree;
		uvm.afree = &anon[lcv];
	}
	simple_unlock(&uvm.afreelock);
}
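
/*
 * Illustrative caller sketch (an assumption, not taken from this file):
 * the swap-configuration path is expected to call uvm_anon_add() with
 * the number of swap pages just brought online, e.g.
 *
 *	uvm_anon_add(npages);		npages = pages of new swap
 *
 * so that there is roughly one anon available per page of configured
 * swap space.
 */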

/*
 * uvm_analloc: allocate an anon from the free list.
 *
 * => returns the anon with a reference count of 1 (and no swap slot or
 *	resident page), or NULL if no anons are free.
 */
struct vm_anon *
uvm_analloc()
{
	struct vm_anon *a;

	simple_lock(&uvm.afreelock);
	a = uvm.afree;
	if (a) {
		uvm.afree = a->u.an_nxt;
		uvmexp.nfreeanon--;
		a->an_ref = 1;
		a->an_swslot = 0;
		a->u.an_page = NULL;		/* so we can free quickly */
	}
	simple_unlock(&uvm.afreelock);
	return(a);
}
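
/*
 * Illustrative caller sketch (an assumption; the real callers live in
 * the fault and amap code, not in this file):
 *
 *	anon = uvm_analloc();
 *	if (anon == NULL) {
 *		... out of anons: fail the operation or wait ...
 *	}
 *	... install the anon in an amap and give it a resident page ...
 *
 * the anon comes back with an_ref == 1, an_swslot == 0 and no resident
 * page, so any further setup is up to the caller.
 */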

/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *	an amap).
 * => anon must be unlocked and have a zero reference count.
 * => we may lock the pageq's.
 */
void
uvm_anfree(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);

	/*
	 * get page
	 */

	pg = anon->u.an_page;

	/*
	 * if there is a resident page and it is loaned, then anon may not
	 * own it.  call out to uvm_anon_lockloanpg() to ensure the real
	 * owner of the page has been identified and locked.
	 */

	if (pg && pg->loan_count)
		pg = uvm_anon_lockloanpg(anon);

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */

	if (pg) {

		/*
		 * if the page is owned by a uobject (now locked), then we must
		 * kill the loan on the page rather than free it.
		 */

		if (pg->uobject) {

			/* kill loan */
			uvm_lock_pageq();
#ifdef DIAGNOSTIC
			if (pg->loan_count < 1)
				panic("uvm_anfree: obj owned page "
				    "with no loan count");
#endif
			pg->loan_count--;
			pg->uanon = NULL;
			uvm_unlock_pageq();
			simple_unlock(&pg->uobject->vmobjlock);

		} else {

			/*
			 * page has no uobject, so we must be the owner of it.
			 *
			 * if the page is busy then we just mark it as released
			 * (whoever has it busy must check for this when they
			 * wake up).  if the page is not busy then we can
			 * free it now.
			 */

			if ((pg->flags & PG_BUSY) != 0) {
				/* tell them to dump it when done */
				pg->flags |= PG_RELEASED;
				UVMHIST_LOG(maphist,
				    "  anon 0x%x, page 0x%x: BUSY (released!)",
				    anon, pg, 0, 0);
				return;
			}

			pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
			uvm_lock_pageq();	/* lock out pagedaemon */
			uvm_pagefree(pg);	/* bye bye */
			uvm_unlock_pageq();	/* free the daemon */

			UVMHIST_LOG(maphist,"  anon 0x%x, page 0x%x: freed now!",
			    anon, pg, 0, 0);
		}
	}

	/*
	 * free any swap resources.
	 */
	uvm_anon_dropswap(anon);

	/*
	 * now that we've stripped the data areas from the anon, free the anon
	 * itself!
	 */
	simple_lock(&uvm.afreelock);
	anon->u.an_nxt = uvm.afree;
	uvm.afree = anon;
	uvmexp.nfreeanon++;
	simple_unlock(&uvm.afreelock);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}
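
/*
 * Illustrative sketch of the reference-drop convention described above
 * (an assumption; the real logic lives in the amap code):
 *
 *	simple_lock(&anon->an_lock);
 *	refs = --anon->an_ref;
 *	simple_unlock(&anon->an_lock);
 *	if (refs == 0)
 *		uvm_anfree(anon);	anon is unlocked, refcount zero
 *
 * note that uvm_anfree() returns early, without freeing the anon, if
 * its page is busy; PG_RELEASED tells whoever holds the page busy to
 * finish the teardown when they are done with it.
 */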

/*
 * uvm_anon_dropswap: release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(anon)
	struct vm_anon *anon;
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);
	if (anon->an_swslot == 0) {
		return;
	}

	UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
	    anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;

	if (anon->u.an_page == NULL) {
		/* the data was only in swap; adjust the swap-only count. */
		simple_lock(&uvm.swap_data_lock);
		uvmexp.swpgonly--;
		simple_unlock(&uvm.swap_data_lock);
	}
}
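
/*
 * Illustrative caller sketch (an assumption): a path that is about to
 * make the in-core copy of the anon's data authoritative again
 * invalidates the stale swap copy like this:
 *
 *	simple_lock(&anon->an_lock);
 *	uvm_anon_dropswap(anon);	an_swslot is now 0
 *	simple_unlock(&anon->an_lock);
 *
 * uvmexp.swpgonly is only adjusted when the anon had no resident page,
 * i.e. when the swap slot held the sole copy of the data.
 */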

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *	if there is a resident page:
 *		if it has a uobject, it is locked by us
 *		if it is ownerless, we take over as owner
 *	we return the resident page (it can change during
 *	this function)
 * => note that the only time an anon has an ownerless resident page
 *	is if the page was loaned from a uvm_object and the uvm_object
 *	disowned it
 * => this only needs to be called when you want to do an operation
 *	on an anon's resident page and that page has a non-zero loan
 *	count.
 */
struct vm_page *
uvm_anon_lockloanpg(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	boolean_t locked = FALSE;

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.  note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->u.an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.  this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 *
		 * XXX: quick check -- worth it?   need volatile?
		 */

		if (pg->uobject) {

			uvm_lock_pageq();
			if (pg->uobject) {	/* the "real" check */
				locked =
				    simple_lock_try(&pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = TRUE;
			}
			uvm_unlock_pageq();

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				simple_unlock(&anon->an_lock);
				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 */
				simple_lock(&anon->an_lock);
				continue;		/* start over */
			}
		}

		/*
		 * if page is un-owned [i.e. the object dropped its ownership],
		 * then we can take over as owner!
		 */

		if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
			uvm_lock_pageq();
			pg->pqflags |= PQ_ANON;		/* take ownership... */
			pg->loan_count--;	/* ... and drop our loan */
			uvm_unlock_pageq();
		}

		/*
		 * we did it!   break the loop
		 */
		break;
	}

	/*
	 * done!
	 */

	return(pg);
}
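
/*
 * Illustrative caller sketch, modelled on the use in uvm_anfree() above
 * (anything beyond what uvm_anfree() does is an assumption):
 *
 *	pg = anon->u.an_page;			anon is locked
 *	if (pg && pg->loan_count)
 *		pg = uvm_anon_lockloanpg(anon);
 *	... operate on pg ...
 *	if (pg && pg->uobject)
 *		simple_unlock(&pg->uobject->vmobjlock);
 *
 * i.e. if the page turns out to be owned by a uvm_object, this function
 * returns with that object's vmobjlock held and the caller must drop it.
 */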