/*	$NetBSD: uvm_anon.c,v 1.3 1999/08/14 06:25:48 ross Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

/*
 * allocate anons
 */
void
uvm_anon_init()
{
	struct vm_anon *anon;
	int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */
	int lcv;

	/*
	 * Allocate the initial anons.
	 */
	anon = (struct vm_anon *)uvm_km_alloc(kernel_map,
	    sizeof(*anon) * nanon);
	if (anon == NULL) {
		printf("uvm_anon_init: can not allocate %d anons\n", nanon);
		panic("uvm_anon_init");
	}

	memset(anon, 0, sizeof(*anon) * nanon);
	uvm.afree = NULL;
	uvmexp.nanon = uvmexp.nfreeanon = nanon;
	for (lcv = 0 ; lcv < nanon ; lcv++) {
		anon[lcv].u.an_nxt = uvm.afree;
		uvm.afree = &anon[lcv];
		simple_lock_init(&uvm.afree->an_lock);
	}
	simple_lock_init(&uvm.afreelock);
}

/*
 * add some more anons to the free pool.  called when we add
 * more swap space.
 */
void
uvm_anon_add(pages)
	int pages;
{
	struct vm_anon *anon;
	int lcv;

	anon = (struct vm_anon *)uvm_km_alloc(kernel_map,
	    sizeof(*anon) * pages);

	/* XXX Should wait for VM to free up. */
	if (anon == NULL) {
		printf("uvm_anon_add: can not allocate %d anons\n", pages);
		panic("uvm_anon_add");
	}

	simple_lock(&uvm.afreelock);
	memset(anon, 0, sizeof(*anon) * pages);
	uvmexp.nanon += pages;
	uvmexp.nfreeanon += pages;
	for (lcv = 0; lcv < pages; lcv++) {
		anon[lcv].u.an_nxt = uvm.afree;
		uvm.afree = &anon[lcv];
		simple_lock_init(&uvm.afree->an_lock);
	}
	simple_unlock(&uvm.afreelock);
}
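
/*
 * Example (illustrative sketch only): a caller that has just attached
 * new swap space could grow the anon pool to match.  "npages" is a
 * hypothetical variable standing in for whatever page count the
 * swap-attach code computes.
 *
 *	uvm_anon_add(npages);
 */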

/*
 * allocate an anon
 */
struct vm_anon *
uvm_analloc()
{
	struct vm_anon *a;

	simple_lock(&uvm.afreelock);
	a = uvm.afree;
	if (a) {
		uvm.afree = a->u.an_nxt;
		uvmexp.nfreeanon--;
		a->an_ref = 1;
		a->an_swslot = 0;
		a->u.an_page = NULL;	/* so we can free quickly */
	}
	simple_unlock(&uvm.afreelock);
	return(a);
}
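
/*
 * Example (illustrative sketch only): the usual life cycle of an anon
 * as seen by a caller.  uvm_analloc() hands back an anon with a
 * reference count of 1 and no resident page or swap slot; once the
 * reference count has dropped back to zero and the anon is out of its
 * amap, uvm_anfree() (below) releases it.  The error handling is a
 * stand-in, not code from any particular caller.
 *
 *	struct vm_anon *a;
 *
 *	a = uvm_analloc();		take an anon off uvm.afree
 *	if (a == NULL)
 *		return;			no anons left (hypothetical handling)
 *	...				install a page, enter it in an amap
 *	uvm_anfree(a);			refs gone: strip data, free the anon
 */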

/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *    an amap).
 * => anon must be unlocked and have a zero reference count.
 * => we may lock the pageq's.
 */
void
uvm_anfree(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);

	/*
	 * get page
	 */

	pg = anon->u.an_page;

	/*
	 * if there is a resident page and it is loaned, then anon may not
	 * own it.  call out to uvm_anon_lockloanpg() to ensure the real owner
	 * of the page has been identified and locked.
	 */

	if (pg && pg->loan_count)
		pg = uvm_anon_lockloanpg(anon);

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */

	if (pg) {

		/*
		 * if the page is owned by a uobject (now locked), then we must
		 * kill the loan on the page rather than free it.
		 */

		if (pg->uobject) {

			/* kill loan */
			uvm_lock_pageq();
#ifdef DIAGNOSTIC
			if (pg->loan_count < 1)
				panic("uvm_anfree: obj owned page "
				    "with no loan count");
#endif
			pg->loan_count--;
			pg->uanon = NULL;
			uvm_unlock_pageq();
			simple_unlock(&pg->uobject->vmobjlock);

		} else {

			/*
			 * page has no uobject, so we must be the owner of it.
			 *
			 * if page is busy then we just mark it as released
			 * (whoever has it busy must check for this when they
			 * wake up).  if the page is not busy then we can
			 * free it now.
			 */

			if ((pg->flags & PG_BUSY) != 0) {
				/* tell them to dump it when done */
				pg->flags |= PG_RELEASED;
				UVMHIST_LOG(maphist,
				    "  anon 0x%x, page 0x%x: BUSY (released!)",
				    anon, pg, 0, 0);
				return;
			}

			pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
			uvm_lock_pageq();	/* lock out pagedaemon */
			uvm_pagefree(pg);	/* bye bye */
			uvm_unlock_pageq();	/* free the daemon */

			UVMHIST_LOG(maphist,"  anon 0x%x, page 0x%x: freed now!",
			    anon, pg, 0, 0);
		}
	}

	/*
	 * free any swap resources.
	 */
	uvm_anon_dropswap(anon);

	/*
	 * now that we've stripped the data areas from the anon, free the anon
	 * itself!
	 */
	simple_lock(&uvm.afreelock);
	anon->u.an_nxt = uvm.afree;
	uvm.afree = anon;
	uvmexp.nfreeanon++;
	simple_unlock(&uvm.afreelock);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}

/*
 * uvm_anon_dropswap: release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(anon)
	struct vm_anon *anon;
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);
	if (anon->an_swslot == 0) {
		return;
	}

	UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
	    anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;

	if (anon->u.an_page == NULL) {
		/* this page is no longer only in swap. */
		simple_lock(&uvm.swap_data_lock);
		uvmexp.swpgonly--;
		simple_unlock(&uvm.swap_data_lock);
	}
}
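
/*
 * Example (illustrative sketch only): a caller that still holds a
 * reference must take the anon's lock before discarding its swap slot,
 * per the locking rule above; the surrounding context is hypothetical.
 * (uvm_anfree() above takes the other branch of the rule: reference
 * count zero, no lock needed.)
 *
 *	simple_lock(&anon->an_lock);
 *	uvm_anon_dropswap(anon);	anon->an_swslot is now 0
 *	simple_unlock(&anon->an_lock);
 */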

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *    if there is a resident page:
 *      if it has a uobject, it is locked by us
 *      if it is ownerless, we take over as owner
 *    we return the resident page (it can change during
 *    this function)
 * => note that the only time an anon has an ownerless resident page
 *    is if the page was loaned from a uvm_object and the uvm_object
 *    disowned it
 * => this only needs to be called when you want to do an operation
 *    on an anon's resident page and that page has a non-zero loan
 *    count.
 */
struct vm_page *
uvm_anon_lockloanpg(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	boolean_t locked = FALSE;

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.  note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->u.an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.  this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 *
		 * XXX: quick check -- worth it?  need volatile?
		 */

		if (pg->uobject) {

			uvm_lock_pageq();
			if (pg->uobject) {	/* the "real" check */
				locked =
				    simple_lock_try(&pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = TRUE;
			}
			uvm_unlock_pageq();

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				simple_unlock(&anon->an_lock);
				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 */
				simple_lock(&anon->an_lock);
				continue;	/* start over */
			}
		}

		/*
		 * if page is un-owned [i.e. the object dropped its ownership],
		 * then we can take over as owner!
		 */

		if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
			uvm_lock_pageq();
			pg->pqflags |= PQ_ANON;		/* take ownership... */
			pg->loan_count--;		/* ... and drop our loan */
			uvm_unlock_pageq();
		}

		/*
		 * we did it!   break the loop
		 */
		break;
	}

	/*
	 * done!
	 */

	return(pg);
}
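
/*
 * Example (illustrative sketch only): a caller holding the anon lock
 * that wants to operate on a possibly-loaned resident page re-fetches
 * the page through uvm_anon_lockloanpg() so the true owner is locked
 * first (compare the similar dance at the top of uvm_anfree() above).
 *
 *	simple_lock(&anon->an_lock);
 *	pg = anon->u.an_page;
 *	if (pg && pg->loan_count)
 *		pg = uvm_anon_lockloanpg(anon);
 *	...				operate on pg; owner is locked
 *	simple_unlock(&anon->an_lock);
 */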