/*	$NetBSD: uvm_pdaemon.c,v 1.103.2.3 2011/12/26 16:03:11 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.103.2.3 2011/12/26 16:03:11 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/module.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define	UVMPD_NUMDIRTYREACTS	16

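/*
 * UVMPD_NUMTRYLOCKOWNER is how many consecutive page-owner trylock
 * failures uvmpd_scan_queue() tolerates before pausing to give the
 * lock holder a chance to run.
 */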
#define	UVMPD_NUMTRYLOCKOWNER	16

/*
 * local prototypes
 */

static void	uvmpd_scan(void);
static void	uvmpd_scan_queue(void);
static void	uvmpd_tune(void);

static unsigned int uvm_pagedaemon_waiters;

/*
 * XXX hack to avoid hangs when large processes fork.
 */
u_int uvm_extrapages;

static kmutex_t	uvm_reclaim_lock;

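/* list of hooks run by the pagedaemon to reclaim memory from other subsystems */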
SLIST_HEAD(uvm_reclaim_hooks, uvm_reclaim_hook) uvm_reclaim_list;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	int timo = 0;

	mutex_spin_enter(&uvm_fpageqlock);

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curlwp == uvm.pagedaemon_lwp && uvmexp.paging == 0) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.  but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	uvm_pagedaemon_waiters++;
	wakeup(&uvm.pagedaemon);	/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm_fpageqlock, false, wmsg, timo);
}

/*
 * uvm_kick_pdaemon: perform checks to determine if we need to
 * give the pagedaemon a nudge, and do so if necessary.
 *
 * => called with uvm_fpageqlock held.
 */

void
uvm_kick_pdaemon(void)
{

	KASSERT(mutex_owned(&uvm_fpageqlock));

	if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
	    (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
	     uvmpdpol_needsscan_p())) {
		wakeup(&uvm.pagedaemon);
	}
}

/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune(void)
{
	int val;

	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	/*
	 * try to keep 0.5% of available RAM free, but limit to between
	 * 128k and 1024k per-CPU.  XXX: what are these values good for?
	 */
	val = uvmexp.npages / 200;
	val = MAX(val, (128*1024) >> PAGE_SHIFT);
	val = MIN(val, (1024*1024) >> PAGE_SHIFT);
	val *= ncpu;

	/* Make sure there's always a user page free. */
	if (val < uvmexp.reserve_kernel + 1)
		val = uvmexp.reserve_kernel + 1;
	uvmexp.freemin = val;

	/* Calculate free target. */
	val = (uvmexp.freemin * 4) / 3;
	if (val <= uvmexp.freemin)
		val = uvmexp.freemin + 1;
	uvmexp.freetarg = val + atomic_swap_uint(&uvm_extrapages, 0);

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	    uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	int bufcnt, npages = 0;
	int extrapages = 0;
	struct pool *pp;
	uint64_t where;
	struct uvm_reclaim_hook *hook;

	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_lwp = curlwp;
	mutex_enter(&uvm_pageqlock);
	npages = uvmexp.npages;
	uvmpd_tune();
	mutex_exit(&uvm_pageqlock);

	/*
	 * main loop
	 */

	for (;;) {
		bool needsscan, needsfree;

		mutex_spin_enter(&uvm_fpageqlock);
		if (uvm_pagedaemon_waiters == 0 || uvmexp.paging > 0) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
			    &uvm_fpageqlock, false, "pgdaemon", 0);
			uvmexp.pdwoke++;
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);
		} else {
			mutex_spin_exit(&uvm_fpageqlock);
		}

		/*
		 * now lock page queues and recompute inactive count
		 */

		mutex_enter(&uvm_pageqlock);
		if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
			npages = uvmexp.npages;
			extrapages = uvm_extrapages;
			mutex_spin_enter(&uvm_fpageqlock);
			uvmpd_tune();
			mutex_spin_exit(&uvm_fpageqlock);
		}

		uvmpdpol_tune();

		/*
		 * Estimate a hint.  Note that bufmem is returned to
		 * the system only when an entire pool page is empty.
		 */
		mutex_spin_enter(&uvm_fpageqlock);
		bufcnt = uvmexp.freetarg - uvmexp.free;
		if (bufcnt < 0)
			bufcnt = 0;

		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d",
		    uvmexp.free, uvmexp.freetarg, 0,0);

		needsfree = uvmexp.free + uvmexp.paging < uvmexp.freetarg;
		needsscan = needsfree || uvmpdpol_needsscan_p();

		/*
		 * scan if needed
		 */
		if (needsscan) {
			mutex_spin_exit(&uvm_fpageqlock);
			uvmpd_scan();
			mutex_spin_enter(&uvm_fpageqlock);
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */
		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
			uvm_pagedaemon_waiters = 0;
		}
		mutex_spin_exit(&uvm_fpageqlock);

		/*
		 * scan done.  unlock page queues (the only lock we are holding)
		 */
		mutex_exit(&uvm_pageqlock);

		/*
		 * if we don't need free memory, we're done.
		 */

		if (!needsfree)
			continue;

		/*
		 * start draining pool resources now that we're not
		 * holding any locks.
		 */
		pool_drain_start(&pp, &where);

		/*
		 * kill unused metadata buffers.
		 */
		mutex_enter(&bufcache_lock);
		buf_drain(bufcnt << PAGE_SHIFT);
		mutex_exit(&bufcache_lock);

		mutex_enter(&uvm_reclaim_lock);
		SLIST_FOREACH(hook, &uvm_reclaim_list, uvm_reclaim_next) {
			(*hook->uvm_reclaim_hook)();
		}
		mutex_exit(&uvm_reclaim_lock);

		/*
		 * complete draining the pools.
		 */
		pool_drain_end(pp, where);
	}
	/*NOTREACHED*/
}

/*
 * uvm_aiodone_worker: a workqueue callback for the aiodone daemon.
 */

void
uvm_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (void *)wk;

	KASSERT(&bp->b_work == wk);

	/*
	 * process an i/o that's done.
	 */

	(*bp->b_iodone)(bp);
}

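/*
 * uvm_pageout_start: note that npages pageouts have been started, so that
 * the free-page accounting (uvmexp.paging) reflects I/O still in flight.
 */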
void
uvm_pageout_start(int npages)
{

	mutex_spin_enter(&uvm_fpageqlock);
	uvmexp.paging += npages;
	mutex_spin_exit(&uvm_fpageqlock);
}

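/*
 * uvm_pageout_done: note that npages pageouts have completed, and wake
 * the pagedaemon or any LWPs sleeping in uvm_wait() as appropriate.
 */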
void
uvm_pageout_done(int npages)
{

	mutex_spin_enter(&uvm_fpageqlock);
	KASSERT(uvmexp.paging >= npages);
	uvmexp.paging -= npages;

	/*
	 * wake up either the pagedaemon or LWPs waiting for it.
	 */

	if (uvmexp.free <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else {
		wakeup(&uvmexp.free);
		uvm_pagedaemon_waiters = 0;
	}
	mutex_spin_exit(&uvm_fpageqlock);
}

/*
 * uvmpd_trylockowner: trylock the page's owner.
 *
 * => called with pageq locked.
 * => resolve orphaned O->A loaned page.
 * => return the locked mutex on success.  otherwise, return NULL.
 */

kmutex_t *
uvmpd_trylockowner(struct vm_page *pg)
{
	kmutex_t *lock;

	KASSERT(mutex_owned(&uvm_pageqlock));
	lock = uvm_page_getlock(pg);
	KASSERT(lock != NULL);
	if (!mutex_tryenter(lock)) {
		return NULL;
	}
	uvm_loan_resolve_orphan(pg, true);
	return lock;
}

#if defined(VMSWAP)
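/*
 * swapcluster: state for batching swap-backed pageouts into a single
 * multi-page uvm_swap_put().  swc_slot is the base swap slot of the
 * cluster (0 if none is allocated), swc_nallocated the number of slots
 * allocated, and swc_nused how many of them hold pages so far.
 */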
struct swapcluster {
	int swc_slot;
	int swc_nallocated;
	int swc_nused;
	struct vm_page *swc_pages[howmany(MAXPHYS, MIN_PAGE_SIZE)];
};

static void
swapcluster_init(struct swapcluster *swc)
{

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

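/*
 * swapcluster_allocslots: allocate a block of swap slots for a new
 * cluster, unless one is already in progress.  returns 0 on success
 * or ENOMEM if no swap space could be allocated.
 */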
static int
swapcluster_allocslots(struct swapcluster *swc)
{
	int slot;
	int npages;

	if (swc->swc_slot != 0) {
		return 0;
	}

	/*
	 * even with strange MAXPHYS, the shift
	 * implicitly rounds down to a page.
	 */
	npages = MAXPHYS >> PAGE_SHIFT;
	slot = uvm_swap_alloc(&npages, true);
	if (slot == 0) {
		return ENOMEM;
	}
	swc->swc_slot = slot;
	swc->swc_nallocated = npages;
	swc->swc_nused = 0;

	return 0;
}

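/*
 * swapcluster_add: add a page to the current cluster, recording the
 * chosen swap slot in the owning anon or aobj.  returns ENOMEM if the
 * slot could not be recorded in the aobj.
 */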
static int
swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
{
	int slot;
	struct uvm_object *uobj;

	KASSERT(swc->swc_slot != 0);
	KASSERT(swc->swc_nused < swc->swc_nallocated);
	KASSERT((pg->pqflags & PQ_SWAPBACKED) != 0);

	slot = swc->swc_slot + swc->swc_nused;
	uobj = pg->uobject;
	if (uobj == NULL) {
		KASSERT(mutex_owned(pg->uanon->an_lock));
		pg->uanon->an_swslot = slot;
	} else {
		int result;

		KASSERT(mutex_owned(uobj->vmobjlock));
		result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
		if (result == -1) {
			return ENOMEM;
		}
	}
	swc->swc_pages[swc->swc_nused] = pg;
	swc->swc_nused++;

	return 0;
}

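/*
 * swapcluster_flush: start pageout of the pages accumulated in the
 * cluster.  if "now" is false and the cluster is not yet full, do
 * nothing and keep accumulating pages instead.
 */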
static void
swapcluster_flush(struct swapcluster *swc, bool now)
{
	int slot;
	int nused;
	int nallocated;
	int error;

	if (swc->swc_slot == 0) {
		return;
	}
	KASSERT(swc->swc_nused <= swc->swc_nallocated);

	slot = swc->swc_slot;
	nused = swc->swc_nused;
	nallocated = swc->swc_nallocated;

	/*
	 * if this is the final pageout we could have a few
	 * unused swap blocks.  if so, free them now.
	 */

	if (nused < nallocated) {
		if (!now) {
			return;
		}
		uvm_swap_free(slot + nused, nallocated - nused);
	}

	/*
	 * now start the pageout.
	 */

	if (nused > 0) {
		uvmexp.pdpageouts++;
		uvm_pageout_start(nused);
		error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
		KASSERT(error == 0 || error == ENOMEM);
	}

	/*
	 * zero swslot to indicate that we are
	 * no longer building a swap-backed cluster.
	 */

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

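/* swapcluster_nused: number of pages currently held in the cluster. */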
static int
swapcluster_nused(struct swapcluster *swc)
{

	return swc->swc_nused;
}

/*
 * uvmpd_dropswap: free any swap allocated to this page.
 *
 * => called with owner locked.
 * => return true if a page had an associated slot.
 */

static bool
uvmpd_dropswap(struct vm_page *pg)
{
	bool result = false;
	struct vm_anon *anon = pg->uanon;

	if ((pg->pqflags & PQ_ANON) && anon->an_swslot) {
		uvm_swap_free(anon->an_swslot, 1);
		anon->an_swslot = 0;
		uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
		result = true;
	} else if (pg->pqflags & PQ_AOBJ) {
		int slot = uao_set_swslot(pg->uobject,
		    pg->offset >> PAGE_SHIFT, 0);
		if (slot) {
			uvm_swap_free(slot, 1);
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
			result = true;
		}
	}

	return result;
}

/*
 * uvmpd_trydropswap: try to free any swap allocated to this page.
 *
 * => return true if a slot is successfully freed.
 */

bool
uvmpd_trydropswap(struct vm_page *pg)
{
	kmutex_t *slock;
	bool result;

	if ((pg->flags & PG_BUSY) != 0) {
		return false;
	}

	/*
	 * lock the page's owner.
	 */

	slock = uvmpd_trylockowner(pg);
	if (slock == NULL) {
		return false;
	}

	/*
	 * skip this page if it's busy.
	 */

	if ((pg->flags & PG_BUSY) != 0) {
		mutex_exit(slock);
		return false;
	}

	result = uvmpd_dropswap(pg);

	mutex_exit(slock);

	return result;
}

#endif /* defined(VMSWAP) */

/*
 * uvmpd_scan_queue: scan a replace candidate list for pages
 * to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 */

static void
uvmpd_scan_queue(void)
{
	struct vm_page *p;
	struct uvm_object *uobj;
	struct vm_anon *anon;
#if defined(VMSWAP)
	struct swapcluster swc;
#endif /* defined(VMSWAP) */
	int dirtyreacts;
	int lockownerfail;
	kmutex_t *slock;
	UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);

#if defined(VMSWAP)
	swapcluster_init(&swc);
#endif /* defined(VMSWAP) */

	dirtyreacts = 0;
	lockownerfail = 0;
	uvmpdpol_scaninit();

	while (/* CONSTCOND */ 1) {

		/*
		 * see if we've met the free target.
		 */

		if (uvmexp.free + uvmexp.paging
#if defined(VMSWAP)
		    + swapcluster_nused(&swc)
#endif /* defined(VMSWAP) */
		    >= uvmexp.freetarg << 2 ||
		    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
			UVMHIST_LOG(pdhist,"  met free target: "
			    "exit loop", 0, 0, 0, 0);
			break;
		}

		p = uvmpdpol_selectvictim();
		if (p == NULL) {
			break;
		}
		KASSERT(uvmpdpol_pageisqueued_p(p));
		KASSERT(p->wire_count == 0);

		/*
		 * we are below target and have a new page to consider.
		 */

		anon = p->uanon;
		uobj = p->uobject;

		/*
		 * first we attempt to lock the object that this page
		 * belongs to.  if our attempt fails we skip on to
		 * the next page (no harm done).  it is important to
		 * "try" locking the object as we are locking in the
		 * wrong order (pageq -> object) and we don't want to
		 * deadlock.
		 *
		 * the only time we expect to see an ownerless page
		 * (i.e. a page with no uobject and !PQ_ANON) is if an
		 * anon has loaned a page from a uvm_object and the
		 * uvm_object has dropped the ownership.  in that
		 * case, the anon can "take over" the loaned page
		 * and make it its own.
		 */

		slock = uvmpd_trylockowner(p);
		if (slock == NULL) {
			/*
			 * yield the cpu to give the LWP holding the lock
			 * a chance to run.  otherwise we can busy-loop
			 * for too long if the page queue is filled with
			 * many pages from only a few objects.
			 */
			lockownerfail++;
			if (lockownerfail > UVMPD_NUMTRYLOCKOWNER) {
				mutex_obj_pause(uvm_page_getlock(p),
				    &uvm_pageqlock);
				lockownerfail = 0;
			}
			continue;
		}
		if (p->flags & PG_BUSY) {
			mutex_exit(slock);
			uvmexp.pdbusy++;
			continue;
		}

		/* does the page belong to an object? */
		if (uobj != NULL) {
			uvmexp.pdobscan++;
		} else {
#if defined(VMSWAP)
			KASSERT(anon != NULL);
			uvmexp.pdanscan++;
#else /* defined(VMSWAP) */
			panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
		}

		/*
		 * we now have the object and the page queues locked.
		 * if the page is not swap-backed, call the object's
		 * pager to flush and free the page.
		 */

#if defined(READAHEAD_STATS)
		if ((p->pqflags & PQ_READAHEAD) != 0) {
			p->pqflags &= ~PQ_READAHEAD;
			uvm_ra_miss.ev_count++;
		}
#endif /* defined(READAHEAD_STATS) */

		if ((p->pqflags & PQ_SWAPBACKED) == 0) {
			KASSERT(uobj != NULL);
			mutex_exit(&uvm_pageqlock);
			(void) (uobj->pgops->pgo_put)(uobj, p->offset,
			    p->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
			mutex_enter(&uvm_pageqlock);
			continue;
		}

		/*
		 * the page is swap-backed.  remove all the permissions
		 * from the page so we can sync the modified info
		 * without any race conditions.  if the page is clean
		 * we can free it now and continue.
		 */

		pmap_page_protect(p, VM_PROT_NONE);
		if (uvm_pagegetdirty(p) == UVM_PAGE_STATUS_UNKNOWN) {
			if (pmap_clear_modify(p)) {
				uvm_pagemarkdirty(p, UVM_PAGE_STATUS_DIRTY);
			} else {
				uvm_pagemarkdirty(p, UVM_PAGE_STATUS_CLEAN);
			}
		}
		if (uvm_pagegetdirty(p) != UVM_PAGE_STATUS_DIRTY) {
			int slot;
			int pageidx;

			pageidx = p->offset >> PAGE_SHIFT;
			uvm_pagefree(p);
			uvmexp.pdfreed++;

			/*
			 * for anons, we need to remove the page
			 * from the anon ourselves.  for aobjs,
			 * pagefree did that for us.
			 */

			if (anon) {
				KASSERT(anon->an_swslot != 0);
				anon->an_page = NULL;
				slot = anon->an_swslot;
			} else {
				slot = uao_find_swslot(uobj, pageidx);
			}
			mutex_exit(slock);

			if (slot > 0) {
				/* this page is now only in swap. */
				mutex_enter(&uvm_swap_data_lock);
				KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
				uvmexp.swpgonly++;
				mutex_exit(&uvm_swap_data_lock);
			}
			continue;
		}

#if defined(VMSWAP)
		/*
		 * this page is dirty, skip it if we'll have met our
		 * free target when all the current pageouts complete.
		 */

		if (uvmexp.free + uvmexp.paging > uvmexp.freetarg << 2) {
			mutex_exit(slock);
			continue;
		}

		/*
		 * free any swap space allocated to the page since
		 * we'll have to write it again with its new data.
		 */

		uvmpd_dropswap(p);

		/*
		 * start new swap pageout cluster (if necessary).
		 *
		 * if swap is full reactivate this page so that
		 * we eventually cycle all pages through the
		 * inactive queue.
		 */

		if (swapcluster_allocslots(&swc)) {
			dirtyreacts++;
			uvm_pageactivate(p);
			mutex_exit(slock);
			continue;
		}

		/*
		 * at this point, we're definitely going to reuse this
		 * page.  mark the page busy and delayed-free.
		 * we should remove the page from the page queues
		 * so we don't ever look at it again.
		 * adjust counters and such.
		 */

		p->flags |= PG_BUSY;
		UVM_PAGE_OWN(p, "scan_queue");

		p->flags |= PG_PAGEOUT;
		uvm_pagedequeue(p);

		uvmexp.pgswapout++;
		mutex_exit(&uvm_pageqlock);

		/*
		 * add the new page to the cluster.
		 */

		if (swapcluster_add(&swc, p)) {
			p->flags &= ~(PG_BUSY|PG_PAGEOUT);
			UVM_PAGE_OWN(p, NULL);
			mutex_enter(&uvm_pageqlock);
			dirtyreacts++;
			uvm_pageactivate(p);
			mutex_exit(slock);
			continue;
		}
		mutex_exit(slock);

		swapcluster_flush(&swc, false);
		mutex_enter(&uvm_pageqlock);

		/*
		 * the pageout is in progress.  bump counters and set up
		 * for the next loop.
		 */

		uvmexp.pdpending++;

#else /* defined(VMSWAP) */
		uvm_pageactivate(p);
		mutex_exit(slock);
#endif /* defined(VMSWAP) */
	}

#if defined(VMSWAP)
	mutex_exit(&uvm_pageqlock);
	swapcluster_flush(&swc, true);
	mutex_enter(&uvm_pageqlock);
#endif /* defined(VMSWAP) */
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

static void
uvmpd_scan(void)
{
	int swap_shortage, pages_freed;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;

	/*
	 * work on meeting our targets.  first we work on our free target
	 * by converting inactive pages into free pages.  then we work on
	 * meeting our inactive target by converting active pages to
	 * inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	pages_freed = uvmexp.pdfreed;
	uvmpd_scan_queue();
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */

	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse >= uvmexp.swpgavail &&
	    !uvm_swapisfull() &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}

	uvmpdpol_balancequeue(swap_shortage);

	/*
	 * if still below the minimum target, try unloading kernel
	 * modules.
	 */

	if (uvmexp.free < uvmexp.freemin) {
		module_thread_kick();
	}
}

/*
 * uvm_reclaimable: decide whether to wait for pagedaemon.
 *
 * => return true if it seems worth doing uvm_wait.
 *
 * XXX should be tunable.
 * XXX should consider pools, etc?
 */

bool
uvm_reclaimable(void)
{
	int filepages;
	int active, inactive;

	/*
	 * if swap is not full, no problem.
	 */

	if (!uvm_swapisfull()) {
		return true;
	}

	/*
	 * file-backed pages can be reclaimed even when swap is full.
	 * if we have more than 1/16 of pageable memory or 5MB, try to reclaim.
	 *
	 * XXX assume the worst case, ie. all wired pages are file-backed.
	 *
	 * XXX should consider other reclaimable memory.
	 * XXX ie. pools, traditional buffer cache.
	 */

	filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
	uvm_estimatepageable(&active, &inactive);
	if (filepages >= MIN((active + inactive) >> 4,
	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
		return true;
	}

	/*
	 * kill the process, fail allocation, etc..
	 */

	return false;
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	uvmpdpol_estimatepageable(active, inactive);
}

void
uvm_reclaim_init(void)
{

	/* Initialize UVM reclaim hooks. */
	mutex_init(&uvm_reclaim_lock, MUTEX_DEFAULT, IPL_NONE);
	SLIST_INIT(&uvm_reclaim_list);
}

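/*
 * uvm_reclaim_hook_add/del: register or unregister a hook for the
 * pagedaemon to call when reclaiming memory from other subsystems.
 */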
void
uvm_reclaim_hook_add(struct uvm_reclaim_hook *hook)
{

	KASSERT(hook != NULL);

	mutex_enter(&uvm_reclaim_lock);
	SLIST_INSERT_HEAD(&uvm_reclaim_list, hook, uvm_reclaim_next);
	mutex_exit(&uvm_reclaim_lock);
}

void
uvm_reclaim_hook_del(struct uvm_reclaim_hook *hook_entry)
{
	struct uvm_reclaim_hook *hook;

	KASSERT(hook_entry != NULL);

	mutex_enter(&uvm_reclaim_lock);
	SLIST_FOREACH(hook, &uvm_reclaim_list, uvm_reclaim_next) {
		if (hook != hook_entry) {
			continue;
		}

		SLIST_REMOVE(&uvm_reclaim_list, hook, uvm_reclaim_hook,
		    uvm_reclaim_next);
		break;
	}

	mutex_exit(&uvm_reclaim_lock);
}