/*	$NetBSD: uvm_pdpolicy_clock.c,v 1.24 2019/12/30 18:08:38 ad Exp $	*/
/*	NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#if defined(PDSIM)

#include "pdsim.h"

#else /* defined(PDSIM) */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.24 2019/12/30 18:08:38 ad Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pdpolicy_impl.h>
#include <uvm/uvm_stat.h>

#endif /* defined(PDSIM) */

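/*
 * per-page queue state, kept in pg->pqflags.  the low two bits record
 * which queue (if any) the page is on; the remaining bits (PQ_TIME)
 * hold the low-order bits of hardclock_ticks at the time the page was
 * last activated, which uvmpdpol_pageactivate() uses to avoid
 * requeueing a page more than about once per second.
 */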
#define	PQ_TIME		0xfffffffc	/* time of last activation */
#define	PQ_INACTIVE	0x00000001	/* page is in inactive list */
#define	PQ_ACTIVE	0x00000002	/* page is in active list */

#if !defined(CLOCK_INACTIVEPCT)
#define	CLOCK_INACTIVEPCT	33
#endif /* !defined(CLOCK_INACTIVEPCT) */

struct uvmpdpol_globalstate {
	kmutex_t lock;			/* lock on state */
					/* <= compiler pads here */
	struct pglist s_activeq		/* allocated pages, in use */
	    __aligned(COHERENCY_UNIT);
	struct pglist s_inactiveq;	/* pages between the clock hands */
	int s_active;
	int s_inactive;
	int s_inactarg;
	struct uvm_pctparam s_anonmin;
	struct uvm_pctparam s_filemin;
	struct uvm_pctparam s_execmin;
	struct uvm_pctparam s_anonmax;
	struct uvm_pctparam s_filemax;
	struct uvm_pctparam s_execmax;
	struct uvm_pctparam s_inactivepct;
};

struct uvmpdpol_scanstate {
	bool ss_anonreact, ss_filereact, ss_execreact;
	struct vm_page ss_marker;
};

static void	uvmpdpol_pageactivate_locked(struct vm_page *);
static void	uvmpdpol_pagedeactivate_locked(struct vm_page *);
static void	uvmpdpol_pagedequeue_locked(struct vm_page *);

static struct uvmpdpol_globalstate pdpol_state __cacheline_aligned;
static struct uvmpdpol_scanstate pdpol_scanstate;

PDPOL_EVCNT_DEFINE(reactexec)
PDPOL_EVCNT_DEFINE(reactfile)
PDPOL_EVCNT_DEFINE(reactanon)

static void
clock_tune(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	s->s_inactarg = UVM_PCTPARAM_APPLY(&s->s_inactivepct,
	    s->s_active + s->s_inactive);
	if (s->s_inactarg <= uvmexp.freetarg) {
		s->s_inactarg = uvmexp.freetarg + 1;
	}
}

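/*
 * for illustration (not normative): with the default CLOCK_INACTIVEPCT
 * of 33, a system with 60000 active and 30000 inactive pages gets an
 * inactive target of 33% of 90000, i.e. 29700 pages.  if that falls at
 * or below uvmexp.freetarg, the target is bumped to freetarg + 1 so
 * the inactive queue always has something for the scanner to work on.
 */
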
void
uvmpdpol_scaninit(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;
	struct uvmpdpol_scanstate *ss = &pdpol_scanstate;
	int t;
	bool anonunder, fileunder, execunder;
	bool anonover, fileover, execover;
	bool anonreact, filereact, execreact;
	int64_t freepg, anonpg, filepg, execpg;

	/*
	 * decide which types of pages we want to reactivate instead of freeing
	 * to keep usage within the minimum and maximum usage limits.
	 */
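	/*
	 * a class of pages is reactivated if it is below its minimum
	 * share of pageable memory, or if it is within its maximum
	 * while some other class has exceeded its own maximum.  if
	 * every class would be reactivated (or all but anon, with no
	 * swap space left to page anon out to), nothing could be
	 * freed, so reactivation is disabled entirely for this scan.
	 */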

	cpu_count_sync_all();
	freepg = uvm_free();
	anonpg = cpu_count_get(CPU_COUNT_ANONPAGES);
	filepg = cpu_count_get(CPU_COUNT_FILEPAGES);
	execpg = cpu_count_get(CPU_COUNT_EXECPAGES);

	mutex_enter(&s->lock);
	t = s->s_active + s->s_inactive + freepg;
	anonunder = anonpg <= UVM_PCTPARAM_APPLY(&s->s_anonmin, t);
	fileunder = filepg <= UVM_PCTPARAM_APPLY(&s->s_filemin, t);
	execunder = execpg <= UVM_PCTPARAM_APPLY(&s->s_execmin, t);
	anonover = anonpg > UVM_PCTPARAM_APPLY(&s->s_anonmax, t);
	fileover = filepg > UVM_PCTPARAM_APPLY(&s->s_filemax, t);
	execover = execpg > UVM_PCTPARAM_APPLY(&s->s_execmax, t);
	anonreact = anonunder || (!anonover && (fileover || execover));
	filereact = fileunder || (!fileover && (anonover || execover));
	execreact = execunder || (!execover && (anonover || fileover));
	if (filereact && execreact && (anonreact || uvm_swapisfull())) {
		anonreact = filereact = execreact = false;
	}
	ss->ss_anonreact = anonreact;
	ss->ss_filereact = filereact;
	ss->ss_execreact = execreact;
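
	/*
	 * the scan is driven by a dummy "marker" page inserted into the
	 * inactive queue.  the marker records our position between calls
	 * and lets the queue lock be dropped while a candidate page is
	 * examined; the PG_MARKER flag distinguishes it from real pages.
	 */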
	memset(&ss->ss_marker, 0, sizeof(ss->ss_marker));
	ss->ss_marker.flags = PG_MARKER;
	TAILQ_INSERT_HEAD(&pdpol_state.s_inactiveq, &ss->ss_marker, pdqueue);
	mutex_exit(&s->lock);
}

void
uvmpdpol_scanfini(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;
	struct uvmpdpol_scanstate *ss = &pdpol_scanstate;

	mutex_enter(&s->lock);
	TAILQ_REMOVE(&pdpol_state.s_inactiveq, &ss->ss_marker, pdqueue);
	mutex_exit(&s->lock);
}

struct vm_page *
uvmpdpol_selectvictim(kmutex_t **plock)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;
	struct uvmpdpol_scanstate *ss = &pdpol_scanstate;
	struct vm_page *pg;
	kmutex_t *lock;

	mutex_enter(&s->lock);
	while (/* CONSTCOND */ 1) {
		struct vm_anon *anon;
		struct uvm_object *uobj;

		pg = TAILQ_NEXT(&ss->ss_marker, pdqueue);
		if (pg == NULL) {
			break;
		}
		KASSERT((pg->flags & PG_MARKER) == 0);
		uvmexp.pdscans++;

		/*
		 * acquire the interlock to stabilize page identity.
		 * if we have caught the page in a state of flux
		 * and it should be dequeued, do it now and then
		 * move on to the next.
		 */
		mutex_enter(&pg->interlock);
		if ((pg->uobject == NULL && pg->uanon == NULL) ||
		    pg->wire_count > 0) {
			mutex_exit(&pg->interlock);
			uvmpdpol_pagedequeue_locked(pg);
			continue;
		}

		/*
		 * now prepare to move on to the next page.
		 */
		TAILQ_REMOVE(&pdpol_state.s_inactiveq, &ss->ss_marker,
		    pdqueue);
		TAILQ_INSERT_AFTER(&pdpol_state.s_inactiveq, pg,
		    &ss->ss_marker, pdqueue);

		/*
		 * enforce the minimum thresholds on different
		 * types of memory usage.  if reusing the current
		 * page would reduce that type of usage below its
		 * minimum, reactivate the page instead and move
		 * on to the next page.
		 */
		anon = pg->uanon;
		uobj = pg->uobject;
		if (uobj && UVM_OBJ_IS_VTEXT(uobj) && ss->ss_execreact) {
			mutex_exit(&pg->interlock);
			uvmpdpol_pageactivate_locked(pg);
			PDPOL_EVCNT_INCR(reactexec);
			continue;
		}
		if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
		    !UVM_OBJ_IS_VTEXT(uobj) && ss->ss_filereact) {
			mutex_exit(&pg->interlock);
			uvmpdpol_pageactivate_locked(pg);
			PDPOL_EVCNT_INCR(reactfile);
			continue;
		}
		if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && ss->ss_anonreact) {
			mutex_exit(&pg->interlock);
			uvmpdpol_pageactivate_locked(pg);
			PDPOL_EVCNT_INCR(reactanon);
			continue;
		}

		/*
		 * try to lock the object that owns the page.
		 *
		 * with the page interlock held, we can drop s->lock, which
		 * could otherwise serve as a barrier to us getting the
		 * object locked, because the owner of the object's lock may
		 * be blocked on s->lock (i.e. a deadlock).
		 *
		 * whatever happens, uvmpd_trylockowner() will release the
		 * interlock.  with the interlock dropped we can then
		 * re-acquire our own lock.  the order is:
		 *
		 *	object -> pdpol -> interlock.
		 */
		mutex_exit(&s->lock);
		lock = uvmpd_trylockowner(pg);
		/* pg->interlock now released */
		mutex_enter(&s->lock);
		if (lock == NULL) {
			/* didn't get it - try the next page. */
			continue;
		}

		/*
		 * move referenced pages back to active queue and skip to
		 * next page.
		 */
		if (pmap_is_referenced(pg)) {
			uvmpdpol_pageactivate_locked(pg);
			uvmexp.pdreact++;
			mutex_exit(lock);
			continue;
		}

		/* we have a potential victim. */
		*plock = lock;
		break;
	}
	mutex_exit(&s->lock);
	return pg;
}

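/*
 * illustrative caller flow (a sketch, not a definition of the pagedaemon):
 * the scan functions above are intended to be used bracketed, roughly as
 *
 *	uvmpdpol_scaninit();
 *	while (more free pages are needed) {
 *		pg = uvmpdpol_selectvictim(&lock);
 *		if (pg == NULL)
 *			break;
 *		... reclaim or page out pg, owner lock held ...
 *		mutex_exit(lock);
 *	}
 *	uvmpdpol_scanfini();
 *
 * uvmpdpol_selectvictim() returns with the page owner's lock held in
 * *plock; the caller is responsible for releasing it.
 */
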
void
uvmpdpol_balancequeue(int swap_shortage)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;
	int inactive_shortage;
	struct vm_page *p, marker;
	kmutex_t *lock;

	/*
	 * we have done the scan to get free pages.  now we work on meeting
	 * our inactive target.
	 */

	memset(&marker, 0, sizeof(marker));
	marker.flags = PG_MARKER;

	mutex_enter(&s->lock);
	TAILQ_INSERT_HEAD(&pdpol_state.s_activeq, &marker, pdqueue);
	for (;;) {
		inactive_shortage =
		    pdpol_state.s_inactarg - pdpol_state.s_inactive;
		if (inactive_shortage <= 0 && swap_shortage <= 0) {
			break;
		}
		p = TAILQ_NEXT(&marker, pdqueue);
		if (p == NULL) {
			break;
		}
		KASSERT((p->flags & PG_MARKER) == 0);

		/*
		 * acquire the interlock to stabilize page identity.
		 * if we have caught the page in a state of flux
		 * and it should be dequeued, do it now and then
		 * move on to the next.
		 */
		mutex_enter(&p->interlock);
		if ((p->uobject == NULL && p->uanon == NULL) ||
		    p->wire_count > 0) {
			mutex_exit(&p->interlock);
			uvmpdpol_pagedequeue_locked(p);
			continue;
		}

		/*
		 * now prepare to move on to the next page.
		 */

		TAILQ_REMOVE(&pdpol_state.s_activeq, &marker, pdqueue);
		TAILQ_INSERT_AFTER(&pdpol_state.s_activeq, p, &marker,
		    pdqueue);

		/*
		 * try to lock the object that owns the page.  see comments
		 * in uvmpdpol_selectvictim().
		 */
		mutex_exit(&s->lock);
		lock = uvmpd_trylockowner(p);
		/* p->interlock now released */
		mutex_enter(&s->lock);
		if (lock == NULL) {
			/* didn't get it - try the next page. */
			continue;
		}

		/*
		 * if there's a shortage of swap slots, try to free
		 * this page's swap slot.
		 */
		if (swap_shortage > 0 && (p->flags & PG_SWAPBACKED) != 0 &&
		    (p->flags & PG_BUSY) == 0) {
			if (uvmpd_dropswap(p)) {
				swap_shortage--;
			}
		}

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */

		if (inactive_shortage > 0) {
			uvmpdpol_pagedeactivate_locked(p);
			uvmexp.pddeact++;
			inactive_shortage--;
		}
		mutex_exit(lock);
	}
	TAILQ_REMOVE(&pdpol_state.s_activeq, &marker, pdqueue);
	mutex_exit(&s->lock);
}
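
/*
 * a note on the function above: uvmpdpol_balancequeue() is, in effect,
 * the second hand of the two-handed clock.  it walks the active queue,
 * deactivating pages until the inactive target computed by clock_tune()
 * is met and, when swap space is short, releasing swap slots from
 * swap-backed pages along the way.
 */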

static void
uvmpdpol_pagedeactivate_locked(struct vm_page *pg)
{

	KASSERT(uvm_page_locked_p(pg));

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pdqueue);
		pg->pqflags &= ~(PQ_ACTIVE | PQ_TIME);
		KASSERT(pdpol_state.s_active > 0);
		pdpol_state.s_active--;
	}
	if ((pg->pqflags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		pmap_clear_reference(pg);
		TAILQ_INSERT_TAIL(&pdpol_state.s_inactiveq, pg, pdqueue);
		pg->pqflags |= PQ_INACTIVE;
		pdpol_state.s_inactive++;
	}
}

void
uvmpdpol_pagedeactivate(struct vm_page *pg)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	mutex_enter(&s->lock);
	uvmpdpol_pagedeactivate_locked(pg);
	mutex_exit(&s->lock);
}

static void
uvmpdpol_pageactivate_locked(struct vm_page *pg)
{

	uvmpdpol_pagedequeue_locked(pg);
	TAILQ_INSERT_TAIL(&pdpol_state.s_activeq, pg, pdqueue);
	pg->pqflags = PQ_ACTIVE | (hardclock_ticks & PQ_TIME);
	pdpol_state.s_active++;
}

void
uvmpdpol_pageactivate(struct vm_page *pg)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	/* Safety: PQ_ACTIVE clear also tells us if it is not enqueued. */
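	/*
	 * the PQ_TIME comparison below throttles requeueing: the page
	 * is only moved to the tail of the active queue if it is not
	 * already active, or if its last recorded activation is at
	 * least hz ticks (about one second) old.
	 */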
	if ((pg->pqflags & PQ_ACTIVE) == 0 ||
	    ((hardclock_ticks & PQ_TIME) - (pg->pqflags & PQ_TIME)) >= hz) {
		mutex_enter(&s->lock);
		uvmpdpol_pageactivate_locked(pg);
		mutex_exit(&s->lock);
	}
}

static void
uvmpdpol_pagedequeue_locked(struct vm_page *pg)
{

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pdqueue);
		pg->pqflags &= ~(PQ_ACTIVE | PQ_TIME);
		KASSERT(pdpol_state.s_active > 0);
		pdpol_state.s_active--;
	} else if (pg->pqflags & PQ_INACTIVE) {
		TAILQ_REMOVE(&pdpol_state.s_inactiveq, pg, pdqueue);
		pg->pqflags &= ~PQ_INACTIVE;
		KASSERT(pdpol_state.s_inactive > 0);
		pdpol_state.s_inactive--;
	}
}

void
uvmpdpol_pagedequeue(struct vm_page *pg)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	mutex_enter(&s->lock);
	uvmpdpol_pagedequeue_locked(pg);
	mutex_exit(&s->lock);
}

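/*
 * in this policy, enqueueing a page is identical to activating it:
 * new arrivals go straight to the tail of the active queue.
 */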
void
uvmpdpol_pageenqueue(struct vm_page *pg)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	mutex_enter(&s->lock);
	uvmpdpol_pageactivate_locked(pg);
	mutex_exit(&s->lock);
}

void
uvmpdpol_anfree(struct vm_anon *an)
{
}

bool
uvmpdpol_pageisqueued_p(struct vm_page *pg)
{

	/* Safe to test unlocked due to page life-cycle. */
	return (pg->pqflags & (PQ_ACTIVE | PQ_INACTIVE)) != 0;
}

void
uvmpdpol_estimatepageable(int *active, int *inactive)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	mutex_enter(&s->lock);
	if (active) {
		*active = pdpol_state.s_active;
	}
	if (inactive) {
		*inactive = pdpol_state.s_inactive;
	}
	mutex_exit(&s->lock);
}

#if !defined(PDSIM)
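/*
 * sanity check for the minimum-percentage sysctls: the three minimums
 * must together stay at or below 95% so that at least 5% of pageable
 * memory is never pinned by the reservations.  with the defaults set
 * in uvmpdpol_init() (anonmin 10, filemin 10, execmin 5) the sum is 25.
 */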
static int
min_check(struct uvm_pctparam *pct, int t)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;
	int total = t;

	if (pct != &s->s_anonmin) {
		total += uvm_pctparam_get(&s->s_anonmin);
	}
	if (pct != &s->s_filemin) {
		total += uvm_pctparam_get(&s->s_filemin);
	}
	if (pct != &s->s_execmin) {
		total += uvm_pctparam_get(&s->s_execmin);
	}
	if (total > 95) {
		return EINVAL;
	}
	return 0;
}
#endif /* !defined(PDSIM) */

void
uvmpdpol_init(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	mutex_init(&s->lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&s->s_activeq);
	TAILQ_INIT(&s->s_inactiveq);
	uvm_pctparam_init(&s->s_inactivepct, CLOCK_INACTIVEPCT, NULL);
	uvm_pctparam_init(&s->s_anonmin, 10, min_check);
	uvm_pctparam_init(&s->s_filemin, 10, min_check);
	uvm_pctparam_init(&s->s_execmin,  5, min_check);
	uvm_pctparam_init(&s->s_anonmax, 80, NULL);
	uvm_pctparam_init(&s->s_filemax, 50, NULL);
	uvm_pctparam_init(&s->s_execmax, 30, NULL);
}

void
uvmpdpol_reinit(void)
{
}

bool
uvmpdpol_needsscan_p(void)
{

	/* This must be an unlocked check: can be called from interrupt. */
	return pdpol_state.s_inactive < pdpol_state.s_inactarg;
}

void
uvmpdpol_tune(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	mutex_enter(&s->lock);
	clock_tune();
	mutex_exit(&s->lock);
}

#if !defined(PDSIM)

#include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */

void
uvmpdpol_sysctlsetup(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	uvm_pctparam_createsysctlnode(&s->s_anonmin, "anonmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemin, "filemin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmin, "execmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached executable data"));

	uvm_pctparam_createsysctlnode(&s->s_anonmax, "anonmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for "
	    "anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemax, "filemax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmax, "execmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "executable data"));

	uvm_pctparam_createsysctlnode(&s->s_inactivepct, "inactivepct",
	    SYSCTL_DESCR("Size of the inactive queue as a percentage "
	    "of the entire (active + inactive) queue"));
}
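
/*
 * these knobs appear under the "vm" sysctl tree on NetBSD (e.g.
 * vm.anonmin, vm.filemax, vm.inactivepct) and may be adjusted at
 * runtime, for example (illustrative):
 *
 *	# sysctl -w vm.anonmin=20
 *
 * min_check() above rejects settings that push the combined minimums
 * past 95%.
 */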

#endif /* !defined(PDSIM) */

#if defined(PDSIM)
void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	/* XXX */
#endif /* defined(DEBUG) */
}
#endif /* defined(PDSIM) */