      1  1.15.16.1      matt /*	$NetBSD: uvm_pdpolicy_clockpro.c,v 1.15.16.1 2012/02/09 03:05:01 matt Exp $	*/
      2        1.2      yamt 
      3        1.2      yamt /*-
      4        1.2      yamt  * Copyright (c)2005, 2006 YAMAMOTO Takashi,
      5        1.2      yamt  * All rights reserved.
      6        1.2      yamt  *
      7        1.2      yamt  * Redistribution and use in source and binary forms, with or without
      8        1.2      yamt  * modification, are permitted provided that the following conditions
      9        1.2      yamt  * are met:
     10        1.2      yamt  * 1. Redistributions of source code must retain the above copyright
     11        1.2      yamt  *    notice, this list of conditions and the following disclaimer.
     12        1.2      yamt  * 2. Redistributions in binary form must reproduce the above copyright
     13        1.2      yamt  *    notice, this list of conditions and the following disclaimer in the
     14        1.2      yamt  *    documentation and/or other materials provided with the distribution.
     15        1.2      yamt  *
     16        1.2      yamt  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     17        1.2      yamt  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     18        1.2      yamt  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     19        1.2      yamt  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     20        1.2      yamt  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     21        1.2      yamt  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     22        1.2      yamt  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     23        1.2      yamt  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     24        1.2      yamt  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     25        1.2      yamt  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     26        1.2      yamt  * SUCH DAMAGE.
     27        1.2      yamt  */
     28        1.2      yamt 
     29        1.2      yamt /*
     30        1.2      yamt  * CLOCK-Pro replacement policy:
     31        1.2      yamt  *	http://www.cs.wm.edu/hpcs/WWW/HTML/publications/abs05-3.html
     32        1.2      yamt  *
      33        1.2      yamt  * approximation of the list of non-resident pages using a hash:
     34        1.2      yamt  *	http://linux-mm.org/ClockProApproximation
     35        1.2      yamt  */
     36        1.2      yamt 
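/*
 * Rough shape of this implementation: resident pages are kept on
 * per-pggroup queues (struct uvmpdpol_groupstate below), while pages in
 * their non-resident test period are remembered only as 32-bit cookies
 * in a single global array of fixed-size hash buckets (struct bucket).
 * cycle_target is a global clock advanced by the hot hand and used to
 * age those buckets (nonresident_rotate).
 */
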
     37        1.2      yamt /* #define	CLOCKPRO_DEBUG */
     38        1.2      yamt 
     39        1.2      yamt #if defined(PDSIM)
     40        1.2      yamt 
     41        1.2      yamt #include "pdsim.h"
     42        1.2      yamt 
     43        1.2      yamt #else /* defined(PDSIM) */
     44        1.2      yamt 
     45        1.2      yamt #include <sys/cdefs.h>
     46  1.15.16.1      matt __KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clockpro.c,v 1.15.16.1 2012/02/09 03:05:01 matt Exp $");
     47        1.2      yamt 
     48        1.2      yamt #include "opt_ddb.h"
     49        1.2      yamt 
     50        1.2      yamt #include <sys/param.h>
     51        1.2      yamt #include <sys/proc.h>
     52        1.2      yamt #include <sys/systm.h>
     53        1.2      yamt #include <sys/kernel.h>
     54        1.2      yamt #include <sys/hash.h>
     55        1.2      yamt 
     56        1.2      yamt #include <uvm/uvm.h>
     57        1.2      yamt #include <uvm/uvm_pdpolicy.h>
     58        1.2      yamt #include <uvm/uvm_pdpolicy_impl.h>
     59        1.2      yamt 
     60        1.2      yamt #if ((__STDC_VERSION__ - 0) >= 199901L)
     61        1.2      yamt #define	DPRINTF(...)	/* nothing */
     62        1.2      yamt #define	WARN(...)	printf(__VA_ARGS__)
     63        1.2      yamt #else /* ((__STDC_VERSION__ - 0) >= 199901L) */
     64        1.2      yamt #define	DPRINTF(a...)	/* nothing */	/* GCC */
     65        1.2      yamt #define	WARN(a...)	printf(a)
     66        1.2      yamt #endif /* ((__STDC_VERSION__ - 0) >= 199901L) */
     67        1.2      yamt 
     68        1.2      yamt #define	dump(a)		/* nothing */
     69        1.2      yamt 
     70        1.2      yamt #undef	USEONCE2
     71        1.2      yamt #define	LISTQ
     72        1.2      yamt #undef	ADAPTIVE
     73        1.2      yamt 
     74        1.2      yamt #endif /* defined(PDSIM) */
     75        1.2      yamt 
     76        1.2      yamt #if !defined(CLOCKPRO_COLDPCT)
     77        1.2      yamt #define	CLOCKPRO_COLDPCT	10
     78        1.2      yamt #endif /* !defined(CLOCKPRO_COLDPCT) */
     79        1.2      yamt 
     80        1.2      yamt #define	CLOCKPRO_COLDPCTMAX	90
     81        1.2      yamt 
     82        1.2      yamt #if !defined(CLOCKPRO_HASHFACTOR)
     83        1.2      yamt #define	CLOCKPRO_HASHFACTOR	2
     84        1.2      yamt #endif /* !defined(CLOCKPRO_HASHFACTOR) */
     85        1.2      yamt 
     86        1.2      yamt #define	CLOCKPRO_NEWQMIN	((1024 * 1024) >> PAGE_SHIFT)	/* XXX */
     87        1.2      yamt 
     88        1.2      yamt int clockpro_hashfactor = CLOCKPRO_HASHFACTOR;
     89        1.2      yamt 
     90        1.2      yamt PDPOL_EVCNT_DEFINE(nresrecordobj)
     91        1.2      yamt PDPOL_EVCNT_DEFINE(nresrecordanon)
     92        1.9      yamt PDPOL_EVCNT_DEFINE(nreslookupobj)
     93        1.9      yamt PDPOL_EVCNT_DEFINE(nreslookupanon)
     94        1.2      yamt PDPOL_EVCNT_DEFINE(nresfoundobj)
     95        1.2      yamt PDPOL_EVCNT_DEFINE(nresfoundanon)
     96        1.2      yamt PDPOL_EVCNT_DEFINE(nresanonfree)
     97        1.2      yamt PDPOL_EVCNT_DEFINE(nresconflict)
     98        1.2      yamt PDPOL_EVCNT_DEFINE(nresoverwritten)
     99        1.2      yamt PDPOL_EVCNT_DEFINE(nreshandhot)
    100        1.2      yamt 
    101        1.2      yamt PDPOL_EVCNT_DEFINE(hhottakeover)
    102        1.2      yamt PDPOL_EVCNT_DEFINE(hhotref)
    103        1.2      yamt PDPOL_EVCNT_DEFINE(hhotunref)
    104        1.2      yamt PDPOL_EVCNT_DEFINE(hhotcold)
    105        1.2      yamt PDPOL_EVCNT_DEFINE(hhotcoldtest)
    106        1.2      yamt 
    107        1.2      yamt PDPOL_EVCNT_DEFINE(hcoldtakeover)
    108        1.2      yamt PDPOL_EVCNT_DEFINE(hcoldref)
    109        1.2      yamt PDPOL_EVCNT_DEFINE(hcoldunref)
    110        1.2      yamt PDPOL_EVCNT_DEFINE(hcoldreftest)
    111        1.2      yamt PDPOL_EVCNT_DEFINE(hcoldunreftest)
    112        1.2      yamt PDPOL_EVCNT_DEFINE(hcoldunreftestspeculative)
    113        1.2      yamt PDPOL_EVCNT_DEFINE(hcoldhot)
    114        1.2      yamt 
    115        1.2      yamt PDPOL_EVCNT_DEFINE(speculativeenqueue)
    116        1.2      yamt PDPOL_EVCNT_DEFINE(speculativehit1)
    117        1.2      yamt PDPOL_EVCNT_DEFINE(speculativehit2)
    118        1.2      yamt PDPOL_EVCNT_DEFINE(speculativemiss)
    119        1.2      yamt 
    120        1.2      yamt #define	PQ_REFERENCED	PQ_PRIVATE1
    121        1.2      yamt #define	PQ_HOT		PQ_PRIVATE2
    122        1.2      yamt #define	PQ_TEST		PQ_PRIVATE3
    123        1.2      yamt #define	PQ_INITIALREF	PQ_PRIVATE4
    124        1.2      yamt #if PQ_PRIVATE6 != PQ_PRIVATE5 * 2 || PQ_PRIVATE7 != PQ_PRIVATE6 * 2
    125        1.2      yamt #error PQ_PRIVATE
    126        1.2      yamt #endif
    127        1.2      yamt #define	PQ_QMASK	(PQ_PRIVATE5|PQ_PRIVATE6|PQ_PRIVATE7)
    128        1.2      yamt #define	PQ_QFACTOR	PQ_PRIVATE5
    129        1.2      yamt #define	PQ_SPECULATIVE	PQ_PRIVATE8
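
/*
 * The queue index of a page (CLOCKPRO_NOQUEUE..CLOCKPRO_NQUEUE) is kept
 * in the three consecutive pqflags bits PQ_PRIVATE5..PQ_PRIVATE7;
 * PQ_QFACTOR is the lowest of those bits, so clockpro_setq() stores the
 * index by multiplication and clockpro_getq() recovers it by division.
 * For illustration, index 3 is stored as 3 * PQ_QFACTOR, i.e.
 * PQ_PRIVATE5|PQ_PRIVATE6.  The #error above guards the assumption that
 * the three bits are adjacent.
 */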
    130        1.2      yamt 
    131        1.2      yamt #define	CLOCKPRO_NOQUEUE	0
    132        1.2      yamt #define	CLOCKPRO_NEWQ		1	/* small queue to clear initial ref. */
    133        1.2      yamt #if defined(LISTQ)
    134  1.15.16.1      matt #define	CLOCKPRO_COLDQ(gs)	2
    135  1.15.16.1      matt #define	CLOCKPRO_HOTQ(gs)	3
    136        1.2      yamt #else /* defined(LISTQ) */
    137  1.15.16.1      matt #define	CLOCKPRO_COLDQ(gs)	(2 + (gs)->gs_coldqidx)	/* XXX */
    138  1.15.16.1      matt #define	CLOCKPRO_HOTQ(gs)	(3 - (gs)->gs_coldqidx)	/* XXX */
    139        1.2      yamt #endif /* defined(LISTQ) */
    140        1.2      yamt #define	CLOCKPRO_LISTQ		4
    141        1.2      yamt #define	CLOCKPRO_NQUEUE		4
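
/*
 * Rough queue roles, as used below: NEWQ holds recently enqueued pages
 * until their initial reference has been cleared; COLDQ and HOTQ are
 * the lists walked by the cold and hot hands (fixed indices with LISTQ,
 * otherwise the two are swapped by clockpro_switchqueue()); LISTQ is
 * used only by the LISTQ variant, as a staging list for cold pages the
 * hot hand has already passed over.
 */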
    142        1.2      yamt 
    143        1.2      yamt static inline void
    144        1.2      yamt clockpro_setq(struct vm_page *pg, int qidx)
    145        1.2      yamt {
    146        1.2      yamt 	KASSERT(qidx >= CLOCKPRO_NOQUEUE);
    147        1.2      yamt 	KASSERT(qidx <= CLOCKPRO_NQUEUE);
    148        1.2      yamt 
    149        1.2      yamt 	pg->pqflags = (pg->pqflags & ~PQ_QMASK) | (qidx * PQ_QFACTOR);
    150        1.2      yamt }
    151        1.2      yamt 
    152        1.2      yamt static inline int
    153        1.2      yamt clockpro_getq(struct vm_page *pg)
    154        1.2      yamt {
    155        1.2      yamt 	int qidx;
    156        1.2      yamt 
    157        1.2      yamt 	qidx = (pg->pqflags & PQ_QMASK) / PQ_QFACTOR;
    158        1.2      yamt 	KASSERT(qidx >= CLOCKPRO_NOQUEUE);
    159        1.2      yamt 	KASSERT(qidx <= CLOCKPRO_NQUEUE);
    160        1.2      yamt 	return qidx;
    161        1.2      yamt }
    162        1.2      yamt 
    163        1.2      yamt typedef struct {
    164        1.2      yamt 	struct pglist q_q;
    165  1.15.16.1      matt 	u_int q_len;
    166        1.2      yamt } pageq_t;
    167        1.2      yamt 
    168  1.15.16.1      matt typedef uint32_t nonres_cookie_t;
    169  1.15.16.1      matt #define	NONRES_COOKIE_INVAL	0
    170        1.2      yamt 
    171  1.15.16.1      matt #define	BUCKETSIZE	14
    172  1.15.16.1      matt struct bucket {
    173  1.15.16.1      matt 	u_int cycle;
    174  1.15.16.1      matt 	u_int cur;
    175  1.15.16.1      matt 	nonres_cookie_t pages[BUCKETSIZE];
    176  1.15.16.1      matt };
    177        1.2      yamt 
    178  1.15.16.1      matt static size_t cycle_target;
    179  1.15.16.1      matt static size_t cycle_target_frac;
    180  1.15.16.1      matt static size_t hashsize;
    181  1.15.16.1      matt static struct bucket *buckets;
    182  1.15.16.1      matt 
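/*
 * Per-pggroup pagedaemon state.  Rough meaning of the fields, as used
 * below:
 *	gs_q		the CLOCK-Pro page queues (CLOCKPRO_*Q above)
 *	gs_pgrp		back pointer to the owning uvm_pggroup
 *	gs_npages	pages currently managed for this group
 *	gs_ncold	how many of those pages are cold
 *	gs_coldtarget	desired number of cold pages (clockpro_tune)
 *	gs_newqlenmax	maximum length of the new queue
 *	gs_coldqidx	which clock queue is currently the cold one
 *			(only without LISTQ)
 *	gs_nscanned	number of pages scanned (bookkeeping)
 *	gs_coldadj	adaptive adjustment accumulated for the cold target
 */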
    183  1.15.16.1      matt struct uvmpdpol_groupstate {
    184  1.15.16.1      matt 	pageq_t gs_q[CLOCKPRO_NQUEUE];
    185  1.15.16.1      matt 	struct uvm_pggroup *gs_pgrp;
    186  1.15.16.1      matt 	u_int gs_npages;
    187  1.15.16.1      matt 	u_int gs_coldtarget;
    188  1.15.16.1      matt 	u_int gs_ncold;
    189  1.15.16.1      matt 	u_int gs_newqlenmax;
    190  1.15.16.1      matt #if !defined(LISTQ)
    191  1.15.16.1      matt 	u_int gs_coldqidx;
    192  1.15.16.1      matt #endif
    193  1.15.16.1      matt 	u_int gs_nscanned;
    194  1.15.16.1      matt 	u_int gs_coldadj;
    195  1.15.16.1      matt };
    196  1.15.16.1      matt 
    197  1.15.16.1      matt struct clockpro_state {
    198  1.15.16.1      matt 	struct uvmpdpol_groupstate *s_gs;
    199        1.2      yamt 	struct uvm_pctparam s_coldtargetpct;
    200        1.2      yamt };
    201        1.2      yamt 
    202  1.15.16.1      matt static inline pageq_t *
    203  1.15.16.1      matt clockpro_queue(struct uvmpdpol_groupstate *gs, u_int qidx)
    204        1.2      yamt {
    205        1.2      yamt 
    206        1.2      yamt 	KASSERT(CLOCKPRO_NOQUEUE < qidx);
    207        1.2      yamt 	KASSERT(qidx <= CLOCKPRO_NQUEUE);
    208        1.2      yamt 
    209  1.15.16.1      matt 	return &gs->gs_q[qidx - 1];
    210        1.2      yamt }
    211        1.2      yamt 
    212        1.2      yamt #if !defined(LISTQ)
    213        1.2      yamt 
    214  1.15.16.1      matt static inline void
    215  1.15.16.1      matt clockpro_switchqueue(struct uvmpdpol_groupstate *gs)
    216        1.2      yamt {
    217        1.2      yamt 
    218  1.15.16.1      matt 	gs->gs_coldqidx ^= 1;
    219        1.2      yamt }
    220        1.2      yamt 
    221        1.2      yamt #endif /* !defined(LISTQ) */
    222        1.2      yamt 
    223        1.2      yamt static struct clockpro_state clockpro;
    224        1.2      yamt 
    225        1.2      yamt /* ---------------------------------------- */
    226        1.2      yamt 
    227        1.2      yamt static void
    228        1.2      yamt pageq_init(pageq_t *q)
    229        1.2      yamt {
    230        1.2      yamt 
    231        1.2      yamt 	TAILQ_INIT(&q->q_q);
    232        1.2      yamt 	q->q_len = 0;
    233        1.2      yamt }
    234        1.2      yamt 
    235  1.15.16.1      matt static u_int
    236        1.2      yamt pageq_len(const pageq_t *q)
    237        1.2      yamt {
    238        1.2      yamt 
    239        1.2      yamt 	return q->q_len;
    240        1.2      yamt }
    241        1.2      yamt 
    242        1.2      yamt static struct vm_page *
    243        1.2      yamt pageq_first(const pageq_t *q)
    244        1.2      yamt {
    245        1.2      yamt 
    246        1.2      yamt 	return TAILQ_FIRST(&q->q_q);
    247        1.2      yamt }
    248        1.2      yamt 
    249        1.2      yamt static void
    250  1.15.16.1      matt pageq_insert_tail(struct uvmpdpol_groupstate *gs, pageq_t *q, struct vm_page *pg)
    251        1.2      yamt {
    252  1.15.16.1      matt 	KASSERT(clockpro_queue(gs, clockpro_getq(pg)) == q);
    253        1.2      yamt 
    254       1.15        ad 	TAILQ_INSERT_TAIL(&q->q_q, pg, pageq.queue);
    255        1.2      yamt 	q->q_len++;
    256        1.2      yamt }
    257        1.2      yamt 
    258       1.14       bjs #if defined(LISTQ)
    259        1.2      yamt static void
    260  1.15.16.1      matt pageq_insert_head(struct uvmpdpol_groupstate *gs, pageq_t *q, struct vm_page *pg)
    261        1.2      yamt {
    262  1.15.16.1      matt 	KASSERT(clockpro_queue(gs, clockpro_getq(pg)) == q);
    263        1.2      yamt 
    264       1.15        ad 	TAILQ_INSERT_HEAD(&q->q_q, pg, pageq.queue);
    265        1.2      yamt 	q->q_len++;
    266        1.2      yamt }
    267       1.14       bjs #endif
    268        1.2      yamt 
    269        1.2      yamt static void
    270  1.15.16.1      matt pageq_remove(struct uvmpdpol_groupstate *gs, pageq_t *q, struct vm_page *pg)
    271        1.2      yamt {
    272  1.15.16.1      matt 	KASSERT(clockpro_queue(gs, clockpro_getq(pg)) == q);
    273        1.2      yamt 	KASSERT(q->q_len > 0);
    274       1.15        ad 	TAILQ_REMOVE(&q->q_q, pg, pageq.queue);
    275        1.2      yamt 	q->q_len--;
    276        1.2      yamt }
    277        1.2      yamt 
    278        1.2      yamt static struct vm_page *
    279  1.15.16.1      matt pageq_remove_head(struct uvmpdpol_groupstate *gs, pageq_t *q)
    280        1.2      yamt {
    281        1.2      yamt 	struct vm_page *pg;
    282        1.2      yamt 
    283        1.2      yamt 	pg = TAILQ_FIRST(&q->q_q);
    284        1.2      yamt 	if (pg == NULL) {
    285        1.2      yamt 		KASSERT(q->q_len == 0);
    286        1.2      yamt 		return NULL;
    287        1.2      yamt 	}
    288  1.15.16.1      matt 
    289  1.15.16.1      matt 	pageq_remove(gs, q, pg);
    290        1.2      yamt 	return pg;
    291        1.2      yamt }
    292        1.2      yamt 
    293        1.2      yamt /* ---------------------------------------- */
    294        1.2      yamt 
    295        1.2      yamt static void
    296  1.15.16.1      matt clockpro_insert_tail(struct uvmpdpol_groupstate *gs, u_int qidx, struct vm_page *pg)
    297        1.2      yamt {
    298  1.15.16.1      matt 	pageq_t *q = clockpro_queue(gs, qidx);
    299        1.2      yamt 
    300        1.2      yamt 	clockpro_setq(pg, qidx);
    301  1.15.16.1      matt 	pageq_insert_tail(gs, q, pg);
    302        1.2      yamt }
    303        1.2      yamt 
    304       1.14       bjs #if defined(LISTQ)
    305        1.5  christos static void
    306  1.15.16.1      matt clockpro_insert_head(struct uvmpdpol_groupstate *gs, u_int qidx, struct vm_page *pg)
    307        1.2      yamt {
    308  1.15.16.1      matt 	pageq_t *q = clockpro_queue(gs, qidx);
    309        1.2      yamt 
    310        1.2      yamt 	clockpro_setq(pg, qidx);
    311  1.15.16.1      matt 	pageq_insert_head(gs, q, pg);
    312        1.2      yamt }
    313        1.2      yamt 
    314       1.14       bjs #endif
    315        1.2      yamt /* ---------------------------------------- */
    316        1.2      yamt 
    317        1.2      yamt typedef uintptr_t objid_t;
    318        1.2      yamt 
    319        1.2      yamt /*
    320        1.2      yamt  * XXX maybe these hash functions need reconsideration,
    321        1.2      yamt  * given that hash distribution is critical here.
    322        1.2      yamt  */
    323        1.2      yamt 
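/*
 * pageidentityhash1 selects the bucket (nonresident_getbucket) and
 * pageidentityhash2 forms the cookie stored in it (calccookie), so a
 * stale entry is mistaken for a genuine non-resident hit only when both
 * hashes of an unrelated page collide.
 */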
    324        1.2      yamt static uint32_t
    325        1.2      yamt pageidentityhash1(objid_t obj, off_t idx)
    326        1.2      yamt {
    327        1.2      yamt 	uint32_t hash = HASH32_BUF_INIT;
    328        1.2      yamt 
    329        1.2      yamt #if 1
    330        1.2      yamt 	hash = hash32_buf(&idx, sizeof(idx), hash);
    331        1.2      yamt 	hash = hash32_buf(&obj, sizeof(obj), hash);
    332        1.2      yamt #else
    333        1.2      yamt 	hash = hash32_buf(&obj, sizeof(obj), hash);
    334        1.2      yamt 	hash = hash32_buf(&idx, sizeof(idx), hash);
    335        1.2      yamt #endif
    336        1.2      yamt 	return hash;
    337        1.2      yamt }
    338        1.2      yamt 
    339        1.2      yamt static uint32_t
    340        1.2      yamt pageidentityhash2(objid_t obj, off_t idx)
    341        1.2      yamt {
    342        1.2      yamt 	uint32_t hash = HASH32_BUF_INIT;
    343        1.2      yamt 
    344        1.2      yamt 	hash = hash32_buf(&obj, sizeof(obj), hash);
    345        1.2      yamt 	hash = hash32_buf(&idx, sizeof(idx), hash);
    346        1.2      yamt 	return hash;
    347        1.2      yamt }
    348        1.2      yamt 
    349        1.2      yamt static nonres_cookie_t
    350        1.2      yamt calccookie(objid_t obj, off_t idx)
    351        1.2      yamt {
    352        1.2      yamt 	uint32_t hash = pageidentityhash2(obj, idx);
    353        1.2      yamt 	nonres_cookie_t cookie = hash;
    354        1.2      yamt 
    355        1.2      yamt 	if (__predict_false(cookie == NONRES_COOKIE_INVAL)) {
    356        1.2      yamt 		cookie++; /* XXX */
    357        1.2      yamt 	}
    358        1.2      yamt 	return cookie;
    359        1.2      yamt }
    360        1.2      yamt 
    361  1.15.16.1      matt #define	COLDTARGET_ADJ(gs, d)	((gs)->gs_coldadj += (d))
    362        1.2      yamt 
    363        1.2      yamt #if defined(PDSIM)
    364        1.2      yamt 
    365        1.2      yamt static void *
    366  1.15.16.1      matt clockpro_hashalloc(u_int n)
    367        1.2      yamt {
    368  1.15.16.1      matt 	size_t allocsz = sizeof(struct bucket) * n;
    369        1.2      yamt 
    370        1.2      yamt 	return malloc(allocsz);
    371        1.2      yamt }
    372        1.2      yamt 
    373        1.2      yamt static void
     374        1.2      yamt clockpro_hashfree(void *p, u_int n)
    375        1.2      yamt {
    376        1.2      yamt 
    377        1.2      yamt 	free(p);
    378        1.2      yamt }
    379        1.2      yamt 
    380        1.2      yamt #else /* defined(PDSIM) */
    381        1.2      yamt 
    382        1.2      yamt static void *
    383  1.15.16.1      matt clockpro_hashalloc(u_int n)
    384        1.2      yamt {
    385  1.15.16.1      matt 	size_t allocsz = round_page(sizeof(struct bucket) * n);
    386        1.2      yamt 
    387        1.2      yamt 	return (void *)uvm_km_alloc(kernel_map, allocsz, 0, UVM_KMF_WIRED);
    388        1.2      yamt }
    389        1.2      yamt 
    390        1.2      yamt static void
    391  1.15.16.1      matt clockpro_hashfree(void *p, u_int n)
    392        1.2      yamt {
    393  1.15.16.1      matt 	size_t allocsz = round_page(sizeof(struct bucket) * n);
    394        1.2      yamt 
    395        1.2      yamt 	uvm_km_free(kernel_map, (vaddr_t)p, allocsz, UVM_KMF_WIRED);
    396        1.2      yamt }
    397        1.2      yamt 
    398        1.2      yamt #endif /* defined(PDSIM) */
    399        1.2      yamt 
    400        1.2      yamt static void
    401        1.2      yamt clockpro_hashinit(uint64_t n)
    402        1.2      yamt {
    403        1.2      yamt 	struct bucket *newbuckets;
    404        1.2      yamt 	struct bucket *oldbuckets;
    405        1.2      yamt 	size_t sz;
    406        1.2      yamt 	size_t oldsz;
    407        1.2      yamt 	int i;
    408        1.2      yamt 
    409        1.2      yamt 	sz = howmany(n, BUCKETSIZE);
    410        1.2      yamt 	sz *= clockpro_hashfactor;
    411        1.2      yamt 	newbuckets = clockpro_hashalloc(sz);
    412        1.2      yamt 	if (newbuckets == NULL) {
    413        1.2      yamt 		panic("%s: allocation failure", __func__);
    414        1.2      yamt 	}
    415        1.2      yamt 	for (i = 0; i < sz; i++) {
    416        1.2      yamt 		struct bucket *b = &newbuckets[i];
    417        1.2      yamt 		int j;
    418        1.2      yamt 
    419        1.2      yamt 		b->cycle = cycle_target;
    420        1.2      yamt 		b->cur = 0;
    421        1.2      yamt 		for (j = 0; j < BUCKETSIZE; j++) {
    422        1.2      yamt 			b->pages[j] = NONRES_COOKIE_INVAL;
    423        1.2      yamt 		}
    424        1.2      yamt 	}
    425        1.2      yamt 	/* XXX lock */
    426        1.2      yamt 	oldbuckets = buckets;
    427        1.2      yamt 	oldsz = hashsize;
    428        1.2      yamt 	buckets = newbuckets;
    429        1.2      yamt 	hashsize = sz;
    430        1.2      yamt 	/* XXX unlock */
    431  1.15.16.1      matt 	if (oldbuckets) {
    432        1.2      yamt 		clockpro_hashfree(oldbuckets, oldsz);
    433        1.2      yamt 	}
    434        1.2      yamt }
    435        1.2      yamt 
    436        1.2      yamt static struct bucket *
    437        1.2      yamt nonresident_getbucket(objid_t obj, off_t idx)
    438        1.2      yamt {
    439        1.2      yamt 	uint32_t hash;
    440        1.2      yamt 
    441        1.2      yamt 	hash = pageidentityhash1(obj, idx);
    442        1.2      yamt 	return &buckets[hash % hashsize];
    443        1.2      yamt }
    444        1.2      yamt 
    445        1.2      yamt static void
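/*
 * nonresident_rotate: catch this bucket's hand up with the global
 * cycle_target.  Every slot passed over is invalidated, ending the test
 * period of whatever non-resident page was recorded there (counted as
 * nreshandhot, and shrinking the cold target).  The work is capped at
 * just under two full buckets, since anything older would have been
 * overwritten already.
 */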
    446  1.15.16.1      matt nonresident_rotate(struct uvmpdpol_groupstate *gs, struct bucket *b)
    447        1.2      yamt {
    448       1.13      yamt 	const int target = cycle_target;
    449       1.13      yamt 	const int cycle = b->cycle;
    450       1.11      yamt 	int cur;
    451       1.13      yamt 	int todo;
    452        1.2      yamt 
    453       1.13      yamt 	todo = target - cycle;
    454       1.13      yamt 	if (todo >= BUCKETSIZE * 2) {
    455       1.13      yamt 		todo = (todo % BUCKETSIZE) + BUCKETSIZE;
    456       1.13      yamt 	}
    457       1.11      yamt 	cur = b->cur;
    458       1.13      yamt 	while (todo > 0) {
    459       1.11      yamt 		if (b->pages[cur] != NONRES_COOKIE_INVAL) {
    460        1.2      yamt 			PDPOL_EVCNT_INCR(nreshandhot);
    461  1.15.16.1      matt 			if (gs != NULL)
    462  1.15.16.1      matt 				COLDTARGET_ADJ(gs, -1);
    463        1.2      yamt 		}
    464       1.11      yamt 		b->pages[cur] = NONRES_COOKIE_INVAL;
    465       1.11      yamt 		cur++;
    466       1.11      yamt 		if (cur == BUCKETSIZE) {
    467       1.11      yamt 			cur = 0;
    468       1.11      yamt 		}
    469       1.13      yamt 		todo--;
    470        1.2      yamt 	}
    471       1.13      yamt 	b->cycle = target;
    472       1.11      yamt 	b->cur = cur;
    473        1.2      yamt }
    474        1.2      yamt 
    475        1.7   thorpej static bool
    476  1.15.16.1      matt nonresident_lookupremove(struct uvmpdpol_groupstate *gs, objid_t obj, off_t idx)
    477        1.2      yamt {
    478        1.2      yamt 	struct bucket *b = nonresident_getbucket(obj, idx);
    479        1.2      yamt 	nonres_cookie_t cookie = calccookie(obj, idx);
    480        1.2      yamt 
    481  1.15.16.1      matt 	nonresident_rotate(gs, b);
    482  1.15.16.1      matt 	for (u_int i = 0; i < BUCKETSIZE; i++) {
    483        1.2      yamt 		if (b->pages[i] == cookie) {
    484        1.2      yamt 			b->pages[i] = NONRES_COOKIE_INVAL;
    485        1.8   thorpej 			return true;
    486        1.2      yamt 		}
    487        1.2      yamt 	}
    488        1.8   thorpej 	return false;
    489        1.2      yamt }
    490        1.2      yamt 
    491        1.2      yamt static objid_t
    492        1.2      yamt pageobj(struct vm_page *pg)
    493        1.2      yamt {
    494        1.2      yamt 	const void *obj;
    495        1.2      yamt 
    496        1.2      yamt 	/*
     497        1.2      yamt 	 * XXX object pointer is often freed and reused for an unrelated object.
    498        1.2      yamt 	 * for vnodes, it would be better to use something like
    499        1.2      yamt 	 * a hash of fsid/fileid/generation.
    500        1.2      yamt 	 */
    501        1.2      yamt 
    502        1.2      yamt 	obj = pg->uobject;
    503        1.2      yamt 	if (obj == NULL) {
    504        1.2      yamt 		obj = pg->uanon;
    505        1.2      yamt 		KASSERT(obj != NULL);
    506        1.2      yamt 		KASSERT(pg->offset == 0);
    507        1.2      yamt 	}
    508        1.2      yamt 
    509        1.2      yamt 	return (objid_t)obj;
    510        1.2      yamt }
    511        1.2      yamt 
    512        1.2      yamt static off_t
    513        1.2      yamt pageidx(struct vm_page *pg)
    514        1.2      yamt {
    515        1.2      yamt 
    516        1.2      yamt 	KASSERT((pg->offset & PAGE_MASK) == 0);
    517        1.2      yamt 	return pg->offset >> PAGE_SHIFT;
    518        1.2      yamt }
    519        1.2      yamt 
    520        1.7   thorpej static bool
    521  1.15.16.1      matt nonresident_pagelookupremove(struct uvmpdpol_groupstate *gs, struct vm_page *pg)
    522        1.2      yamt {
    523  1.15.16.1      matt 	bool found = nonresident_lookupremove(gs, pageobj(pg), pageidx(pg));
    524        1.2      yamt 
    525        1.9      yamt 	if (pg->uobject) {
    526        1.9      yamt 		PDPOL_EVCNT_INCR(nreslookupobj);
    527        1.9      yamt 	} else {
    528        1.9      yamt 		PDPOL_EVCNT_INCR(nreslookupanon);
    529        1.9      yamt 	}
    530        1.2      yamt 	if (found) {
    531        1.2      yamt 		if (pg->uobject) {
    532        1.2      yamt 			PDPOL_EVCNT_INCR(nresfoundobj);
    533        1.2      yamt 		} else {
    534        1.2      yamt 			PDPOL_EVCNT_INCR(nresfoundanon);
    535        1.2      yamt 		}
    536        1.2      yamt 	}
    537        1.2      yamt 	return found;
    538        1.2      yamt }
    539        1.2      yamt 
    540        1.2      yamt static void
    541  1.15.16.1      matt nonresident_pagerecord(struct uvmpdpol_groupstate *gs, struct vm_page *pg)
    542        1.2      yamt {
    543  1.15.16.1      matt 	const objid_t obj = pageobj(pg);
    544  1.15.16.1      matt 	const off_t idx = pageidx(pg);
    545  1.15.16.1      matt 	struct bucket * const b = nonresident_getbucket(obj, idx);
    546        1.2      yamt 	nonres_cookie_t cookie = calccookie(obj, idx);
    547        1.2      yamt 
    548        1.2      yamt #if defined(DEBUG)
    549  1.15.16.1      matt 	for (u_int i = 0; i < BUCKETSIZE; i++) {
    550        1.2      yamt 		if (b->pages[i] == cookie) {
    551        1.2      yamt 			PDPOL_EVCNT_INCR(nresconflict);
    552        1.2      yamt 		}
    553        1.2      yamt 	}
    554        1.2      yamt #endif /* defined(DEBUG) */
    555        1.2      yamt 
    556        1.2      yamt 	if (pg->uobject) {
    557        1.2      yamt 		PDPOL_EVCNT_INCR(nresrecordobj);
    558        1.2      yamt 	} else {
    559        1.2      yamt 		PDPOL_EVCNT_INCR(nresrecordanon);
    560        1.2      yamt 	}
    561  1.15.16.1      matt 	nonresident_rotate(gs, b);
    562        1.2      yamt 	if (b->pages[b->cur] != NONRES_COOKIE_INVAL) {
    563        1.2      yamt 		PDPOL_EVCNT_INCR(nresoverwritten);
    564  1.15.16.1      matt 		COLDTARGET_ADJ(gs, -1);
    565        1.2      yamt 	}
    566        1.2      yamt 	b->pages[b->cur] = cookie;
    567        1.2      yamt 	b->cur = (b->cur + 1) % BUCKETSIZE;
    568        1.2      yamt }
    569        1.2      yamt 
    570        1.2      yamt /* ---------------------------------------- */
    571        1.2      yamt 
    572        1.2      yamt #if defined(CLOCKPRO_DEBUG)
    573        1.2      yamt static void
    574        1.2      yamt check_sanity(void)
    575        1.2      yamt {
    576        1.2      yamt }
    577        1.2      yamt #else /* defined(CLOCKPRO_DEBUG) */
    578        1.2      yamt #define	check_sanity()	/* nothing */
    579        1.2      yamt #endif /* defined(CLOCKPRO_DEBUG) */
    580        1.2      yamt 
    581        1.2      yamt static void
    582        1.2      yamt clockpro_reinit(void)
    583        1.2      yamt {
    584        1.2      yamt 
    585        1.2      yamt 	clockpro_hashinit(uvmexp.npages);
    586        1.2      yamt }
    587        1.2      yamt 
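/*
 * clockpro_recolor: install a new per-group state array, presumably
 * after the number of page colors (and hence page groups) has changed.
 * Every page on the old groups' queues is moved onto the matching queue
 * of the group it now belongs to, and the per-group page and cold
 * counts are rebuilt.
 */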
    588        1.2      yamt static void
    589  1.15.16.1      matt clockpro_recolor(void *new_gs, struct uvm_pggroup *grparray,
    590  1.15.16.1      matt 	size_t npggroup, size_t old_ncolors)
    591        1.2      yamt {
    592  1.15.16.1      matt 	struct uvmpdpol_groupstate *old_gs = clockpro.s_gs;
    593  1.15.16.1      matt 	struct uvm_pggroup *grp = uvm.pggroups;
    594  1.15.16.1      matt 	struct uvmpdpol_groupstate *gs = new_gs;
    595  1.15.16.1      matt 	const size_t old_npggroup = VM_NPGGROUP(old_ncolors);
    596  1.15.16.1      matt 
    597  1.15.16.1      matt 	clockpro.s_gs = gs;
    598  1.15.16.1      matt 
    599  1.15.16.1      matt 	for (size_t pggroup = 0; pggroup < npggroup; pggroup++, gs++, grp++) {
    600  1.15.16.1      matt 		grp->pgrp_gs = gs;
    601  1.15.16.1      matt 		gs->gs_pgrp = grp;
    602  1.15.16.1      matt 		for (u_int i = 0; i < CLOCKPRO_NQUEUE; i++) {
    603  1.15.16.1      matt 			pageq_init(&gs->gs_q[i]);
    604  1.15.16.1      matt 		}
    605  1.15.16.1      matt 		gs->gs_newqlenmax = 1;
    606  1.15.16.1      matt 		gs->gs_coldtarget = 1;
    607  1.15.16.1      matt 	}
    608  1.15.16.1      matt 
    609  1.15.16.1      matt 	for (size_t pggroup = 0; pggroup < old_npggroup; pggroup++, old_gs++) {
    610  1.15.16.1      matt 		pageq_t *oldq = old_gs->gs_q;
    611  1.15.16.1      matt 		for (u_int i = 0; i < CLOCKPRO_NQUEUE; i++, oldq++) {
    612  1.15.16.1      matt 			while (pageq_len(oldq) > 0) {
    613  1.15.16.1      matt 				struct vm_page *pg = pageq_remove_head(old_gs, oldq);
    614  1.15.16.1      matt 				KASSERT(pg != NULL);
    615  1.15.16.1      matt 				grp = uvm_page_to_pggroup(pg);
    616  1.15.16.1      matt 				gs = grp->pgrp_gs;
    617  1.15.16.1      matt 				pageq_insert_tail(gs, &gs->gs_q[i], pg);
    618  1.15.16.1      matt #if defined(USEONCE2)
    619  1.15.16.1      matt #else
    620  1.15.16.1      matt 				gs->gs_npages++;
    621  1.15.16.1      matt 				if (pg->pqflags & (PQ_TEST|PQ_SPECULATIVE)) {
    622  1.15.16.1      matt 					gs->gs_ncold++;
    623  1.15.16.1      matt 				}
    624  1.15.16.1      matt #endif
    625  1.15.16.1      matt 			}
    626  1.15.16.1      matt 		}
    627  1.15.16.1      matt 	}
    628  1.15.16.1      matt 
    629  1.15.16.1      matt 	uvm_pctparam_init(&clockpro.s_coldtargetpct, CLOCKPRO_COLDPCT, NULL);
    630  1.15.16.1      matt 
    631  1.15.16.1      matt }
    632  1.15.16.1      matt 
    633  1.15.16.1      matt static void
    634  1.15.16.1      matt clockpro_init(void *new_gs, size_t npggroup)
    635  1.15.16.1      matt {
    636  1.15.16.1      matt 	struct uvm_pggroup *grp = uvm.pggroups;
    637  1.15.16.1      matt 	struct uvmpdpol_groupstate *gs = new_gs;
    638        1.2      yamt 
    639  1.15.16.1      matt 	for (size_t pggroup = 0; pggroup < npggroup; pggroup++, gs++, grp++) {
    640  1.15.16.1      matt 		grp->pgrp_gs = gs;
    641  1.15.16.1      matt 		gs->gs_pgrp = grp;
    642  1.15.16.1      matt 		for (u_int i = 0; i < CLOCKPRO_NQUEUE; i++) {
    643  1.15.16.1      matt 			pageq_init(&gs->gs_q[i]);
    644  1.15.16.1      matt 		}
    645  1.15.16.1      matt 		gs->gs_newqlenmax = 1;
    646  1.15.16.1      matt 		gs->gs_coldtarget = 1;
    647        1.2      yamt 	}
    648  1.15.16.1      matt 
    649  1.15.16.1      matt 	uvm_pctparam_init(&clockpro.s_coldtargetpct, CLOCKPRO_COLDPCT, NULL);
    650        1.2      yamt }
    651        1.2      yamt 
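/*
 * clockpro_tune: recompute the group's cold page target.  With ADAPTIVE
 * the accumulated gs_coldadj feedback is applied, clamped so the target
 * stays within [1, CLOCKPRO_COLDPCTMAX% of the group]; otherwise the
 * target is a fixed percentage (s_coldtargetpct) of the group's pages.
 * The new queue is then sized to a quarter of the target, but never
 * below CLOCKPRO_NEWQMIN.
 */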
    652        1.2      yamt static void
    653  1.15.16.1      matt clockpro_tune(struct uvmpdpol_groupstate *gs)
    654        1.2      yamt {
    655        1.2      yamt 	int coldtarget;
    656        1.2      yamt 
    657        1.2      yamt #if defined(ADAPTIVE)
     658  1.15.16.1      matt 	u_int coldmax = gs->gs_npages * CLOCKPRO_COLDPCTMAX / 100;
    659  1.15.16.1      matt 	u_int coldmin = 1;
    660        1.2      yamt 
    661  1.15.16.1      matt 	coldtarget = gs->gs_coldtarget;
    662  1.15.16.1      matt 	if (coldtarget + gs->gs_coldadj < coldmin) {
    663  1.15.16.1      matt 		gs->gs_coldadj = coldmin - coldtarget;
    664  1.15.16.1      matt 	} else if (coldtarget + gs->gs_coldadj > coldmax) {
    665  1.15.16.1      matt 		gs->gs_coldadj = coldmax - coldtarget;
    666        1.2      yamt 	}
    667  1.15.16.1      matt 	coldtarget += gs->gs_coldadj;
    668        1.2      yamt #else /* defined(ADAPTIVE) */
    669  1.15.16.1      matt 	coldtarget = UVM_PCTPARAM_APPLY(&clockpro.s_coldtargetpct,
    670  1.15.16.1      matt 	    gs->gs_npages);
    671        1.2      yamt 	if (coldtarget < 1) {
    672        1.2      yamt 		coldtarget = 1;
    673        1.2      yamt 	}
    674        1.2      yamt #endif /* defined(ADAPTIVE) */
    675        1.2      yamt 
    676  1.15.16.1      matt 	gs->gs_coldtarget = coldtarget;
    677  1.15.16.1      matt 	gs->gs_newqlenmax = coldtarget / 4;
    678  1.15.16.1      matt 	if (gs->gs_newqlenmax < CLOCKPRO_NEWQMIN) {
    679  1.15.16.1      matt 		gs->gs_newqlenmax = CLOCKPRO_NEWQMIN;
    680        1.2      yamt 	}
    681        1.2      yamt }
    682        1.2      yamt 
    683        1.2      yamt static void
    684        1.2      yamt clockpro_movereferencebit(struct vm_page *pg)
    685        1.2      yamt {
    686        1.7   thorpej 	bool referenced;
    687        1.2      yamt 
    688        1.2      yamt 	referenced = pmap_clear_reference(pg);
    689        1.2      yamt 	if (referenced) {
    690        1.2      yamt 		pg->pqflags |= PQ_REFERENCED;
    691        1.2      yamt 	}
    692        1.2      yamt }
    693        1.2      yamt 
    694        1.2      yamt static void
    695        1.2      yamt clockpro_clearreferencebit(struct vm_page *pg)
    696        1.2      yamt {
    697        1.2      yamt 
    698        1.2      yamt 	clockpro_movereferencebit(pg);
    699        1.2      yamt 	pg->pqflags &= ~PQ_REFERENCED;
    700        1.2      yamt }
    701        1.2      yamt 
    702        1.2      yamt static void
    703  1.15.16.1      matt clockpro___newqrotate(struct uvmpdpol_groupstate * const gs, int len)
    704        1.2      yamt {
    705  1.15.16.1      matt 	pageq_t * const newq = clockpro_queue(gs, CLOCKPRO_NEWQ);
    706        1.2      yamt 
    707        1.2      yamt 	while (pageq_len(newq) > len) {
    708  1.15.16.1      matt 		struct vm_page *pg = pageq_remove_head(gs, newq);
    709        1.2      yamt 		KASSERT(pg != NULL);
    710        1.2      yamt 		KASSERT(clockpro_getq(pg) == CLOCKPRO_NEWQ);
    711        1.2      yamt 		if ((pg->pqflags & PQ_INITIALREF) != 0) {
    712        1.2      yamt 			clockpro_clearreferencebit(pg);
    713        1.2      yamt 			pg->pqflags &= ~PQ_INITIALREF;
    714        1.2      yamt 		}
    715        1.2      yamt 		/* place at the list head */
    716  1.15.16.1      matt 		clockpro_insert_tail(gs, CLOCKPRO_COLDQ(gs), pg);
    717        1.2      yamt 	}
    718        1.2      yamt }
    719        1.2      yamt 
    720        1.2      yamt static void
    721  1.15.16.1      matt clockpro_newqrotate(struct uvmpdpol_groupstate * const gs)
    722        1.2      yamt {
    723        1.2      yamt 
    724        1.2      yamt 	check_sanity();
    725  1.15.16.1      matt 	clockpro___newqrotate(gs, gs->gs_newqlenmax);
    726        1.2      yamt 	check_sanity();
    727        1.2      yamt }
    728        1.2      yamt 
    729        1.2      yamt static void
    730  1.15.16.1      matt clockpro_newqflush(struct uvmpdpol_groupstate * const gs, int n)
    731        1.2      yamt {
    732        1.2      yamt 
    733        1.2      yamt 	check_sanity();
    734  1.15.16.1      matt 	clockpro___newqrotate(gs, n);
    735        1.2      yamt 	check_sanity();
    736        1.2      yamt }
    737        1.2      yamt 
    738        1.2      yamt static void
    739  1.15.16.1      matt clockpro_newqflushone(struct uvmpdpol_groupstate *gs)
    740        1.2      yamt {
    741        1.2      yamt 
    742  1.15.16.1      matt 	clockpro_newqflush(gs,
    743  1.15.16.1      matt 	    MAX(pageq_len(clockpro_queue(gs, CLOCKPRO_NEWQ)) - 1, 0));
    744        1.2      yamt }
    745        1.2      yamt 
    746        1.2      yamt /*
    747        1.2      yamt  * our "tail" is called "list-head" in the paper.
    748        1.2      yamt  */
    749        1.2      yamt 
    750        1.2      yamt static void
    751  1.15.16.1      matt clockpro___enqueuetail(struct uvmpdpol_groupstate *gs, struct vm_page *pg)
    752        1.2      yamt {
    753        1.2      yamt 
    754        1.2      yamt 	KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);
    755        1.2      yamt 
    756        1.2      yamt 	check_sanity();
    757        1.2      yamt #if !defined(USEONCE2)
    758  1.15.16.1      matt 	clockpro_insert_tail(gs, CLOCKPRO_NEWQ, pg);
    759  1.15.16.1      matt 	clockpro_newqrotate(gs);
    760        1.2      yamt #else /* !defined(USEONCE2) */
    761        1.2      yamt #if defined(LISTQ)
    762        1.2      yamt 	KASSERT((pg->pqflags & PQ_REFERENCED) == 0);
    763        1.2      yamt #endif /* defined(LISTQ) */
    764  1.15.16.1      matt 	clockpro_insert_tail(gs, CLOCKPRO_COLDQ(gs), pg);
    765        1.2      yamt #endif /* !defined(USEONCE2) */
    766        1.2      yamt 	check_sanity();
    767        1.2      yamt }
    768        1.2      yamt 
    769        1.2      yamt static void
    770        1.2      yamt clockpro_pageenqueue(struct vm_page *pg)
    771        1.2      yamt {
    772  1.15.16.1      matt 	struct uvm_pggroup *grp = uvm_page_to_pggroup(pg);
    773  1.15.16.1      matt 	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
    774        1.7   thorpej 	bool hot;
    775        1.7   thorpej 	bool speculative = (pg->pqflags & PQ_SPECULATIVE) != 0; /* XXX */
    776        1.2      yamt 
    777        1.2      yamt 	KASSERT((~pg->pqflags & (PQ_INITIALREF|PQ_SPECULATIVE)) != 0);
    778       1.10        ad 	KASSERT(mutex_owned(&uvm_pageqlock));
    779        1.2      yamt 	check_sanity();
    780        1.2      yamt 	KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);
    781  1.15.16.1      matt 	gs->gs_npages++;
    782        1.2      yamt 	pg->pqflags &= ~(PQ_HOT|PQ_TEST);
    783        1.2      yamt 	if (speculative) {
    784        1.8   thorpej 		hot = false;
    785        1.2      yamt 		PDPOL_EVCNT_INCR(speculativeenqueue);
    786        1.2      yamt 	} else {
    787  1.15.16.1      matt 		hot = nonresident_pagelookupremove(gs, pg);
    788        1.2      yamt 		if (hot) {
    789  1.15.16.1      matt 			COLDTARGET_ADJ(gs, 1);
    790        1.2      yamt 		}
    791        1.2      yamt 	}
    792        1.2      yamt 
    793        1.2      yamt 	/*
    794        1.2      yamt 	 * consider mmap'ed file:
    795        1.2      yamt 	 *
    796        1.2      yamt 	 * - read-ahead enqueues a page.
    797        1.2      yamt 	 *
    798        1.2      yamt 	 * - on the following read-ahead hit, the fault handler activates it.
    799        1.2      yamt 	 *
    800        1.2      yamt 	 * - finally, the userland code which caused the above fault
     801        1.2      yamt 	 *   actually accesses the page, which sets its reference bit.
    802        1.2      yamt 	 *
    803        1.2      yamt 	 * we want to count the above as a single access, rather than
    804        1.2      yamt 	 * three accesses with short reuse distances.
    805        1.2      yamt 	 */
    806        1.2      yamt 
    807        1.2      yamt #if defined(USEONCE2)
    808        1.2      yamt 	pg->pqflags &= ~PQ_INITIALREF;
    809        1.2      yamt 	if (hot) {
    810        1.2      yamt 		pg->pqflags |= PQ_TEST;
    811        1.2      yamt 	}
    812  1.15.16.1      matt 	gs->gs_ncold++;
    813        1.2      yamt 	clockpro_clearreferencebit(pg);
    814  1.15.16.1      matt 	clockpro___enqueuetail(gs, pg);
    815        1.2      yamt #else /* defined(USEONCE2) */
    816        1.2      yamt 	if (speculative) {
    817  1.15.16.1      matt 		gs->gs_ncold++;
    818        1.2      yamt 	} else if (hot) {
    819        1.2      yamt 		pg->pqflags |= PQ_HOT;
    820        1.2      yamt 	} else {
    821        1.2      yamt 		pg->pqflags |= PQ_TEST;
    822  1.15.16.1      matt 		gs->gs_ncold++;
    823        1.2      yamt 	}
    824  1.15.16.1      matt 	clockpro___enqueuetail(gs, pg);
    825        1.2      yamt #endif /* defined(USEONCE2) */
    826  1.15.16.1      matt 	grp->pgrp_inactive = gs->gs_ncold;
    827  1.15.16.1      matt 	grp->pgrp_active = gs->gs_npages - gs->gs_ncold;
    828  1.15.16.1      matt 	KASSERT(gs->gs_ncold <= gs->gs_npages);
    829        1.2      yamt }
    830        1.2      yamt 
    831        1.2      yamt static pageq_t *
    832        1.2      yamt clockpro_pagequeue(struct vm_page *pg)
    833        1.2      yamt {
    834  1.15.16.1      matt 	struct uvm_pggroup *grp = uvm_page_to_pggroup(pg);
    835  1.15.16.1      matt 	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
    836  1.15.16.1      matt 	u_int qidx;
    837        1.2      yamt 
    838        1.2      yamt 	qidx = clockpro_getq(pg);
    839        1.2      yamt 	KASSERT(qidx != CLOCKPRO_NOQUEUE);
    840        1.2      yamt 
    841  1.15.16.1      matt 	return clockpro_queue(gs, qidx);
    842        1.2      yamt }
    843        1.2      yamt 
    844        1.2      yamt static void
    845        1.2      yamt clockpro_pagedequeue(struct vm_page *pg)
    846        1.2      yamt {
    847  1.15.16.1      matt 	struct uvm_pggroup *grp = uvm_page_to_pggroup(pg);
    848  1.15.16.1      matt 	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
    849        1.2      yamt 	pageq_t *q;
    850        1.2      yamt 
    851  1.15.16.1      matt 	KASSERT(gs->gs_npages > 0);
    852        1.2      yamt 	check_sanity();
    853        1.2      yamt 	q = clockpro_pagequeue(pg);
    854  1.15.16.1      matt 	pageq_remove(gs, q, pg);
    855        1.2      yamt 	check_sanity();
    856        1.2      yamt 	clockpro_setq(pg, CLOCKPRO_NOQUEUE);
    857        1.2      yamt 	if ((pg->pqflags & PQ_HOT) == 0) {
    858  1.15.16.1      matt 		KASSERT(gs->gs_ncold > 0);
    859  1.15.16.1      matt 		gs->gs_ncold--;
    860        1.2      yamt 	}
    861  1.15.16.1      matt 	KASSERT(gs->gs_npages > 0);
    862  1.15.16.1      matt 	gs->gs_npages--;
    863  1.15.16.1      matt 	grp->pgrp_inactive = gs->gs_ncold;
    864  1.15.16.1      matt 	grp->pgrp_active = gs->gs_npages - gs->gs_ncold;
    865        1.2      yamt 	check_sanity();
    866        1.2      yamt }
    867        1.2      yamt 
    868        1.2      yamt static void
    869        1.2      yamt clockpro_pagerequeue(struct vm_page *pg)
    870        1.2      yamt {
    871  1.15.16.1      matt 	struct uvm_pggroup *grp = uvm_page_to_pggroup(pg);
    872  1.15.16.1      matt 	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
    873  1.15.16.1      matt 	u_int qidx;
    874        1.2      yamt 
    875        1.2      yamt 	qidx = clockpro_getq(pg);
    876  1.15.16.1      matt 	KASSERT(qidx == CLOCKPRO_HOTQ(gs) || qidx == CLOCKPRO_COLDQ(gs));
    877  1.15.16.1      matt 	pageq_remove(gs, clockpro_queue(gs, qidx), pg);
    878        1.2      yamt 	check_sanity();
    879        1.2      yamt 	clockpro_setq(pg, CLOCKPRO_NOQUEUE);
    880        1.2      yamt 
    881  1.15.16.1      matt 	clockpro___enqueuetail(gs, pg);
    882        1.2      yamt }
    883        1.2      yamt 
    884        1.2      yamt static void
    885  1.15.16.1      matt handhot_endtest(struct uvmpdpol_groupstate * const gs, struct vm_page *pg)
    886        1.2      yamt {
    887        1.2      yamt 
    888        1.2      yamt 	KASSERT((pg->pqflags & PQ_HOT) == 0);
    889        1.2      yamt 	if ((pg->pqflags & PQ_TEST) != 0) {
    890        1.2      yamt 		PDPOL_EVCNT_INCR(hhotcoldtest);
    891  1.15.16.1      matt 		COLDTARGET_ADJ(gs, -1);
    892        1.2      yamt 		pg->pqflags &= ~PQ_TEST;
    893        1.2      yamt 	} else {
    894        1.2      yamt 		PDPOL_EVCNT_INCR(hhotcold);
    895        1.2      yamt 	}
    896        1.2      yamt }
    897        1.2      yamt 
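/*
 * handhot_advance: run the hot hand.  While the group has fewer cold
 * pages than its target, walk the hot queue: a hot page whose reference
 * bit is clear is demoted to cold, a referenced one stays hot, and
 * either way it is requeued at the tail (via clockpro___enqueuetail).
 * Advancing the hot hand also advances cycle_target, which ends the
 * test period of old non-resident entries (nonresident_rotate).
 */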
    898        1.2      yamt static void
    899  1.15.16.1      matt handhot_advance(struct uvmpdpol_groupstate * const gs)
    900        1.2      yamt {
    901        1.2      yamt 	struct vm_page *pg;
    902        1.2      yamt 	pageq_t *hotq;
    903  1.15.16.1      matt 	u_int hotqlen;
    904        1.2      yamt 
    905  1.15.16.1      matt 	clockpro_tune(gs);
    906        1.2      yamt 
    907        1.2      yamt 	dump("hot called");
    908  1.15.16.1      matt 	if (gs->gs_ncold >= gs->gs_coldtarget) {
    909        1.2      yamt 		return;
    910        1.2      yamt 	}
    911  1.15.16.1      matt 	hotq = clockpro_queue(gs, CLOCKPRO_HOTQ(gs));
    912        1.2      yamt again:
    913        1.2      yamt 	pg = pageq_first(hotq);
    914        1.2      yamt 	if (pg == NULL) {
    915        1.2      yamt 		DPRINTF("%s: HHOT TAKEOVER\n", __func__);
    916        1.2      yamt 		dump("hhottakeover");
    917        1.2      yamt 		PDPOL_EVCNT_INCR(hhottakeover);
    918        1.2      yamt #if defined(LISTQ)
    919        1.2      yamt 		while (/* CONSTCOND */ 1) {
    920  1.15.16.1      matt 			pageq_t *coldq = clockpro_queue(gs, CLOCKPRO_COLDQ(gs));
    921        1.2      yamt 
    922        1.2      yamt 			pg = pageq_first(coldq);
    923        1.2      yamt 			if (pg == NULL) {
    924  1.15.16.1      matt 				clockpro_newqflushone(gs);
    925        1.2      yamt 				pg = pageq_first(coldq);
    926        1.2      yamt 				if (pg == NULL) {
    927        1.2      yamt 					WARN("hhot: no page?\n");
    928        1.2      yamt 					return;
    929        1.2      yamt 				}
    930        1.2      yamt 			}
    931        1.2      yamt 			KASSERT(clockpro_pagequeue(pg) == coldq);
    932  1.15.16.1      matt 			pageq_remove(gs, coldq, pg);
    933        1.2      yamt 			check_sanity();
    934        1.2      yamt 			if ((pg->pqflags & PQ_HOT) == 0) {
    935  1.15.16.1      matt 				handhot_endtest(gs, pg);
    936  1.15.16.1      matt 				clockpro_insert_tail(gs, CLOCKPRO_LISTQ, pg);
    937        1.2      yamt 			} else {
    938  1.15.16.1      matt 				clockpro_insert_head(gs, CLOCKPRO_HOTQ(gs), pg);
    939        1.2      yamt 				break;
    940        1.2      yamt 			}
    941        1.2      yamt 		}
    942        1.2      yamt #else /* defined(LISTQ) */
    943  1.15.16.1      matt 		clockpro_newqflush(gs, 0); /* XXX XXX */
    944  1.15.16.1      matt 		clockpro_switchqueue(gs);
    945  1.15.16.1      matt 		hotq = clockpro_queue(gs, CLOCKPRO_HOTQ(gs));
    946        1.2      yamt 		goto again;
    947        1.2      yamt #endif /* defined(LISTQ) */
    948        1.2      yamt 	}
    949        1.2      yamt 
    950        1.2      yamt 	KASSERT(clockpro_pagequeue(pg) == hotq);
    951        1.2      yamt 
    952        1.2      yamt 	/*
    953        1.2      yamt 	 * terminate test period of nonresident pages by cycling them.
    954        1.2      yamt 	 */
    955        1.2      yamt 
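	/*
	 * cycle_target advances by BUCKETSIZE/pageq_len(hotq) per
	 * hot-hand step, so one full sweep of the hot queue pushes every
	 * bucket's hand forward by a whole bucket; a recorded cookie
	 * therefore survives roughly one sweep of the hot hand.
	 * e.g. with BUCKETSIZE 14 and a 1000-page hot queue, 1000 steps
	 * advance cycle_target by 14.
	 */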
    956        1.2      yamt 	cycle_target_frac += BUCKETSIZE;
    957        1.2      yamt 	hotqlen = pageq_len(hotq);
    958        1.2      yamt 	while (cycle_target_frac >= hotqlen) {
    959        1.2      yamt 		cycle_target++;
    960        1.2      yamt 		cycle_target_frac -= hotqlen;
    961        1.2      yamt 	}
    962        1.2      yamt 
    963        1.2      yamt 	if ((pg->pqflags & PQ_HOT) == 0) {
    964        1.2      yamt #if defined(LISTQ)
    965        1.2      yamt 		panic("cold page in hotq: %p", pg);
    966        1.2      yamt #else /* defined(LISTQ) */
    967  1.15.16.1      matt 		handhot_endtest(gs, pg);
    968        1.2      yamt 		goto next;
    969        1.2      yamt #endif /* defined(LISTQ) */
    970        1.2      yamt 	}
    971        1.2      yamt 	KASSERT((pg->pqflags & PQ_TEST) == 0);
    972        1.2      yamt 	KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
    973        1.2      yamt 	KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);
    974        1.2      yamt 
    975        1.2      yamt 	/*
     976        1.2      yamt 	 * once we have met our target,
     977        1.2      yamt 	 * stop at a hot page so that no cold page in its test period
     978        1.2      yamt 	 * has a larger recency than any hot page.
    979        1.2      yamt 	 */
    980        1.2      yamt 
    981  1.15.16.1      matt 	if (gs->gs_ncold >= gs->gs_coldtarget) {
    982        1.2      yamt 		dump("hot done");
    983        1.2      yamt 		return;
    984        1.2      yamt 	}
    985        1.2      yamt 	clockpro_movereferencebit(pg);
    986        1.2      yamt 	if ((pg->pqflags & PQ_REFERENCED) == 0) {
    987  1.15.16.1      matt 		struct uvm_pggroup *grp = gs->gs_pgrp;
    988        1.2      yamt 		PDPOL_EVCNT_INCR(hhotunref);
    989  1.15.16.1      matt 		grp->pgrp_pddeact++;
    990        1.2      yamt 		pg->pqflags &= ~PQ_HOT;
    991  1.15.16.1      matt 		gs->gs_ncold++;
    992  1.15.16.1      matt 		grp->pgrp_inactive = gs->gs_ncold;
    993  1.15.16.1      matt 		grp->pgrp_active = gs->gs_npages - gs->gs_ncold;
    994  1.15.16.1      matt 		KASSERT(gs->gs_ncold <= gs->gs_npages);
    995        1.2      yamt 	} else {
    996        1.2      yamt 		PDPOL_EVCNT_INCR(hhotref);
    997        1.2      yamt 	}
    998        1.2      yamt 	pg->pqflags &= ~PQ_REFERENCED;
    999        1.2      yamt #if !defined(LISTQ)
   1000        1.2      yamt next:
   1001        1.2      yamt #endif /* !defined(LISTQ) */
   1002        1.2      yamt 	clockpro_pagerequeue(pg);
   1003        1.2      yamt 	dump("hot");
   1004        1.2      yamt 	goto again;
   1005        1.2      yamt }
   1006        1.2      yamt 
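/*
 * handcold_advance: return the next eviction candidate.  After rotating
 * the new queue and running the hot hand, walk the cold queue: hot
 * pages found there are handed back to the hot queue; a referenced cold
 * page is requeued, promoted to hot if its test period succeeded or
 * (re)entering a test period otherwise; an unreferenced cold page is
 * returned to the caller, after being recorded in the non-resident hash
 * if it was still in its test period.
 */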
   1007        1.2      yamt static struct vm_page *
   1008  1.15.16.1      matt handcold_advance(struct uvmpdpol_groupstate * const gs)
   1009        1.2      yamt {
   1010  1.15.16.1      matt 	struct uvm_pggroup * const grp = gs->gs_pgrp;
   1011        1.2      yamt 	struct vm_page *pg;
   1012        1.2      yamt 
   1013        1.2      yamt 	for (;;) {
   1014        1.3      yamt #if defined(LISTQ)
   1015  1.15.16.1      matt 		pageq_t *listq = clockpro_queue(gs, CLOCKPRO_LISTQ);
   1016        1.3      yamt #endif /* defined(LISTQ) */
   1017        1.2      yamt 		pageq_t *coldq;
   1018        1.2      yamt 
   1019  1.15.16.1      matt 		clockpro_newqrotate(gs);
   1020  1.15.16.1      matt 		handhot_advance(gs);
   1021        1.2      yamt #if defined(LISTQ)
   1022        1.2      yamt 		pg = pageq_first(listq);
   1023        1.2      yamt 		if (pg != NULL) {
   1024        1.2      yamt 			KASSERT(clockpro_getq(pg) == CLOCKPRO_LISTQ);
   1025        1.2      yamt 			KASSERT((pg->pqflags & PQ_TEST) == 0);
   1026        1.2      yamt 			KASSERT((pg->pqflags & PQ_HOT) == 0);
   1027        1.2      yamt 			KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
   1028  1.15.16.1      matt 			pageq_remove(gs, listq, pg);
   1029        1.2      yamt 			check_sanity();
   1030  1.15.16.1      matt 			clockpro_insert_head(gs, CLOCKPRO_COLDQ(gs), pg); /* XXX */
   1031        1.2      yamt 			goto gotcold;
   1032        1.2      yamt 		}
   1033        1.2      yamt #endif /* defined(LISTQ) */
   1034        1.2      yamt 		check_sanity();
   1035  1.15.16.1      matt 		coldq = clockpro_queue(gs, CLOCKPRO_COLDQ(gs));
   1036        1.2      yamt 		pg = pageq_first(coldq);
   1037        1.2      yamt 		if (pg == NULL) {
   1038  1.15.16.1      matt 			clockpro_newqflushone(gs);
   1039        1.2      yamt 			pg = pageq_first(coldq);
   1040        1.2      yamt 		}
   1041        1.2      yamt 		if (pg == NULL) {
   1042        1.2      yamt 			DPRINTF("%s: HCOLD TAKEOVER\n", __func__);
   1043        1.2      yamt 			dump("hcoldtakeover");
   1044        1.2      yamt 			PDPOL_EVCNT_INCR(hcoldtakeover);
   1045        1.2      yamt 			KASSERT(
   1046  1.15.16.1      matt 			    pageq_len(clockpro_queue(gs, CLOCKPRO_NEWQ)) == 0);
   1047        1.2      yamt #if defined(LISTQ)
   1048        1.2      yamt 			KASSERT(
   1049  1.15.16.1      matt 			    pageq_len(clockpro_queue(gs, CLOCKPRO_HOTQ(gs))) == 0);
   1050        1.2      yamt #else /* defined(LISTQ) */
   1051  1.15.16.1      matt 			clockpro_switchqueue(gs);
   1052  1.15.16.1      matt 			coldq = clockpro_queue(gs, CLOCKPRO_COLDQ(gs));
   1053        1.2      yamt 			pg = pageq_first(coldq);
   1054        1.2      yamt #endif /* defined(LISTQ) */
   1055        1.2      yamt 		}
   1056        1.2      yamt 		if (pg == NULL) {
   1057        1.2      yamt 			WARN("hcold: no page?\n");
   1058        1.2      yamt 			return NULL;
   1059        1.2      yamt 		}
   1060        1.2      yamt 		KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
   1061        1.2      yamt 		if ((pg->pqflags & PQ_HOT) != 0) {
   1062        1.2      yamt 			PDPOL_EVCNT_INCR(hcoldhot);
   1063  1.15.16.1      matt 			pageq_remove(gs, coldq, pg);
   1064  1.15.16.1      matt 			clockpro_insert_tail(gs, CLOCKPRO_HOTQ(gs), pg);
   1065        1.2      yamt 			check_sanity();
   1066        1.2      yamt 			KASSERT((pg->pqflags & PQ_TEST) == 0);
   1067  1.15.16.1      matt 			grp->pgrp_pdscans++;
   1068        1.2      yamt 			continue;
   1069        1.2      yamt 		}
   1070        1.2      yamt #if defined(LISTQ)
   1071        1.2      yamt gotcold:
   1072        1.2      yamt #endif /* defined(LISTQ) */
   1073        1.2      yamt 		KASSERT((pg->pqflags & PQ_HOT) == 0);
   1074  1.15.16.1      matt 		grp->pgrp_pdscans++;
   1075        1.2      yamt 		clockpro_movereferencebit(pg);
   1076        1.2      yamt 		if ((pg->pqflags & PQ_SPECULATIVE) != 0) {
   1077        1.2      yamt 			KASSERT((pg->pqflags & PQ_TEST) == 0);
   1078        1.2      yamt 			if ((pg->pqflags & PQ_REFERENCED) != 0) {
   1079        1.2      yamt 				PDPOL_EVCNT_INCR(speculativehit2);
   1080        1.2      yamt 				pg->pqflags &= ~(PQ_SPECULATIVE|PQ_REFERENCED);
   1081        1.2      yamt 				clockpro_pagedequeue(pg);
   1082        1.2      yamt 				clockpro_pageenqueue(pg);
   1083        1.2      yamt 				continue;
   1084        1.2      yamt 			}
   1085        1.2      yamt 			PDPOL_EVCNT_INCR(speculativemiss);
   1086        1.2      yamt 		}
   1087        1.2      yamt 		switch (pg->pqflags & (PQ_REFERENCED|PQ_TEST)) {
   1088        1.2      yamt 		case PQ_TEST:
   1089        1.2      yamt 			PDPOL_EVCNT_INCR(hcoldunreftest);
   1090  1.15.16.1      matt 			nonresident_pagerecord(gs, pg);
   1091        1.2      yamt 			goto gotit;
   1092        1.2      yamt 		case 0:
   1093        1.2      yamt 			PDPOL_EVCNT_INCR(hcoldunref);
   1094        1.2      yamt gotit:
   1095  1.15.16.1      matt 			KASSERT(gs->gs_ncold > 0);
   1096        1.2      yamt 			clockpro_pagerequeue(pg); /* XXX */
   1097        1.2      yamt 			dump("cold done");
   1098        1.2      yamt 			/* XXX "pg" is still in queue */
   1099  1.15.16.1      matt 			handhot_advance(gs);
   1100        1.2      yamt 			goto done;
   1101        1.2      yamt 
   1102        1.2      yamt 		case PQ_REFERENCED|PQ_TEST:
   1103        1.2      yamt 			PDPOL_EVCNT_INCR(hcoldreftest);
   1104  1.15.16.1      matt 			gs->gs_ncold--;
   1105  1.15.16.1      matt 			grp->pgrp_inactive = gs->gs_ncold;
   1106  1.15.16.1      matt 			grp->pgrp_active = gs->gs_npages - gs->gs_ncold;
   1107  1.15.16.1      matt 			COLDTARGET_ADJ(gs, 1);
   1108        1.2      yamt 			pg->pqflags |= PQ_HOT;
   1109        1.2      yamt 			pg->pqflags &= ~PQ_TEST;
   1110        1.2      yamt 			break;
   1111        1.2      yamt 
   1112        1.2      yamt 		case PQ_REFERENCED:
   1113        1.2      yamt 			PDPOL_EVCNT_INCR(hcoldref);
   1114        1.2      yamt 			pg->pqflags |= PQ_TEST;
   1115        1.2      yamt 			break;
   1116        1.2      yamt 		}
   1117        1.2      yamt 		pg->pqflags &= ~PQ_REFERENCED;
   1118  1.15.16.1      matt 		grp->pgrp_pdreact++;
   1119        1.2      yamt 		/* move to the list head */
   1120        1.2      yamt 		clockpro_pagerequeue(pg);
   1121        1.2      yamt 		dump("cold");
   1122        1.2      yamt 	}
   1123        1.2      yamt done:;
   1124        1.2      yamt 	return pg;
   1125        1.2      yamt }
   1126        1.2      yamt 
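                              /*
                              * uvmpdpol_pageactivate: note a reference to a page.  A page that is
                              * not yet on any queue is enqueued with PQ_INITIALREF; a speculative
                              * page is upgraded by re-enqueueing it the same way.  In all cases the
                              * reference is recorded with PQ_REFERENCED.
                              */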
   1127        1.2      yamt void
   1128        1.2      yamt uvmpdpol_pageactivate(struct vm_page *pg)
   1129        1.2      yamt {
   1130        1.2      yamt 
   1131        1.2      yamt 	if (!uvmpdpol_pageisqueued_p(pg)) {
   1132        1.2      yamt 		KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);
   1133        1.2      yamt 		pg->pqflags |= PQ_INITIALREF;
   1134        1.2      yamt 		clockpro_pageenqueue(pg);
   1135        1.2      yamt 	} else if ((pg->pqflags & PQ_SPECULATIVE)) {
   1136        1.2      yamt 		PDPOL_EVCNT_INCR(speculativehit1);
   1137        1.2      yamt 		pg->pqflags &= ~PQ_SPECULATIVE;
   1138        1.2      yamt 		pg->pqflags |= PQ_INITIALREF;
   1139        1.2      yamt 		clockpro_pagedequeue(pg);
   1140        1.2      yamt 		clockpro_pageenqueue(pg);
   1141        1.2      yamt 	}
   1142        1.2      yamt 	pg->pqflags |= PQ_REFERENCED;
   1143        1.2      yamt }
   1144        1.2      yamt 
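                              /*
                              * uvmpdpol_pagedeactivate: forget any recorded reference so the page
                              * becomes a candidate for the cold hand.
                              */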
   1145        1.2      yamt void
   1146        1.2      yamt uvmpdpol_pagedeactivate(struct vm_page *pg)
   1147        1.2      yamt {
   1148        1.2      yamt 
   1149       1.12      yamt 	clockpro_clearreferencebit(pg);
   1150        1.2      yamt }
   1151        1.2      yamt 
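                              /*
                              * uvmpdpol_pagedequeue: take a page off the paging queues, if it is on
                              * one, and clear its PQ_INITIALREF/PQ_SPECULATIVE state.
                              */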
   1152        1.2      yamt void
   1153        1.2      yamt uvmpdpol_pagedequeue(struct vm_page *pg)
   1154        1.2      yamt {
   1155        1.2      yamt 
   1156        1.2      yamt 	if (!uvmpdpol_pageisqueued_p(pg)) {
   1157        1.2      yamt 		return;
   1158        1.2      yamt 	}
   1159        1.2      yamt 	clockpro_pagedequeue(pg);
   1160        1.6      yamt 	pg->pqflags &= ~(PQ_INITIALREF|PQ_SPECULATIVE);
   1161        1.2      yamt }
   1162        1.2      yamt 
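                              /*
                              * uvmpdpol_pageenqueue: put a page on a queue without marking it
                              * referenced.  The page is tagged PQ_SPECULATIVE so that hand-cold can
                              * tell a real reference from this speculative enqueue.
                              */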
   1163        1.2      yamt void
   1164        1.2      yamt uvmpdpol_pageenqueue(struct vm_page *pg)
   1165        1.2      yamt {
   1166        1.2      yamt 
   1167        1.2      yamt #if 1
   1168        1.2      yamt 	if (uvmpdpol_pageisqueued_p(pg)) {
   1169        1.2      yamt 		return;
   1170        1.2      yamt 	}
   1171        1.2      yamt 	clockpro_clearreferencebit(pg);
   1172        1.2      yamt 	pg->pqflags |= PQ_SPECULATIVE;
   1173        1.2      yamt 	clockpro_pageenqueue(pg);
   1174        1.2      yamt #else
   1175        1.2      yamt 	uvmpdpol_pageactivate(pg);
   1176        1.2      yamt #endif
   1177        1.2      yamt }
   1178        1.2      yamt 
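                              /*
                              * uvmpdpol_anfree: an anon with no resident page is being freed; purge
                              * any non-resident record kept for it so the hash does not accumulate
                              * stale entries.
                              */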
   1179        1.2      yamt void
   1180        1.2      yamt uvmpdpol_anfree(struct vm_anon *an)
   1181        1.2      yamt {
   1182        1.2      yamt 
   1183        1.2      yamt 	KASSERT(an->an_page == NULL);
   1184  1.15.16.1      matt 	if (nonresident_lookupremove(NULL, (objid_t)an, 0)) {
   1185        1.2      yamt 		PDPOL_EVCNT_INCR(nresanonfree);
   1186        1.2      yamt 	}
   1187        1.2      yamt }
   1188        1.2      yamt 
   1189        1.2      yamt void
   1190  1.15.16.1      matt uvmpdpol_init(void *new_gs, size_t npggroup)
   1191        1.2      yamt {
   1192        1.2      yamt 
   1193  1.15.16.1      matt 	clockpro_init(new_gs, npggroup);
   1194        1.2      yamt }
   1195        1.2      yamt 
   1196        1.2      yamt void
   1197        1.2      yamt uvmpdpol_reinit(void)
   1198        1.2      yamt {
   1199        1.2      yamt 
   1200        1.2      yamt 	clockpro_reinit();
   1201        1.2      yamt }
   1202        1.2      yamt 
   1203  1.15.16.1      matt size_t
   1204  1.15.16.1      matt uvmpdpol_space(void)
   1205  1.15.16.1      matt {
   1206  1.15.16.1      matt 
   1207  1.15.16.1      matt 	return sizeof(struct uvmpdpol_groupstate);
   1208  1.15.16.1      matt }
   1209  1.15.16.1      matt 
   1210        1.2      yamt void
   1211  1.15.16.1      matt uvmpdpol_recolor(void *new_gs, struct uvm_pggroup *grparray,
   1212  1.15.16.1      matt 	size_t npggroup, size_t old_ncolors)
   1213        1.2      yamt {
   1214        1.2      yamt 
   1215  1.15.16.1      matt 	clockpro_recolor(new_gs, grparray, npggroup, old_ncolors);
   1216  1.15.16.1      matt }
   1217  1.15.16.1      matt 
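                              /*
                              * uvmpdpol_estimatepageable: report active/inactive page counts by
                              * summing over all page groups; cold pages count as "inactive" and the
                              * rest as "active".
                              */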
   1218  1.15.16.1      matt void
   1219  1.15.16.1      matt uvmpdpol_estimatepageable(u_int *activep, u_int *inactivep)
   1220  1.15.16.1      matt {
   1221  1.15.16.1      matt 	u_int active = 0;
   1222  1.15.16.1      matt 	u_int inactive = 0;
   1223  1.15.16.1      matt 
   1224  1.15.16.1      matt 	struct uvm_pggroup *grp;
   1225  1.15.16.1      matt 	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
   1226  1.15.16.1      matt 		struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
   1227  1.15.16.1      matt 		active += gs->gs_npages - gs->gs_ncold;
   1228  1.15.16.1      matt 		inactive += gs->gs_ncold;
   1229  1.15.16.1      matt 	}
   1230  1.15.16.1      matt 	if (activep) {
   1231  1.15.16.1      matt 		*activep = active;
   1232        1.2      yamt 	}
    1233        1.2      yamt 	if (inactivep) {
   1234  1.15.16.1      matt 		*inactivep = inactive;
   1235        1.2      yamt 	}
   1236        1.2      yamt }
   1237        1.2      yamt 
   1238        1.7   thorpej bool
   1239        1.2      yamt uvmpdpol_pageisqueued_p(struct vm_page *pg)
   1240        1.2      yamt {
   1241        1.2      yamt 
   1242        1.2      yamt 	return clockpro_getq(pg) != CLOCKPRO_NOQUEUE;
   1243        1.2      yamt }
   1244        1.2      yamt 
   1245        1.2      yamt void
   1246  1.15.16.1      matt uvmpdpol_scaninit(struct uvm_pggroup *grp)
   1247        1.2      yamt {
   1248        1.2      yamt 
   1249  1.15.16.1      matt 	grp->pgrp_gs->gs_nscanned = 0;
   1250        1.2      yamt }
   1251        1.2      yamt 
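                              /*
                              * uvmpdpol_selectvictim: run hand-cold once to pick the next eviction
                              * candidate for this group, giving up once more pages have been scanned
                              * than the group holds.
                              */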
   1252        1.2      yamt struct vm_page *
   1253  1.15.16.1      matt uvmpdpol_selectvictim(struct uvm_pggroup *grp)
   1254        1.2      yamt {
   1255  1.15.16.1      matt 	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
   1256        1.2      yamt 	struct vm_page *pg;
   1257        1.2      yamt 
   1258  1.15.16.1      matt 	if (gs->gs_nscanned > gs->gs_npages) {
   1259        1.2      yamt 		DPRINTF("scan too much\n");
   1260        1.2      yamt 		return NULL;
   1261        1.2      yamt 	}
   1262  1.15.16.1      matt 	pg = handcold_advance(gs);
   1263  1.15.16.1      matt 	gs->gs_nscanned++;
   1264        1.2      yamt 	return pg;
   1265        1.2      yamt }
   1266        1.2      yamt 
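                              /*
                              * clockpro_dropswap: walk a queue from its tail and try to free the
                              * swap slot of each hot, swap-backed page until "*todo" slots have been
                              * reclaimed.
                              */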
   1267        1.2      yamt static void
   1268        1.2      yamt clockpro_dropswap(pageq_t *q, int *todo)
   1269        1.2      yamt {
   1270        1.2      yamt 	struct vm_page *pg;
   1271        1.2      yamt 
   1272       1.15        ad 	TAILQ_FOREACH_REVERSE(pg, &q->q_q, pglist, pageq.queue) {
   1273        1.2      yamt 		if (*todo <= 0) {
   1274        1.2      yamt 			break;
   1275        1.2      yamt 		}
   1276        1.2      yamt 		if ((pg->pqflags & PQ_HOT) == 0) {
   1277        1.2      yamt 			continue;
   1278        1.2      yamt 		}
   1279        1.2      yamt 		if ((pg->pqflags & PQ_SWAPBACKED) == 0) {
   1280        1.2      yamt 			continue;
   1281        1.2      yamt 		}
   1282        1.2      yamt 		if (uvmpd_trydropswap(pg)) {
   1283        1.2      yamt 			(*todo)--;
   1284        1.2      yamt 		}
   1285        1.2      yamt 	}
   1286        1.2      yamt }
   1287        1.2      yamt 
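                              /*
                              * uvmpdpol_balancequeue: called when swap space is short; reclaim swap
                              * slots from hot pages in this group, scanning the new, cold and hot
                              * queues in that order.
                              */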
   1288        1.2      yamt void
   1289  1.15.16.1      matt uvmpdpol_balancequeue(struct uvm_pggroup *grp, u_int swap_shortage)
   1290        1.2      yamt {
   1291  1.15.16.1      matt 	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
   1292  1.15.16.1      matt 	u_int todo = swap_shortage;
   1293        1.2      yamt 
   1294        1.2      yamt 	if (todo == 0) {
   1295        1.2      yamt 		return;
   1296        1.2      yamt 	}
   1297        1.2      yamt 
   1298        1.2      yamt 	/*
   1299        1.2      yamt 	 * reclaim swap slots from hot pages
   1300        1.2      yamt 	 */
   1301        1.2      yamt 
   1302  1.15.16.1      matt 	DPRINTF("%s: [%zd] swap_shortage=%u\n",
   1303  1.15.16.1      matt 	    __func__, grp - uvm.pggroups, swap_shortage);
   1304        1.2      yamt 
   1305  1.15.16.1      matt 	clockpro_dropswap(clockpro_queue(gs, CLOCKPRO_NEWQ), &todo);
   1306  1.15.16.1      matt 	clockpro_dropswap(clockpro_queue(gs, CLOCKPRO_COLDQ(gs)), &todo);
   1307  1.15.16.1      matt 	clockpro_dropswap(clockpro_queue(gs, CLOCKPRO_HOTQ(gs)), &todo);
   1308        1.2      yamt 
   1309  1.15.16.1      matt 	DPRINTF("%s: [%zd]: done=%u\n",
   1310  1.15.16.1      matt 	    __func__, grp - uvm.pggroups, swap_shortage - todo);
   1311        1.2      yamt }
   1312        1.2      yamt 
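                              /*
                              * uvmpdpol_needsscan_p: true while the group holds fewer cold pages
                              * than its cold target, i.e. the pagedaemon still has work to do here.
                              */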
   1313        1.7   thorpej bool
   1314  1.15.16.1      matt uvmpdpol_needsscan_p(struct uvm_pggroup *grp)
   1315        1.2      yamt {
   1316  1.15.16.1      matt 	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
   1317        1.2      yamt 
   1318  1.15.16.1      matt 	return (gs->gs_ncold < gs->gs_coldtarget);
   1319        1.2      yamt }
   1320        1.2      yamt 
   1321        1.2      yamt void
   1322  1.15.16.1      matt uvmpdpol_tune(struct uvm_pggroup *grp)
   1323        1.2      yamt {
   1324        1.2      yamt 
   1325  1.15.16.1      matt 	clockpro_tune(grp->pgrp_gs);
   1326        1.2      yamt }
   1327        1.2      yamt 
   1328        1.2      yamt #if !defined(PDSIM)
   1329        1.2      yamt 
   1330        1.2      yamt #include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */
   1331        1.2      yamt 
   1332        1.2      yamt void
   1333        1.2      yamt uvmpdpol_sysctlsetup(void)
   1334        1.2      yamt {
   1335        1.2      yamt #if !defined(ADAPTIVE)
   1336        1.2      yamt 	struct clockpro_state * const s = &clockpro;
   1337        1.2      yamt 
   1338        1.2      yamt 	uvm_pctparam_createsysctlnode(&s->s_coldtargetpct, "coldtargetpct",
   1339        1.2      yamt 	    SYSCTL_DESCR("Percentage cold target queue of the entire queue"));
   1340        1.2      yamt #endif /* !defined(ADAPTIVE) */
   1341        1.2      yamt }
   1342        1.2      yamt 
   1343        1.2      yamt #endif /* !defined(PDSIM) */
   1344        1.2      yamt 
   1345        1.2      yamt #if defined(DDB)
   1346        1.2      yamt 
   1347        1.2      yamt void clockpro_dump(void);
   1348        1.2      yamt 
   1349        1.2      yamt void
   1350        1.2      yamt clockpro_dump(void)
   1351        1.2      yamt {
   1352  1.15.16.1      matt 	struct uvm_pggroup *grp;
   1353  1.15.16.1      matt 	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
   1354  1.15.16.1      matt 		struct uvmpdpol_groupstate *gs = grp->pgrp_gs;
   1355  1.15.16.1      matt 		struct vm_page *pg;
   1356  1.15.16.1      matt 		int ncold, nhot, ntest, nspeculative, ninitialref, nref;
   1357  1.15.16.1      matt 		int newqlen, coldqlen, hotqlen, listqlen;
   1358  1.15.16.1      matt 
   1359  1.15.16.1      matt 		newqlen = coldqlen = hotqlen = listqlen = 0;
   1360  1.15.16.1      matt 		printf(" [%zd]: npages=%d, ncold=%d, coldtarget=%d, newqlenmax=%d\n",
   1361  1.15.16.1      matt 		    grp - uvm.pggroups, gs->gs_npages, gs->gs_ncold,
   1362  1.15.16.1      matt 		    gs->gs_coldtarget, gs->gs_newqlenmax);
   1363        1.2      yamt 
   1364        1.2      yamt #define	INITCOUNT()	\
   1365        1.2      yamt 	ncold = nhot = ntest = nspeculative = ninitialref = nref = 0
   1366        1.2      yamt 
   1367        1.2      yamt #define	COUNT(pg)	\
   1368        1.2      yamt 	if ((pg->pqflags & PQ_HOT) != 0) { \
   1369        1.2      yamt 		nhot++; \
   1370        1.2      yamt 	} else { \
   1371        1.2      yamt 		ncold++; \
   1372        1.2      yamt 		if ((pg->pqflags & PQ_TEST) != 0) { \
   1373        1.2      yamt 			ntest++; \
   1374        1.2      yamt 		} \
   1375        1.2      yamt 		if ((pg->pqflags & PQ_SPECULATIVE) != 0) { \
   1376        1.2      yamt 			nspeculative++; \
   1377        1.2      yamt 		} \
   1378        1.2      yamt 		if ((pg->pqflags & PQ_INITIALREF) != 0) { \
   1379        1.2      yamt 			ninitialref++; \
   1380        1.2      yamt 		} else if ((pg->pqflags & PQ_REFERENCED) != 0 || \
   1381        1.2      yamt 		    pmap_is_referenced(pg)) { \
   1382        1.2      yamt 			nref++; \
   1383        1.2      yamt 		} \
   1384        1.2      yamt 	}
   1385        1.2      yamt 
   1386        1.2      yamt #define	PRINTCOUNT(name)	\
   1387  1.15.16.1      matt 	printf("%s#%zd hot=%d, cold=%d, test=%d, speculative=%d, " \
   1388  1.15.16.1      matt 	    "initialref=%d, nref=%d\n", \
   1389  1.15.16.1      matt 	    (name), grp - uvm.pggroups, nhot, ncold, ntest, nspeculative, ninitialref, nref)
   1390  1.15.16.1      matt 
   1391  1.15.16.1      matt 		INITCOUNT();
   1392  1.15.16.1      matt 		TAILQ_FOREACH(pg, &clockpro_queue(gs, CLOCKPRO_NEWQ)->q_q, pageq.queue) {
   1393  1.15.16.1      matt 			if (clockpro_getq(pg) != CLOCKPRO_NEWQ) {
   1394  1.15.16.1      matt 				printf("newq corrupt %p\n", pg);
   1395  1.15.16.1      matt 			}
   1396  1.15.16.1      matt 			COUNT(pg)
   1397  1.15.16.1      matt 			newqlen++;
   1398        1.2      yamt 		}
   1399  1.15.16.1      matt 		PRINTCOUNT("newq");
   1400  1.15.16.1      matt 
   1401  1.15.16.1      matt 		INITCOUNT();
   1402  1.15.16.1      matt 		TAILQ_FOREACH(pg, &clockpro_queue(gs, CLOCKPRO_COLDQ(gs))->q_q, pageq.queue) {
   1403  1.15.16.1      matt 			if (clockpro_getq(pg) != CLOCKPRO_COLDQ(gs)) {
   1404  1.15.16.1      matt 				printf("coldq corrupt %p\n", pg);
   1405  1.15.16.1      matt 			}
   1406  1.15.16.1      matt 			COUNT(pg)
   1407  1.15.16.1      matt 			coldqlen++;
   1408        1.2      yamt 		}
   1409  1.15.16.1      matt 		PRINTCOUNT("coldq");
   1410  1.15.16.1      matt 
   1411  1.15.16.1      matt 		INITCOUNT();
   1412  1.15.16.1      matt 		TAILQ_FOREACH(pg, &clockpro_queue(gs, CLOCKPRO_HOTQ(gs))->q_q, pageq.queue) {
   1413  1.15.16.1      matt 			if (clockpro_getq(pg) != CLOCKPRO_HOTQ(gs)) {
   1414  1.15.16.1      matt 				printf("hotq corrupt %p\n", pg);
   1415  1.15.16.1      matt 			}
   1416  1.15.16.1      matt #if defined(LISTQ)
   1417  1.15.16.1      matt 			if ((pg->pqflags & PQ_HOT) == 0) {
   1418  1.15.16.1      matt 				printf("cold page in hotq: %p\n", pg);
   1419  1.15.16.1      matt 			}
   1420        1.2      yamt #endif /* defined(LISTQ) */
   1421  1.15.16.1      matt 			COUNT(pg)
   1422  1.15.16.1      matt 			hotqlen++;
   1423  1.15.16.1      matt 		}
   1424  1.15.16.1      matt 		PRINTCOUNT("hotq");
   1425        1.2      yamt 
   1426  1.15.16.1      matt 		INITCOUNT();
   1427  1.15.16.1      matt 		TAILQ_FOREACH(pg, &clockpro_queue(gs, CLOCKPRO_LISTQ)->q_q, pageq.queue) {
   1428        1.2      yamt #if !defined(LISTQ)
   1429  1.15.16.1      matt 			printf("listq %p\n", pg);
   1430        1.2      yamt #endif /* !defined(LISTQ) */
   1431  1.15.16.1      matt 			if (clockpro_getq(pg) != CLOCKPRO_LISTQ) {
   1432  1.15.16.1      matt 				printf("listq corrupt %p\n", pg);
   1433  1.15.16.1      matt 			}
   1434  1.15.16.1      matt 			COUNT(pg)
   1435  1.15.16.1      matt 			listqlen++;
   1436        1.2      yamt 		}
   1437  1.15.16.1      matt 		PRINTCOUNT("listq");
   1438        1.2      yamt 
   1439  1.15.16.1      matt 		printf("#%zd: newqlen=%u/%u, coldqlen=%u/%u, hotqlen=%u/%u, listqlen=%d/%d\n",
   1440  1.15.16.1      matt 		    grp - uvm.pggroups,
   1441  1.15.16.1      matt 		    newqlen, pageq_len(clockpro_queue(gs, CLOCKPRO_NEWQ)),
   1442  1.15.16.1      matt 		    coldqlen, pageq_len(clockpro_queue(gs, CLOCKPRO_COLDQ(gs))),
   1443  1.15.16.1      matt 		    hotqlen, pageq_len(clockpro_queue(gs, CLOCKPRO_HOTQ(gs))),
   1444  1.15.16.1      matt 		    listqlen, pageq_len(clockpro_queue(gs, CLOCKPRO_LISTQ)));
   1445  1.15.16.1      matt 	}
   1446        1.2      yamt }
   1447        1.2      yamt #endif /* defined(DDB) */
   1448        1.2      yamt 
   1449        1.2      yamt #if defined(PDSIM)
   1450        1.3      yamt #if defined(DEBUG)
   1451        1.2      yamt static void
   1452  1.15.16.1      matt pdsim_dumpq(struct uvmpdpol_groupstate *gs, int qidx)
   1453        1.2      yamt {
   1454  1.15.16.1      matt 	pageq_t *q = clockpro_queue(gs, qidx);
   1455        1.2      yamt 	struct vm_page *pg;
   1456        1.2      yamt 
   1457       1.15        ad 	TAILQ_FOREACH(pg, &q->q_q, pageq.queue) {
   1458        1.2      yamt 		DPRINTF(" %" PRIu64 "%s%s%s%s%s%s",
   1459        1.2      yamt 		    pg->offset >> PAGE_SHIFT,
   1460        1.2      yamt 		    (pg->pqflags & PQ_HOT) ? "H" : "",
   1461        1.2      yamt 		    (pg->pqflags & PQ_TEST) ? "T" : "",
   1462        1.2      yamt 		    (pg->pqflags & PQ_REFERENCED) ? "R" : "",
   1463        1.2      yamt 		    pmap_is_referenced(pg) ? "r" : "",
   1464        1.2      yamt 		    (pg->pqflags & PQ_INITIALREF) ? "I" : "",
   1465        1.2      yamt 		    (pg->pqflags & PQ_SPECULATIVE) ? "S" : ""
   1466        1.2      yamt 		    );
   1467        1.2      yamt 	}
   1468        1.2      yamt }
   1469        1.3      yamt #endif /* defined(DEBUG) */
   1470        1.2      yamt 
   1471        1.2      yamt void
   1472        1.2      yamt pdsim_dump(const char *id)
   1473        1.2      yamt {
   1474        1.2      yamt #if defined(DEBUG)
    1475        1.2      yamt 	/*
                              	 * XXX "s" was unused and "gs" undefined here; as a sketch, assume
                              	 * XXX the simulator has a single page group and use its state.
                              	 */
                              	struct uvmpdpol_groupstate * const gs =
                              	    STAILQ_FIRST(&uvm.page_groups)->pgrp_gs;
   1476        1.2      yamt 
   1477        1.2      yamt 	DPRINTF("  %s L(", id);
   1478  1.15.16.1      matt 	pdsim_dumpq(gs, CLOCKPRO_LISTQ);
   1479        1.2      yamt 	DPRINTF(" ) H(");
   1480  1.15.16.1      matt 	pdsim_dumpq(gs, CLOCKPRO_HOTQ(gs));
   1481        1.2      yamt 	DPRINTF(" ) C(");
   1482  1.15.16.1      matt 	pdsim_dumpq(gs, CLOCKPRO_COLDQ(gs));
   1483        1.2      yamt 	DPRINTF(" ) N(");
   1484  1.15.16.1      matt 	pdsim_dumpq(gs, CLOCKPRO_NEWQ);
   1485        1.2      yamt 	DPRINTF(" ) ncold=%d/%d, coldadj=%d\n",
   1486  1.15.16.1      matt 	    gs->gs_ncold, gs->gs_coldtarget, gs->gs_coldadj);
   1487        1.2      yamt #endif /* defined(DEBUG) */
   1488        1.2      yamt }
   1489        1.2      yamt #endif /* defined(PDSIM) */
   1490