uvm_pdpolicy_clockpro.c revision 1.17.54.1
/*	$NetBSD: uvm_pdpolicy_clockpro.c,v 1.17.54.1 2020/04/08 14:09:04 martin Exp $	*/

/*-
 * Copyright (c)2005, 2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CLOCK-Pro replacement policy:
 *	http://www.cs.wm.edu/hpcs/WWW/HTML/publications/abs05-3.html
 *
 * approximation of the list of non-resident pages using hash:
 *	http://linux-mm.org/ClockProApproximation
 */
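
/*
 * a rough summary of the scheme as implemented below:
 *
 * - resident pages are classified as "hot" or "cold".  a cold page in
 *   its "test period" (PQ_TEST) is promoted to hot if it is
 *   re-referenced before the test period expires.
 *
 * - two clock hands sweep the queues: the cold hand (handcold_advance)
 *   selects eviction candidates, while the hot hand (handhot_advance)
 *   demotes unreferenced hot pages to cold.
 *
 * - when a cold page in its test period is evicted, its identity is
 *   remembered as a cookie in a hash of non-resident pages.  a later
 *   fault on such a page counts as a short-reuse-distance access and
 *   makes the page hot on re-entry (and grows the cold target).
 */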

/* #define	CLOCKPRO_DEBUG */

#if defined(PDSIM)

#include "pdsim.h"

#else /* defined(PDSIM) */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clockpro.c,v 1.17.54.1 2020/04/08 14:09:04 martin Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/hash.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdaemon.h>	/* for uvmpd_trylockowner */
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pdpolicy_impl.h>

#if ((__STDC_VERSION__ - 0) >= 199901L)
#define	DPRINTF(...)	/* nothing */
#define	WARN(...)	printf(__VA_ARGS__)
#else /* ((__STDC_VERSION__ - 0) >= 199901L) */
#define	DPRINTF(a...)	/* nothing */	/* GCC */
#define	WARN(a...)	printf(a)
#endif /* ((__STDC_VERSION__ - 0) >= 199901L) */

#define	dump(a)		/* nothing */

#undef	USEONCE2
#define	LISTQ
#undef	ADAPTIVE

#endif /* defined(PDSIM) */

#if !defined(CLOCKPRO_COLDPCT)
#define	CLOCKPRO_COLDPCT	10
#endif /* !defined(CLOCKPRO_COLDPCT) */

#define	CLOCKPRO_COLDPCTMAX	90

#if !defined(CLOCKPRO_HASHFACTOR)
#define	CLOCKPRO_HASHFACTOR	2
#endif /* !defined(CLOCKPRO_HASHFACTOR) */

#define	CLOCKPRO_NEWQMIN	((1024 * 1024) >> PAGE_SHIFT)	/* XXX */

int clockpro_hashfactor = CLOCKPRO_HASHFACTOR;

PDPOL_EVCNT_DEFINE(nresrecordobj)
PDPOL_EVCNT_DEFINE(nresrecordanon)
PDPOL_EVCNT_DEFINE(nreslookupobj)
PDPOL_EVCNT_DEFINE(nreslookupanon)
PDPOL_EVCNT_DEFINE(nresfoundobj)
PDPOL_EVCNT_DEFINE(nresfoundanon)
PDPOL_EVCNT_DEFINE(nresanonfree)
PDPOL_EVCNT_DEFINE(nresconflict)
PDPOL_EVCNT_DEFINE(nresoverwritten)
PDPOL_EVCNT_DEFINE(nreshandhot)

PDPOL_EVCNT_DEFINE(hhottakeover)
PDPOL_EVCNT_DEFINE(hhotref)
PDPOL_EVCNT_DEFINE(hhotunref)
PDPOL_EVCNT_DEFINE(hhotcold)
PDPOL_EVCNT_DEFINE(hhotcoldtest)

PDPOL_EVCNT_DEFINE(hcoldtakeover)
PDPOL_EVCNT_DEFINE(hcoldref)
PDPOL_EVCNT_DEFINE(hcoldunref)
PDPOL_EVCNT_DEFINE(hcoldreftest)
PDPOL_EVCNT_DEFINE(hcoldunreftest)
PDPOL_EVCNT_DEFINE(hcoldunreftestspeculative)
PDPOL_EVCNT_DEFINE(hcoldhot)

PDPOL_EVCNT_DEFINE(speculativeenqueue)
PDPOL_EVCNT_DEFINE(speculativehit1)
PDPOL_EVCNT_DEFINE(speculativehit2)
PDPOL_EVCNT_DEFINE(speculativemiss)

PDPOL_EVCNT_DEFINE(locksuccess)
PDPOL_EVCNT_DEFINE(lockfail)

#define	PQ_REFERENCED	0x000000010
#define	PQ_HOT		0x000000020
#define	PQ_TEST		0x000000040
#define	PQ_INITIALREF	0x000000080
#define	PQ_QMASK	0x000000700
#define	PQ_QFACTOR	0x000000100
#define	PQ_SPECULATIVE	0x000000800

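/*
 * the queue index of a page is kept in the PQ_QMASK bits of pg->pqflags,
 * stored as (queue index * PQ_QFACTOR); see clockpro_setq/clockpro_getq
 * below.  CLOCKPRO_NOQUEUE (0) means the page is on no policy queue.
 */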
#define	CLOCKPRO_NOQUEUE	0
#define	CLOCKPRO_NEWQ		1	/* small queue to clear initial ref. */
#if defined(LISTQ)
#define	CLOCKPRO_COLDQ		2
#define	CLOCKPRO_HOTQ		3
#else /* defined(LISTQ) */
#define	CLOCKPRO_COLDQ		(2 + coldqidx)	/* XXX */
#define	CLOCKPRO_HOTQ		(3 - coldqidx)	/* XXX */
#endif /* defined(LISTQ) */
#define	CLOCKPRO_LISTQ		4
#define	CLOCKPRO_NQUEUE		4

static bool	uvmpdpol_pagerealize_locked(struct vm_page *);

static inline void
clockpro_setq(struct vm_page *pg, int qidx)
{
	KASSERT(qidx >= CLOCKPRO_NOQUEUE);
	KASSERT(qidx <= CLOCKPRO_NQUEUE);

	pg->pqflags = (pg->pqflags & ~PQ_QMASK) | (qidx * PQ_QFACTOR);
}

static inline int
clockpro_getq(struct vm_page *pg)
{
	int qidx;

	qidx = (pg->pqflags & PQ_QMASK) / PQ_QFACTOR;
	KASSERT(qidx >= CLOCKPRO_NOQUEUE);
	KASSERT(qidx <= CLOCKPRO_NQUEUE);
	return qidx;
}

typedef struct {
	struct pglist q_q;
	int q_len;
} pageq_t;

struct clockpro_state {
	kmutex_t lock;
	int s_npages;
	int s_coldtarget;
	int s_ncold;

	int s_newqlenmax;
	pageq_t s_q[CLOCKPRO_NQUEUE];

	struct uvm_pctparam s_coldtargetpct;
};

static pageq_t *
clockpro_queue(struct clockpro_state *s, int qidx)
{

	KASSERT(CLOCKPRO_NOQUEUE < qidx);
	KASSERT(qidx <= CLOCKPRO_NQUEUE);

	return &s->s_q[qidx - 1];
}

#if !defined(LISTQ)

static int coldqidx;

static void
clockpro_switchqueue(void)
{

	coldqidx = 1 - coldqidx;
}
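
/*
 * in the !LISTQ configuration, the hot and cold queues are the same two
 * physical queues with their roles swapped by clockpro_switchqueue()
 * whenever a hand runs out of pages; the CLOCKPRO_COLDQ/CLOCKPRO_HOTQ
 * macros above expand to the currently-assigned indexes.
 */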

#endif /* !defined(LISTQ) */

static struct clockpro_state clockpro __cacheline_aligned;
static struct clockpro_scanstate {
	int ss_nscanned;
} scanstate;

/* ---------------------------------------- */

static void
pageq_init(pageq_t *q)
{

	TAILQ_INIT(&q->q_q);
	q->q_len = 0;
}

static int
pageq_len(const pageq_t *q)
{

	return q->q_len;
}

static struct vm_page *
pageq_first(const pageq_t *q)
{

	return TAILQ_FIRST(&q->q_q);
}

static void
pageq_insert_tail(pageq_t *q, struct vm_page *pg)
{

	TAILQ_INSERT_TAIL(&q->q_q, pg, pdqueue);
	q->q_len++;
}

#if defined(LISTQ)
static void
pageq_insert_head(pageq_t *q, struct vm_page *pg)
{

	TAILQ_INSERT_HEAD(&q->q_q, pg, pdqueue);
	q->q_len++;
}
#endif

static void
pageq_remove(pageq_t *q, struct vm_page *pg)
{

#if 1
	KASSERT(clockpro_queue(&clockpro, clockpro_getq(pg)) == q);
#endif
	KASSERT(q->q_len > 0);
	TAILQ_REMOVE(&q->q_q, pg, pdqueue);
	q->q_len--;
}

static struct vm_page *
pageq_remove_head(pageq_t *q)
{
	struct vm_page *pg;

	pg = TAILQ_FIRST(&q->q_q);
	if (pg == NULL) {
		KASSERT(q->q_len == 0);
		return NULL;
	}
	pageq_remove(q, pg);
	return pg;
}

/* ---------------------------------------- */

static void
clockpro_insert_tail(struct clockpro_state *s, int qidx, struct vm_page *pg)
{
	pageq_t *q = clockpro_queue(s, qidx);

	clockpro_setq(pg, qidx);
	pageq_insert_tail(q, pg);
}

#if defined(LISTQ)
static void
clockpro_insert_head(struct clockpro_state *s, int qidx, struct vm_page *pg)
{
	pageq_t *q = clockpro_queue(s, qidx);

	clockpro_setq(pg, qidx);
	pageq_insert_head(q, pg);
}

#endif
/* ---------------------------------------- */

typedef uint32_t nonres_cookie_t;
#define	NONRES_COOKIE_INVAL	0

typedef uintptr_t objid_t;

/*
 * XXX maybe these hash functions need reconsideration,
 * given that hash distribution is critical here.
 */
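
/*
 * two independent hashes are used: pageidentityhash1 selects the bucket
 * and pageidentityhash2 yields the cookie stored in it, so a false match
 * requires both to collide at once.
 */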

static uint32_t
pageidentityhash1(objid_t obj, off_t idx)
{
	uint32_t hash = HASH32_BUF_INIT;

#if 1
	hash = hash32_buf(&idx, sizeof(idx), hash);
	hash = hash32_buf(&obj, sizeof(obj), hash);
#else
	hash = hash32_buf(&obj, sizeof(obj), hash);
	hash = hash32_buf(&idx, sizeof(idx), hash);
#endif
	return hash;
}

static uint32_t
pageidentityhash2(objid_t obj, off_t idx)
{
	uint32_t hash = HASH32_BUF_INIT;

	hash = hash32_buf(&obj, sizeof(obj), hash);
	hash = hash32_buf(&idx, sizeof(idx), hash);
	return hash;
}

static nonres_cookie_t
calccookie(objid_t obj, off_t idx)
{
	uint32_t hash = pageidentityhash2(obj, idx);
	nonres_cookie_t cookie = hash;

	if (__predict_false(cookie == NONRES_COOKIE_INVAL)) {
		cookie++; /* XXX */
	}
	return cookie;
}

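/*
 * each bucket is a small clock of cookies.  with BUCKETSIZE 14, the
 * struct below is 64 bytes (two ints plus 14 32-bit cookies), which
 * presumably is meant to fit a common cache line size.
 */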
#define	BUCKETSIZE	14
struct bucket {
	int cycle;
	int cur;
	nonres_cookie_t pages[BUCKETSIZE];
};
static int cycle_target;
static int cycle_target_frac;

static struct bucket static_bucket;
static struct bucket *buckets = &static_bucket;
static size_t hashsize = 1;

static int coldadj;
#define	COLDTARGET_ADJ(d)	coldadj += (d)

#if defined(PDSIM)

static void *
clockpro_hashalloc(int n)
{
	size_t allocsz = sizeof(*buckets) * n;

	return malloc(allocsz);
}

static void
clockpro_hashfree(void *p, int n)
{

	free(p);
}

#else /* defined(PDSIM) */

static void *
clockpro_hashalloc(int n)
{
	size_t allocsz = round_page(sizeof(*buckets) * n);

	return (void *)uvm_km_alloc(kernel_map, allocsz, 0, UVM_KMF_WIRED);
}

static void
clockpro_hashfree(void *p, int n)
{
	size_t allocsz = round_page(sizeof(*buckets) * n);

	uvm_km_free(kernel_map, (vaddr_t)p, allocsz, UVM_KMF_WIRED);
}

#endif /* defined(PDSIM) */

static void
clockpro_hashinit(uint64_t n)
{
	struct bucket *newbuckets;
	struct bucket *oldbuckets;
	size_t sz;
	size_t oldsz;
	int i;

	sz = howmany(n, BUCKETSIZE);
	sz *= clockpro_hashfactor;
	newbuckets = clockpro_hashalloc(sz);
	if (newbuckets == NULL) {
		panic("%s: allocation failure", __func__);
	}
	for (i = 0; i < sz; i++) {
		struct bucket *b = &newbuckets[i];
		int j;

		b->cycle = cycle_target;
		b->cur = 0;
		for (j = 0; j < BUCKETSIZE; j++) {
			b->pages[j] = NONRES_COOKIE_INVAL;
		}
	}
	/* XXX lock */
	oldbuckets = buckets;
	oldsz = hashsize;
	buckets = newbuckets;
	hashsize = sz;
	/* XXX unlock */
	if (oldbuckets != &static_bucket) {
		clockpro_hashfree(oldbuckets, oldsz);
	}
}

static struct bucket *
nonresident_getbucket(objid_t obj, off_t idx)
{
	uint32_t hash;

	hash = pageidentityhash1(obj, idx);
	return &buckets[hash % hashsize];
}

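/*
 * bring a bucket up to date with the global clock (cycle_target).
 * cookies that the hand passes over have outlived their test period
 * and are invalidated; each one costs the cold target a point.
 * "todo" is clamped below two full laps, since one full lap already
 * clears every slot.
 */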
static void
nonresident_rotate(struct bucket *b)
{
	const int target = cycle_target;
	const int cycle = b->cycle;
	int cur;
	int todo;

	todo = target - cycle;
	if (todo >= BUCKETSIZE * 2) {
		todo = (todo % BUCKETSIZE) + BUCKETSIZE;
	}
	cur = b->cur;
	while (todo > 0) {
		if (b->pages[cur] != NONRES_COOKIE_INVAL) {
			PDPOL_EVCNT_INCR(nreshandhot);
			COLDTARGET_ADJ(-1);
		}
		b->pages[cur] = NONRES_COOKIE_INVAL;
		cur++;
		if (cur == BUCKETSIZE) {
			cur = 0;
		}
		todo--;
	}
	b->cycle = target;
	b->cur = cur;
}

static bool
nonresident_lookupremove(objid_t obj, off_t idx)
{
	struct bucket *b = nonresident_getbucket(obj, idx);
	nonres_cookie_t cookie = calccookie(obj, idx);
	int i;

	nonresident_rotate(b);
	for (i = 0; i < BUCKETSIZE; i++) {
		if (b->pages[i] == cookie) {
			b->pages[i] = NONRES_COOKIE_INVAL;
			return true;
		}
	}
	return false;
}

static objid_t
pageobj(struct vm_page *pg)
{
	const void *obj;

	/*
	 * XXX object pointer is often freed and reused for unrelated object.
	 * for vnodes, it would be better to use something like
	 * a hash of fsid/fileid/generation.
	 */

	obj = pg->uobject;
	if (obj == NULL) {
		obj = pg->uanon;
		KASSERT(obj != NULL);
	}
	return (objid_t)obj;
}

static off_t
pageidx(struct vm_page *pg)
{

	KASSERT((pg->offset & PAGE_MASK) == 0);
	return pg->offset >> PAGE_SHIFT;
}

static bool
nonresident_pagelookupremove(struct vm_page *pg)
{
	bool found = nonresident_lookupremove(pageobj(pg), pageidx(pg));

	if (pg->uobject) {
		PDPOL_EVCNT_INCR(nreslookupobj);
	} else {
		PDPOL_EVCNT_INCR(nreslookupanon);
	}
	if (found) {
		if (pg->uobject) {
			PDPOL_EVCNT_INCR(nresfoundobj);
		} else {
			PDPOL_EVCNT_INCR(nresfoundanon);
		}
	}
	return found;
}

static void
nonresident_pagerecord(struct vm_page *pg)
{
	objid_t obj = pageobj(pg);
	off_t idx = pageidx(pg);
	struct bucket *b = nonresident_getbucket(obj, idx);
	nonres_cookie_t cookie = calccookie(obj, idx);

#if defined(DEBUG)
	int i;

	for (i = 0; i < BUCKETSIZE; i++) {
		if (b->pages[i] == cookie) {
			PDPOL_EVCNT_INCR(nresconflict);
		}
	}
#endif /* defined(DEBUG) */

	if (pg->uobject) {
		PDPOL_EVCNT_INCR(nresrecordobj);
	} else {
		PDPOL_EVCNT_INCR(nresrecordanon);
	}
	nonresident_rotate(b);
	if (b->pages[b->cur] != NONRES_COOKIE_INVAL) {
		PDPOL_EVCNT_INCR(nresoverwritten);
		COLDTARGET_ADJ(-1);
	}
	b->pages[b->cur] = cookie;
	b->cur = (b->cur + 1) % BUCKETSIZE;
}

/* ---------------------------------------- */

#if defined(CLOCKPRO_DEBUG)
static void
check_sanity(void)
{
}
#else /* defined(CLOCKPRO_DEBUG) */
#define	check_sanity()	/* nothing */
#endif /* defined(CLOCKPRO_DEBUG) */

static void
clockpro_reinit(void)
{

	KASSERT(mutex_owned(&clockpro.lock));

	clockpro_hashinit(uvmexp.npages);
}

static void
clockpro_init(void)
{
	struct clockpro_state *s = &clockpro;
	int i;

	mutex_init(&s->lock, MUTEX_DEFAULT, IPL_NONE);
	for (i = 0; i < CLOCKPRO_NQUEUE; i++) {
		pageq_init(&s->s_q[i]);
	}
	s->s_newqlenmax = 1;
	s->s_coldtarget = 1;
	uvm_pctparam_init(&s->s_coldtargetpct, CLOCKPRO_COLDPCT, NULL);
}

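/*
 * recompute s_coldtarget, the desired number of cold resident pages:
 * by default CLOCKPRO_COLDPCT percent of managed pages, or driven by
 * coldadj when ADAPTIVE is defined.  the new-page queue is capped at a
 * quarter of the cold target, with a floor of CLOCKPRO_NEWQMIN.
 */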
static void
clockpro_tune(void)
{
	struct clockpro_state *s = &clockpro;
	int coldtarget;

	KASSERT(mutex_owned(&s->lock));

#if defined(ADAPTIVE)
	int coldmax = s->s_npages * CLOCKPRO_COLDPCTMAX / 100;
	int coldmin = 1;

	coldtarget = s->s_coldtarget;
	if (coldtarget + coldadj < coldmin) {
		coldadj = coldmin - coldtarget;
	} else if (coldtarget + coldadj > coldmax) {
		coldadj = coldmax - coldtarget;
	}
	coldtarget += coldadj;
#else /* defined(ADAPTIVE) */
	coldtarget = UVM_PCTPARAM_APPLY(&s->s_coldtargetpct, s->s_npages);
	if (coldtarget < 1) {
		coldtarget = 1;
	}
#endif /* defined(ADAPTIVE) */

	s->s_coldtarget = coldtarget;
	s->s_newqlenmax = coldtarget / 4;
	if (s->s_newqlenmax < CLOCKPRO_NEWQMIN) {
		s->s_newqlenmax = CLOCKPRO_NEWQMIN;
	}
}

static void
clockpro_movereferencebit(struct vm_page *pg, bool locked)
{
	kmutex_t *lock;
	bool referenced;

	KASSERT(mutex_owned(&clockpro.lock));
	KASSERT(!locked || uvm_page_owner_locked_p(pg, false));
	if (!locked) {
		/*
		 * acquire interlock to stabilize page identity.
		 * if we have caught the page in a state of flux
		 * and it should be dequeued, abort.  it will be
		 * dequeued later.
		 */
		mutex_enter(&pg->interlock);
		if ((pg->uobject == NULL && pg->uanon == NULL) ||
		    pg->wire_count > 0) {
			mutex_exit(&pg->interlock);
			PDPOL_EVCNT_INCR(lockfail);
			return;
		}
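		/*
		 * the policy lock is dropped around uvmpd_trylockowner()
		 * below (the XXX markers), presumably to avoid a lock
		 * ordering problem against the owner's lock; the page
		 * interlock, held until uvmpd_trylockowner() returns,
		 * keeps the page identity stable meanwhile.
		 */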
		mutex_exit(&clockpro.lock);	/* XXX */
		lock = uvmpd_trylockowner(pg);
		/* pg->interlock now dropped */
		mutex_enter(&clockpro.lock);	/* XXX */
		if (lock == NULL) {
			/*
			 * XXXuvmplock
			 */
			PDPOL_EVCNT_INCR(lockfail);
			return;
		}
		PDPOL_EVCNT_INCR(locksuccess);
	}
	referenced = pmap_clear_reference(pg);
	if (!locked) {
		mutex_exit(lock);
	}
	if (referenced) {
		pg->pqflags |= PQ_REFERENCED;
	}
}

static void
clockpro_clearreferencebit(struct vm_page *pg, bool locked)
{

	KASSERT(mutex_owned(&clockpro.lock));

	clockpro_movereferencebit(pg, locked);
	pg->pqflags &= ~PQ_REFERENCED;
}

static void
clockpro___newqrotate(int len)
{
	struct clockpro_state * const s = &clockpro;
	pageq_t * const newq = clockpro_queue(s, CLOCKPRO_NEWQ);
	struct vm_page *pg;

	KASSERT(mutex_owned(&s->lock));

	while (pageq_len(newq) > len) {
		pg = pageq_remove_head(newq);
		KASSERT(pg != NULL);
		KASSERT(clockpro_getq(pg) == CLOCKPRO_NEWQ);
		if ((pg->pqflags & PQ_INITIALREF) != 0) {
			clockpro_clearreferencebit(pg, false);
			pg->pqflags &= ~PQ_INITIALREF;
		}
		/* place at the list head */
		clockpro_insert_tail(s, CLOCKPRO_COLDQ, pg);
	}
}

static void
clockpro_newqrotate(void)
{
	struct clockpro_state * const s = &clockpro;

	KASSERT(mutex_owned(&s->lock));

	check_sanity();
	clockpro___newqrotate(s->s_newqlenmax);
	check_sanity();
}

static void
clockpro_newqflush(int n)
{

	KASSERT(mutex_owned(&clockpro.lock));

	check_sanity();
	clockpro___newqrotate(n);
	check_sanity();
}

static void
clockpro_newqflushone(void)
{
	struct clockpro_state * const s = &clockpro;

	KASSERT(mutex_owned(&s->lock));

	clockpro_newqflush(
	    MAX(pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)) - 1, 0));
}

/*
 * our "tail" is called "list-head" in the paper.
 */
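/*
 * that is: pages enter at the TAILQ tail and the hands consume from the
 * TAILQ head, so the queue head holds the oldest entries.
 */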

static void
clockpro___enqueuetail(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;

	KASSERT(mutex_owned(&s->lock));
	KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);

	check_sanity();
#if !defined(USEONCE2)
	clockpro_insert_tail(s, CLOCKPRO_NEWQ, pg);
	clockpro_newqrotate();
#else /* !defined(USEONCE2) */
#if defined(LISTQ)
	KASSERT((pg->pqflags & PQ_REFERENCED) == 0);
#endif /* defined(LISTQ) */
	clockpro_insert_tail(s, CLOCKPRO_COLDQ, pg);
#endif /* !defined(USEONCE2) */
	check_sanity();
}

static void
clockpro_pageenqueue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	bool hot;
	bool speculative = (pg->pqflags & PQ_SPECULATIVE) != 0; /* XXX */

	KASSERT((~pg->pqflags & (PQ_INITIALREF|PQ_SPECULATIVE)) != 0);
	KASSERT(mutex_owned(&s->lock));
	check_sanity();
	KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);
	s->s_npages++;
	pg->pqflags &= ~(PQ_HOT|PQ_TEST);
	if (speculative) {
		hot = false;
		PDPOL_EVCNT_INCR(speculativeenqueue);
	} else {
		hot = nonresident_pagelookupremove(pg);
		if (hot) {
			COLDTARGET_ADJ(1);
		}
	}

	/*
	 * consider an mmap'ed file:
	 *
	 * - read-ahead enqueues a page.
	 *
	 * - on the following read-ahead hit, the fault handler activates it.
	 *
	 * - finally, the userland code which caused the above fault
	 *   actually accesses the page, which sets its reference bit.
	 *
	 * we want to count the above as a single access, rather than
	 * three accesses with short reuse distances.
	 */

#if defined(USEONCE2)
	pg->pqflags &= ~PQ_INITIALREF;
	if (hot) {
		pg->pqflags |= PQ_TEST;
	}
	s->s_ncold++;
	clockpro_clearreferencebit(pg, false);
	clockpro___enqueuetail(pg);
#else /* defined(USEONCE2) */
	if (speculative) {
		s->s_ncold++;
	} else if (hot) {
		pg->pqflags |= PQ_HOT;
	} else {
		pg->pqflags |= PQ_TEST;
		s->s_ncold++;
	}
	clockpro___enqueuetail(pg);
#endif /* defined(USEONCE2) */
	KASSERT(s->s_ncold <= s->s_npages);
}

static pageq_t *
clockpro_pagequeue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	int qidx;

	KASSERT(mutex_owned(&s->lock));

	qidx = clockpro_getq(pg);
	KASSERT(qidx != CLOCKPRO_NOQUEUE);

	return clockpro_queue(s, qidx);
}

static void
clockpro_pagedequeue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	pageq_t *q;

	KASSERT(mutex_owned(&s->lock));

	KASSERT(s->s_npages > 0);
	check_sanity();
	q = clockpro_pagequeue(pg);
	pageq_remove(q, pg);
	check_sanity();
	clockpro_setq(pg, CLOCKPRO_NOQUEUE);
	if ((pg->pqflags & PQ_HOT) == 0) {
		KASSERT(s->s_ncold > 0);
		s->s_ncold--;
	}
	KASSERT(s->s_npages > 0);
	s->s_npages--;
	check_sanity();
}

static void
clockpro_pagerequeue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	int qidx;

	KASSERT(mutex_owned(&s->lock));

	qidx = clockpro_getq(pg);
	KASSERT(qidx == CLOCKPRO_HOTQ || qidx == CLOCKPRO_COLDQ);
	pageq_remove(clockpro_queue(s, qidx), pg);
	check_sanity();
	clockpro_setq(pg, CLOCKPRO_NOQUEUE);

	clockpro___enqueuetail(pg);
}

static void
handhot_endtest(struct vm_page *pg)
{

	KASSERT(mutex_owned(&clockpro.lock));

	KASSERT((pg->pqflags & PQ_HOT) == 0);
	if ((pg->pqflags & PQ_TEST) != 0) {
		PDPOL_EVCNT_INCR(hhotcoldtest);
		COLDTARGET_ADJ(-1);
		pg->pqflags &= ~PQ_TEST;
	} else {
		PDPOL_EVCNT_INCR(hhotcold);
	}
}

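/*
 * the hot hand: while there are fewer than s_coldtarget cold pages,
 * sweep the hot queue demoting unreferenced hot pages to cold, and
 * advance the non-resident clock (cycle_target) in step.
 */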
static void
handhot_advance(void)
{
	struct clockpro_state * const s = &clockpro;
	struct vm_page *pg;
	pageq_t *hotq;
	int hotqlen;

	KASSERT(mutex_owned(&s->lock));

	clockpro_tune();

	dump("hot called");
	if (s->s_ncold >= s->s_coldtarget) {
		return;
	}
	hotq = clockpro_queue(s, CLOCKPRO_HOTQ);
again:
	pg = pageq_first(hotq);
	if (pg == NULL) {
		DPRINTF("%s: HHOT TAKEOVER\n", __func__);
		dump("hhottakeover");
		PDPOL_EVCNT_INCR(hhottakeover);
#if defined(LISTQ)
		while (/* CONSTCOND */ 1) {
			pageq_t *coldq = clockpro_queue(s, CLOCKPRO_COLDQ);

			pg = pageq_first(coldq);
			if (pg == NULL) {
				clockpro_newqflushone();
				pg = pageq_first(coldq);
				if (pg == NULL) {
					WARN("hhot: no page?\n");
					return;
				}
			}
			KASSERT(clockpro_pagequeue(pg) == coldq);
			pageq_remove(coldq, pg);
			check_sanity();
			if ((pg->pqflags & PQ_HOT) == 0) {
				handhot_endtest(pg);
				clockpro_insert_tail(s, CLOCKPRO_LISTQ, pg);
			} else {
				clockpro_insert_head(s, CLOCKPRO_HOTQ, pg);
				break;
			}
		}
#else /* defined(LISTQ) */
		clockpro_newqflush(0); /* XXX XXX */
		clockpro_switchqueue();
		hotq = clockpro_queue(s, CLOCKPRO_HOTQ);
		goto again;
#endif /* defined(LISTQ) */
	}

	KASSERT(clockpro_pagequeue(pg) == hotq);

	/*
	 * terminate test period of nonresident pages by cycling them.
	 */
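	/*
	 * each hot-hand step advances cycle_target by BUCKETSIZE/hotqlen
	 * (accumulated in cycle_target_frac), so one full lap of the hot
	 * queue advances every bucket by one full lap of its own clock.
	 */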

	cycle_target_frac += BUCKETSIZE;
	hotqlen = pageq_len(hotq);
	while (cycle_target_frac >= hotqlen) {
		cycle_target++;
		cycle_target_frac -= hotqlen;
	}

	if ((pg->pqflags & PQ_HOT) == 0) {
#if defined(LISTQ)
		panic("cold page in hotq: %p", pg);
#else /* defined(LISTQ) */
		handhot_endtest(pg);
		goto next;
#endif /* defined(LISTQ) */
	}
	KASSERT((pg->pqflags & PQ_TEST) == 0);
	KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
	KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);

	/*
	 * once we meet our target,
	 * stop at a hot page so that no cold pages in test period
	 * have larger recency than any hot pages.
	 */

	if (s->s_ncold >= s->s_coldtarget) {
		dump("hot done");
		return;
	}
	clockpro_movereferencebit(pg, false);
	if ((pg->pqflags & PQ_REFERENCED) == 0) {
		PDPOL_EVCNT_INCR(hhotunref);
		uvmexp.pddeact++;
		pg->pqflags &= ~PQ_HOT;
		clockpro.s_ncold++;
		KASSERT(s->s_ncold <= s->s_npages);
	} else {
		PDPOL_EVCNT_INCR(hhotref);
	}
	pg->pqflags &= ~PQ_REFERENCED;
#if !defined(LISTQ)
next:
#endif /* !defined(LISTQ) */
	clockpro_pagerequeue(pg);
	dump("hot");
	goto again;
}

static struct vm_page *
handcold_advance(void)
{
	struct clockpro_state * const s = &clockpro;
	struct vm_page *pg;

	KASSERT(mutex_owned(&s->lock));

	for (;;) {
#if defined(LISTQ)
		pageq_t *listq = clockpro_queue(s, CLOCKPRO_LISTQ);
#endif /* defined(LISTQ) */
		pageq_t *coldq;

		clockpro_newqrotate();
		handhot_advance();
#if defined(LISTQ)
		pg = pageq_first(listq);
		if (pg != NULL) {
			KASSERT(clockpro_getq(pg) == CLOCKPRO_LISTQ);
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			KASSERT((pg->pqflags & PQ_HOT) == 0);
			KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
			pageq_remove(listq, pg);
			check_sanity();
			clockpro_insert_head(s, CLOCKPRO_COLDQ, pg); /* XXX */
			goto gotcold;
		}
#endif /* defined(LISTQ) */
		check_sanity();
		coldq = clockpro_queue(s, CLOCKPRO_COLDQ);
		pg = pageq_first(coldq);
		if (pg == NULL) {
			clockpro_newqflushone();
			pg = pageq_first(coldq);
		}
		if (pg == NULL) {
			DPRINTF("%s: HCOLD TAKEOVER\n", __func__);
			dump("hcoldtakeover");
			PDPOL_EVCNT_INCR(hcoldtakeover);
			KASSERT(
			    pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)) == 0);
#if defined(LISTQ)
			KASSERT(
			    pageq_len(clockpro_queue(s, CLOCKPRO_HOTQ)) == 0);
#else /* defined(LISTQ) */
			clockpro_switchqueue();
			coldq = clockpro_queue(s, CLOCKPRO_COLDQ);
			pg = pageq_first(coldq);
#endif /* defined(LISTQ) */
		}
		if (pg == NULL) {
			WARN("hcold: no page?\n");
			return NULL;
		}
		KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
		if ((pg->pqflags & PQ_HOT) != 0) {
			PDPOL_EVCNT_INCR(hcoldhot);
			pageq_remove(coldq, pg);
			clockpro_insert_tail(s, CLOCKPRO_HOTQ, pg);
			check_sanity();
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			uvmexp.pdscans++;
			continue;
		}
#if defined(LISTQ)
gotcold:
#endif /* defined(LISTQ) */
		KASSERT((pg->pqflags & PQ_HOT) == 0);
		uvmexp.pdscans++;
		clockpro_movereferencebit(pg, false);
		if ((pg->pqflags & PQ_SPECULATIVE) != 0) {
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			if ((pg->pqflags & PQ_REFERENCED) != 0) {
				PDPOL_EVCNT_INCR(speculativehit2);
				pg->pqflags &= ~(PQ_SPECULATIVE|PQ_REFERENCED);
				clockpro_pagedequeue(pg);
				clockpro_pageenqueue(pg);
				continue;
			}
			PDPOL_EVCNT_INCR(speculativemiss);
		}
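		/*
		 * the cold hand's four cases:
		 * - unreferenced, in test period: eviction candidate;
		 *   remember it in the non-resident hash.
		 * - unreferenced, no test period: eviction candidate;
		 *   nothing to remember.
		 * - referenced, in test period: short reuse distance;
		 *   promote to hot and grow the cold target.
		 * - referenced, no test period: start a test period.
		 */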
   1096        1.2      yamt 		switch (pg->pqflags & (PQ_REFERENCED|PQ_TEST)) {
   1097        1.2      yamt 		case PQ_TEST:
   1098        1.2      yamt 			PDPOL_EVCNT_INCR(hcoldunreftest);
   1099        1.2      yamt 			nonresident_pagerecord(pg);
   1100        1.2      yamt 			goto gotit;
   1101        1.2      yamt 		case 0:
   1102        1.2      yamt 			PDPOL_EVCNT_INCR(hcoldunref);
   1103        1.2      yamt gotit:
   1104        1.2      yamt 			KASSERT(s->s_ncold > 0);
   1105        1.2      yamt 			clockpro_pagerequeue(pg); /* XXX */
   1106        1.2      yamt 			dump("cold done");
   1107        1.2      yamt 			/* XXX "pg" is still in queue */
   1108        1.2      yamt 			handhot_advance();
   1109        1.2      yamt 			goto done;
   1110        1.2      yamt 
   1111        1.2      yamt 		case PQ_REFERENCED|PQ_TEST:
   1112        1.2      yamt 			PDPOL_EVCNT_INCR(hcoldreftest);
   1113        1.2      yamt 			s->s_ncold--;
   1114        1.2      yamt 			COLDTARGET_ADJ(1);
   1115        1.2      yamt 			pg->pqflags |= PQ_HOT;
   1116        1.2      yamt 			pg->pqflags &= ~PQ_TEST;
   1117        1.2      yamt 			break;
   1118        1.2      yamt 
   1119        1.2      yamt 		case PQ_REFERENCED:
   1120        1.2      yamt 			PDPOL_EVCNT_INCR(hcoldref);
   1121        1.2      yamt 			pg->pqflags |= PQ_TEST;
   1122        1.2      yamt 			break;
   1123        1.2      yamt 		}
   1124        1.2      yamt 		pg->pqflags &= ~PQ_REFERENCED;
   1125        1.2      yamt 		uvmexp.pdreact++;
   1126        1.2      yamt 		/* move to the list head */
   1127        1.2      yamt 		clockpro_pagerequeue(pg);
   1128        1.2      yamt 		dump("cold");
   1129        1.2      yamt 	}
   1130        1.2      yamt done:;
   1131        1.2      yamt 	return pg;
   1132        1.2      yamt }
   1133        1.2      yamt 
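                              /*
                               * a page queued for the first time is tagged PQ_INITIALREF so that
                               * clockpro_pageenqueue() can tell a genuinely referenced page from a
                               * speculative one; an activated speculative page is requeued as a
                               * normal page.
                               */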
   1134  1.17.54.1    martin static void
   1135  1.17.54.1    martin uvmpdpol_pageactivate_locked(struct vm_page *pg)
   1136        1.2      yamt {
   1137        1.2      yamt 
   1138        1.2      yamt 	if (!uvmpdpol_pageisqueued_p(pg)) {
   1139        1.2      yamt 		KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);
   1140        1.2      yamt 		pg->pqflags |= PQ_INITIALREF;
   1141        1.2      yamt 		clockpro_pageenqueue(pg);
    1142        1.2      yamt 	} else if ((pg->pqflags & PQ_SPECULATIVE) != 0) {
   1143        1.2      yamt 		PDPOL_EVCNT_INCR(speculativehit1);
   1144        1.2      yamt 		pg->pqflags &= ~PQ_SPECULATIVE;
   1145        1.2      yamt 		pg->pqflags |= PQ_INITIALREF;
   1146        1.2      yamt 		clockpro_pagedequeue(pg);
   1147        1.2      yamt 		clockpro_pageenqueue(pg);
   1148        1.2      yamt 	}
   1149        1.2      yamt 	pg->pqflags |= PQ_REFERENCED;
   1150        1.2      yamt }
   1151        1.2      yamt 
   1152        1.2      yamt void
   1153  1.17.54.1    martin uvmpdpol_pageactivate(struct vm_page *pg)
   1154  1.17.54.1    martin {
   1155  1.17.54.1    martin 
   1156  1.17.54.1    martin 	uvmpdpol_set_intent(pg, PQ_INTENT_A);
   1157  1.17.54.1    martin }
   1158  1.17.54.1    martin 
   1159  1.17.54.1    martin static void
   1160  1.17.54.1    martin uvmpdpol_pagedeactivate_locked(struct vm_page *pg)
   1161        1.2      yamt {
   1162        1.2      yamt 
   1163       1.17      yamt 	clockpro_clearreferencebit(pg, true);
   1164        1.2      yamt }
   1165        1.2      yamt 
   1166        1.2      yamt void
   1167  1.17.54.1    martin uvmpdpol_pagedeactivate(struct vm_page *pg)
   1168  1.17.54.1    martin {
   1169  1.17.54.1    martin 
   1170  1.17.54.1    martin 	uvmpdpol_set_intent(pg, PQ_INTENT_I);
   1171  1.17.54.1    martin }
   1172  1.17.54.1    martin 
   1173  1.17.54.1    martin static void
   1174  1.17.54.1    martin uvmpdpol_pagedequeue_locked(struct vm_page *pg)
   1175        1.2      yamt {
   1176        1.2      yamt 
   1177        1.2      yamt 	if (!uvmpdpol_pageisqueued_p(pg)) {
   1178        1.2      yamt 		return;
   1179        1.2      yamt 	}
   1180        1.2      yamt 	clockpro_pagedequeue(pg);
   1181        1.6      yamt 	pg->pqflags &= ~(PQ_INITIALREF|PQ_SPECULATIVE);
   1182        1.2      yamt }
   1183        1.2      yamt 
   1184        1.2      yamt void
   1185  1.17.54.1    martin uvmpdpol_pagedequeue(struct vm_page *pg)
   1186  1.17.54.1    martin {
   1187  1.17.54.1    martin 
   1188  1.17.54.1    martin 	uvmpdpol_set_intent(pg, PQ_INTENT_D);
   1189  1.17.54.1    martin }
   1190  1.17.54.1    martin 
   1191  1.17.54.1    martin static void
   1192  1.17.54.1    martin uvmpdpol_pageenqueue_locked(struct vm_page *pg)
   1193        1.2      yamt {
   1194        1.2      yamt 
   1195        1.2      yamt #if 1
   1196        1.2      yamt 	if (uvmpdpol_pageisqueued_p(pg)) {
   1197        1.2      yamt 		return;
   1198        1.2      yamt 	}
   1199       1.17      yamt 	clockpro_clearreferencebit(pg, true);
   1200        1.2      yamt 	pg->pqflags |= PQ_SPECULATIVE;
   1201        1.2      yamt 	clockpro_pageenqueue(pg);
   1202        1.2      yamt #else
   1203  1.17.54.1    martin 	uvmpdpol_pageactivate_locked(pg);
   1204        1.2      yamt #endif
   1205        1.2      yamt }
   1206        1.2      yamt 
   1207        1.2      yamt void
   1208  1.17.54.1    martin uvmpdpol_pageenqueue(struct vm_page *pg)
   1209  1.17.54.1    martin {
   1210  1.17.54.1    martin 
    1211  1.17.54.1    martin 	uvmpdpol_set_intent(pg, PQ_INTENT_E);
   1212  1.17.54.1    martin }
   1213  1.17.54.1    martin 
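                              /*
                               * the uvmpdpol_page*() wrappers above do not touch the queues
                               * directly: they record an intent on the page (PQ_INTENT_A/E/I/D
                               * together with PQ_INTENT_SET) via uvmpdpol_set_intent(), and
                               * uvmpdpol_pagerealize_locked() applies it later with clockpro.lock
                               * held and pg->interlock held by the caller.  a sketch of the
                               * expected usage (the actual call sites live elsewhere in uvm):
                               *
                               *	uvmpdpol_pageactivate(pg);	records PQ_INTENT_A|PQ_INTENT_SET
                               *	...
                               *	uvmpdpol_pagerealize(pg);	applies and clears the intent
                               */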
   1214  1.17.54.1    martin static bool
   1215  1.17.54.1    martin uvmpdpol_pagerealize_locked(struct vm_page *pg)
   1216  1.17.54.1    martin {
   1217  1.17.54.1    martin 	uint32_t pqflags;
   1218  1.17.54.1    martin 
   1219  1.17.54.1    martin 	KASSERT(mutex_owned(&clockpro.lock));
   1220  1.17.54.1    martin 	KASSERT(mutex_owned(&pg->interlock));
   1221  1.17.54.1    martin 
   1222  1.17.54.1    martin 	/* XXX this needs to be called from elsewhere, like uvmpdpol_clock. */
   1223  1.17.54.1    martin 
   1224  1.17.54.1    martin 	pqflags = pg->pqflags;
    1225  1.17.54.1    martin 	pg->pqflags &= ~(PQ_INTENT_SET | PQ_INTENT_QUEUED);
   1226  1.17.54.1    martin 	switch (pqflags & (PQ_INTENT_MASK | PQ_INTENT_SET)) {
   1227  1.17.54.1    martin 	case PQ_INTENT_A | PQ_INTENT_SET:
   1228  1.17.54.1    martin 		uvmpdpol_pageactivate_locked(pg);
   1229  1.17.54.1    martin 		return true;
   1230  1.17.54.1    martin 	case PQ_INTENT_E | PQ_INTENT_SET:
   1231  1.17.54.1    martin 		uvmpdpol_pageenqueue_locked(pg);
   1232  1.17.54.1    martin 		return true;
   1233  1.17.54.1    martin 	case PQ_INTENT_I | PQ_INTENT_SET:
   1234  1.17.54.1    martin 		uvmpdpol_pagedeactivate_locked(pg);
   1235  1.17.54.1    martin 		return true;
   1236  1.17.54.1    martin 	case PQ_INTENT_D | PQ_INTENT_SET:
   1237  1.17.54.1    martin 		uvmpdpol_pagedequeue_locked(pg);
   1238  1.17.54.1    martin 		return true;
   1239  1.17.54.1    martin 	default:
   1240  1.17.54.1    martin 		return false;
   1241  1.17.54.1    martin 	}
   1242  1.17.54.1    martin }
   1243  1.17.54.1    martin 
   1244  1.17.54.1    martin void
   1245  1.17.54.1    martin uvmpdpol_pagerealize(struct vm_page *pg)
   1246  1.17.54.1    martin {
   1247  1.17.54.1    martin 	struct clockpro_state * const s = &clockpro;
   1248  1.17.54.1    martin 
   1249  1.17.54.1    martin 	mutex_enter(&s->lock);
   1250  1.17.54.1    martin 	uvmpdpol_pagerealize_locked(pg);
   1251  1.17.54.1    martin 	mutex_exit(&s->lock);
   1252  1.17.54.1    martin }
   1253  1.17.54.1    martin 
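                              /*
                               * forget any non-resident history kept for this anon, so that a later
                               * allocation reusing the same address is not mistaken for a
                               * re-reference.
                               */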
   1254  1.17.54.1    martin void
   1255        1.2      yamt uvmpdpol_anfree(struct vm_anon *an)
   1256        1.2      yamt {
   1257  1.17.54.1    martin 	struct clockpro_state * const s = &clockpro;
   1258        1.2      yamt 
   1259        1.2      yamt 	KASSERT(an->an_page == NULL);
   1260  1.17.54.1    martin 	mutex_enter(&s->lock);
   1261        1.2      yamt 	if (nonresident_lookupremove((objid_t)an, 0)) {
   1262        1.2      yamt 		PDPOL_EVCNT_INCR(nresanonfree);
   1263        1.2      yamt 	}
   1264  1.17.54.1    martin 	mutex_exit(&s->lock);
   1265        1.2      yamt }
   1266        1.2      yamt 
   1267        1.2      yamt void
   1268        1.2      yamt uvmpdpol_init(void)
   1269        1.2      yamt {
   1270        1.2      yamt 
   1271        1.2      yamt 	clockpro_init();
   1272        1.2      yamt }
   1273        1.2      yamt 
   1274        1.2      yamt void
   1275        1.2      yamt uvmpdpol_reinit(void)
   1276        1.2      yamt {
   1277  1.17.54.1    martin 	struct clockpro_state * const s = &clockpro;
   1278        1.2      yamt 
   1279  1.17.54.1    martin 	mutex_enter(&s->lock);
   1280        1.2      yamt 	clockpro_reinit();
   1281  1.17.54.1    martin 	mutex_exit(&s->lock);
   1282        1.2      yamt }
   1283        1.2      yamt 
   1284        1.2      yamt void
   1285        1.2      yamt uvmpdpol_estimatepageable(int *active, int *inactive)
   1286        1.2      yamt {
   1287        1.2      yamt 	struct clockpro_state * const s = &clockpro;
   1288        1.2      yamt 
   1289  1.17.54.1    martin 	/*
   1290  1.17.54.1    martin 	 * Don't take any locks here.  This can be called from DDB, and in
   1291  1.17.54.1    martin 	 * any case the numbers are stale the instant the lock is dropped,
   1292  1.17.54.1    martin 	 * so it just doesn't matter.
   1293  1.17.54.1    martin 	 */
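                              	/*
                              	 * hot pages are reported as active; cold pages, including
                              	 * those still in their test period, as inactive.
                              	 */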
   1294        1.2      yamt 	if (active) {
   1295        1.2      yamt 		*active = s->s_npages - s->s_ncold;
   1296        1.2      yamt 	}
   1297        1.2      yamt 	if (inactive) {
   1298        1.2      yamt 		*inactive = s->s_ncold;
   1299        1.2      yamt 	}
   1300        1.2      yamt }
   1301        1.2      yamt 
   1302        1.7   thorpej bool
   1303        1.2      yamt uvmpdpol_pageisqueued_p(struct vm_page *pg)
   1304        1.2      yamt {
   1305        1.2      yamt 
   1306  1.17.54.1    martin 	/* Unlocked check OK due to page lifecycle. */
   1307        1.2      yamt 	return clockpro_getq(pg) != CLOCKPRO_NOQUEUE;
   1308        1.2      yamt }
   1309        1.2      yamt 
   1310        1.2      yamt void
   1311        1.2      yamt uvmpdpol_scaninit(void)
   1312        1.2      yamt {
   1313  1.17.54.1    martin 	struct clockpro_state * const s = &clockpro;
   1314        1.2      yamt 	struct clockpro_scanstate * const ss = &scanstate;
   1315        1.2      yamt 
   1316  1.17.54.1    martin 	mutex_enter(&s->lock);
   1317        1.2      yamt 	ss->ss_nscanned = 0;
   1318  1.17.54.1    martin 	mutex_exit(&s->lock);
   1319  1.17.54.1    martin }
   1320  1.17.54.1    martin 
   1321  1.17.54.1    martin void
   1322  1.17.54.1    martin uvmpdpol_scanfini(void)
   1323  1.17.54.1    martin {
   1324  1.17.54.1    martin 
   1325        1.2      yamt }
   1326        1.2      yamt 
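                              /*
                               * select a victim page for eviction.  the lock order is
                               * clockpro.lock -> pg->interlock -> owner (object/anon) lock;
                               * clockpro.lock is dropped before trying the owner's lock, and
                               * uvmpd_trylockowner() consumes pg->interlock either way.
                               */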
   1327        1.2      yamt struct vm_page *
   1328  1.17.54.1    martin uvmpdpol_selectvictim(kmutex_t **plock)
   1329        1.2      yamt {
   1330        1.2      yamt 	struct clockpro_state * const s = &clockpro;
   1331        1.2      yamt 	struct clockpro_scanstate * const ss = &scanstate;
   1332        1.2      yamt 	struct vm_page *pg;
   1333  1.17.54.1    martin 	kmutex_t *lock = NULL;
   1334        1.2      yamt 
   1335  1.17.54.1    martin 	do {
   1336  1.17.54.1    martin 		mutex_enter(&s->lock);
   1337  1.17.54.1    martin 		if (ss->ss_nscanned > s->s_npages) {
   1338  1.17.54.1    martin 			DPRINTF("scan too much\n");
   1339  1.17.54.1    martin 			mutex_exit(&s->lock);
   1340  1.17.54.1    martin 			return NULL;
   1341  1.17.54.1    martin 		}
   1342  1.17.54.1    martin 		pg = handcold_advance();
   1343  1.17.54.1    martin 		if (pg == NULL) {
   1344  1.17.54.1    martin 			mutex_exit(&s->lock);
   1345  1.17.54.1    martin 			break;
   1346  1.17.54.1    martin 		}
   1347  1.17.54.1    martin 		ss->ss_nscanned++;
   1348  1.17.54.1    martin 		/*
    1349  1.17.54.1    martin 		 * acquire interlock to stabilize page identity.
   1350  1.17.54.1    martin 		 * if we have caught the page in a state of flux
   1351  1.17.54.1    martin 		 * and it should be dequeued, do it now and then
   1352  1.17.54.1    martin 		 * move on to the next.
   1353  1.17.54.1    martin 		 */
   1354  1.17.54.1    martin 		mutex_enter(&pg->interlock);
    1355  1.17.54.1    martin 		if ((pg->uobject == NULL && pg->uanon == NULL) ||
    1356  1.17.54.1    martin 		    pg->wire_count > 0) {
    1357  1.17.54.1    martin 			mutex_exit(&pg->interlock);
    1358  1.17.54.1    martin 			clockpro_pagedequeue(pg);
    1359  1.17.54.1    martin 			pg->pqflags &= ~(PQ_INITIALREF|PQ_SPECULATIVE);
                              			mutex_exit(&s->lock);
    1360  1.17.54.1    martin 			continue;
    1361  1.17.54.1    martin 		}
   1362  1.17.54.1    martin 		mutex_exit(&s->lock);
   1363  1.17.54.1    martin 		lock = uvmpd_trylockowner(pg);
   1364  1.17.54.1    martin 		/* pg->interlock now dropped */
   1365  1.17.54.1    martin 	} while (lock == NULL);
   1366  1.17.54.1    martin 	*plock = lock;
   1367        1.2      yamt 	return pg;
   1368        1.2      yamt }
   1369        1.2      yamt 
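                              /*
                               * walk a queue from its tail looking for hot, swap-backed pages and
                               * release their swap slots until *todo slots have been reclaimed.
                               * called with clockpro.lock held; the lock is dropped and retaken
                               * around each attempt to lock a page's owner, so the walk can lose
                               * its place in the queue (the XXXAD note below).
                               */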
   1370        1.2      yamt static void
   1371        1.2      yamt clockpro_dropswap(pageq_t *q, int *todo)
   1372        1.2      yamt {
   1373        1.2      yamt 	struct vm_page *pg;
   1374  1.17.54.1    martin 	kmutex_t *lock;
   1375  1.17.54.1    martin 
   1376  1.17.54.1    martin 	KASSERT(mutex_owned(&clockpro.lock));
   1377        1.2      yamt 
   1378  1.17.54.1    martin 	TAILQ_FOREACH_REVERSE(pg, &q->q_q, pglist, pdqueue) {
   1379        1.2      yamt 		if (*todo <= 0) {
   1380        1.2      yamt 			break;
   1381        1.2      yamt 		}
   1382        1.2      yamt 		if ((pg->pqflags & PQ_HOT) == 0) {
   1383        1.2      yamt 			continue;
   1384        1.2      yamt 		}
   1385  1.17.54.1    martin 		mutex_enter(&pg->interlock);
   1386  1.17.54.1    martin 		if ((pg->flags & PG_SWAPBACKED) == 0) {
   1387  1.17.54.1    martin 			mutex_exit(&pg->interlock);
   1388        1.2      yamt 			continue;
   1389        1.2      yamt 		}
   1390  1.17.54.1    martin 
   1391  1.17.54.1    martin 		/*
   1392  1.17.54.1    martin 		 * try to lock the object that owns the page.
    1393  1.17.54.1    martin 		 */
    1394  1.17.54.1    martin 		mutex_exit(&clockpro.lock);
    1395  1.17.54.1    martin 		lock = uvmpd_trylockowner(pg);
    1396  1.17.54.1    martin 		/* pg->interlock now released */
    1397  1.17.54.1    martin 		mutex_enter(&clockpro.lock);
   1398  1.17.54.1    martin 		if (lock == NULL) {
   1399  1.17.54.1    martin 			/* didn't get it - try the next page. */
   1400  1.17.54.1    martin 			/* XXXAD lost position in queue */
   1401  1.17.54.1    martin 			continue;
   1402        1.2      yamt 		}
   1403  1.17.54.1    martin 
   1404  1.17.54.1    martin 		/*
    1405  1.17.54.1    martin 		 * there's a shortage of swap slots: if the page still
                              		 * has a slot and isn't busy, try to free it.
   1406  1.17.54.1    martin 		 */
   1407  1.17.54.1    martin 		if ((pg->flags & PG_SWAPBACKED) != 0 &&
   1408  1.17.54.1    martin 		    (pg->flags & PG_BUSY) == 0) {
   1409  1.17.54.1    martin 			if (uvmpd_dropswap(pg)) {
   1410  1.17.54.1    martin 				(*todo)--;
   1411  1.17.54.1    martin 			}
   1412  1.17.54.1    martin 		}
   1413  1.17.54.1    martin 		mutex_exit(lock);
   1414        1.2      yamt 	}
   1415        1.2      yamt }
   1416        1.2      yamt 
   1417        1.2      yamt void
   1418        1.2      yamt uvmpdpol_balancequeue(int swap_shortage)
   1419        1.2      yamt {
   1420        1.2      yamt 	struct clockpro_state * const s = &clockpro;
   1421        1.2      yamt 	int todo = swap_shortage;
   1422        1.2      yamt 
   1423        1.2      yamt 	if (todo == 0) {
   1424        1.2      yamt 		return;
   1425        1.2      yamt 	}
   1426        1.2      yamt 
   1427        1.2      yamt 	/*
   1428        1.2      yamt 	 * reclaim swap slots from hot pages
   1429        1.2      yamt 	 */
   1430        1.2      yamt 
   1431        1.2      yamt 	DPRINTF("%s: swap_shortage=%d\n", __func__, swap_shortage);
   1432        1.2      yamt 
   1433  1.17.54.1    martin 	mutex_enter(&s->lock);
   1434        1.2      yamt 	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_NEWQ), &todo);
   1435        1.2      yamt 	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_COLDQ), &todo);
   1436        1.2      yamt 	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_HOTQ), &todo);
   1437  1.17.54.1    martin 	mutex_exit(&s->lock);
   1438        1.2      yamt 
   1439        1.2      yamt 	DPRINTF("%s: done=%d\n", __func__, swap_shortage - todo);
   1440        1.2      yamt }
   1441        1.2      yamt 
   1442        1.7   thorpej bool
   1443        1.2      yamt uvmpdpol_needsscan_p(void)
   1444        1.2      yamt {
   1445        1.2      yamt 	struct clockpro_state * const s = &clockpro;
   1446        1.2      yamt 
   1447  1.17.54.1    martin 	/* This must be an unlocked check: can be called from interrupt. */
   1448  1.17.54.1    martin 	return s->s_ncold < s->s_coldtarget;
   1449        1.2      yamt }
   1450        1.2      yamt 
   1451        1.2      yamt void
   1452        1.2      yamt uvmpdpol_tune(void)
   1453        1.2      yamt {
   1454  1.17.54.1    martin 	struct clockpro_state * const s = &clockpro;
   1455        1.2      yamt 
   1456  1.17.54.1    martin 	mutex_enter(&s->lock);
   1457        1.2      yamt 	clockpro_tune();
   1458  1.17.54.1    martin 	mutex_exit(&s->lock);
   1459  1.17.54.1    martin }
   1460  1.17.54.1    martin 
   1461  1.17.54.1    martin void
   1462  1.17.54.1    martin uvmpdpol_idle(void)
   1463  1.17.54.1    martin {
   1464  1.17.54.1    martin 
   1465        1.2      yamt }
   1466        1.2      yamt 
   1467        1.2      yamt #if !defined(PDSIM)
   1468        1.2      yamt 
   1469        1.2      yamt #include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */
   1470        1.2      yamt 
   1471        1.2      yamt void
   1472        1.2      yamt uvmpdpol_sysctlsetup(void)
   1473        1.2      yamt {
   1474        1.2      yamt #if !defined(ADAPTIVE)
   1475        1.2      yamt 	struct clockpro_state * const s = &clockpro;
   1476        1.2      yamt 
   1477        1.2      yamt 	uvm_pctparam_createsysctlnode(&s->s_coldtargetpct, "coldtargetpct",
    1478        1.2      yamt 	    SYSCTL_DESCR("Cold target queue size as a percentage of the entire queue"));
   1479        1.2      yamt #endif /* !defined(ADAPTIVE) */
   1480        1.2      yamt }
   1481        1.2      yamt 
   1482        1.2      yamt #endif /* !defined(PDSIM) */
   1483        1.2      yamt 
   1484        1.2      yamt #if defined(DDB)
   1485        1.2      yamt 
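                              /*
                               * pmap_is_referenced() is not safe to call here without the page
                               * owner's lock (the XXXuvmplock marker below), so this debug code
                               * substitutes false instead.
                               */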
   1486       1.17      yamt #if 0 /* XXXuvmplock */
   1487       1.17      yamt #define	_pmap_is_referenced(pg)	pmap_is_referenced(pg)
   1488       1.17      yamt #else
   1489       1.17      yamt #define	_pmap_is_referenced(pg)	false
   1490       1.17      yamt #endif
   1491       1.17      yamt 
   1492        1.2      yamt void clockpro_dump(void);
   1493        1.2      yamt 
   1494        1.2      yamt void
   1495        1.2      yamt clockpro_dump(void)
   1496        1.2      yamt {
   1497        1.2      yamt 	struct clockpro_state * const s = &clockpro;
   1498        1.2      yamt 
   1499        1.2      yamt 	struct vm_page *pg;
   1500        1.2      yamt 	int ncold, nhot, ntest, nspeculative, ninitialref, nref;
   1501        1.2      yamt 	int newqlen, coldqlen, hotqlen, listqlen;
   1502        1.2      yamt 
   1503        1.2      yamt 	newqlen = coldqlen = hotqlen = listqlen = 0;
   1504        1.2      yamt 	printf("npages=%d, ncold=%d, coldtarget=%d, newqlenmax=%d\n",
   1505        1.2      yamt 	    s->s_npages, s->s_ncold, s->s_coldtarget, s->s_newqlenmax);
   1506        1.2      yamt 
   1507        1.2      yamt #define	INITCOUNT()	\
   1508        1.2      yamt 	ncold = nhot = ntest = nspeculative = ninitialref = nref = 0
   1509        1.2      yamt 
   1510        1.2      yamt #define	COUNT(pg)	\
   1511        1.2      yamt 	if ((pg->pqflags & PQ_HOT) != 0) { \
   1512        1.2      yamt 		nhot++; \
   1513        1.2      yamt 	} else { \
   1514        1.2      yamt 		ncold++; \
   1515        1.2      yamt 		if ((pg->pqflags & PQ_TEST) != 0) { \
   1516        1.2      yamt 			ntest++; \
   1517        1.2      yamt 		} \
   1518        1.2      yamt 		if ((pg->pqflags & PQ_SPECULATIVE) != 0) { \
   1519        1.2      yamt 			nspeculative++; \
   1520        1.2      yamt 		} \
   1521        1.2      yamt 		if ((pg->pqflags & PQ_INITIALREF) != 0) { \
   1522        1.2      yamt 			ninitialref++; \
   1523        1.2      yamt 		} else if ((pg->pqflags & PQ_REFERENCED) != 0 || \
   1524       1.17      yamt 		    _pmap_is_referenced(pg)) { \
   1525        1.2      yamt 			nref++; \
   1526        1.2      yamt 		} \
   1527        1.2      yamt 	}
   1528        1.2      yamt 
   1529        1.2      yamt #define	PRINTCOUNT(name)	\
   1530        1.2      yamt 	printf("%s hot=%d, cold=%d, test=%d, speculative=%d, initialref=%d, " \
   1531        1.2      yamt 	    "nref=%d\n", \
   1532        1.2      yamt 	    (name), nhot, ncold, ntest, nspeculative, ninitialref, nref)
   1533        1.2      yamt 
   1534        1.2      yamt 	INITCOUNT();
   1535  1.17.54.1    martin 	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_NEWQ)->q_q, pdqueue) {
   1536        1.2      yamt 		if (clockpro_getq(pg) != CLOCKPRO_NEWQ) {
   1537        1.2      yamt 			printf("newq corrupt %p\n", pg);
   1538        1.2      yamt 		}
   1539        1.2      yamt 		COUNT(pg)
   1540        1.2      yamt 		newqlen++;
   1541        1.2      yamt 	}
   1542        1.2      yamt 	PRINTCOUNT("newq");
   1543        1.2      yamt 
   1544        1.2      yamt 	INITCOUNT();
   1545  1.17.54.1    martin 	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_COLDQ)->q_q, pdqueue) {
   1546        1.2      yamt 		if (clockpro_getq(pg) != CLOCKPRO_COLDQ) {
   1547        1.2      yamt 			printf("coldq corrupt %p\n", pg);
   1548        1.2      yamt 		}
   1549        1.2      yamt 		COUNT(pg)
   1550        1.2      yamt 		coldqlen++;
   1551        1.2      yamt 	}
   1552        1.2      yamt 	PRINTCOUNT("coldq");
   1553        1.2      yamt 
   1554        1.2      yamt 	INITCOUNT();
   1555  1.17.54.1    martin 	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_HOTQ)->q_q, pdqueue) {
   1556        1.2      yamt 		if (clockpro_getq(pg) != CLOCKPRO_HOTQ) {
   1557        1.2      yamt 			printf("hotq corrupt %p\n", pg);
   1558        1.2      yamt 		}
   1559        1.2      yamt #if defined(LISTQ)
   1560        1.2      yamt 		if ((pg->pqflags & PQ_HOT) == 0) {
   1561        1.2      yamt 			printf("cold page in hotq: %p\n", pg);
   1562        1.2      yamt 		}
   1563        1.2      yamt #endif /* defined(LISTQ) */
   1564        1.2      yamt 		COUNT(pg)
   1565        1.2      yamt 		hotqlen++;
   1566        1.2      yamt 	}
   1567        1.2      yamt 	PRINTCOUNT("hotq");
   1568        1.2      yamt 
   1569        1.2      yamt 	INITCOUNT();
   1570  1.17.54.1    martin 	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_LISTQ)->q_q, pdqueue) {
   1571        1.2      yamt #if !defined(LISTQ)
   1572       1.14       bjs 		printf("listq %p\n", pg);
   1573        1.2      yamt #endif /* !defined(LISTQ) */
   1574        1.2      yamt 		if (clockpro_getq(pg) != CLOCKPRO_LISTQ) {
   1575        1.2      yamt 			printf("listq corrupt %p\n", pg);
   1576        1.2      yamt 		}
   1577        1.2      yamt 		COUNT(pg)
   1578        1.2      yamt 		listqlen++;
   1579        1.2      yamt 	}
   1580        1.2      yamt 	PRINTCOUNT("listq");
   1581        1.2      yamt 
   1582        1.2      yamt 	printf("newqlen=%d/%d, coldqlen=%d/%d, hotqlen=%d/%d, listqlen=%d/%d\n",
   1583        1.2      yamt 	    newqlen, pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)),
   1584        1.2      yamt 	    coldqlen, pageq_len(clockpro_queue(s, CLOCKPRO_COLDQ)),
   1585        1.2      yamt 	    hotqlen, pageq_len(clockpro_queue(s, CLOCKPRO_HOTQ)),
   1586        1.2      yamt 	    listqlen, pageq_len(clockpro_queue(s, CLOCKPRO_LISTQ)));
   1587        1.2      yamt }
   1588        1.2      yamt 
   1589        1.2      yamt #endif /* defined(DDB) */
   1590        1.2      yamt 
   1591        1.2      yamt #if defined(PDSIM)
   1592        1.3      yamt #if defined(DEBUG)
   1593        1.2      yamt static void
   1594        1.2      yamt pdsim_dumpq(int qidx)
   1595        1.2      yamt {
   1596        1.2      yamt 	struct clockpro_state * const s = &clockpro;
   1597        1.2      yamt 	pageq_t *q = clockpro_queue(s, qidx);
   1598        1.2      yamt 	struct vm_page *pg;
   1599        1.2      yamt 
   1600  1.17.54.1    martin 	TAILQ_FOREACH(pg, &q->q_q, pdqueue) {
   1601        1.2      yamt 		DPRINTF(" %" PRIu64 "%s%s%s%s%s%s",
   1602        1.2      yamt 		    pg->offset >> PAGE_SHIFT,
   1603        1.2      yamt 		    (pg->pqflags & PQ_HOT) ? "H" : "",
   1604        1.2      yamt 		    (pg->pqflags & PQ_TEST) ? "T" : "",
   1605        1.2      yamt 		    (pg->pqflags & PQ_REFERENCED) ? "R" : "",
   1606       1.17      yamt 		    _pmap_is_referenced(pg) ? "r" : "",
   1607        1.2      yamt 		    (pg->pqflags & PQ_INITIALREF) ? "I" : "",
   1608        1.2      yamt 		    (pg->pqflags & PQ_SPECULATIVE) ? "S" : ""
   1609        1.2      yamt 		    );
   1610        1.2      yamt 	}
   1611        1.2      yamt }
   1612        1.3      yamt #endif /* defined(DEBUG) */
   1613        1.2      yamt 
   1614        1.2      yamt void
   1615        1.2      yamt pdsim_dump(const char *id)
   1616        1.2      yamt {
   1617        1.2      yamt #if defined(DEBUG)
   1618        1.2      yamt 	struct clockpro_state * const s = &clockpro;
   1619        1.2      yamt 
   1620        1.2      yamt 	DPRINTF("  %s L(", id);
   1621        1.2      yamt 	pdsim_dumpq(CLOCKPRO_LISTQ);
   1622        1.2      yamt 	DPRINTF(" ) H(");
   1623        1.2      yamt 	pdsim_dumpq(CLOCKPRO_HOTQ);
   1624        1.2      yamt 	DPRINTF(" ) C(");
   1625        1.2      yamt 	pdsim_dumpq(CLOCKPRO_COLDQ);
   1626        1.2      yamt 	DPRINTF(" ) N(");
   1627        1.2      yamt 	pdsim_dumpq(CLOCKPRO_NEWQ);
   1628        1.2      yamt 	DPRINTF(" ) ncold=%d/%d, coldadj=%d\n",
   1629        1.2      yamt 	    s->s_ncold, s->s_coldtarget, coldadj);
   1630        1.2      yamt #endif /* defined(DEBUG) */
   1631        1.2      yamt }
   1632        1.2      yamt #endif /* defined(PDSIM) */
   1633