1 /* $NetBSD: uvm_pdpolicy_clockpro.c,v 1.18 2019/12/13 20:10:22 ad Exp $ */
2
3 /*-
4 * Copyright (c)2005, 2006 YAMAMOTO Takashi,
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 /*
30 * CLOCK-Pro replacement policy:
31 * http://www.cs.wm.edu/hpcs/WWW/HTML/publications/abs05-3.html
32 *
33 * approximation of the list of non-resident pages using a hash:
34 * http://linux-mm.org/ClockProApproximation
35 */
36
37 /* #define CLOCKPRO_DEBUG */
38
39 #if defined(PDSIM)
40
41 #include "pdsim.h"
42
43 #else /* defined(PDSIM) */
44
45 #include <sys/cdefs.h>
46 __KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clockpro.c,v 1.18 2019/12/13 20:10:22 ad Exp $");
47
48 #include "opt_ddb.h"
49
50 #include <sys/param.h>
51 #include <sys/proc.h>
52 #include <sys/systm.h>
53 #include <sys/kernel.h>
54 #include <sys/hash.h>
55
56 #include <uvm/uvm.h>
57 #include <uvm/uvm_pdaemon.h> /* for uvmpd_trylockowner */
58 #include <uvm/uvm_pdpolicy.h>
59 #include <uvm/uvm_pdpolicy_impl.h>
60
61 #if ((__STDC_VERSION__ - 0) >= 199901L)
62 #define DPRINTF(...) /* nothing */
63 #define WARN(...) printf(__VA_ARGS__)
64 #else /* ((__STDC_VERSION__ - 0) >= 199901L) */
65 #define DPRINTF(a...) /* nothing */ /* GCC */
66 #define WARN(a...) printf(a)
67 #endif /* ((__STDC_VERSION__ - 0) >= 199901L) */
68
69 #define dump(a) /* nothing */
70
71 #undef USEONCE2
72 #define LISTQ
73 #undef ADAPTIVE
74
75 #endif /* defined(PDSIM) */
76
77 #if !defined(CLOCKPRO_COLDPCT)
78 #define CLOCKPRO_COLDPCT 10
79 #endif /* !defined(CLOCKPRO_COLDPCT) */
80
81 #define CLOCKPRO_COLDPCTMAX 90
82
83 #if !defined(CLOCKPRO_HASHFACTOR)
84 #define CLOCKPRO_HASHFACTOR 2
85 #endif /* !defined(CLOCKPRO_HASHFACTOR) */
86
87 #define CLOCKPRO_NEWQMIN ((1024 * 1024) >> PAGE_SHIFT) /* XXX */
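/*
 * editor's note (illustrative arithmetic only): with 4 KiB pages,
 * CLOCKPRO_NEWQMIN is (1024 * 1024) >> 12 = 256 pages, i.e. the new
 * queue is never trimmed below about 1 MiB worth of pages.
 */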
88
89 int clockpro_hashfactor = CLOCKPRO_HASHFACTOR;
90
91 PDPOL_EVCNT_DEFINE(nresrecordobj)
92 PDPOL_EVCNT_DEFINE(nresrecordanon)
93 PDPOL_EVCNT_DEFINE(nreslookupobj)
94 PDPOL_EVCNT_DEFINE(nreslookupanon)
95 PDPOL_EVCNT_DEFINE(nresfoundobj)
96 PDPOL_EVCNT_DEFINE(nresfoundanon)
97 PDPOL_EVCNT_DEFINE(nresanonfree)
98 PDPOL_EVCNT_DEFINE(nresconflict)
99 PDPOL_EVCNT_DEFINE(nresoverwritten)
100 PDPOL_EVCNT_DEFINE(nreshandhot)
101
102 PDPOL_EVCNT_DEFINE(hhottakeover)
103 PDPOL_EVCNT_DEFINE(hhotref)
104 PDPOL_EVCNT_DEFINE(hhotunref)
105 PDPOL_EVCNT_DEFINE(hhotcold)
106 PDPOL_EVCNT_DEFINE(hhotcoldtest)
107
108 PDPOL_EVCNT_DEFINE(hcoldtakeover)
109 PDPOL_EVCNT_DEFINE(hcoldref)
110 PDPOL_EVCNT_DEFINE(hcoldunref)
111 PDPOL_EVCNT_DEFINE(hcoldreftest)
112 PDPOL_EVCNT_DEFINE(hcoldunreftest)
113 PDPOL_EVCNT_DEFINE(hcoldunreftestspeculative)
114 PDPOL_EVCNT_DEFINE(hcoldhot)
115
116 PDPOL_EVCNT_DEFINE(speculativeenqueue)
117 PDPOL_EVCNT_DEFINE(speculativehit1)
118 PDPOL_EVCNT_DEFINE(speculativehit2)
119 PDPOL_EVCNT_DEFINE(speculativemiss)
120
121 PDPOL_EVCNT_DEFINE(locksuccess)
122 PDPOL_EVCNT_DEFINE(lockfail)
123
124 #define PQ_REFERENCED 0x000000001
125 #define PQ_HOT 0x000000002
126 #define PQ_TEST 0x000000004
127 #define PQ_INITIALREF 0x000000008
128 #define PQ_QMASK 0x000000070
129 #define PQ_QFACTOR 0x000000010
130 #define PQ_SPECULATIVE 0x000000080
131
132 #define CLOCKPRO_NOQUEUE 0
133 #define CLOCKPRO_NEWQ 1 /* small queue to clear initial ref. */
134 #if defined(LISTQ)
135 #define CLOCKPRO_COLDQ 2
136 #define CLOCKPRO_HOTQ 3
137 #else /* defined(LISTQ) */
138 #define CLOCKPRO_COLDQ (2 + coldqidx) /* XXX */
139 #define CLOCKPRO_HOTQ (3 - coldqidx) /* XXX */
140 #endif /* defined(LISTQ) */
141 #define CLOCKPRO_LISTQ 4
142 #define CLOCKPRO_NQUEUE 4
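/*
 * editor's note: the queue index is packed into pg->pqflags using
 * PQ_QMASK/PQ_QFACTOR (bits 4-6).  for example, with LISTQ defined a
 * page on the hot queue has
 * (pg->pqflags & PQ_QMASK) == CLOCKPRO_HOTQ * PQ_QFACTOR == 0x30,
 * and clockpro_getq() below recovers CLOCKPRO_HOTQ by dividing by
 * PQ_QFACTOR.
 */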
143
144 static inline void
145 clockpro_setq(struct vm_page *pg, int qidx)
146 {
147 KASSERT(qidx >= CLOCKPRO_NOQUEUE);
148 KASSERT(qidx <= CLOCKPRO_NQUEUE);
149
150 pg->pqflags = (pg->pqflags & ~PQ_QMASK) | (qidx * PQ_QFACTOR);
151 }
152
153 static inline int
154 clockpro_getq(struct vm_page *pg)
155 {
156 int qidx;
157
158 qidx = (pg->pqflags & PQ_QMASK) / PQ_QFACTOR;
159 KASSERT(qidx >= CLOCKPRO_NOQUEUE);
160 KASSERT(qidx <= CLOCKPRO_NQUEUE);
161 return qidx;
162 }
163
164 typedef struct {
165 struct pglist q_q;
166 int q_len;
167 } pageq_t;
168
169 struct clockpro_state {
170 kmutex_t lock;
171 int s_npages;
172 int s_coldtarget;
173 int s_ncold;
174
175 int s_newqlenmax;
176 pageq_t s_q[CLOCKPRO_NQUEUE];
177
178 struct uvm_pctparam s_coldtargetpct;
179 };
180
181 static pageq_t *
182 clockpro_queue(struct clockpro_state *s, int qidx)
183 {
184
185 KASSERT(CLOCKPRO_NOQUEUE < qidx);
186 KASSERT(qidx <= CLOCKPRO_NQUEUE);
187
188 return &s->s_q[qidx - 1];
189 }
190
191 #if !defined(LISTQ)
192
193 static int coldqidx;
194
195 static void
196 clockpro_switchqueue(void)
197 {
198
199 coldqidx = 1 - coldqidx;
200 }
201
202 #endif /* !defined(LISTQ) */
203
204 static struct clockpro_state clockpro __cacheline_aligned;
205 static struct clockpro_scanstate {
206 int ss_nscanned;
207 } scanstate;
208
209 /* ---------------------------------------- */
210
211 static void
212 pageq_init(pageq_t *q)
213 {
214
215 TAILQ_INIT(&q->q_q);
216 q->q_len = 0;
217 }
218
219 static int
220 pageq_len(const pageq_t *q)
221 {
222
223 return q->q_len;
224 }
225
226 static struct vm_page *
227 pageq_first(const pageq_t *q)
228 {
229
230 return TAILQ_FIRST(&q->q_q);
231 }
232
233 static void
234 pageq_insert_tail(pageq_t *q, struct vm_page *pg)
235 {
236
237 TAILQ_INSERT_TAIL(&q->q_q, pg, pageq.queue);
238 q->q_len++;
239 }
240
241 #if defined(LISTQ)
242 static void
243 pageq_insert_head(pageq_t *q, struct vm_page *pg)
244 {
245
246 TAILQ_INSERT_HEAD(&q->q_q, pg, pageq.queue);
247 q->q_len++;
248 }
249 #endif
250
251 static void
252 pageq_remove(pageq_t *q, struct vm_page *pg)
253 {
254
255 #if 1
256 KASSERT(clockpro_queue(&clockpro, clockpro_getq(pg)) == q);
257 #endif
258 KASSERT(q->q_len > 0);
259 TAILQ_REMOVE(&q->q_q, pg, pageq.queue);
260 q->q_len--;
261 }
262
263 static struct vm_page *
264 pageq_remove_head(pageq_t *q)
265 {
266 struct vm_page *pg;
267
268 pg = TAILQ_FIRST(&q->q_q);
269 if (pg == NULL) {
270 KASSERT(q->q_len == 0);
271 return NULL;
272 }
273 pageq_remove(q, pg);
274 return pg;
275 }
276
277 /* ---------------------------------------- */
278
279 static void
280 clockpro_insert_tail(struct clockpro_state *s, int qidx, struct vm_page *pg)
281 {
282 pageq_t *q = clockpro_queue(s, qidx);
283
284 clockpro_setq(pg, qidx);
285 pageq_insert_tail(q, pg);
286 }
287
288 #if defined(LISTQ)
289 static void
290 clockpro_insert_head(struct clockpro_state *s, int qidx, struct vm_page *pg)
291 {
292 pageq_t *q = clockpro_queue(s, qidx);
293
294 clockpro_setq(pg, qidx);
295 pageq_insert_head(q, pg);
296 }
297
298 #endif
299 /* ---------------------------------------- */
300
301 typedef uint32_t nonres_cookie_t;
302 #define NONRES_COOKIE_INVAL 0
303
304 typedef uintptr_t objid_t;
305
306 /*
307 * XXX maybe these hash functions need reconsideration,
308 * given that hash distribution is critical here.
309 */
310
311 static uint32_t
312 pageidentityhash1(objid_t obj, off_t idx)
313 {
314 uint32_t hash = HASH32_BUF_INIT;
315
316 #if 1
317 hash = hash32_buf(&idx, sizeof(idx), hash);
318 hash = hash32_buf(&obj, sizeof(obj), hash);
319 #else
320 hash = hash32_buf(&obj, sizeof(obj), hash);
321 hash = hash32_buf(&idx, sizeof(idx), hash);
322 #endif
323 return hash;
324 }
325
326 static uint32_t
327 pageidentityhash2(objid_t obj, off_t idx)
328 {
329 uint32_t hash = HASH32_BUF_INIT;
330
331 hash = hash32_buf(&obj, sizeof(obj), hash);
332 hash = hash32_buf(&idx, sizeof(idx), hash);
333 return hash;
334 }
335
336 static nonres_cookie_t
337 calccookie(objid_t obj, off_t idx)
338 {
339 uint32_t hash = pageidentityhash2(obj, idx);
340 nonres_cookie_t cookie = hash;
341
342 if (__predict_false(cookie == NONRES_COOKIE_INVAL)) {
343 cookie++; /* XXX */
344 }
345 return cookie;
346 }
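/*
 * editor's note: a cookie is only a 32-bit hash of the page identity,
 * so distinct pages can map to the same cookie (the DEBUG code in
 * nonresident_pagerecord() counts these as "nresconflict").  a false
 * match in nonresident_lookupremove() merely makes a faulting page
 * look like a re-reference within its test period; the structure is an
 * approximation by design, as the header comment notes.
 */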
347
348 #define BUCKETSIZE 14
349 struct bucket {
350 int cycle;
351 int cur;
352 nonres_cookie_t pages[BUCKETSIZE];
353 };
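/*
 * editor's note: each bucket is a small circular buffer of cookies
 * with its own hand ("cur") and a snapshot of the global clock hand
 * ("cycle").  with 32-bit ints and 32-bit cookies,
 * sizeof(struct bucket) is 2 * 4 + 14 * 4 = 64 bytes, which happens
 * to match a common cache line size.
 */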
354 static int cycle_target;
355 static int cycle_target_frac;
356
357 static struct bucket static_bucket;
358 static struct bucket *buckets = &static_bucket;
359 static size_t hashsize = 1;
360
361 static int coldadj;
362 #define COLDTARGET_ADJ(d) coldadj += (d)
363
364 #if defined(PDSIM)
365
366 static void *
367 clockpro_hashalloc(int n)
368 {
369 size_t allocsz = sizeof(*buckets) * n;
370
371 return malloc(allocsz);
372 }
373
374 static void
375 clockpro_hashfree(void *p, int n)
376 {
377
378 free(p);
379 }
380
381 #else /* defined(PDSIM) */
382
383 static void *
384 clockpro_hashalloc(int n)
385 {
386 size_t allocsz = round_page(sizeof(*buckets) * n);
387
388 return (void *)uvm_km_alloc(kernel_map, allocsz, 0, UVM_KMF_WIRED);
389 }
390
391 static void
392 clockpro_hashfree(void *p, int n)
393 {
394 size_t allocsz = round_page(sizeof(*buckets) * n);
395
396 uvm_km_free(kernel_map, (vaddr_t)p, allocsz, UVM_KMF_WIRED);
397 }
398
399 #endif /* defined(PDSIM) */
400
401 static void
402 clockpro_hashinit(uint64_t n)
403 {
404 struct bucket *newbuckets;
405 struct bucket *oldbuckets;
406 size_t sz;
407 size_t oldsz;
408 int i;
409
410 sz = howmany(n, BUCKETSIZE);
411 sz *= clockpro_hashfactor;
412 newbuckets = clockpro_hashalloc(sz);
413 if (newbuckets == NULL) {
414 panic("%s: allocation failure", __func__);
415 }
416 for (i = 0; i < sz; i++) {
417 struct bucket *b = &newbuckets[i];
418 int j;
419
420 b->cycle = cycle_target;
421 b->cur = 0;
422 for (j = 0; j < BUCKETSIZE; j++) {
423 b->pages[j] = NONRES_COOKIE_INVAL;
424 }
425 }
426 /* XXX lock */
427 oldbuckets = buckets;
428 oldsz = hashsize;
429 buckets = newbuckets;
430 hashsize = sz;
431 /* XXX unlock */
432 if (oldbuckets != &static_bucket) {
433 clockpro_hashfree(oldbuckets, oldsz);
434 }
435 }
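/*
 * editor's note, illustrative arithmetic only: with 4 KiB pages and
 * 1 GiB of managed memory, n = 262144, so
 * sz = howmany(262144, BUCKETSIZE) * 2 = 37450 buckets.  that can
 * remember about 37450 * 14 = 524300 non-resident cookies (roughly
 * clockpro_hashfactor times the number of resident pages) in about
 * 37450 * 64 bytes = 2.3 MiB of wired kernel memory, assuming the
 * 64-byte bucket noted above.
 */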
436
437 static struct bucket *
438 nonresident_getbucket(objid_t obj, off_t idx)
439 {
440 uint32_t hash;
441
442 hash = pageidentityhash1(obj, idx);
443 return &buckets[hash % hashsize];
444 }
445
446 static void
447 nonresident_rotate(struct bucket *b)
448 {
449 const int target = cycle_target;
450 const int cycle = b->cycle;
451 int cur;
452 int todo;
453
454 todo = target - cycle;
455 if (todo >= BUCKETSIZE * 2) {
456 todo = (todo % BUCKETSIZE) + BUCKETSIZE;
457 }
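	/*
	 * editor's note: when the bucket has fallen two or more full
	 * revolutions behind the global clock, one full sweep already
	 * invalidates every slot, so the clamp above caps the work at
	 * BUCKETSIZE..2*BUCKETSIZE-1 steps; the remainder term keeps
	 * b->cur where an unclamped rotation would have left it.
	 */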
458 cur = b->cur;
459 while (todo > 0) {
460 if (b->pages[cur] != NONRES_COOKIE_INVAL) {
461 PDPOL_EVCNT_INCR(nreshandhot);
462 COLDTARGET_ADJ(-1);
463 }
464 b->pages[cur] = NONRES_COOKIE_INVAL;
465 cur++;
466 if (cur == BUCKETSIZE) {
467 cur = 0;
468 }
469 todo--;
470 }
471 b->cycle = target;
472 b->cur = cur;
473 }
474
475 static bool
476 nonresident_lookupremove(objid_t obj, off_t idx)
477 {
478 struct bucket *b = nonresident_getbucket(obj, idx);
479 nonres_cookie_t cookie = calccookie(obj, idx);
480 int i;
481
482 nonresident_rotate(b);
483 for (i = 0; i < BUCKETSIZE; i++) {
484 if (b->pages[i] == cookie) {
485 b->pages[i] = NONRES_COOKIE_INVAL;
486 return true;
487 }
488 }
489 return false;
490 }
491
492 static objid_t
493 pageobj(struct vm_page *pg)
494 {
495 const void *obj;
496
497 /*
498 * XXX the object pointer is often freed and reused for an unrelated object.
499 * for vnodes, it would be better to use something like
500 * a hash of fsid/fileid/generation.
501 */
502
503 obj = pg->uobject;
504 if (obj == NULL) {
505 obj = pg->uanon;
506 KASSERT(obj != NULL);
507 }
508 return (objid_t)obj;
509 }
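/*
 * editor's sketch, not part of the original source: the comment above
 * suggests deriving the identity of vnode-backed objects from stable
 * file attributes rather than from the (reusable) object pointer.
 * assuming a hypothetical caller that can supply the
 * fsid/fileid/generation triple, the objid_t could be computed as a
 * hash of that triple, along these lines:
 */
#if 0	/* illustrative sketch only */
struct pageident {
	uint64_t pi_fsid;	/* hypothetical: filesystem id */
	uint64_t pi_fileid;	/* hypothetical: file id (inode number) */
	uint32_t pi_gen;	/* hypothetical: inode generation number */
};

static objid_t
pageobj_stable(const struct pageident *pi)
{
	uint32_t hash = HASH32_BUF_INIT;

	hash = hash32_buf(&pi->pi_fsid, sizeof(pi->pi_fsid), hash);
	hash = hash32_buf(&pi->pi_fileid, sizeof(pi->pi_fileid), hash);
	hash = hash32_buf(&pi->pi_gen, sizeof(pi->pi_gen), hash);
	return (objid_t)hash;
}
#endif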
510
511 static off_t
512 pageidx(struct vm_page *pg)
513 {
514
515 KASSERT((pg->offset & PAGE_MASK) == 0);
516 return pg->offset >> PAGE_SHIFT;
517 }
518
519 static bool
520 nonresident_pagelookupremove(struct vm_page *pg)
521 {
522 bool found = nonresident_lookupremove(pageobj(pg), pageidx(pg));
523
524 if (pg->uobject) {
525 PDPOL_EVCNT_INCR(nreslookupobj);
526 } else {
527 PDPOL_EVCNT_INCR(nreslookupanon);
528 }
529 if (found) {
530 if (pg->uobject) {
531 PDPOL_EVCNT_INCR(nresfoundobj);
532 } else {
533 PDPOL_EVCNT_INCR(nresfoundanon);
534 }
535 }
536 return found;
537 }
538
539 static void
540 nonresident_pagerecord(struct vm_page *pg)
541 {
542 objid_t obj = pageobj(pg);
543 off_t idx = pageidx(pg);
544 struct bucket *b = nonresident_getbucket(obj, idx);
545 nonres_cookie_t cookie = calccookie(obj, idx);
546
547 #if defined(DEBUG)
548 int i;
549
550 for (i = 0; i < BUCKETSIZE; i++) {
551 if (b->pages[i] == cookie) {
552 PDPOL_EVCNT_INCR(nresconflict);
553 }
554 }
555 #endif /* defined(DEBUG) */
556
557 if (pg->uobject) {
558 PDPOL_EVCNT_INCR(nresrecordobj);
559 } else {
560 PDPOL_EVCNT_INCR(nresrecordanon);
561 }
562 nonresident_rotate(b);
563 if (b->pages[b->cur] != NONRES_COOKIE_INVAL) {
564 PDPOL_EVCNT_INCR(nresoverwritten);
565 COLDTARGET_ADJ(-1);
566 }
567 b->pages[b->cur] = cookie;
568 b->cur = (b->cur + 1) % BUCKETSIZE;
569 }
570
571 /* ---------------------------------------- */
572
573 #if defined(CLOCKPRO_DEBUG)
574 static void
575 check_sanity(void)
576 {
577 }
578 #else /* defined(CLOCKPRO_DEBUG) */
579 #define check_sanity() /* nothing */
580 #endif /* defined(CLOCKPRO_DEBUG) */
581
582 static void
583 clockpro_reinit(void)
584 {
585
586 KASSERT(mutex_owned(&clockpro.lock));
587
588 clockpro_hashinit(uvmexp.npages);
589 }
590
591 static void
592 clockpro_init(void)
593 {
594 struct clockpro_state *s = &clockpro;
595 int i;
596
597 mutex_init(&s->lock, MUTEX_DEFAULT, IPL_NONE);
598 for (i = 0; i < CLOCKPRO_NQUEUE; i++) {
599 pageq_init(&s->s_q[i]);
600 }
601 s->s_newqlenmax = 1;
602 s->s_coldtarget = 1;
603 uvm_pctparam_init(&s->s_coldtargetpct, CLOCKPRO_COLDPCT, NULL);
604 }
605
606 static void
607 clockpro_tune(void)
608 {
609 struct clockpro_state *s = &clockpro;
610 int coldtarget;
611
612 KASSERT(mutex_owned(&s->lock));
613
614 #if defined(ADAPTIVE)
615 int coldmax = s->s_npages * CLOCKPRO_COLDPCTMAX / 100;
616 int coldmin = 1;
617
618 coldtarget = s->s_coldtarget;
619 if (coldtarget + coldadj < coldmin) {
620 coldadj = coldmin - coldtarget;
621 } else if (coldtarget + coldadj > coldmax) {
622 coldadj = coldmax - coldtarget;
623 }
624 coldtarget += coldadj;
625 #else /* defined(ADAPTIVE) */
626 coldtarget = UVM_PCTPARAM_APPLY(&s->s_coldtargetpct, s->s_npages);
627 if (coldtarget < 1) {
628 coldtarget = 1;
629 }
630 #endif /* defined(ADAPTIVE) */
631
632 s->s_coldtarget = coldtarget;
633 s->s_newqlenmax = coldtarget / 4;
634 if (s->s_newqlenmax < CLOCKPRO_NEWQMIN) {
635 s->s_newqlenmax = CLOCKPRO_NEWQMIN;
636 }
637 }
638
639 static void
640 clockpro_movereferencebit(struct vm_page *pg, bool locked)
641 {
642 kmutex_t *lock;
643 bool referenced;
644
645 KASSERT(mutex_owned(&clockpro.lock));
646 KASSERT(!locked || uvm_page_locked_p(pg));
647 if (!locked) {
648 /*
649 * acquire interlock to stabilize page identity.
650 * if we have caught the page in a state of flux
651 * and it should be dequeued, abort. it will be
652 * dequeued later.
653 */
654 mutex_enter(&pg->interlock);
655 if ((pg->uobject == NULL && pg->uanon == NULL) ||
656 pg->wire_count > 0) {
657 mutex_exit(&pg->interlock);
658 PDPOL_EVCNT_INCR(lockfail);
659 return;
660 }
661 mutex_exit(&clockpro.lock); /* XXX */
662 lock = uvmpd_trylockowner(pg);
663 /* pg->interlock now dropped */
664 mutex_enter(&clockpro.lock); /* XXX */
665 if (lock == NULL) {
666 /*
667 * XXXuvmplock
668 */
669 PDPOL_EVCNT_INCR(lockfail);
670 return;
671 }
672 PDPOL_EVCNT_INCR(locksuccess);
673 }
674 referenced = pmap_clear_reference(pg);
675 if (!locked) {
676 mutex_exit(lock);
677 }
678 if (referenced) {
679 pg->pqflags |= PQ_REFERENCED;
680 }
681 }
682
683 static void
684 clockpro_clearreferencebit(struct vm_page *pg, bool locked)
685 {
686
687 KASSERT(mutex_owned(&clockpro.lock));
688
689 clockpro_movereferencebit(pg, locked);
690 pg->pqflags &= ~PQ_REFERENCED;
691 }
692
693 static void
694 clockpro___newqrotate(int len)
695 {
696 struct clockpro_state * const s = &clockpro;
697 pageq_t * const newq = clockpro_queue(s, CLOCKPRO_NEWQ);
698 struct vm_page *pg;
699
700 KASSERT(mutex_owned(&s->lock));
701
702 while (pageq_len(newq) > len) {
703 pg = pageq_remove_head(newq);
704 KASSERT(pg != NULL);
705 KASSERT(clockpro_getq(pg) == CLOCKPRO_NEWQ);
706 if ((pg->pqflags & PQ_INITIALREF) != 0) {
707 clockpro_clearreferencebit(pg, false);
708 pg->pqflags &= ~PQ_INITIALREF;
709 }
710 /* place at the list head */
711 clockpro_insert_tail(s, CLOCKPRO_COLDQ, pg);
712 }
713 }
714
715 static void
716 clockpro_newqrotate(void)
717 {
718 struct clockpro_state * const s = &clockpro;
719
720 KASSERT(mutex_owned(&s->lock));
721
722 check_sanity();
723 clockpro___newqrotate(s->s_newqlenmax);
724 check_sanity();
725 }
726
727 static void
728 clockpro_newqflush(int n)
729 {
730
731 KASSERT(mutex_owned(&clockpro.lock));
732
733 check_sanity();
734 clockpro___newqrotate(n);
735 check_sanity();
736 }
737
738 static void
739 clockpro_newqflushone(void)
740 {
741 struct clockpro_state * const s = &clockpro;
742
743 KASSERT(mutex_owned(&s->lock));
744
745 clockpro_newqflush(
746 MAX(pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)) - 1, 0));
747 }
748
749 /*
750 * our "tail" is called "list-head" in the paper.
751 */
752
753 static void
754 clockpro___enqueuetail(struct vm_page *pg)
755 {
756 struct clockpro_state * const s = &clockpro;
757
758 KASSERT(mutex_owned(&s->lock));
759 KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);
760
761 check_sanity();
762 #if !defined(USEONCE2)
763 clockpro_insert_tail(s, CLOCKPRO_NEWQ, pg);
764 clockpro_newqrotate();
765 #else /* !defined(USEONCE2) */
766 #if defined(LISTQ)
767 KASSERT((pg->pqflags & PQ_REFERENCED) == 0);
768 #endif /* defined(LISTQ) */
769 clockpro_insert_tail(s, CLOCKPRO_COLDQ, pg);
770 #endif /* !defined(USEONCE2) */
771 check_sanity();
772 }
773
774 static void
775 clockpro_pageenqueue(struct vm_page *pg)
776 {
777 struct clockpro_state * const s = &clockpro;
778 bool hot;
779 bool speculative = (pg->pqflags & PQ_SPECULATIVE) != 0; /* XXX */
780
781 KASSERT((~pg->pqflags & (PQ_INITIALREF|PQ_SPECULATIVE)) != 0);
782 KASSERT(mutex_owned(&s->lock));
783 check_sanity();
784 KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);
785 s->s_npages++;
786 pg->pqflags &= ~(PQ_HOT|PQ_TEST);
787 if (speculative) {
788 hot = false;
789 PDPOL_EVCNT_INCR(speculativeenqueue);
790 } else {
791 hot = nonresident_pagelookupremove(pg);
792 if (hot) {
793 COLDTARGET_ADJ(1);
794 }
795 }
796
797 /*
798 * consider mmap'ed file:
799 *
800 * - read-ahead enqueues a page.
801 *
802 * - on the following read-ahead hit, the fault handler activates it.
803 *
804 * - finally, the userland code which caused the above fault
805 * actually accesses the page, which sets its reference bit.
806 *
807 * we want to count the above as a single access, rather than
808 * three accesses with short reuse distances.
809 */
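	/*
	 * editor's note on how the code below realizes that: the
	 * read-ahead enqueue arrives via uvmpdpol_pageenqueue() with
	 * PQ_SPECULATIVE set, the fault-time activation arrives via
	 * uvmpdpol_pageactivate() which sets PQ_INITIALREF and
	 * re-enqueues the page, and clockpro___newqrotate() later clears
	 * both the hardware reference bit and PQ_INITIALREF when the
	 * page leaves the new queue, so only accesses after that point
	 * are visible to the clock hands.
	 */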
810
811 #if defined(USEONCE2)
812 pg->pqflags &= ~PQ_INITIALREF;
813 if (hot) {
814 pg->pqflags |= PQ_TEST;
815 }
816 s->s_ncold++;
817 clockpro_clearreferencebit(pg, false);
818 clockpro___enqueuetail(pg);
819 #else /* defined(USEONCE2) */
820 if (speculative) {
821 s->s_ncold++;
822 } else if (hot) {
823 pg->pqflags |= PQ_HOT;
824 } else {
825 pg->pqflags |= PQ_TEST;
826 s->s_ncold++;
827 }
828 clockpro___enqueuetail(pg);
829 #endif /* defined(USEONCE2) */
830 KASSERT(s->s_ncold <= s->s_npages);
831 }
832
833 static pageq_t *
834 clockpro_pagequeue(struct vm_page *pg)
835 {
836 struct clockpro_state * const s = &clockpro;
837 int qidx;
838
839 KASSERT(mutex_owned(&s->lock));
840
841 qidx = clockpro_getq(pg);
842 KASSERT(qidx != CLOCKPRO_NOQUEUE);
843
844 return clockpro_queue(s, qidx);
845 }
846
847 static void
848 clockpro_pagedequeue(struct vm_page *pg)
849 {
850 struct clockpro_state * const s = &clockpro;
851 pageq_t *q;
852
853 KASSERT(mutex_owned(&s->lock));
854
855 KASSERT(s->s_npages > 0);
856 check_sanity();
857 q = clockpro_pagequeue(pg);
858 pageq_remove(q, pg);
859 check_sanity();
860 clockpro_setq(pg, CLOCKPRO_NOQUEUE);
861 if ((pg->pqflags & PQ_HOT) == 0) {
862 KASSERT(s->s_ncold > 0);
863 s->s_ncold--;
864 }
865 KASSERT(s->s_npages > 0);
866 s->s_npages--;
867 check_sanity();
868 }
869
870 static void
871 clockpro_pagerequeue(struct vm_page *pg)
872 {
873 struct clockpro_state * const s = &clockpro;
874 int qidx;
875
876 KASSERT(mutex_owned(&s->lock));
877
878 qidx = clockpro_getq(pg);
879 KASSERT(qidx == CLOCKPRO_HOTQ || qidx == CLOCKPRO_COLDQ);
880 pageq_remove(clockpro_queue(s, qidx), pg);
881 check_sanity();
882 clockpro_setq(pg, CLOCKPRO_NOQUEUE);
883
884 clockpro___enqueuetail(pg);
885 }
886
887 static void
888 handhot_endtest(struct vm_page *pg)
889 {
890
891 KASSERT(mutex_owned(&clockpro.lock));
892
893 KASSERT((pg->pqflags & PQ_HOT) == 0);
894 if ((pg->pqflags & PQ_TEST) != 0) {
895 PDPOL_EVCNT_INCR(hhotcoldtest);
896 COLDTARGET_ADJ(-1);
897 pg->pqflags &= ~PQ_TEST;
898 } else {
899 PDPOL_EVCNT_INCR(hhotcold);
900 }
901 }
902
903 static void
904 handhot_advance(void)
905 {
906 struct clockpro_state * const s = &clockpro;
907 struct vm_page *pg;
908 pageq_t *hotq;
909 int hotqlen;
910
911 KASSERT(mutex_owned(&s->lock));
912
913 clockpro_tune();
914
915 dump("hot called");
916 if (s->s_ncold >= s->s_coldtarget) {
917 return;
918 }
919 hotq = clockpro_queue(s, CLOCKPRO_HOTQ);
920 again:
921 pg = pageq_first(hotq);
922 if (pg == NULL) {
923 DPRINTF("%s: HHOT TAKEOVER\n", __func__);
924 dump("hhottakeover");
925 PDPOL_EVCNT_INCR(hhottakeover);
926 #if defined(LISTQ)
927 while (/* CONSTCOND */ 1) {
928 pageq_t *coldq = clockpro_queue(s, CLOCKPRO_COLDQ);
929
930 pg = pageq_first(coldq);
931 if (pg == NULL) {
932 clockpro_newqflushone();
933 pg = pageq_first(coldq);
934 if (pg == NULL) {
935 WARN("hhot: no page?\n");
936 return;
937 }
938 }
939 KASSERT(clockpro_pagequeue(pg) == coldq);
940 pageq_remove(coldq, pg);
941 check_sanity();
942 if ((pg->pqflags & PQ_HOT) == 0) {
943 handhot_endtest(pg);
944 clockpro_insert_tail(s, CLOCKPRO_LISTQ, pg);
945 } else {
946 clockpro_insert_head(s, CLOCKPRO_HOTQ, pg);
947 break;
948 }
949 }
950 #else /* defined(LISTQ) */
951 clockpro_newqflush(0); /* XXX XXX */
952 clockpro_switchqueue();
953 hotq = clockpro_queue(s, CLOCKPRO_HOTQ);
954 goto again;
955 #endif /* defined(LISTQ) */
956 }
957
958 KASSERT(clockpro_pagequeue(pg) == hotq);
959
960 /*
961 * terminate the test period of nonresident pages by cycling them.
962 */
963
964 cycle_target_frac += BUCKETSIZE;
965 hotqlen = pageq_len(hotq);
966 while (cycle_target_frac >= hotqlen) {
967 cycle_target++;
968 cycle_target_frac -= hotqlen;
969 }
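	/*
	 * editor's note: each step of the hot hand adds BUCKETSIZE to
	 * the fractional counter, so cycle_target advances by roughly
	 * BUCKETSIZE / hotqlen per visited page, i.e. by about
	 * BUCKETSIZE per full revolution of the hot queue.  since
	 * nonresident_rotate() invalidates one cookie per cycle_target
	 * tick, one revolution of the hot hand retires roughly one
	 * bucketful of non-resident records in every bucket.
	 */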
970
971 if ((pg->pqflags & PQ_HOT) == 0) {
972 #if defined(LISTQ)
973 panic("cold page in hotq: %p", pg);
974 #else /* defined(LISTQ) */
975 handhot_endtest(pg);
976 goto next;
977 #endif /* defined(LISTQ) */
978 }
979 KASSERT((pg->pqflags & PQ_TEST) == 0);
980 KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
981 KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);
982
983 /*
984 * once we have met our target,
985 * stop at a hot page so that no cold pages in the test period
986 * have larger recency than any hot pages.
987 */
988
989 if (s->s_ncold >= s->s_coldtarget) {
990 dump("hot done");
991 return;
992 }
993 clockpro_movereferencebit(pg, false);
994 if ((pg->pqflags & PQ_REFERENCED) == 0) {
995 PDPOL_EVCNT_INCR(hhotunref);
996 uvmexp.pddeact++;
997 pg->pqflags &= ~PQ_HOT;
998 clockpro.s_ncold++;
999 KASSERT(s->s_ncold <= s->s_npages);
1000 } else {
1001 PDPOL_EVCNT_INCR(hhotref);
1002 }
1003 pg->pqflags &= ~PQ_REFERENCED;
1004 #if !defined(LISTQ)
1005 next:
1006 #endif /* !defined(LISTQ) */
1007 clockpro_pagerequeue(pg);
1008 dump("hot");
1009 goto again;
1010 }
1011
1012 static struct vm_page *
1013 handcold_advance(void)
1014 {
1015 struct clockpro_state * const s = &clockpro;
1016 struct vm_page *pg;
1017
1018 KASSERT(mutex_owned(&s->lock));
1019
1020 for (;;) {
1021 #if defined(LISTQ)
1022 pageq_t *listq = clockpro_queue(s, CLOCKPRO_LISTQ);
1023 #endif /* defined(LISTQ) */
1024 pageq_t *coldq;
1025
1026 clockpro_newqrotate();
1027 handhot_advance();
1028 #if defined(LISTQ)
1029 pg = pageq_first(listq);
1030 if (pg != NULL) {
1031 KASSERT(clockpro_getq(pg) == CLOCKPRO_LISTQ);
1032 KASSERT((pg->pqflags & PQ_TEST) == 0);
1033 KASSERT((pg->pqflags & PQ_HOT) == 0);
1034 KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
1035 pageq_remove(listq, pg);
1036 check_sanity();
1037 clockpro_insert_head(s, CLOCKPRO_COLDQ, pg); /* XXX */
1038 goto gotcold;
1039 }
1040 #endif /* defined(LISTQ) */
1041 check_sanity();
1042 coldq = clockpro_queue(s, CLOCKPRO_COLDQ);
1043 pg = pageq_first(coldq);
1044 if (pg == NULL) {
1045 clockpro_newqflushone();
1046 pg = pageq_first(coldq);
1047 }
1048 if (pg == NULL) {
1049 DPRINTF("%s: HCOLD TAKEOVER\n", __func__);
1050 dump("hcoldtakeover");
1051 PDPOL_EVCNT_INCR(hcoldtakeover);
1052 KASSERT(
1053 pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)) == 0);
1054 #if defined(LISTQ)
1055 KASSERT(
1056 pageq_len(clockpro_queue(s, CLOCKPRO_HOTQ)) == 0);
1057 #else /* defined(LISTQ) */
1058 clockpro_switchqueue();
1059 coldq = clockpro_queue(s, CLOCKPRO_COLDQ);
1060 pg = pageq_first(coldq);
1061 #endif /* defined(LISTQ) */
1062 }
1063 if (pg == NULL) {
1064 WARN("hcold: no page?\n");
1065 return NULL;
1066 }
1067 KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
1068 if ((pg->pqflags & PQ_HOT) != 0) {
1069 PDPOL_EVCNT_INCR(hcoldhot);
1070 pageq_remove(coldq, pg);
1071 clockpro_insert_tail(s, CLOCKPRO_HOTQ, pg);
1072 check_sanity();
1073 KASSERT((pg->pqflags & PQ_TEST) == 0);
1074 uvmexp.pdscans++;
1075 continue;
1076 }
1077 #if defined(LISTQ)
1078 gotcold:
1079 #endif /* defined(LISTQ) */
1080 KASSERT((pg->pqflags & PQ_HOT) == 0);
1081 uvmexp.pdscans++;
1082 clockpro_movereferencebit(pg, false);
1083 if ((pg->pqflags & PQ_SPECULATIVE) != 0) {
1084 KASSERT((pg->pqflags & PQ_TEST) == 0);
1085 if ((pg->pqflags & PQ_REFERENCED) != 0) {
1086 PDPOL_EVCNT_INCR(speculativehit2);
1087 pg->pqflags &= ~(PQ_SPECULATIVE|PQ_REFERENCED);
1088 clockpro_pagedequeue(pg);
1089 clockpro_pageenqueue(pg);
1090 continue;
1091 }
1092 PDPOL_EVCNT_INCR(speculativemiss);
1093 }
1094 switch (pg->pqflags & (PQ_REFERENCED|PQ_TEST)) {
1095 case PQ_TEST:
1096 PDPOL_EVCNT_INCR(hcoldunreftest);
1097 nonresident_pagerecord(pg);
1098 goto gotit;
1099 case 0:
1100 PDPOL_EVCNT_INCR(hcoldunref);
1101 gotit:
1102 KASSERT(s->s_ncold > 0);
1103 clockpro_pagerequeue(pg); /* XXX */
1104 dump("cold done");
1105 /* XXX "pg" is still in queue */
1106 handhot_advance();
1107 goto done;
1108
1109 case PQ_REFERENCED|PQ_TEST:
1110 PDPOL_EVCNT_INCR(hcoldreftest);
1111 s->s_ncold--;
1112 COLDTARGET_ADJ(1);
1113 pg->pqflags |= PQ_HOT;
1114 pg->pqflags &= ~PQ_TEST;
1115 break;
1116
1117 case PQ_REFERENCED:
1118 PDPOL_EVCNT_INCR(hcoldref);
1119 pg->pqflags |= PQ_TEST;
1120 break;
1121 }
1122 pg->pqflags &= ~PQ_REFERENCED;
1123 uvmexp.pdreact++;
1124 /* move to the list head */
1125 clockpro_pagerequeue(pg);
1126 dump("cold");
1127 }
1128 done:;
1129 return pg;
1130 }
1131
1132 void
1133 uvmpdpol_pageactivate(struct vm_page *pg)
1134 {
1135 struct clockpro_state * const s = &clockpro;
1136
1137 mutex_enter(&s->lock);
1138 if (!uvmpdpol_pageisqueued_p(pg)) {
1139 KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);
1140 pg->pqflags |= PQ_INITIALREF;
1141 clockpro_pageenqueue(pg);
1142 } else if ((pg->pqflags & PQ_SPECULATIVE)) {
1143 PDPOL_EVCNT_INCR(speculativehit1);
1144 pg->pqflags &= ~PQ_SPECULATIVE;
1145 pg->pqflags |= PQ_INITIALREF;
1146 clockpro_pagedequeue(pg);
1147 clockpro_pageenqueue(pg);
1148 }
1149 pg->pqflags |= PQ_REFERENCED;
1150 mutex_exit(&s->lock);
1151 }
1152
1153 void
1154 uvmpdpol_pagedeactivate(struct vm_page *pg)
1155 {
1156 struct clockpro_state * const s = &clockpro;
1157
1158 mutex_enter(&s->lock);
1159 clockpro_clearreferencebit(pg, true);
1160 mutex_exit(&s->lock);
1161 }
1162
1163 void
1164 uvmpdpol_pagedequeue(struct vm_page *pg)
1165 {
1166 struct clockpro_state * const s = &clockpro;
1167
1168 if (!uvmpdpol_pageisqueued_p(pg)) {
1169 return;
1170 }
1171 mutex_enter(&s->lock);
1172 clockpro_pagedequeue(pg);
1173 pg->pqflags &= ~(PQ_INITIALREF|PQ_SPECULATIVE);
1174 mutex_exit(&s->lock);
1175 }
1176
1177 void
1178 uvmpdpol_pageenqueue(struct vm_page *pg)
1179 {
1180
1181 #if 1
1182 struct clockpro_state * const s = &clockpro;
1183
1184 if (uvmpdpol_pageisqueued_p(pg)) {
1185 return;
1186 }
1187 mutex_enter(&s->lock);
1188 clockpro_clearreferencebit(pg, true);
1189 pg->pqflags |= PQ_SPECULATIVE;
1190 clockpro_pageenqueue(pg);
1191 mutex_exit(&s->lock);
1192 #else
1193 uvmpdpol_pageactivate(pg);
1194 #endif
1195 }
1196
1197 void
1198 uvmpdpol_anfree(struct vm_anon *an)
1199 {
1200 struct clockpro_state * const s = &clockpro;
1201
1202 KASSERT(an->an_page == NULL);
1203 mutex_enter(&s->lock);
1204 if (nonresident_lookupremove((objid_t)an, 0)) {
1205 PDPOL_EVCNT_INCR(nresanonfree);
1206 }
1207 mutex_exit(&s->lock);
1208 }
1209
1210 void
1211 uvmpdpol_init(void)
1212 {
1213
1214 clockpro_init();
1215 }
1216
1217 void
1218 uvmpdpol_reinit(void)
1219 {
1220 struct clockpro_state * const s = &clockpro;
1221
1222 mutex_enter(&s->lock);
1223 clockpro_reinit();
1224 mutex_exit(&s->lock);
1225 }
1226
1227 void
1228 uvmpdpol_estimatepageable(int *active, int *inactive)
1229 {
1230 struct clockpro_state * const s = &clockpro;
1231
1232 mutex_enter(&s->lock);
1233 if (active) {
1234 *active = s->s_npages - s->s_ncold;
1235 }
1236 if (inactive) {
1237 *inactive = s->s_ncold;
1238 }
1239 mutex_exit(&s->lock);
1240 }
1241
1242 bool
1243 uvmpdpol_pageisqueued_p(struct vm_page *pg)
1244 {
1245
1246 /* Unlocked check OK due to page lifecycle. */
1247 return clockpro_getq(pg) != CLOCKPRO_NOQUEUE;
1248 }
1249
1250 void
1251 uvmpdpol_scaninit(void)
1252 {
1253 struct clockpro_state * const s = &clockpro;
1254 struct clockpro_scanstate * const ss = &scanstate;
1255
1256 mutex_enter(&s->lock);
1257 ss->ss_nscanned = 0;
1258 mutex_exit(&s->lock);
1259 }
1260
1261 struct vm_page *
1262 uvmpdpol_selectvictim(kmutex_t **plock)
1263 {
1264 struct clockpro_state * const s = &clockpro;
1265 struct clockpro_scanstate * const ss = &scanstate;
1266 struct vm_page *pg;
1267 kmutex_t *lock = NULL;
1268
1269 do {
1270 mutex_enter(&s->lock);
1271 if (ss->ss_nscanned > s->s_npages) {
1272 DPRINTF("scan too much\n");
1273 mutex_exit(&s->lock);
1274 return NULL;
1275 }
1276 pg = handcold_advance();
1277 if (pg == NULL) {
1278 mutex_exit(&s->lock);
1279 break;
1280 }
1281 ss->ss_nscanned++;
1282 /*
1283 * acquire interlock to stabilize page identity.
1284 * if we have caught the page in a state of flux
1285 * and it should be dequeued, do it now and then
1286 * move on to the next.
1287 */
1288 mutex_enter(&pg->interlock);
1289 if ((pg->uobject == NULL && pg->uanon == NULL) ||
1290 pg->wire_count > 0) {
1291 mutex_exit(&pg->interlock);
1292 clockpro_pagedequeue(pg);
1293 pg->pqflags &= ~(PQ_INITIALREF|PQ_SPECULATIVE);
1294 continue;
1295 }
1296 mutex_exit(&s->lock);
1297 lock = uvmpd_trylockowner(pg);
1298 /* pg->interlock now dropped */
1299 } while (lock == NULL);
1300 *plock = lock;
1301 return pg;
1302 }
1303
1304 static void
1305 clockpro_dropswap(pageq_t *q, int *todo)
1306 {
1307 struct vm_page *pg;
1308
1309 KASSERT(mutex_owned(&clockpro.lock));
1310
1311 TAILQ_FOREACH_REVERSE(pg, &q->q_q, pglist, pageq.queue) {
1312 if (*todo <= 0) {
1313 break;
1314 }
1315 if ((pg->pqflags & PQ_HOT) == 0) {
1316 continue;
1317 }
1318 mutex_enter(&pg->interlock);
1319 if ((pg->flags & PG_SWAPBACKED) == 0) {
1320 mutex_exit(&pg->interlock);
1321 continue;
1322 }
1323 if (uvmpd_trydropswap(pg)) {
1324 (*todo)--;
1325 }
1326 /* pg->interlock now dropped */
1327 }
1328 }
1329
1330 void
1331 uvmpdpol_balancequeue(int swap_shortage)
1332 {
1333 struct clockpro_state * const s = &clockpro;
1334 int todo = swap_shortage;
1335
1336 if (todo == 0) {
1337 return;
1338 }
1339
1340 /*
1341 * reclaim swap slots from hot pages
1342 */
1343
1344 DPRINTF("%s: swap_shortage=%d\n", __func__, swap_shortage);
1345
1346 mutex_enter(&s->lock);
1347 clockpro_dropswap(clockpro_queue(s, CLOCKPRO_NEWQ), &todo);
1348 clockpro_dropswap(clockpro_queue(s, CLOCKPRO_COLDQ), &todo);
1349 clockpro_dropswap(clockpro_queue(s, CLOCKPRO_HOTQ), &todo);
1350 mutex_exit(&s->lock);
1351
1352 DPRINTF("%s: done=%d\n", __func__, swap_shortage - todo);
1353 }
1354
1355 bool
1356 uvmpdpol_needsscan_p(void)
1357 {
1358 struct clockpro_state * const s = &clockpro;
1359
1360 /* This must be an unlocked check: it can be called from interrupt context. */
1361 return s->s_ncold < s->s_coldtarget;
1362 }
1363
1364 void
1365 uvmpdpol_tune(void)
1366 {
1367 struct clockpro_state * const s = &clockpro;
1368
1369 mutex_enter(&s->lock);
1370 clockpro_tune();
1371 mutex_exit(&s->lock);
1372 }
1373
1374 #if !defined(PDSIM)
1375
1376 #include <sys/sysctl.h> /* XXX SYSCTL_DESCR */
1377
1378 void
1379 uvmpdpol_sysctlsetup(void)
1380 {
1381 #if !defined(ADAPTIVE)
1382 struct clockpro_state * const s = &clockpro;
1383
1384 uvm_pctparam_createsysctlnode(&s->s_coldtargetpct, "coldtargetpct",
1385 SYSCTL_DESCR("Cold queue target as a percentage of the entire queue"));
1386 #endif /* !defined(ADAPTIVE) */
1387 }
1388
1389 #endif /* !defined(PDSIM) */
1390
1391 #if defined(DDB)
1392
1393 #if 0 /* XXXuvmplock */
1394 #define _pmap_is_referenced(pg) pmap_is_referenced(pg)
1395 #else
1396 #define _pmap_is_referenced(pg) false
1397 #endif
1398
1399 void clockpro_dump(void);
1400
1401 void
1402 clockpro_dump(void)
1403 {
1404 struct clockpro_state * const s = &clockpro;
1405
1406 struct vm_page *pg;
1407 int ncold, nhot, ntest, nspeculative, ninitialref, nref;
1408 int newqlen, coldqlen, hotqlen, listqlen;
1409
1410 newqlen = coldqlen = hotqlen = listqlen = 0;
1411 printf("npages=%d, ncold=%d, coldtarget=%d, newqlenmax=%d\n",
1412 s->s_npages, s->s_ncold, s->s_coldtarget, s->s_newqlenmax);
1413
1414 #define INITCOUNT() \
1415 ncold = nhot = ntest = nspeculative = ninitialref = nref = 0
1416
1417 #define COUNT(pg) \
1418 if ((pg->pqflags & PQ_HOT) != 0) { \
1419 nhot++; \
1420 } else { \
1421 ncold++; \
1422 if ((pg->pqflags & PQ_TEST) != 0) { \
1423 ntest++; \
1424 } \
1425 if ((pg->pqflags & PQ_SPECULATIVE) != 0) { \
1426 nspeculative++; \
1427 } \
1428 if ((pg->pqflags & PQ_INITIALREF) != 0) { \
1429 ninitialref++; \
1430 } else if ((pg->pqflags & PQ_REFERENCED) != 0 || \
1431 _pmap_is_referenced(pg)) { \
1432 nref++; \
1433 } \
1434 }
1435
1436 #define PRINTCOUNT(name) \
1437 printf("%s hot=%d, cold=%d, test=%d, speculative=%d, initialref=%d, " \
1438 "nref=%d\n", \
1439 (name), nhot, ncold, ntest, nspeculative, ninitialref, nref)
1440
1441 INITCOUNT();
1442 TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_NEWQ)->q_q, pageq.queue) {
1443 if (clockpro_getq(pg) != CLOCKPRO_NEWQ) {
1444 printf("newq corrupt %p\n", pg);
1445 }
1446 COUNT(pg)
1447 newqlen++;
1448 }
1449 PRINTCOUNT("newq");
1450
1451 INITCOUNT();
1452 TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_COLDQ)->q_q, pageq.queue) {
1453 if (clockpro_getq(pg) != CLOCKPRO_COLDQ) {
1454 printf("coldq corrupt %p\n", pg);
1455 }
1456 COUNT(pg)
1457 coldqlen++;
1458 }
1459 PRINTCOUNT("coldq");
1460
1461 INITCOUNT();
1462 TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_HOTQ)->q_q, pageq.queue) {
1463 if (clockpro_getq(pg) != CLOCKPRO_HOTQ) {
1464 printf("hotq corrupt %p\n", pg);
1465 }
1466 #if defined(LISTQ)
1467 if ((pg->pqflags & PQ_HOT) == 0) {
1468 printf("cold page in hotq: %p\n", pg);
1469 }
1470 #endif /* defined(LISTQ) */
1471 COUNT(pg)
1472 hotqlen++;
1473 }
1474 PRINTCOUNT("hotq");
1475
1476 INITCOUNT();
1477 TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_LISTQ)->q_q, pageq.queue) {
1478 #if !defined(LISTQ)
1479 printf("listq %p\n", pg);
1480 #endif /* !defined(LISTQ) */
1481 if (clockpro_getq(pg) != CLOCKPRO_LISTQ) {
1482 printf("listq corrupt %p\n", pg);
1483 }
1484 COUNT(pg)
1485 listqlen++;
1486 }
1487 PRINTCOUNT("listq");
1488
1489 printf("newqlen=%d/%d, coldqlen=%d/%d, hotqlen=%d/%d, listqlen=%d/%d\n",
1490 newqlen, pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)),
1491 coldqlen, pageq_len(clockpro_queue(s, CLOCKPRO_COLDQ)),
1492 hotqlen, pageq_len(clockpro_queue(s, CLOCKPRO_HOTQ)),
1493 listqlen, pageq_len(clockpro_queue(s, CLOCKPRO_LISTQ)));
1494 }
1495
1496 #endif /* defined(DDB) */
1497
1498 #if defined(PDSIM)
1499 #if defined(DEBUG)
1500 static void
1501 pdsim_dumpq(int qidx)
1502 {
1503 struct clockpro_state * const s = &clockpro;
1504 pageq_t *q = clockpro_queue(s, qidx);
1505 struct vm_page *pg;
1506
1507 TAILQ_FOREACH(pg, &q->q_q, pageq.queue) {
1508 DPRINTF(" %" PRIu64 "%s%s%s%s%s%s",
1509 pg->offset >> PAGE_SHIFT,
1510 (pg->pqflags & PQ_HOT) ? "H" : "",
1511 (pg->pqflags & PQ_TEST) ? "T" : "",
1512 (pg->pqflags & PQ_REFERENCED) ? "R" : "",
1513 _pmap_is_referenced(pg) ? "r" : "",
1514 (pg->pqflags & PQ_INITIALREF) ? "I" : "",
1515 (pg->pqflags & PQ_SPECULATIVE) ? "S" : ""
1516 );
1517 }
1518 }
1519 #endif /* defined(DEBUG) */
1520
1521 void
1522 pdsim_dump(const char *id)
1523 {
1524 #if defined(DEBUG)
1525 struct clockpro_state * const s = &clockpro;
1526
1527 DPRINTF(" %s L(", id);
1528 pdsim_dumpq(CLOCKPRO_LISTQ);
1529 DPRINTF(" ) H(");
1530 pdsim_dumpq(CLOCKPRO_HOTQ);
1531 DPRINTF(" ) C(");
1532 pdsim_dumpq(CLOCKPRO_COLDQ);
1533 DPRINTF(" ) N(");
1534 pdsim_dumpq(CLOCKPRO_NEWQ);
1535 DPRINTF(" ) ncold=%d/%d, coldadj=%d\n",
1536 s->s_ncold, s->s_coldtarget, coldadj);
1537 #endif /* defined(DEBUG) */
1538 }
1539 #endif /* defined(PDSIM) */
1540