1 /* $NetBSD: uvm_pdpolicy_clockpro.c,v 1.15.16.1 2012/02/09 03:05:01 matt Exp $ */
2
3 /*-
4 * Copyright (c)2005, 2006 YAMAMOTO Takashi,
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 /*
30 * CLOCK-Pro replacement policy:
31 * http://www.cs.wm.edu/hpcs/WWW/HTML/publications/abs05-3.html
32 *
33 * approximation of the list of non-resident pages using hash:
34 * http://linux-mm.org/ClockProApproximation
35 */
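/*
 * implementation overview: each page group keeps four queues
 * (CLOCKPRO_NEWQ, CLOCKPRO_COLDQ, CLOCKPRO_HOTQ and, with LISTQ,
 * CLOCKPRO_LISTQ), and the identities of recently evicted pages are
 * remembered as 32-bit cookies in a global hash of small buckets.
 * handhot_advance() and handcold_advance() below correspond to the
 * two clock hands of the paper.
 */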
36
37 /* #define CLOCKPRO_DEBUG */
38
39 #if defined(PDSIM)
40
41 #include "pdsim.h"
42
43 #else /* defined(PDSIM) */
44
45 #include <sys/cdefs.h>
46 __KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clockpro.c,v 1.15.16.1 2012/02/09 03:05:01 matt Exp $");
47
48 #include "opt_ddb.h"
49
50 #include <sys/param.h>
51 #include <sys/proc.h>
52 #include <sys/systm.h>
53 #include <sys/kernel.h>
54 #include <sys/hash.h>
55
56 #include <uvm/uvm.h>
57 #include <uvm/uvm_pdpolicy.h>
58 #include <uvm/uvm_pdpolicy_impl.h>
59
60 #if ((__STDC_VERSION__ - 0) >= 199901L)
61 #define DPRINTF(...) /* nothing */
62 #define WARN(...) printf(__VA_ARGS__)
63 #else /* ((__STDC_VERSION__ - 0) >= 199901L) */
64 #define DPRINTF(a...) /* nothing */ /* GCC */
65 #define WARN(a...) printf(a)
66 #endif /* ((__STDC_VERSION__ - 0) >= 199901L) */
67
68 #define dump(a) /* nothing */
69
70 #undef USEONCE2
71 #define LISTQ
72 #undef ADAPTIVE
73
74 #endif /* defined(PDSIM) */
75
76 #if !defined(CLOCKPRO_COLDPCT)
77 #define CLOCKPRO_COLDPCT 10
78 #endif /* !defined(CLOCKPRO_COLDPCT) */
79
80 #define CLOCKPRO_COLDPCTMAX 90
81
82 #if !defined(CLOCKPRO_HASHFACTOR)
83 #define CLOCKPRO_HASHFACTOR 2
84 #endif /* !defined(CLOCKPRO_HASHFACTOR) */
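/*
 * CLOCKPRO_COLDPCT and CLOCKPRO_HASHFACTOR above are compile-time defaults
 * and can be overridden at build time, e.g. with something like
 * -DCLOCKPRO_COLDPCT=20 in CPPFLAGS.  when ADAPTIVE is not defined, the
 * cold target percentage can also be tuned at run time via the sysctl
 * node created in uvmpdpol_sysctlsetup().
 */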
85
86 #define CLOCKPRO_NEWQMIN ((1024 * 1024) >> PAGE_SHIFT) /* XXX */
87
88 int clockpro_hashfactor = CLOCKPRO_HASHFACTOR;
89
90 PDPOL_EVCNT_DEFINE(nresrecordobj)
91 PDPOL_EVCNT_DEFINE(nresrecordanon)
92 PDPOL_EVCNT_DEFINE(nreslookupobj)
93 PDPOL_EVCNT_DEFINE(nreslookupanon)
94 PDPOL_EVCNT_DEFINE(nresfoundobj)
95 PDPOL_EVCNT_DEFINE(nresfoundanon)
96 PDPOL_EVCNT_DEFINE(nresanonfree)
97 PDPOL_EVCNT_DEFINE(nresconflict)
98 PDPOL_EVCNT_DEFINE(nresoverwritten)
99 PDPOL_EVCNT_DEFINE(nreshandhot)
100
101 PDPOL_EVCNT_DEFINE(hhottakeover)
102 PDPOL_EVCNT_DEFINE(hhotref)
103 PDPOL_EVCNT_DEFINE(hhotunref)
104 PDPOL_EVCNT_DEFINE(hhotcold)
105 PDPOL_EVCNT_DEFINE(hhotcoldtest)
106
107 PDPOL_EVCNT_DEFINE(hcoldtakeover)
108 PDPOL_EVCNT_DEFINE(hcoldref)
109 PDPOL_EVCNT_DEFINE(hcoldunref)
110 PDPOL_EVCNT_DEFINE(hcoldreftest)
111 PDPOL_EVCNT_DEFINE(hcoldunreftest)
112 PDPOL_EVCNT_DEFINE(hcoldunreftestspeculative)
113 PDPOL_EVCNT_DEFINE(hcoldhot)
114
115 PDPOL_EVCNT_DEFINE(speculativeenqueue)
116 PDPOL_EVCNT_DEFINE(speculativehit1)
117 PDPOL_EVCNT_DEFINE(speculativehit2)
118 PDPOL_EVCNT_DEFINE(speculativemiss)
119
120 #define PQ_REFERENCED PQ_PRIVATE1
121 #define PQ_HOT PQ_PRIVATE2
122 #define PQ_TEST PQ_PRIVATE3
123 #define PQ_INITIALREF PQ_PRIVATE4
124 #if PQ_PRIVATE6 != PQ_PRIVATE5 * 2 || PQ_PRIVATE7 != PQ_PRIVATE6 * 2
125 #error PQ_PRIVATE
126 #endif
127 #define PQ_QMASK (PQ_PRIVATE5|PQ_PRIVATE6|PQ_PRIVATE7)
128 #define PQ_QFACTOR PQ_PRIVATE5
129 #define PQ_SPECULATIVE PQ_PRIVATE8
130
131 #define CLOCKPRO_NOQUEUE 0
132 #define CLOCKPRO_NEWQ 1 /* small queue to clear initial ref. */
133 #if defined(LISTQ)
134 #define CLOCKPRO_COLDQ(gs) 2
135 #define CLOCKPRO_HOTQ(gs) 3
136 #else /* defined(LISTQ) */
137 #define CLOCKPRO_COLDQ(gs) (2 + (gs)->gs_coldqidx) /* XXX */
138 #define CLOCKPRO_HOTQ(gs) (3 - (gs)->gs_coldqidx) /* XXX */
139 #endif /* defined(LISTQ) */
140 #define CLOCKPRO_LISTQ 4
141 #define CLOCKPRO_NQUEUE 4
142
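/*
 * a page's current queue index is encoded in pg->pqflags using the
 * PQ_QMASK bits: queue index N is stored as N * PQ_QFACTOR, and
 * CLOCKPRO_NOQUEUE (0) means the page is on none of our queues.
 */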
143 static inline void
144 clockpro_setq(struct vm_page *pg, int qidx)
145 {
146 KASSERT(qidx >= CLOCKPRO_NOQUEUE);
147 KASSERT(qidx <= CLOCKPRO_NQUEUE);
148
149 pg->pqflags = (pg->pqflags & ~PQ_QMASK) | (qidx * PQ_QFACTOR);
150 }
151
152 static inline int
153 clockpro_getq(struct vm_page *pg)
154 {
155 int qidx;
156
157 qidx = (pg->pqflags & PQ_QMASK) / PQ_QFACTOR;
158 KASSERT(qidx >= CLOCKPRO_NOQUEUE);
159 KASSERT(qidx <= CLOCKPRO_NQUEUE);
160 return qidx;
161 }
162
163 typedef struct {
164 struct pglist q_q;
165 u_int q_len;
166 } pageq_t;
167
168 typedef uint32_t nonres_cookie_t;
169 #define NONRES_COOKIE_INVAL 0
170
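/*
 * non-resident page history: a hash of small buckets, each holding up to
 * BUCKETSIZE cookies.  "cur" is the bucket-local clock hand and "cycle"
 * is the global cycle_target value the bucket was last rotated to;
 * nonresident_rotate() catches a bucket up with cycle_target, expiring
 * the cookies the hand passes over.
 */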
171 #define BUCKETSIZE 14
172 struct bucket {
173 u_int cycle;
174 u_int cur;
175 nonres_cookie_t pages[BUCKETSIZE];
176 };
177
178 static size_t cycle_target;
179 static size_t cycle_target_frac;
180 static size_t hashsize;
181 static struct bucket *buckets;
182
183 struct uvmpdpol_groupstate {
184 pageq_t gs_q[CLOCKPRO_NQUEUE];
185 struct uvm_pggroup *gs_pgrp;
186 u_int gs_npages;
187 u_int gs_coldtarget;
188 u_int gs_ncold;
189 u_int gs_newqlenmax;
190 #if !defined(LISTQ)
191 u_int gs_coldqidx;
192 #endif
193 u_int gs_nscanned;
194 u_int gs_coldadj;
195 };
196
197 struct clockpro_state {
198 struct uvmpdpol_groupstate *s_gs;
199 struct uvm_pctparam s_coldtargetpct;
200 };
201
202 static inline pageq_t *
203 clockpro_queue(struct uvmpdpol_groupstate *gs, u_int qidx)
204 {
205
206 KASSERT(CLOCKPRO_NOQUEUE < qidx);
207 KASSERT(qidx <= CLOCKPRO_NQUEUE);
208
209 return &gs->gs_q[qidx - 1];
210 }
211
212 #if !defined(LISTQ)
213
214 static inline void
215 clockpro_switchqueue(struct uvmpdpol_groupstate *gs)
216 {
217
218 gs->gs_coldqidx ^= 1;
219 }
220
221 #endif /* !defined(LISTQ) */
222
223 static struct clockpro_state clockpro;
224
225 /* ---------------------------------------- */
226
227 static void
228 pageq_init(pageq_t *q)
229 {
230
231 TAILQ_INIT(&q->q_q);
232 q->q_len = 0;
233 }
234
235 static u_int
236 pageq_len(const pageq_t *q)
237 {
238
239 return q->q_len;
240 }
241
242 static struct vm_page *
243 pageq_first(const pageq_t *q)
244 {
245
246 return TAILQ_FIRST(&q->q_q);
247 }
248
249 static void
250 pageq_insert_tail(struct uvmpdpol_groupstate *gs, pageq_t *q, struct vm_page *pg)
251 {
252 KASSERT(clockpro_queue(gs, clockpro_getq(pg)) == q);
253
254 TAILQ_INSERT_TAIL(&q->q_q, pg, pageq.queue);
255 q->q_len++;
256 }
257
258 #if defined(LISTQ)
259 static void
260 pageq_insert_head(struct uvmpdpol_groupstate *gs, pageq_t *q, struct vm_page *pg)
261 {
262 KASSERT(clockpro_queue(gs, clockpro_getq(pg)) == q);
263
264 TAILQ_INSERT_HEAD(&q->q_q, pg, pageq.queue);
265 q->q_len++;
266 }
267 #endif
268
269 static void
270 pageq_remove(struct uvmpdpol_groupstate *gs, pageq_t *q, struct vm_page *pg)
271 {
272 KASSERT(clockpro_queue(gs, clockpro_getq(pg)) == q);
273 KASSERT(q->q_len > 0);
274 TAILQ_REMOVE(&q->q_q, pg, pageq.queue);
275 q->q_len--;
276 }
277
278 static struct vm_page *
279 pageq_remove_head(struct uvmpdpol_groupstate *gs, pageq_t *q)
280 {
281 struct vm_page *pg;
282
283 pg = TAILQ_FIRST(&q->q_q);
284 if (pg == NULL) {
285 KASSERT(q->q_len == 0);
286 return NULL;
287 }
288
289 pageq_remove(gs, q, pg);
290 return pg;
291 }
292
293 /* ---------------------------------------- */
294
295 static void
296 clockpro_insert_tail(struct uvmpdpol_groupstate *gs, u_int qidx, struct vm_page *pg)
297 {
298 pageq_t *q = clockpro_queue(gs, qidx);
299
300 clockpro_setq(pg, qidx);
301 pageq_insert_tail(gs, q, pg);
302 }
303
304 #if defined(LISTQ)
305 static void
306 clockpro_insert_head(struct uvmpdpol_groupstate *gs, u_int qidx, struct vm_page *pg)
307 {
308 pageq_t *q = clockpro_queue(gs, qidx);
309
310 clockpro_setq(pg, qidx);
311 pageq_insert_head(gs, q, pg);
312 }
313
314 #endif
315 /* ---------------------------------------- */
316
317 typedef uintptr_t objid_t;
318
319 /*
320 * XXX maybe these hash functions need reconsideration,
321 * given that hash distribution is critical here.
322 */
323
324 static uint32_t
325 pageidentityhash1(objid_t obj, off_t idx)
326 {
327 uint32_t hash = HASH32_BUF_INIT;
328
329 #if 1
330 hash = hash32_buf(&idx, sizeof(idx), hash);
331 hash = hash32_buf(&obj, sizeof(obj), hash);
332 #else
333 hash = hash32_buf(&obj, sizeof(obj), hash);
334 hash = hash32_buf(&idx, sizeof(idx), hash);
335 #endif
336 return hash;
337 }
338
339 static uint32_t
340 pageidentityhash2(objid_t obj, off_t idx)
341 {
342 uint32_t hash = HASH32_BUF_INIT;
343
344 hash = hash32_buf(&obj, sizeof(obj), hash);
345 hash = hash32_buf(&idx, sizeof(idx), hash);
346 return hash;
347 }
348
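/*
 * a cookie is the 32-bit identity hash of (object, index) used to remember
 * a non-resident page.  distinct pages can map to the same cookie (counted
 * as nresconflict); the reserved NONRES_COOKIE_INVAL value is avoided by
 * bumping such a hash by one.
 */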
349 static nonres_cookie_t
350 calccookie(objid_t obj, off_t idx)
351 {
352 uint32_t hash = pageidentityhash2(obj, idx);
353 nonres_cookie_t cookie = hash;
354
355 if (__predict_false(cookie == NONRES_COOKIE_INVAL)) {
356 cookie++; /* XXX */
357 }
358 return cookie;
359 }
360
361 #define COLDTARGET_ADJ(gs, d) ((gs)->gs_coldadj += (d))
362
363 #if defined(PDSIM)
364
365 static void *
366 clockpro_hashalloc(u_int n)
367 {
368 size_t allocsz = sizeof(struct bucket) * n;
369
370 return malloc(allocsz);
371 }
372
373 static void
374 clockpro_hashfree(void *p, int n)
375 {
376
377 free(p);
378 }
379
380 #else /* defined(PDSIM) */
381
382 static void *
383 clockpro_hashalloc(u_int n)
384 {
385 size_t allocsz = round_page(sizeof(struct bucket) * n);
386
387 return (void *)uvm_km_alloc(kernel_map, allocsz, 0, UVM_KMF_WIRED);
388 }
389
390 static void
391 clockpro_hashfree(void *p, u_int n)
392 {
393 size_t allocsz = round_page(sizeof(struct bucket) * n);
394
395 uvm_km_free(kernel_map, (vaddr_t)p, allocsz, UVM_KMF_WIRED);
396 }
397
398 #endif /* defined(PDSIM) */
399
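/*
 * (re)allocate the non-resident hash sized for "n" pages: one bucket per
 * BUCKETSIZE pages, scaled by clockpro_hashfactor.  the old table's
 * cookies are simply discarded rather than rehashed.
 */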
400 static void
401 clockpro_hashinit(uint64_t n)
402 {
403 struct bucket *newbuckets;
404 struct bucket *oldbuckets;
405 size_t sz;
406 size_t oldsz;
407 int i;
408
409 sz = howmany(n, BUCKETSIZE);
410 sz *= clockpro_hashfactor;
411 newbuckets = clockpro_hashalloc(sz);
412 if (newbuckets == NULL) {
413 panic("%s: allocation failure", __func__);
414 }
415 for (i = 0; i < sz; i++) {
416 struct bucket *b = &newbuckets[i];
417 int j;
418
419 b->cycle = cycle_target;
420 b->cur = 0;
421 for (j = 0; j < BUCKETSIZE; j++) {
422 b->pages[j] = NONRES_COOKIE_INVAL;
423 }
424 }
425 /* XXX lock */
426 oldbuckets = buckets;
427 oldsz = hashsize;
428 buckets = newbuckets;
429 hashsize = sz;
430 /* XXX unlock */
431 if (oldbuckets) {
432 clockpro_hashfree(oldbuckets, oldsz);
433 }
434 }
435
436 static struct bucket *
437 nonresident_getbucket(objid_t obj, off_t idx)
438 {
439 uint32_t hash;
440
441 hash = pageidentityhash1(obj, idx);
442 return &buckets[hash % hashsize];
443 }
444
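/*
 * advance this bucket's clock hand until it catches up with the global
 * cycle_target, invalidating each cookie it passes over.  a cookie that is
 * still valid at that point has outlived its test period (nreshandhot),
 * which argues for a smaller cold target.
 */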
445 static void
446 nonresident_rotate(struct uvmpdpol_groupstate *gs, struct bucket *b)
447 {
448 const int target = cycle_target;
449 const int cycle = b->cycle;
450 int cur;
451 int todo;
452
453 todo = target - cycle;
454 if (todo >= BUCKETSIZE * 2) {
455 todo = (todo % BUCKETSIZE) + BUCKETSIZE;
456 }
457 cur = b->cur;
458 while (todo > 0) {
459 if (b->pages[cur] != NONRES_COOKIE_INVAL) {
460 PDPOL_EVCNT_INCR(nreshandhot);
461 if (gs != NULL)
462 COLDTARGET_ADJ(gs, -1);
463 }
464 b->pages[cur] = NONRES_COOKIE_INVAL;
465 cur++;
466 if (cur == BUCKETSIZE) {
467 cur = 0;
468 }
469 todo--;
470 }
471 b->cycle = target;
472 b->cur = cur;
473 }
474
475 static bool
476 nonresident_lookupremove(struct uvmpdpol_groupstate *gs, objid_t obj, off_t idx)
477 {
478 struct bucket *b = nonresident_getbucket(obj, idx);
479 nonres_cookie_t cookie = calccookie(obj, idx);
480
481 nonresident_rotate(gs, b);
482 for (u_int i = 0; i < BUCKETSIZE; i++) {
483 if (b->pages[i] == cookie) {
484 b->pages[i] = NONRES_COOKIE_INVAL;
485 return true;
486 }
487 }
488 return false;
489 }
490
491 static objid_t
492 pageobj(struct vm_page *pg)
493 {
494 const void *obj;
495
496 /*
497 * XXX the object pointer is often freed and reused for an unrelated object.
498 * for vnodes, it would be better to use something like
499 * a hash of fsid/fileid/generation.
500 */
501
502 obj = pg->uobject;
503 if (obj == NULL) {
504 obj = pg->uanon;
505 KASSERT(obj != NULL);
506 KASSERT(pg->offset == 0);
507 }
508
509 return (objid_t)obj;
510 }
511
512 static off_t
513 pageidx(struct vm_page *pg)
514 {
515
516 KASSERT((pg->offset & PAGE_MASK) == 0);
517 return pg->offset >> PAGE_SHIFT;
518 }
519
520 static bool
521 nonresident_pagelookupremove(struct uvmpdpol_groupstate *gs, struct vm_page *pg)
522 {
523 bool found = nonresident_lookupremove(gs, pageobj(pg), pageidx(pg));
524
525 if (pg->uobject) {
526 PDPOL_EVCNT_INCR(nreslookupobj);
527 } else {
528 PDPOL_EVCNT_INCR(nreslookupanon);
529 }
530 if (found) {
531 if (pg->uobject) {
532 PDPOL_EVCNT_INCR(nresfoundobj);
533 } else {
534 PDPOL_EVCNT_INCR(nresfoundanon);
535 }
536 }
537 return found;
538 }
539
540 static void
541 nonresident_pagerecord(struct uvmpdpol_groupstate *gs, struct vm_page *pg)
542 {
543 const objid_t obj = pageobj(pg);
544 const off_t idx = pageidx(pg);
545 struct bucket * const b = nonresident_getbucket(obj, idx);
546 nonres_cookie_t cookie = calccookie(obj, idx);
547
548 #if defined(DEBUG)
549 for (u_int i = 0; i < BUCKETSIZE; i++) {
550 if (b->pages[i] == cookie) {
551 PDPOL_EVCNT_INCR(nresconflict);
552 }
553 }
554 #endif /* defined(DEBUG) */
555
556 if (pg->uobject) {
557 PDPOL_EVCNT_INCR(nresrecordobj);
558 } else {
559 PDPOL_EVCNT_INCR(nresrecordanon);
560 }
561 nonresident_rotate(gs, b);
562 if (b->pages[b->cur] != NONRES_COOKIE_INVAL) {
563 PDPOL_EVCNT_INCR(nresoverwritten);
564 COLDTARGET_ADJ(gs, -1);
565 }
566 b->pages[b->cur] = cookie;
567 b->cur = (b->cur + 1) % BUCKETSIZE;
568 }
569
570 /* ---------------------------------------- */
571
572 #if defined(CLOCKPRO_DEBUG)
573 static void
574 check_sanity(void)
575 {
576 }
577 #else /* defined(CLOCKPRO_DEBUG) */
578 #define check_sanity() /* nothing */
579 #endif /* defined(CLOCKPRO_DEBUG) */
580
581 static void
582 clockpro_reinit(void)
583 {
584
585 clockpro_hashinit(uvmexp.npages);
586 }
587
588 static void
589 clockpro_recolor(void *new_gs, struct uvm_pggroup *grparray,
590 size_t npggroup, size_t old_ncolors)
591 {
592 struct uvmpdpol_groupstate *old_gs = clockpro.s_gs;
593 struct uvm_pggroup *grp = uvm.pggroups;
594 struct uvmpdpol_groupstate *gs = new_gs;
595 const size_t old_npggroup = VM_NPGGROUP(old_ncolors);
596
597 clockpro.s_gs = gs;
598
599 for (size_t pggroup = 0; pggroup < npggroup; pggroup++, gs++, grp++) {
600 grp->pgrp_gs = gs;
601 gs->gs_pgrp = grp;
602 for (u_int i = 0; i < CLOCKPRO_NQUEUE; i++) {
603 pageq_init(&gs->gs_q[i]);
604 }
605 gs->gs_newqlenmax = 1;
606 gs->gs_coldtarget = 1;
607 }
608
609 for (size_t pggroup = 0; pggroup < old_npggroup; pggroup++, old_gs++) {
610 pageq_t *oldq = old_gs->gs_q;
611 for (u_int i = 0; i < CLOCKPRO_NQUEUE; i++, oldq++) {
612 while (pageq_len(oldq) > 0) {
613 struct vm_page *pg = pageq_remove_head(old_gs, oldq);
614 KASSERT(pg != NULL);
615 grp = uvm_page_to_pggroup(pg);
616 gs = grp->pgrp_gs;
617 pageq_insert_tail(gs, &gs->gs_q[i], pg);
618 #if defined(USEONCE2)
619 #else
620 gs->gs_npages++;
621 if (pg->pqflags & (PQ_TEST|PQ_SPECULATIVE)) {
622 gs->gs_ncold++;
623 }
624 #endif
625 }
626 }
627 }
628
629 uvm_pctparam_init(&clockpro.s_coldtargetpct, CLOCKPRO_COLDPCT, NULL);
630
631 }
632
633 static void
634 clockpro_init(void *new_gs, size_t npggroup)
635 {
636 struct uvm_pggroup *grp = uvm.pggroups;
637 struct uvmpdpol_groupstate *gs = new_gs;
638
clockpro.s_gs = gs; /* XXX remember the group state array for clockpro_recolor() */

639 for (size_t pggroup = 0; pggroup < npggroup; pggroup++, gs++, grp++) {
640 grp->pgrp_gs = gs;
641 gs->gs_pgrp = grp;
642 for (u_int i = 0; i < CLOCKPRO_NQUEUE; i++) {
643 pageq_init(&gs->gs_q[i]);
644 }
645 gs->gs_newqlenmax = 1;
646 gs->gs_coldtarget = 1;
647 }
648
649 uvm_pctparam_init(&clockpro.s_coldtargetpct, CLOCKPRO_COLDPCT, NULL);
650 }
651
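/*
 * recompute the cold target for this group.  without ADAPTIVE it is simply
 * s_coldtargetpct percent of the group's pages (at least 1); the new-queue
 * length limit is a quarter of that, but never below CLOCKPRO_NEWQMIN.
 */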
652 static void
653 clockpro_tune(struct uvmpdpol_groupstate *gs)
654 {
655 int coldtarget;
656
657 #if defined(ADAPTIVE)
658 u_int coldmax = gs->gs_npages * CLOCKPRO_COLDPCTMAX / 100;
659 u_int coldmin = 1;
660
661 coldtarget = gs->gs_coldtarget;
662 if (coldtarget + gs->gs_coldadj < coldmin) {
663 gs->gs_coldadj = coldmin - coldtarget;
664 } else if (coldtarget + gs->gs_coldadj > coldmax) {
665 gs->gs_coldadj = coldmax - coldtarget;
666 }
667 coldtarget += gs->gs_coldadj;
668 #else /* defined(ADAPTIVE) */
669 coldtarget = UVM_PCTPARAM_APPLY(&clockpro.s_coldtargetpct,
670 gs->gs_npages);
671 if (coldtarget < 1) {
672 coldtarget = 1;
673 }
674 #endif /* defined(ADAPTIVE) */
675
676 gs->gs_coldtarget = coldtarget;
677 gs->gs_newqlenmax = coldtarget / 4;
678 if (gs->gs_newqlenmax < CLOCKPRO_NEWQMIN) {
679 gs->gs_newqlenmax = CLOCKPRO_NEWQMIN;
680 }
681 }
682
683 static void
684 clockpro_movereferencebit(struct vm_page *pg)
685 {
686 bool referenced;
687
688 referenced = pmap_clear_reference(pg);
689 if (referenced) {
690 pg->pqflags |= PQ_REFERENCED;
691 }
692 }
693
694 static void
695 clockpro_clearreferencebit(struct vm_page *pg)
696 {
697
698 clockpro_movereferencebit(pg);
699 pg->pqflags &= ~PQ_REFERENCED;
700 }
701
702 static void
703 clockpro___newqrotate(struct uvmpdpol_groupstate * const gs, int len)
704 {
705 pageq_t * const newq = clockpro_queue(gs, CLOCKPRO_NEWQ);
706
707 while (pageq_len(newq) > len) {
708 struct vm_page *pg = pageq_remove_head(gs, newq);
709 KASSERT(pg != NULL);
710 KASSERT(clockpro_getq(pg) == CLOCKPRO_NEWQ);
711 if ((pg->pqflags & PQ_INITIALREF) != 0) {
712 clockpro_clearreferencebit(pg);
713 pg->pqflags &= ~PQ_INITIALREF;
714 }
715 /* place at the list head */
716 clockpro_insert_tail(gs, CLOCKPRO_COLDQ(gs), pg);
717 }
718 }
719
720 static void
721 clockpro_newqrotate(struct uvmpdpol_groupstate * const gs)
722 {
723
724 check_sanity();
725 clockpro___newqrotate(gs, gs->gs_newqlenmax);
726 check_sanity();
727 }
728
729 static void
730 clockpro_newqflush(struct uvmpdpol_groupstate * const gs, int n)
731 {
732
733 check_sanity();
734 clockpro___newqrotate(gs, n);
735 check_sanity();
736 }
737
738 static void
739 clockpro_newqflushone(struct uvmpdpol_groupstate *gs)
740 {
741
742 clockpro_newqflush(gs,
743 MAX(pageq_len(clockpro_queue(gs, CLOCKPRO_NEWQ)) - 1, 0));
744 }
745
746 /*
747 * our "tail" is called "list-head" in the paper.
748 */
749
750 static void
751 clockpro___enqueuetail(struct uvmpdpol_groupstate *gs, struct vm_page *pg)
752 {
753
754 KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);
755
756 check_sanity();
757 #if !defined(USEONCE2)
758 clockpro_insert_tail(gs, CLOCKPRO_NEWQ, pg);
759 clockpro_newqrotate(gs);
760 #else /* !defined(USEONCE2) */
761 #if defined(LISTQ)
762 KASSERT((pg->pqflags & PQ_REFERENCED) == 0);
763 #endif /* defined(LISTQ) */
764 clockpro_insert_tail(gs, CLOCKPRO_COLDQ(gs), pg);
765 #endif /* !defined(USEONCE2) */
766 check_sanity();
767 }
768
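/*
 * enqueue a page entering the clockpro queues.  without USEONCE2, a page
 * whose identity is still in the non-resident hash re-enters hot (its
 * reuse distance was short), while speculative pages and first-time pages
 * enter cold, the latter in their test period (PQ_TEST).
 */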
769 static void
770 clockpro_pageenqueue(struct vm_page *pg)
771 {
772 struct uvm_pggroup *grp = uvm_page_to_pggroup(pg);
773 struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
774 bool hot;
775 bool speculative = (pg->pqflags & PQ_SPECULATIVE) != 0; /* XXX */
776
777 KASSERT((~pg->pqflags & (PQ_INITIALREF|PQ_SPECULATIVE)) != 0);
778 KASSERT(mutex_owned(&uvm_pageqlock));
779 check_sanity();
780 KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);
781 gs->gs_npages++;
782 pg->pqflags &= ~(PQ_HOT|PQ_TEST);
783 if (speculative) {
784 hot = false;
785 PDPOL_EVCNT_INCR(speculativeenqueue);
786 } else {
787 hot = nonresident_pagelookupremove(gs, pg);
788 if (hot) {
789 COLDTARGET_ADJ(gs, 1);
790 }
791 }
792
793 /*
794 * consider mmap'ed file:
795 *
796 * - read-ahead enqueues a page.
797 *
798 * - on the following read-ahead hit, the fault handler activates it.
799 *
800 * - finally, the userland code which caused the above fault
801 * actually accesses the page. this sets its reference bit.
802 *
803 * we want to count the above as a single access, rather than
804 * three accesses with short reuse distances.
805 */
806
807 #if defined(USEONCE2)
808 pg->pqflags &= ~PQ_INITIALREF;
809 if (hot) {
810 pg->pqflags |= PQ_TEST;
811 }
812 gs->gs_ncold++;
813 clockpro_clearreferencebit(pg);
814 clockpro___enqueuetail(gs, pg);
815 #else /* defined(USEONCE2) */
816 if (speculative) {
817 gs->gs_ncold++;
818 } else if (hot) {
819 pg->pqflags |= PQ_HOT;
820 } else {
821 pg->pqflags |= PQ_TEST;
822 gs->gs_ncold++;
823 }
824 clockpro___enqueuetail(gs, pg);
825 #endif /* defined(USEONCE2) */
826 grp->pgrp_inactive = gs->gs_ncold;
827 grp->pgrp_active = gs->gs_npages - gs->gs_ncold;
828 KASSERT(gs->gs_ncold <= gs->gs_npages);
829 }
830
831 static pageq_t *
832 clockpro_pagequeue(struct vm_page *pg)
833 {
834 struct uvm_pggroup *grp = uvm_page_to_pggroup(pg);
835 struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
836 u_int qidx;
837
838 qidx = clockpro_getq(pg);
839 KASSERT(qidx != CLOCKPRO_NOQUEUE);
840
841 return clockpro_queue(gs, qidx);
842 }
843
844 static void
845 clockpro_pagedequeue(struct vm_page *pg)
846 {
847 struct uvm_pggroup *grp = uvm_page_to_pggroup(pg);
848 struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
849 pageq_t *q;
850
851 KASSERT(gs->gs_npages > 0);
852 check_sanity();
853 q = clockpro_pagequeue(pg);
854 pageq_remove(gs, q, pg);
855 check_sanity();
856 clockpro_setq(pg, CLOCKPRO_NOQUEUE);
857 if ((pg->pqflags & PQ_HOT) == 0) {
858 KASSERT(gs->gs_ncold > 0);
859 gs->gs_ncold--;
860 }
861 KASSERT(gs->gs_npages > 0);
862 gs->gs_npages--;
863 grp->pgrp_inactive = gs->gs_ncold;
864 grp->pgrp_active = gs->gs_npages - gs->gs_ncold;
865 check_sanity();
866 }
867
868 static void
869 clockpro_pagerequeue(struct vm_page *pg)
870 {
871 struct uvm_pggroup *grp = uvm_page_to_pggroup(pg);
872 struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
873 u_int qidx;
874
875 qidx = clockpro_getq(pg);
876 KASSERT(qidx == CLOCKPRO_HOTQ(gs) || qidx == CLOCKPRO_COLDQ(gs));
877 pageq_remove(gs, clockpro_queue(gs, qidx), pg);
878 check_sanity();
879 clockpro_setq(pg, CLOCKPRO_NOQUEUE);
880
881 clockpro___enqueuetail(gs, pg);
882 }
883
884 static void
885 handhot_endtest(struct uvmpdpol_groupstate * const gs, struct vm_page *pg)
886 {
887
888 KASSERT((pg->pqflags & PQ_HOT) == 0);
889 if ((pg->pqflags & PQ_TEST) != 0) {
890 PDPOL_EVCNT_INCR(hhotcoldtest);
891 COLDTARGET_ADJ(gs, -1);
892 pg->pqflags &= ~PQ_TEST;
893 } else {
894 PDPOL_EVCNT_INCR(hhotcold);
895 }
896 }
897
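/*
 * the hot hand: run until this group has at least gs_coldtarget cold
 * pages, demoting hot pages whose reference bit is clear to cold and
 * advancing cycle_target so that the non-resident hash ages roughly in
 * step with the hot queue.
 */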
898 static void
899 handhot_advance(struct uvmpdpol_groupstate * const gs)
900 {
901 struct vm_page *pg;
902 pageq_t *hotq;
903 u_int hotqlen;
904
905 clockpro_tune(gs);
906
907 dump("hot called");
908 if (gs->gs_ncold >= gs->gs_coldtarget) {
909 return;
910 }
911 hotq = clockpro_queue(gs, CLOCKPRO_HOTQ(gs));
912 again:
913 pg = pageq_first(hotq);
914 if (pg == NULL) {
915 DPRINTF("%s: HHOT TAKEOVER\n", __func__);
916 dump("hhottakeover");
917 PDPOL_EVCNT_INCR(hhottakeover);
918 #if defined(LISTQ)
919 while (/* CONSTCOND */ 1) {
920 pageq_t *coldq = clockpro_queue(gs, CLOCKPRO_COLDQ(gs));
921
922 pg = pageq_first(coldq);
923 if (pg == NULL) {
924 clockpro_newqflushone(gs);
925 pg = pageq_first(coldq);
926 if (pg == NULL) {
927 WARN("hhot: no page?\n");
928 return;
929 }
930 }
931 KASSERT(clockpro_pagequeue(pg) == coldq);
932 pageq_remove(gs, coldq, pg);
933 check_sanity();
934 if ((pg->pqflags & PQ_HOT) == 0) {
935 handhot_endtest(gs, pg);
936 clockpro_insert_tail(gs, CLOCKPRO_LISTQ, pg);
937 } else {
938 clockpro_insert_head(gs, CLOCKPRO_HOTQ(gs), pg);
939 break;
940 }
941 }
942 #else /* defined(LISTQ) */
943 clockpro_newqflush(gs, 0); /* XXX XXX */
944 clockpro_switchqueue(gs);
945 hotq = clockpro_queue(gs, CLOCKPRO_HOTQ(gs));
946 goto again;
947 #endif /* defined(LISTQ) */
948 }
949
950 KASSERT(clockpro_pagequeue(pg) == hotq);
951
952 /*
953 * terminate the test period of nonresident pages by cycling them.
954 */
955
956 cycle_target_frac += BUCKETSIZE;
957 hotqlen = pageq_len(hotq);
958 while (cycle_target_frac >= hotqlen) {
959 cycle_target++;
960 cycle_target_frac -= hotqlen;
961 }
962
963 if ((pg->pqflags & PQ_HOT) == 0) {
964 #if defined(LISTQ)
965 panic("cold page in hotq: %p", pg);
966 #else /* defined(LISTQ) */
967 handhot_endtest(gs, pg);
968 goto next;
969 #endif /* defined(LISTQ) */
970 }
971 KASSERT((pg->pqflags & PQ_TEST) == 0);
972 KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
973 KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);
974
975 /*
976 * once we meet our target,
977 * stop at a hot page so that no cold page in its test period
978 * has greater recency than any hot page.
979 */
980
981 if (gs->gs_ncold >= gs->gs_coldtarget) {
982 dump("hot done");
983 return;
984 }
985 clockpro_movereferencebit(pg);
986 if ((pg->pqflags & PQ_REFERENCED) == 0) {
987 struct uvm_pggroup *grp = gs->gs_pgrp;
988 PDPOL_EVCNT_INCR(hhotunref);
989 grp->pgrp_pddeact++;
990 pg->pqflags &= ~PQ_HOT;
991 gs->gs_ncold++;
992 grp->pgrp_inactive = gs->gs_ncold;
993 grp->pgrp_active = gs->gs_npages - gs->gs_ncold;
994 KASSERT(gs->gs_ncold <= gs->gs_npages);
995 } else {
996 PDPOL_EVCNT_INCR(hhotref);
997 }
998 pg->pqflags &= ~PQ_REFERENCED;
999 #if !defined(LISTQ)
1000 next:
1001 #endif /* !defined(LISTQ) */
1002 clockpro_pagerequeue(pg);
1003 dump("hot");
1004 goto again;
1005 }
1006
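/*
 * the cold hand: return the next eviction candidate for this group.
 * a referenced cold page in its test period is promoted to hot, a
 * referenced one outside it starts a test period, and an unreferenced
 * page is handed back to the pagedaemon, after recording its identity
 * in the non-resident hash if it was in its test period.
 */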
1007 static struct vm_page *
1008 handcold_advance(struct uvmpdpol_groupstate * const gs)
1009 {
1010 struct uvm_pggroup * const grp = gs->gs_pgrp;
1011 struct vm_page *pg;
1012
1013 for (;;) {
1014 #if defined(LISTQ)
1015 pageq_t *listq = clockpro_queue(gs, CLOCKPRO_LISTQ);
1016 #endif /* defined(LISTQ) */
1017 pageq_t *coldq;
1018
1019 clockpro_newqrotate(gs);
1020 handhot_advance(gs);
1021 #if defined(LISTQ)
1022 pg = pageq_first(listq);
1023 if (pg != NULL) {
1024 KASSERT(clockpro_getq(pg) == CLOCKPRO_LISTQ);
1025 KASSERT((pg->pqflags & PQ_TEST) == 0);
1026 KASSERT((pg->pqflags & PQ_HOT) == 0);
1027 KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
1028 pageq_remove(gs, listq, pg);
1029 check_sanity();
1030 clockpro_insert_head(gs, CLOCKPRO_COLDQ(gs), pg); /* XXX */
1031 goto gotcold;
1032 }
1033 #endif /* defined(LISTQ) */
1034 check_sanity();
1035 coldq = clockpro_queue(gs, CLOCKPRO_COLDQ(gs));
1036 pg = pageq_first(coldq);
1037 if (pg == NULL) {
1038 clockpro_newqflushone(gs);
1039 pg = pageq_first(coldq);
1040 }
1041 if (pg == NULL) {
1042 DPRINTF("%s: HCOLD TAKEOVER\n", __func__);
1043 dump("hcoldtakeover");
1044 PDPOL_EVCNT_INCR(hcoldtakeover);
1045 KASSERT(
1046 pageq_len(clockpro_queue(gs, CLOCKPRO_NEWQ)) == 0);
1047 #if defined(LISTQ)
1048 KASSERT(
1049 pageq_len(clockpro_queue(gs, CLOCKPRO_HOTQ(gs))) == 0);
1050 #else /* defined(LISTQ) */
1051 clockpro_switchqueue(gs);
1052 coldq = clockpro_queue(gs, CLOCKPRO_COLDQ(gs));
1053 pg = pageq_first(coldq);
1054 #endif /* defined(LISTQ) */
1055 }
1056 if (pg == NULL) {
1057 WARN("hcold: no page?\n");
1058 return NULL;
1059 }
1060 KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
1061 if ((pg->pqflags & PQ_HOT) != 0) {
1062 PDPOL_EVCNT_INCR(hcoldhot);
1063 pageq_remove(gs, coldq, pg);
1064 clockpro_insert_tail(gs, CLOCKPRO_HOTQ(gs), pg);
1065 check_sanity();
1066 KASSERT((pg->pqflags & PQ_TEST) == 0);
1067 grp->pgrp_pdscans++;
1068 continue;
1069 }
1070 #if defined(LISTQ)
1071 gotcold:
1072 #endif /* defined(LISTQ) */
1073 KASSERT((pg->pqflags & PQ_HOT) == 0);
1074 grp->pgrp_pdscans++;
1075 clockpro_movereferencebit(pg);
1076 if ((pg->pqflags & PQ_SPECULATIVE) != 0) {
1077 KASSERT((pg->pqflags & PQ_TEST) == 0);
1078 if ((pg->pqflags & PQ_REFERENCED) != 0) {
1079 PDPOL_EVCNT_INCR(speculativehit2);
1080 pg->pqflags &= ~(PQ_SPECULATIVE|PQ_REFERENCED);
1081 clockpro_pagedequeue(pg);
1082 clockpro_pageenqueue(pg);
1083 continue;
1084 }
1085 PDPOL_EVCNT_INCR(speculativemiss);
1086 }
1087 switch (pg->pqflags & (PQ_REFERENCED|PQ_TEST)) {
1088 case PQ_TEST:
1089 PDPOL_EVCNT_INCR(hcoldunreftest);
1090 nonresident_pagerecord(gs, pg);
1091 goto gotit;
1092 case 0:
1093 PDPOL_EVCNT_INCR(hcoldunref);
1094 gotit:
1095 KASSERT(gs->gs_ncold > 0);
1096 clockpro_pagerequeue(pg); /* XXX */
1097 dump("cold done");
1098 /* XXX "pg" is still in queue */
1099 handhot_advance(gs);
1100 goto done;
1101
1102 case PQ_REFERENCED|PQ_TEST:
1103 PDPOL_EVCNT_INCR(hcoldreftest);
1104 gs->gs_ncold--;
1105 grp->pgrp_inactive = gs->gs_ncold;
1106 grp->pgrp_active = gs->gs_npages - gs->gs_ncold;
1107 COLDTARGET_ADJ(gs, 1);
1108 pg->pqflags |= PQ_HOT;
1109 pg->pqflags &= ~PQ_TEST;
1110 break;
1111
1112 case PQ_REFERENCED:
1113 PDPOL_EVCNT_INCR(hcoldref);
1114 pg->pqflags |= PQ_TEST;
1115 break;
1116 }
1117 pg->pqflags &= ~PQ_REFERENCED;
1118 grp->pgrp_pdreact++;
1119 /* move to the list head */
1120 clockpro_pagerequeue(pg);
1121 dump("cold");
1122 }
1123 done:;
1124 return pg;
1125 }
1126
1127 void
1128 uvmpdpol_pageactivate(struct vm_page *pg)
1129 {
1130
1131 if (!uvmpdpol_pageisqueued_p(pg)) {
1132 KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);
1133 pg->pqflags |= PQ_INITIALREF;
1134 clockpro_pageenqueue(pg);
1135 } else if ((pg->pqflags & PQ_SPECULATIVE)) {
1136 PDPOL_EVCNT_INCR(speculativehit1);
1137 pg->pqflags &= ~PQ_SPECULATIVE;
1138 pg->pqflags |= PQ_INITIALREF;
1139 clockpro_pagedequeue(pg);
1140 clockpro_pageenqueue(pg);
1141 }
1142 pg->pqflags |= PQ_REFERENCED;
1143 }
1144
1145 void
1146 uvmpdpol_pagedeactivate(struct vm_page *pg)
1147 {
1148
1149 clockpro_clearreferencebit(pg);
1150 }
1151
1152 void
1153 uvmpdpol_pagedequeue(struct vm_page *pg)
1154 {
1155
1156 if (!uvmpdpol_pageisqueued_p(pg)) {
1157 return;
1158 }
1159 clockpro_pagedequeue(pg);
1160 pg->pqflags &= ~(PQ_INITIALREF|PQ_SPECULATIVE);
1161 }
1162
1163 void
1164 uvmpdpol_pageenqueue(struct vm_page *pg)
1165 {
1166
1167 #if 1
1168 if (uvmpdpol_pageisqueued_p(pg)) {
1169 return;
1170 }
1171 clockpro_clearreferencebit(pg);
1172 pg->pqflags |= PQ_SPECULATIVE;
1173 clockpro_pageenqueue(pg);
1174 #else
1175 uvmpdpol_pageactivate(pg);
1176 #endif
1177 }
1178
1179 void
1180 uvmpdpol_anfree(struct vm_anon *an)
1181 {
1182
1183 KASSERT(an->an_page == NULL);
1184 if (nonresident_lookupremove(NULL, (objid_t)an, 0)) {
1185 PDPOL_EVCNT_INCR(nresanonfree);
1186 }
1187 }
1188
1189 void
1190 uvmpdpol_init(void *new_gs, size_t npggroup)
1191 {
1192
1193 clockpro_init(new_gs, npggroup);
1194 }
1195
1196 void
1197 uvmpdpol_reinit(void)
1198 {
1199
1200 clockpro_reinit();
1201 }
1202
1203 size_t
1204 uvmpdpol_space(void)
1205 {
1206
1207 return sizeof(struct uvmpdpol_groupstate);
1208 }
1209
1210 void
1211 uvmpdpol_recolor(void *new_gs, struct uvm_pggroup *grparray,
1212 size_t npggroup, size_t old_ncolors)
1213 {
1214
1215 clockpro_recolor(new_gs, grparray, npggroup, old_ncolors);
1216 }
1217
1218 void
1219 uvmpdpol_estimatepageable(u_int *activep, u_int *inactivep)
1220 {
1221 u_int active = 0;
1222 u_int inactive = 0;
1223
1224 struct uvm_pggroup *grp;
1225 STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
1226 struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
1227 active += gs->gs_npages - gs->gs_ncold;
1228 inactive += gs->gs_ncold;
1229 }
1230 if (activep) {
1231 *activep = active;
1232 }
1233 if (inactivep) {
1234 *inactivep = inactive;
1235 }
1236 }
1237
1238 bool
1239 uvmpdpol_pageisqueued_p(struct vm_page *pg)
1240 {
1241
1242 return clockpro_getq(pg) != CLOCKPRO_NOQUEUE;
1243 }
1244
1245 void
1246 uvmpdpol_scaninit(struct uvm_pggroup *grp)
1247 {
1248
1249 grp->pgrp_gs->gs_nscanned = 0;
1250 }
1251
1252 struct vm_page *
1253 uvmpdpol_selectvictim(struct uvm_pggroup *grp)
1254 {
1255 struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
1256 struct vm_page *pg;
1257
1258 if (gs->gs_nscanned > gs->gs_npages) {
1259 DPRINTF("scan too much\n");
1260 return NULL;
1261 }
1262 pg = handcold_advance(gs);
1263 gs->gs_nscanned++;
1264 return pg;
1265 }
1266
1267 static void
1268 clockpro_dropswap(pageq_t *q, int *todo)
1269 {
1270 struct vm_page *pg;
1271
1272 TAILQ_FOREACH_REVERSE(pg, &q->q_q, pglist, pageq.queue) {
1273 if (*todo <= 0) {
1274 break;
1275 }
1276 if ((pg->pqflags & PQ_HOT) == 0) {
1277 continue;
1278 }
1279 if ((pg->pqflags & PQ_SWAPBACKED) == 0) {
1280 continue;
1281 }
1282 if (uvmpd_trydropswap(pg)) {
1283 (*todo)--;
1284 }
1285 }
1286 }
1287
1288 void
1289 uvmpdpol_balancequeue(struct uvm_pggroup *grp, u_int swap_shortage)
1290 {
1291 struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
1292 u_int todo = swap_shortage;
1293
1294 if (todo == 0) {
1295 return;
1296 }
1297
1298 /*
1299 * reclaim swap slots from hot pages
1300 */
1301
1302 DPRINTF("%s: [%zd] swap_shortage=%u\n",
1303 __func__, grp - uvm.pggroups, swap_shortage);
1304
1305 clockpro_dropswap(clockpro_queue(gs, CLOCKPRO_NEWQ), &todo);
1306 clockpro_dropswap(clockpro_queue(gs, CLOCKPRO_COLDQ(gs)), &todo);
1307 clockpro_dropswap(clockpro_queue(gs, CLOCKPRO_HOTQ(gs)), &todo);
1308
1309 DPRINTF("%s: [%zd]: done=%u\n",
1310 __func__, grp - uvm.pggroups, swap_shortage - todo);
1311 }
1312
1313 bool
1314 uvmpdpol_needsscan_p(struct uvm_pggroup *grp)
1315 {
1316 struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
1317
1318 return (gs->gs_ncold < gs->gs_coldtarget);
1319 }
1320
1321 void
1322 uvmpdpol_tune(struct uvm_pggroup *grp)
1323 {
1324
1325 clockpro_tune(grp->pgrp_gs);
1326 }
1327
1328 #if !defined(PDSIM)
1329
1330 #include <sys/sysctl.h> /* XXX SYSCTL_DESCR */
1331
1332 void
1333 uvmpdpol_sysctlsetup(void)
1334 {
1335 #if !defined(ADAPTIVE)
1336 struct clockpro_state * const s = &clockpro;
1337
1338 uvm_pctparam_createsysctlnode(&s->s_coldtargetpct, "coldtargetpct",
1339 SYSCTL_DESCR("Cold queue target as a percentage of the entire queue"));
1340 #endif /* !defined(ADAPTIVE) */
1341 }
1342
1343 #endif /* !defined(PDSIM) */
1344
1345 #if defined(DDB)
1346
1347 void clockpro_dump(void);
1348
1349 void
1350 clockpro_dump(void)
1351 {
1352 struct uvm_pggroup *grp;
1353 STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
1354 struct uvmpdpol_groupstate *gs = grp->pgrp_gs;
1355 struct vm_page *pg;
1356 int ncold, nhot, ntest, nspeculative, ninitialref, nref;
1357 int newqlen, coldqlen, hotqlen, listqlen;
1358
1359 newqlen = coldqlen = hotqlen = listqlen = 0;
1360 printf(" [%zd]: npages=%d, ncold=%d, coldtarget=%d, newqlenmax=%d\n",
1361 grp - uvm.pggroups, gs->gs_npages, gs->gs_ncold,
1362 gs->gs_coldtarget, gs->gs_newqlenmax);
1363
1364 #define INITCOUNT() \
1365 ncold = nhot = ntest = nspeculative = ninitialref = nref = 0
1366
1367 #define COUNT(pg) \
1368 if ((pg->pqflags & PQ_HOT) != 0) { \
1369 nhot++; \
1370 } else { \
1371 ncold++; \
1372 if ((pg->pqflags & PQ_TEST) != 0) { \
1373 ntest++; \
1374 } \
1375 if ((pg->pqflags & PQ_SPECULATIVE) != 0) { \
1376 nspeculative++; \
1377 } \
1378 if ((pg->pqflags & PQ_INITIALREF) != 0) { \
1379 ninitialref++; \
1380 } else if ((pg->pqflags & PQ_REFERENCED) != 0 || \
1381 pmap_is_referenced(pg)) { \
1382 nref++; \
1383 } \
1384 }
1385
1386 #define PRINTCOUNT(name) \
1387 printf("%s#%zd hot=%d, cold=%d, test=%d, speculative=%d, " \
1388 "initialref=%d, nref=%d\n", \
1389 (name), grp - uvm.pggroups, nhot, ncold, ntest, nspeculative, ninitialref, nref)
1390
1391 INITCOUNT();
1392 TAILQ_FOREACH(pg, &clockpro_queue(gs, CLOCKPRO_NEWQ)->q_q, pageq.queue) {
1393 if (clockpro_getq(pg) != CLOCKPRO_NEWQ) {
1394 printf("newq corrupt %p\n", pg);
1395 }
1396 COUNT(pg)
1397 newqlen++;
1398 }
1399 PRINTCOUNT("newq");
1400
1401 INITCOUNT();
1402 TAILQ_FOREACH(pg, &clockpro_queue(gs, CLOCKPRO_COLDQ(gs))->q_q, pageq.queue) {
1403 if (clockpro_getq(pg) != CLOCKPRO_COLDQ(gs)) {
1404 printf("coldq corrupt %p\n", pg);
1405 }
1406 COUNT(pg)
1407 coldqlen++;
1408 }
1409 PRINTCOUNT("coldq");
1410
1411 INITCOUNT();
1412 TAILQ_FOREACH(pg, &clockpro_queue(gs, CLOCKPRO_HOTQ(gs))->q_q, pageq.queue) {
1413 if (clockpro_getq(pg) != CLOCKPRO_HOTQ(gs)) {
1414 printf("hotq corrupt %p\n", pg);
1415 }
1416 #if defined(LISTQ)
1417 if ((pg->pqflags & PQ_HOT) == 0) {
1418 printf("cold page in hotq: %p\n", pg);
1419 }
1420 #endif /* defined(LISTQ) */
1421 COUNT(pg)
1422 hotqlen++;
1423 }
1424 PRINTCOUNT("hotq");
1425
1426 INITCOUNT();
1427 TAILQ_FOREACH(pg, &clockpro_queue(gs, CLOCKPRO_LISTQ)->q_q, pageq.queue) {
1428 #if !defined(LISTQ)
1429 printf("listq %p\n", pg);
1430 #endif /* !defined(LISTQ) */
1431 if (clockpro_getq(pg) != CLOCKPRO_LISTQ) {
1432 printf("listq corrupt %p\n", pg);
1433 }
1434 COUNT(pg)
1435 listqlen++;
1436 }
1437 PRINTCOUNT("listq");
1438
1439 printf("#%zd: newqlen=%u/%u, coldqlen=%u/%u, hotqlen=%u/%u, listqlen=%d/%d\n",
1440 grp - uvm.pggroups,
1441 newqlen, pageq_len(clockpro_queue(gs, CLOCKPRO_NEWQ)),
1442 coldqlen, pageq_len(clockpro_queue(gs, CLOCKPRO_COLDQ(gs))),
1443 hotqlen, pageq_len(clockpro_queue(gs, CLOCKPRO_HOTQ(gs))),
1444 listqlen, pageq_len(clockpro_queue(gs, CLOCKPRO_LISTQ)));
1445 }
1446 }
1447 #endif /* defined(DDB) */
1448
1449 #if defined(PDSIM)
1450 #if defined(DEBUG)
1451 static void
1452 pdsim_dumpq(struct uvmpdpol_groupstate *gs, int qidx)
1453 {
1454 pageq_t *q = clockpro_queue(gs, qidx);
1455 struct vm_page *pg;
1456
1457 TAILQ_FOREACH(pg, &q->q_q, pageq.queue) {
1458 DPRINTF(" %" PRIu64 "%s%s%s%s%s%s",
1459 pg->offset >> PAGE_SHIFT,
1460 (pg->pqflags & PQ_HOT) ? "H" : "",
1461 (pg->pqflags & PQ_TEST) ? "T" : "",
1462 (pg->pqflags & PQ_REFERENCED) ? "R" : "",
1463 pmap_is_referenced(pg) ? "r" : "",
1464 (pg->pqflags & PQ_INITIALREF) ? "I" : "",
1465 (pg->pqflags & PQ_SPECULATIVE) ? "S" : ""
1466 );
1467 }
1468 }
1469 #endif /* defined(DEBUG) */
1470
1471 void
1472 pdsim_dump(const char *id)
1473 {
1474 #if defined(DEBUG)
1475 struct clockpro_state * const s = &clockpro;
struct uvmpdpol_groupstate * const gs = s->s_gs; /* XXX assumes a single page group */
1476
1477 DPRINTF(" %s L(", id);
1478 pdsim_dumpq(gs, CLOCKPRO_LISTQ);
1479 DPRINTF(" ) H(");
1480 pdsim_dumpq(gs, CLOCKPRO_HOTQ(gs));
1481 DPRINTF(" ) C(");
1482 pdsim_dumpq(gs, CLOCKPRO_COLDQ(gs));
1483 DPRINTF(" ) N(");
1484 pdsim_dumpq(gs, CLOCKPRO_NEWQ);
1485 DPRINTF(" ) ncold=%d/%d, coldadj=%d\n",
1486 gs->gs_ncold, gs->gs_coldtarget, gs->gs_coldadj);
1487 #endif /* defined(DEBUG) */
1488 }
1489 #endif /* defined(PDSIM) */
1490