/*	$NetBSD: uvm_pdpolicy_clockpro.c,v 1.9.10.1 2008/02/18 21:07:33 mjf Exp $	*/

/*-
 * Copyright (c)2005, 2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CLOCK-Pro replacement policy:
 *	http://www.cs.wm.edu/hpcs/WWW/HTML/publications/abs05-3.html
 *
 * approximation of the list of non-resident pages using hash:
 *	http://linux-mm.org/ClockProApproximation
 */
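
/*
 * overview (summarized from the code below):
 *
 * - resident pages are kept on a small NEWQ, which absorbs initial
 *   references, and on COLDQ and HOTQ, which form the clock proper.
 *   with the LISTQ option, cold pages whose test period has ended are
 *   parked on a separate LISTQ.
 * - handcold_advance() is HAND_cold: it scans cold pages and selects
 *   victims.  handhot_advance() is HAND_hot: it demotes unreferenced
 *   hot pages to cold.
 * - evicted pages are remembered as cookies in a hash ("non-resident"
 *   pages); a page which faults back in while still remembered there
 *   re-enters the clock as a hot page.
 */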

/* #define	CLOCKPRO_DEBUG */

#if defined(PDSIM)

#include "pdsim.h"

#else /* defined(PDSIM) */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clockpro.c,v 1.9.10.1 2008/02/18 21:07:33 mjf Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/hash.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pdpolicy_impl.h>

#if ((__STDC_VERSION__ - 0) >= 199901L)
#define	DPRINTF(...)	/* nothing */
#define	WARN(...)	printf(__VA_ARGS__)
#else /* ((__STDC_VERSION__ - 0) >= 199901L) */
#define	DPRINTF(a...)	/* nothing */	/* GCC */
#define	WARN(a...)	printf(a)
#endif /* ((__STDC_VERSION__ - 0) >= 199901L) */

#define	dump(a)		/* nothing */

#undef	USEONCE2
#define	LISTQ
#undef	ADAPTIVE

#endif /* defined(PDSIM) */

#if !defined(CLOCKPRO_COLDPCT)
#define	CLOCKPRO_COLDPCT	10
#endif /* !defined(CLOCKPRO_COLDPCT) */

#define	CLOCKPRO_COLDPCTMAX	90

#if !defined(CLOCKPRO_HASHFACTOR)
#define	CLOCKPRO_HASHFACTOR	2
#endif /* !defined(CLOCKPRO_HASHFACTOR) */

#define	CLOCKPRO_NEWQMIN	((1024 * 1024) >> PAGE_SHIFT)	/* XXX */

int clockpro_hashfactor = CLOCKPRO_HASHFACTOR;

PDPOL_EVCNT_DEFINE(nresrecordobj)
PDPOL_EVCNT_DEFINE(nresrecordanon)
PDPOL_EVCNT_DEFINE(nreslookupobj)
PDPOL_EVCNT_DEFINE(nreslookupanon)
PDPOL_EVCNT_DEFINE(nresfoundobj)
PDPOL_EVCNT_DEFINE(nresfoundanon)
PDPOL_EVCNT_DEFINE(nresanonfree)
PDPOL_EVCNT_DEFINE(nresconflict)
PDPOL_EVCNT_DEFINE(nresoverwritten)
PDPOL_EVCNT_DEFINE(nreshandhot)

PDPOL_EVCNT_DEFINE(hhottakeover)
PDPOL_EVCNT_DEFINE(hhotref)
PDPOL_EVCNT_DEFINE(hhotunref)
PDPOL_EVCNT_DEFINE(hhotcold)
PDPOL_EVCNT_DEFINE(hhotcoldtest)

PDPOL_EVCNT_DEFINE(hcoldtakeover)
PDPOL_EVCNT_DEFINE(hcoldref)
PDPOL_EVCNT_DEFINE(hcoldunref)
PDPOL_EVCNT_DEFINE(hcoldreftest)
PDPOL_EVCNT_DEFINE(hcoldunreftest)
PDPOL_EVCNT_DEFINE(hcoldunreftestspeculative)
PDPOL_EVCNT_DEFINE(hcoldhot)

PDPOL_EVCNT_DEFINE(speculativeenqueue)
PDPOL_EVCNT_DEFINE(speculativehit1)
PDPOL_EVCNT_DEFINE(speculativehit2)
PDPOL_EVCNT_DEFINE(speculativemiss)
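/*
 * page flags used by this policy (allocated from the PQ_PRIVATE* bits
 * in pg->pqflags):
 *
 *	PQ_REFERENCED	software copy of the pmap referenced bit.
 *	PQ_HOT		the page is hot; otherwise it is cold.
 *	PQ_TEST		the page is a cold page in its test period.
 *	PQ_INITIALREF	the initial reference will be cleared when the
 *			page leaves NEWQ.
 *	PQ_QMASK	encodes which queue the page is on (see
 *			clockpro_setq()).
 *	PQ_SPECULATIVE	the page was enqueued speculatively, e.g. by
 *			read-ahead, and has not been used yet.
 */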
#define	PQ_REFERENCED	PQ_PRIVATE1
#define	PQ_HOT		PQ_PRIVATE2
#define	PQ_TEST		PQ_PRIVATE3
#define	PQ_INITIALREF	PQ_PRIVATE4
#if PQ_PRIVATE6 != PQ_PRIVATE5 * 2 || PQ_PRIVATE7 != PQ_PRIVATE6 * 2
#error PQ_PRIVATE
#endif
#define	PQ_QMASK	(PQ_PRIVATE5|PQ_PRIVATE6|PQ_PRIVATE7)
#define	PQ_QFACTOR	PQ_PRIVATE5
#define	PQ_SPECULATIVE	PQ_PRIVATE8

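/*
 * queue indices.  CLOCKPRO_NOQUEUE means "not on any queue".  NEWQ
 * holds newly enqueued pages so that their initial references can be
 * cleared; COLDQ and HOTQ form the clock itself.  with the LISTQ
 * option, LISTQ holds cold pages whose test periods HAND_hot has
 * terminated; without it, COLDQ and HOTQ are instead exchanged in
 * place by clockpro_switchqueue().
 */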
#define	CLOCKPRO_NOQUEUE	0
#define	CLOCKPRO_NEWQ		1	/* small queue to clear initial ref. */
#if defined(LISTQ)
#define	CLOCKPRO_COLDQ		2
#define	CLOCKPRO_HOTQ		3
#else /* defined(LISTQ) */
#define	CLOCKPRO_COLDQ		(2 + coldqidx)	/* XXX */
#define	CLOCKPRO_HOTQ		(3 - coldqidx)	/* XXX */
#endif /* defined(LISTQ) */
#define	CLOCKPRO_LISTQ		4
#define	CLOCKPRO_NQUEUE		4

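/*
 * the queue index of a page is encoded in the PQ_QMASK bits of its
 * pqflags; PQ_QFACTOR is the lowest bit of that field.
 */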
static inline void
clockpro_setq(struct vm_page *pg, int qidx)
{
	KASSERT(qidx >= CLOCKPRO_NOQUEUE);
	KASSERT(qidx <= CLOCKPRO_NQUEUE);

	pg->pqflags = (pg->pqflags & ~PQ_QMASK) | (qidx * PQ_QFACTOR);
}

static inline int
clockpro_getq(struct vm_page *pg)
{
	int qidx;

	qidx = (pg->pqflags & PQ_QMASK) / PQ_QFACTOR;
	KASSERT(qidx >= CLOCKPRO_NOQUEUE);
	KASSERT(qidx <= CLOCKPRO_NQUEUE);
	return qidx;
}

typedef struct {
	struct pglist q_q;
	int q_len;
} pageq_t;

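/*
 * global clockpro state.  s_npages and s_ncold count the pages this
 * policy manages; s_coldtarget is the number of cold pages HAND_cold
 * tries to maintain; s_newqlenmax bounds the length of NEWQ.
 */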
struct clockpro_state {
	int s_npages;
	int s_coldtarget;
	int s_ncold;

	int s_newqlenmax;
	pageq_t s_q[CLOCKPRO_NQUEUE];

	struct uvm_pctparam s_coldtargetpct;
};

static pageq_t *
clockpro_queue(struct clockpro_state *s, int qidx)
{

	KASSERT(CLOCKPRO_NOQUEUE < qidx);
	KASSERT(qidx <= CLOCKPRO_NQUEUE);

	return &s->s_q[qidx - 1];
}

#if !defined(LISTQ)

static int coldqidx;

static void
clockpro_switchqueue(void)
{

	coldqidx = 1 - coldqidx;
}

#endif /* !defined(LISTQ) */

static struct clockpro_state clockpro;
static struct clockpro_scanstate {
	int ss_nscanned;
} scanstate;

/* ---------------------------------------- */

static void
pageq_init(pageq_t *q)
{

	TAILQ_INIT(&q->q_q);
	q->q_len = 0;
}

static int
pageq_len(const pageq_t *q)
{

	return q->q_len;
}

static struct vm_page *
pageq_first(const pageq_t *q)
{

	return TAILQ_FIRST(&q->q_q);
}

static void
pageq_insert_tail(pageq_t *q, struct vm_page *pg)
{

	TAILQ_INSERT_TAIL(&q->q_q, pg, pageq);
	q->q_len++;
}

static void
pageq_insert_head(pageq_t *q, struct vm_page *pg)
{

	TAILQ_INSERT_HEAD(&q->q_q, pg, pageq);
	q->q_len++;
}

static void
pageq_remove(pageq_t *q, struct vm_page *pg)
{

#if 1
	KASSERT(clockpro_queue(&clockpro, clockpro_getq(pg)) == q);
#endif
	KASSERT(q->q_len > 0);
	TAILQ_REMOVE(&q->q_q, pg, pageq);
	q->q_len--;
}

static struct vm_page *
pageq_remove_head(pageq_t *q)
{
	struct vm_page *pg;

	pg = TAILQ_FIRST(&q->q_q);
	if (pg == NULL) {
		KASSERT(q->q_len == 0);
		return NULL;
	}
	pageq_remove(q, pg);
	return pg;
}

/* ---------------------------------------- */

static void
clockpro_insert_tail(struct clockpro_state *s, int qidx, struct vm_page *pg)
{
	pageq_t *q = clockpro_queue(s, qidx);

	clockpro_setq(pg, qidx);
	pageq_insert_tail(q, pg);
}

static void
clockpro_insert_head(struct clockpro_state *s, int qidx, struct vm_page *pg)
{
	pageq_t *q = clockpro_queue(s, qidx);

	clockpro_setq(pg, qidx);
	pageq_insert_head(q, pg);
}

/* ---------------------------------------- */

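/*
 * non-resident page tracking: when a page is evicted, a small hash
 * cookie identifying it is recorded in one of the buckets below.  a
 * later pagein probes for (and removes) the cookie; a hit means the
 * page was reused over a short enough distance to be considered hot.
 * cookies are approximate: distinct pages can map to the same cookie.
 */
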
typedef uint32_t nonres_cookie_t;
#define	NONRES_COOKIE_INVAL	0

typedef uintptr_t objid_t;

/*
 * XXX maybe these hash functions need reconsideration,
 * given that hash distribution is critical here.
 */

static uint32_t
pageidentityhash1(objid_t obj, off_t idx)
{
	uint32_t hash = HASH32_BUF_INIT;

#if 1
	hash = hash32_buf(&idx, sizeof(idx), hash);
	hash = hash32_buf(&obj, sizeof(obj), hash);
#else
	hash = hash32_buf(&obj, sizeof(obj), hash);
	hash = hash32_buf(&idx, sizeof(idx), hash);
#endif
	return hash;
}

static uint32_t
pageidentityhash2(objid_t obj, off_t idx)
{
	uint32_t hash = HASH32_BUF_INIT;

	hash = hash32_buf(&obj, sizeof(obj), hash);
	hash = hash32_buf(&idx, sizeof(idx), hash);
	return hash;
}

static nonres_cookie_t
calccookie(objid_t obj, off_t idx)
{
	uint32_t hash = pageidentityhash2(obj, idx);
	nonres_cookie_t cookie = hash;

	if (__predict_false(cookie == NONRES_COOKIE_INVAL)) {
		cookie++; /* XXX */
	}
	return cookie;
}

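/*
 * each bucket is a tiny clock of BUCKETSIZE cookie slots; b->cur is
 * its hand and b->cycle records how far it has been rotated towards
 * the global cycle_target, which HAND_hot advances.  the lag between
 * the two is made up lazily by nonresident_rotate().
 */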
#define	BUCKETSIZE	14
struct bucket {
	int cycle;
	int cur;
	nonres_cookie_t pages[BUCKETSIZE];
};
static int cycle_target;
static int cycle_target_frac;

static struct bucket static_bucket;
static struct bucket *buckets = &static_bucket;
static size_t hashsize = 1;

static int coldadj;
#define	COLDTARGET_ADJ(d)	coldadj += (d)

#if defined(PDSIM)

static void *
clockpro_hashalloc(int n)
{
	size_t allocsz = sizeof(*buckets) * n;

	return malloc(allocsz);
}

static void
clockpro_hashfree(void *p, int n)
{

	free(p);
}

#else /* defined(PDSIM) */

static void *
clockpro_hashalloc(int n)
{
	size_t allocsz = round_page(sizeof(*buckets) * n);

	return (void *)uvm_km_alloc(kernel_map, allocsz, 0, UVM_KMF_WIRED);
}

static void
clockpro_hashfree(void *p, int n)
{
	size_t allocsz = round_page(sizeof(*buckets) * n);

	uvm_km_free(kernel_map, (vaddr_t)p, allocsz, UVM_KMF_WIRED);
}

#endif /* defined(PDSIM) */

static void
clockpro_hashinit(uint64_t n)
{
	struct bucket *newbuckets;
	struct bucket *oldbuckets;
	size_t sz;
	size_t oldsz;
	int i;

	sz = howmany(n, BUCKETSIZE);
	sz *= clockpro_hashfactor;
	newbuckets = clockpro_hashalloc(sz);
	if (newbuckets == NULL) {
		panic("%s: allocation failure", __func__);
	}
	for (i = 0; i < sz; i++) {
		struct bucket *b = &newbuckets[i];
		int j;

		b->cycle = cycle_target;
		b->cur = 0;
		for (j = 0; j < BUCKETSIZE; j++) {
			b->pages[j] = NONRES_COOKIE_INVAL;
		}
	}
	/* XXX lock */
	oldbuckets = buckets;
	oldsz = hashsize;
	buckets = newbuckets;
	hashsize = sz;
	/* XXX unlock */
	if (oldbuckets != &static_bucket) {
		clockpro_hashfree(oldbuckets, oldsz);
	}
}

static struct bucket *
nonresident_getbucket(objid_t obj, off_t idx)
{
	uint32_t hash;

	hash = pageidentityhash1(obj, idx);
	return &buckets[hash % hashsize];
}

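/*
 * catch a bucket up with cycle_target, invalidating the entries the
 * bucket's hand passes over; an entry overtaken by the hand has gone
 * a whole lap without being re-referenced, so its test period is
 * deemed expired.  a deficit of two laps or more would clear every
 * slot anyway, so it is first reduced while preserving the final
 * hand position.
 */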
static void
nonresident_rotate(struct bucket *b)
{
	const int target = cycle_target;
	const int cycle = b->cycle;
	int cur;
	int todo;

	todo = target - cycle;
	if (todo >= BUCKETSIZE * 2) {
		todo = (todo % BUCKETSIZE) + BUCKETSIZE;
	}
	cur = b->cur;
	while (todo > 0) {
		if (b->pages[cur] != NONRES_COOKIE_INVAL) {
			PDPOL_EVCNT_INCR(nreshandhot);
			COLDTARGET_ADJ(-1);
		}
		b->pages[cur] = NONRES_COOKIE_INVAL;
		cur++;
		if (cur == BUCKETSIZE) {
			cur = 0;
		}
		todo--;
	}
	b->cycle = target;
	b->cur = cur;
}

static bool
nonresident_lookupremove(objid_t obj, off_t idx)
{
	struct bucket *b = nonresident_getbucket(obj, idx);
	nonres_cookie_t cookie = calccookie(obj, idx);
	int i;

	nonresident_rotate(b);
	for (i = 0; i < BUCKETSIZE; i++) {
		if (b->pages[i] == cookie) {
			b->pages[i] = NONRES_COOKIE_INVAL;
			return true;
		}
	}
	return false;
}

static objid_t
pageobj(struct vm_page *pg)
{
	const void *obj;

	/*
	 * XXX an object pointer is often freed and reused for an
	 * unrelated object.  for vnodes, it would be better to use
	 * something like a hash of fsid/fileid/generation.
	 */

	obj = pg->uobject;
	if (obj == NULL) {
		obj = pg->uanon;
		KASSERT(obj != NULL);
		KASSERT(pg->offset == 0);
	}

	return (objid_t)obj;
}

static off_t
pageidx(struct vm_page *pg)
{

	KASSERT((pg->offset & PAGE_MASK) == 0);
	return pg->offset >> PAGE_SHIFT;
}

static bool
nonresident_pagelookupremove(struct vm_page *pg)
{
	bool found = nonresident_lookupremove(pageobj(pg), pageidx(pg));

	if (pg->uobject) {
		PDPOL_EVCNT_INCR(nreslookupobj);
	} else {
		PDPOL_EVCNT_INCR(nreslookupanon);
	}
	if (found) {
		if (pg->uobject) {
			PDPOL_EVCNT_INCR(nresfoundobj);
		} else {
			PDPOL_EVCNT_INCR(nresfoundanon);
		}
	}
	return found;
}

static void
nonresident_pagerecord(struct vm_page *pg)
{
	objid_t obj = pageobj(pg);
	off_t idx = pageidx(pg);
	struct bucket *b = nonresident_getbucket(obj, idx);
	nonres_cookie_t cookie = calccookie(obj, idx);

#if defined(DEBUG)
	int i;

	for (i = 0; i < BUCKETSIZE; i++) {
		if (b->pages[i] == cookie) {
			PDPOL_EVCNT_INCR(nresconflict);
		}
	}
#endif /* defined(DEBUG) */

	if (pg->uobject) {
		PDPOL_EVCNT_INCR(nresrecordobj);
	} else {
		PDPOL_EVCNT_INCR(nresrecordanon);
	}
	nonresident_rotate(b);
	if (b->pages[b->cur] != NONRES_COOKIE_INVAL) {
		PDPOL_EVCNT_INCR(nresoverwritten);
		COLDTARGET_ADJ(-1);
	}
	b->pages[b->cur] = cookie;
	b->cur = (b->cur + 1) % BUCKETSIZE;
}

/* ---------------------------------------- */

#if defined(CLOCKPRO_DEBUG)
static void
check_sanity(void)
{
}
#else /* defined(CLOCKPRO_DEBUG) */
#define	check_sanity()	/* nothing */
#endif /* defined(CLOCKPRO_DEBUG) */

static void
clockpro_reinit(void)
{

	clockpro_hashinit(uvmexp.npages);
}

static void
clockpro_init(void)
{
	struct clockpro_state *s = &clockpro;
	int i;

	for (i = 0; i < CLOCKPRO_NQUEUE; i++) {
		pageq_init(&s->s_q[i]);
	}
	s->s_newqlenmax = 1;
	s->s_coldtarget = 1;
	uvm_pctparam_init(&s->s_coldtargetpct, CLOCKPRO_COLDPCT, NULL);
}

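/*
 * recompute s_coldtarget and s_newqlenmax.  with ADAPTIVE, the target
 * tracks coldadj, the running adjustment made via COLDTARGET_ADJ();
 * otherwise it is a fixed percentage (coldtargetpct) of s_npages.
 */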
static void
clockpro_tune(void)
{
	struct clockpro_state *s = &clockpro;
	int coldtarget;

#if defined(ADAPTIVE)
	int coldmax = s->s_npages * CLOCKPRO_COLDPCTMAX / 100;
	int coldmin = 1;

	coldtarget = s->s_coldtarget;
	if (coldtarget + coldadj < coldmin) {
		coldadj = coldmin - coldtarget;
	} else if (coldtarget + coldadj > coldmax) {
		coldadj = coldmax - coldtarget;
	}
	coldtarget += coldadj;
#else /* defined(ADAPTIVE) */
	coldtarget = UVM_PCTPARAM_APPLY(&s->s_coldtargetpct, s->s_npages);
	if (coldtarget < 1) {
		coldtarget = 1;
	}
#endif /* defined(ADAPTIVE) */

	s->s_coldtarget = coldtarget;
	s->s_newqlenmax = coldtarget / 4;
	if (s->s_newqlenmax < CLOCKPRO_NEWQMIN) {
		s->s_newqlenmax = CLOCKPRO_NEWQMIN;
	}
}

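/*
 * fold the pmap-level referenced bit into PQ_REFERENCED, clearing the
 * pmap bit so that further references can be detected.
 */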
static void
clockpro_movereferencebit(struct vm_page *pg)
{
	bool referenced;

	referenced = pmap_clear_reference(pg);
	if (referenced) {
		pg->pqflags |= PQ_REFERENCED;
	}
}

static void
clockpro_clearreferencebit(struct vm_page *pg)
{

	clockpro_movereferencebit(pg);
	pg->pqflags &= ~PQ_REFERENCED;
}

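/*
 * trim NEWQ down to the given length, moving the overflow to the tail
 * of COLDQ.  pages marked PQ_INITIALREF have their reference bit
 * cleared on the way out so that the initial reference is not counted
 * as a reuse.
 */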
static void
clockpro___newqrotate(int len)
{
	struct clockpro_state * const s = &clockpro;
	pageq_t * const newq = clockpro_queue(s, CLOCKPRO_NEWQ);
	struct vm_page *pg;

	while (pageq_len(newq) > len) {
		pg = pageq_remove_head(newq);
		KASSERT(pg != NULL);
		KASSERT(clockpro_getq(pg) == CLOCKPRO_NEWQ);
		if ((pg->pqflags & PQ_INITIALREF) != 0) {
			clockpro_clearreferencebit(pg);
			pg->pqflags &= ~PQ_INITIALREF;
		}
		/* place at the list head */
		clockpro_insert_tail(s, CLOCKPRO_COLDQ, pg);
	}
}

static void
clockpro_newqrotate(void)
{
	struct clockpro_state * const s = &clockpro;

	check_sanity();
	clockpro___newqrotate(s->s_newqlenmax);
	check_sanity();
}

static void
clockpro_newqflush(int n)
{

	check_sanity();
	clockpro___newqrotate(n);
	check_sanity();
}

static void
clockpro_newqflushone(void)
{
	struct clockpro_state * const s = &clockpro;

	clockpro_newqflush(
	    MAX(pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)) - 1, 0));
}

/*
 * our "tail" is called "list-head" in the paper.
 */

static void
clockpro___enqueuetail(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;

	KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);

	check_sanity();
#if !defined(USEONCE2)
	clockpro_insert_tail(s, CLOCKPRO_NEWQ, pg);
	clockpro_newqrotate();
#else /* !defined(USEONCE2) */
#if defined(LISTQ)
	KASSERT((pg->pqflags & PQ_REFERENCED) == 0);
#endif /* defined(LISTQ) */
	clockpro_insert_tail(s, CLOCKPRO_COLDQ, pg);
#endif /* !defined(USEONCE2) */
	check_sanity();
}

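/*
 * add a page to the clock.  in the default (!USEONCE2) configuration,
 * a page remembered in the non-resident hash enters as a hot page,
 * while a speculative or unremembered page enters as a cold page, the
 * latter in its test period (PQ_TEST).
 */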
static void
clockpro_pageenqueue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	bool hot;
	bool speculative = (pg->pqflags & PQ_SPECULATIVE) != 0; /* XXX */

	KASSERT((~pg->pqflags & (PQ_INITIALREF|PQ_SPECULATIVE)) != 0);
	KASSERT(mutex_owned(&uvm_pageqlock));
	check_sanity();
	KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);
	s->s_npages++;
	pg->pqflags &= ~(PQ_HOT|PQ_TEST);
	if (speculative) {
		hot = false;
		PDPOL_EVCNT_INCR(speculativeenqueue);
	} else {
		hot = nonresident_pagelookupremove(pg);
		if (hot) {
			COLDTARGET_ADJ(1);
		}
	}

	/*
	 * consider an mmap'ed file:
	 *
	 * - read-ahead enqueues a page.
	 *
	 * - on the following read-ahead hit, the fault handler activates it.
	 *
	 * - finally, the userland code which caused the above fault
	 *   actually accesses the page, setting its reference bit.
	 *
	 * we want to count the above as a single access, rather than
	 * three accesses with short reuse distances.
	 */

#if defined(USEONCE2)
	pg->pqflags &= ~PQ_INITIALREF;
	if (hot) {
		pg->pqflags |= PQ_TEST;
	}
	s->s_ncold++;
	clockpro_clearreferencebit(pg);
	clockpro___enqueuetail(pg);
#else /* defined(USEONCE2) */
	if (speculative) {
		s->s_ncold++;
	} else if (hot) {
		pg->pqflags |= PQ_HOT;
	} else {
		pg->pqflags |= PQ_TEST;
		s->s_ncold++;
	}
	clockpro___enqueuetail(pg);
#endif /* defined(USEONCE2) */
	KASSERT(s->s_ncold <= s->s_npages);
}

static pageq_t *
clockpro_pagequeue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	int qidx;

	qidx = clockpro_getq(pg);
	KASSERT(qidx != CLOCKPRO_NOQUEUE);

	return clockpro_queue(s, qidx);
}

static void
clockpro_pagedequeue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	pageq_t *q;

	KASSERT(s->s_npages > 0);
	check_sanity();
	q = clockpro_pagequeue(pg);
	pageq_remove(q, pg);
	check_sanity();
	clockpro_setq(pg, CLOCKPRO_NOQUEUE);
	if ((pg->pqflags & PQ_HOT) == 0) {
		KASSERT(s->s_ncold > 0);
		s->s_ncold--;
	}
	KASSERT(s->s_npages > 0);
	s->s_npages--;
	check_sanity();
}

static void
clockpro_pagerequeue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	int qidx;

	qidx = clockpro_getq(pg);
	KASSERT(qidx == CLOCKPRO_HOTQ || qidx == CLOCKPRO_COLDQ);
	pageq_remove(clockpro_queue(s, qidx), pg);
	check_sanity();
	clockpro_setq(pg, CLOCKPRO_NOQUEUE);

	clockpro___enqueuetail(pg);
}

static void
handhot_endtest(struct vm_page *pg)
{

	KASSERT((pg->pqflags & PQ_HOT) == 0);
	if ((pg->pqflags & PQ_TEST) != 0) {
		PDPOL_EVCNT_INCR(hhotcoldtest);
		COLDTARGET_ADJ(-1);
		pg->pqflags &= ~PQ_TEST;
	} else {
		PDPOL_EVCNT_INCR(hhotcold);
	}
}

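/*
 * HAND_hot: run until there are enough cold pages (s_ncold reaches
 * s_coldtarget), demoting hot pages found unreferenced to cold.  it
 * also advances cycle_target, which indirectly ends the test periods
 * of non-resident pages (see nonresident_rotate()).
 */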
static void
handhot_advance(void)
{
	struct clockpro_state * const s = &clockpro;
	struct vm_page *pg;
	pageq_t *hotq;
	int hotqlen;

	clockpro_tune();

	dump("hot called");
	if (s->s_ncold >= s->s_coldtarget) {
		return;
	}
	hotq = clockpro_queue(s, CLOCKPRO_HOTQ);
again:
	pg = pageq_first(hotq);
	if (pg == NULL) {
		DPRINTF("%s: HHOT TAKEOVER\n", __func__);
		dump("hhottakeover");
		PDPOL_EVCNT_INCR(hhottakeover);
#if defined(LISTQ)
		while (/* CONSTCOND */ 1) {
			pageq_t *coldq = clockpro_queue(s, CLOCKPRO_COLDQ);

			pg = pageq_first(coldq);
			if (pg == NULL) {
				clockpro_newqflushone();
				pg = pageq_first(coldq);
				if (pg == NULL) {
					WARN("hhot: no page?\n");
					return;
				}
			}
			KASSERT(clockpro_pagequeue(pg) == coldq);
			pageq_remove(coldq, pg);
			check_sanity();
			if ((pg->pqflags & PQ_HOT) == 0) {
				handhot_endtest(pg);
				clockpro_insert_tail(s, CLOCKPRO_LISTQ, pg);
			} else {
				clockpro_insert_head(s, CLOCKPRO_HOTQ, pg);
				break;
			}
		}
#else /* defined(LISTQ) */
		clockpro_newqflush(0); /* XXX XXX */
		clockpro_switchqueue();
		hotq = clockpro_queue(s, CLOCKPRO_HOTQ);
		goto again;
#endif /* defined(LISTQ) */
	}

	KASSERT(clockpro_pagequeue(pg) == hotq);

	/*
	 * terminate the test periods of nonresident pages by cycling them.
	 */

	cycle_target_frac += BUCKETSIZE;
	hotqlen = pageq_len(hotq);
	while (cycle_target_frac >= hotqlen) {
		cycle_target++;
		cycle_target_frac -= hotqlen;
	}

	if ((pg->pqflags & PQ_HOT) == 0) {
#if defined(LISTQ)
		panic("cold page in hotq: %p", pg);
#else /* defined(LISTQ) */
		handhot_endtest(pg);
		goto next;
#endif /* defined(LISTQ) */
	}
	KASSERT((pg->pqflags & PQ_TEST) == 0);
	KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
	KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);

	/*
	 * once we meet our target, stop at a hot page so that no cold
	 * page in its test period has larger recency than any hot page.
	 */

	if (s->s_ncold >= s->s_coldtarget) {
		dump("hot done");
		return;
	}
	clockpro_movereferencebit(pg);
	if ((pg->pqflags & PQ_REFERENCED) == 0) {
		PDPOL_EVCNT_INCR(hhotunref);
		uvmexp.pddeact++;
		pg->pqflags &= ~PQ_HOT;
		clockpro.s_ncold++;
		KASSERT(s->s_ncold <= s->s_npages);
	} else {
		PDPOL_EVCNT_INCR(hhotref);
	}
	pg->pqflags &= ~PQ_REFERENCED;
#if !defined(LISTQ)
next:
#endif /* !defined(LISTQ) */
	clockpro_pagerequeue(pg);
	dump("hot");
	goto again;
}

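/*
 * HAND_cold: find the next victim page.  referenced cold pages are
 * given another chance (and promoted to hot if referenced while in
 * their test period); an unreferenced cold page is returned as the
 * victim, after being recorded in the non-resident hash if its test
 * period was still running.
 */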
static struct vm_page *
handcold_advance(void)
{
	struct clockpro_state * const s = &clockpro;
	struct vm_page *pg;

	for (;;) {
#if defined(LISTQ)
		pageq_t *listq = clockpro_queue(s, CLOCKPRO_LISTQ);
#endif /* defined(LISTQ) */
		pageq_t *coldq;

		clockpro_newqrotate();
		handhot_advance();
#if defined(LISTQ)
		pg = pageq_first(listq);
		if (pg != NULL) {
			KASSERT(clockpro_getq(pg) == CLOCKPRO_LISTQ);
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			KASSERT((pg->pqflags & PQ_HOT) == 0);
			KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
			pageq_remove(listq, pg);
			check_sanity();
			clockpro_insert_head(s, CLOCKPRO_COLDQ, pg); /* XXX */
			goto gotcold;
		}
#endif /* defined(LISTQ) */
		check_sanity();
		coldq = clockpro_queue(s, CLOCKPRO_COLDQ);
		pg = pageq_first(coldq);
		if (pg == NULL) {
			clockpro_newqflushone();
			pg = pageq_first(coldq);
		}
		if (pg == NULL) {
			DPRINTF("%s: HCOLD TAKEOVER\n", __func__);
			dump("hcoldtakeover");
			PDPOL_EVCNT_INCR(hcoldtakeover);
			KASSERT(
			    pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)) == 0);
#if defined(LISTQ)
			KASSERT(
			    pageq_len(clockpro_queue(s, CLOCKPRO_HOTQ)) == 0);
#else /* defined(LISTQ) */
			clockpro_switchqueue();
			coldq = clockpro_queue(s, CLOCKPRO_COLDQ);
			pg = pageq_first(coldq);
#endif /* defined(LISTQ) */
		}
		if (pg == NULL) {
			WARN("hcold: no page?\n");
			return NULL;
		}
		KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
		if ((pg->pqflags & PQ_HOT) != 0) {
			PDPOL_EVCNT_INCR(hcoldhot);
			pageq_remove(coldq, pg);
			clockpro_insert_tail(s, CLOCKPRO_HOTQ, pg);
			check_sanity();
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			uvmexp.pdscans++;
			continue;
		}
#if defined(LISTQ)
gotcold:
#endif /* defined(LISTQ) */
		KASSERT((pg->pqflags & PQ_HOT) == 0);
		uvmexp.pdscans++;
		clockpro_movereferencebit(pg);
		if ((pg->pqflags & PQ_SPECULATIVE) != 0) {
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			if ((pg->pqflags & PQ_REFERENCED) != 0) {
				PDPOL_EVCNT_INCR(speculativehit2);
				pg->pqflags &= ~(PQ_SPECULATIVE|PQ_REFERENCED);
				clockpro_pagedequeue(pg);
				clockpro_pageenqueue(pg);
				continue;
			}
			PDPOL_EVCNT_INCR(speculativemiss);
		}
		switch (pg->pqflags & (PQ_REFERENCED|PQ_TEST)) {
		case PQ_TEST:
			PDPOL_EVCNT_INCR(hcoldunreftest);
			nonresident_pagerecord(pg);
			goto gotit;
		case 0:
			PDPOL_EVCNT_INCR(hcoldunref);
gotit:
			KASSERT(s->s_ncold > 0);
			clockpro_pagerequeue(pg); /* XXX */
			dump("cold done");
			/* XXX "pg" is still in queue */
			handhot_advance();
			goto done;

		case PQ_REFERENCED|PQ_TEST:
			PDPOL_EVCNT_INCR(hcoldreftest);
			s->s_ncold--;
			COLDTARGET_ADJ(1);
			pg->pqflags |= PQ_HOT;
			pg->pqflags &= ~PQ_TEST;
			break;

		case PQ_REFERENCED:
			PDPOL_EVCNT_INCR(hcoldref);
			pg->pqflags |= PQ_TEST;
			break;
		}
		pg->pqflags &= ~PQ_REFERENCED;
		uvmexp.pdreact++;
		/* move to the list head */
		clockpro_pagerequeue(pg);
		dump("cold");
	}
done:;
	return pg;
}

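/*
 * uvmpdpol_*: the pdpolicy entry points called by the rest of uvm.
 */
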
void
uvmpdpol_pageactivate(struct vm_page *pg)
{

	if (!uvmpdpol_pageisqueued_p(pg)) {
		KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);
		pg->pqflags |= PQ_INITIALREF;
		clockpro_pageenqueue(pg);
	} else if ((pg->pqflags & PQ_SPECULATIVE)) {
		PDPOL_EVCNT_INCR(speculativehit1);
		pg->pqflags &= ~PQ_SPECULATIVE;
		pg->pqflags |= PQ_INITIALREF;
		clockpro_pagedequeue(pg);
		clockpro_pageenqueue(pg);
	}
	pg->pqflags |= PQ_REFERENCED;
}

void
uvmpdpol_pagedeactivate(struct vm_page *pg)
{

	clockpro_clearreferencebit(pg);
}

void
uvmpdpol_pagedequeue(struct vm_page *pg)
{

	if (!uvmpdpol_pageisqueued_p(pg)) {
		return;
	}
	clockpro_pagedequeue(pg);
	pg->pqflags &= ~(PQ_INITIALREF|PQ_SPECULATIVE);
}

void
uvmpdpol_pageenqueue(struct vm_page *pg)
{

#if 1
	if (uvmpdpol_pageisqueued_p(pg)) {
		return;
	}
	clockpro_clearreferencebit(pg);
	pg->pqflags |= PQ_SPECULATIVE;
	clockpro_pageenqueue(pg);
#else
	uvmpdpol_pageactivate(pg);
#endif
}

void
uvmpdpol_anfree(struct vm_anon *an)
{

	KASSERT(an->an_page == NULL);
	if (nonresident_lookupremove((objid_t)an, 0)) {
		PDPOL_EVCNT_INCR(nresanonfree);
	}
}

void
uvmpdpol_init(void)
{

	clockpro_init();
}

void
uvmpdpol_reinit(void)
{

	clockpro_reinit();
}

void
uvmpdpol_estimatepageable(int *active, int *inactive)
{
	struct clockpro_state * const s = &clockpro;

	if (active) {
		*active = s->s_npages - s->s_ncold;
	}
	if (inactive) {
		*inactive = s->s_ncold;
	}
}

bool
uvmpdpol_pageisqueued_p(struct vm_page *pg)
{

	return clockpro_getq(pg) != CLOCKPRO_NOQUEUE;
}

void
uvmpdpol_scaninit(void)
{
	struct clockpro_scanstate * const ss = &scanstate;

	ss->ss_nscanned = 0;
}

struct vm_page *
uvmpdpol_selectvictim(void)
{
	struct clockpro_state * const s = &clockpro;
	struct clockpro_scanstate * const ss = &scanstate;
	struct vm_page *pg;

	if (ss->ss_nscanned > s->s_npages) {
		DPRINTF("scan too much\n");
		return NULL;
	}
	pg = handcold_advance();
	ss->ss_nscanned++;
	return pg;
}

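/*
 * scan a queue from its tail, trying to free the swap slots of
 * swap-backed hot pages until the given shortage is covered.
 */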
static void
clockpro_dropswap(pageq_t *q, int *todo)
{
	struct vm_page *pg;

	TAILQ_FOREACH_REVERSE(pg, &q->q_q, pglist, pageq) {
		if (*todo <= 0) {
			break;
		}
		if ((pg->pqflags & PQ_HOT) == 0) {
			continue;
		}
		if ((pg->pqflags & PQ_SWAPBACKED) == 0) {
			continue;
		}
		if (uvmpd_trydropswap(pg)) {
			(*todo)--;
		}
	}
}

void
uvmpdpol_balancequeue(int swap_shortage)
{
	struct clockpro_state * const s = &clockpro;
	int todo = swap_shortage;

	if (todo == 0) {
		return;
	}

	/*
	 * reclaim swap slots from hot pages
	 */

	DPRINTF("%s: swap_shortage=%d\n", __func__, swap_shortage);

	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_NEWQ), &todo);
	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_COLDQ), &todo);
	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_HOTQ), &todo);

	DPRINTF("%s: done=%d\n", __func__, swap_shortage - todo);
}

bool
uvmpdpol_needsscan_p(void)
{
	struct clockpro_state * const s = &clockpro;

	if (s->s_ncold < s->s_coldtarget) {
		return true;
	}
	return false;
}

void
uvmpdpol_tune(void)
{

	clockpro_tune();
}

#if !defined(PDSIM)

#include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */

void
uvmpdpol_sysctlsetup(void)
{
#if !defined(ADAPTIVE)
	struct clockpro_state * const s = &clockpro;

	uvm_pctparam_createsysctlnode(&s->s_coldtargetpct, "coldtargetpct",
	    SYSCTL_DESCR("Target percentage of cold pages in the entire queue"));
#endif /* !defined(ADAPTIVE) */
}

#endif /* !defined(PDSIM) */

#if defined(DDB)

void clockpro_dump(void);

void
clockpro_dump(void)
{
	struct clockpro_state * const s = &clockpro;

	struct vm_page *pg;
	int ncold, nhot, ntest, nspeculative, ninitialref, nref;
	int newqlen, coldqlen, hotqlen, listqlen;

	newqlen = coldqlen = hotqlen = listqlen = 0;
	printf("npages=%d, ncold=%d, coldtarget=%d, newqlenmax=%d\n",
	    s->s_npages, s->s_ncold, s->s_coldtarget, s->s_newqlenmax);

#define	INITCOUNT()	\
	ncold = nhot = ntest = nspeculative = ninitialref = nref = 0

#define	COUNT(pg)	\
	if ((pg->pqflags & PQ_HOT) != 0) { \
		nhot++; \
	} else { \
		ncold++; \
		if ((pg->pqflags & PQ_TEST) != 0) { \
			ntest++; \
		} \
		if ((pg->pqflags & PQ_SPECULATIVE) != 0) { \
			nspeculative++; \
		} \
		if ((pg->pqflags & PQ_INITIALREF) != 0) { \
			ninitialref++; \
		} else if ((pg->pqflags & PQ_REFERENCED) != 0 || \
		    pmap_is_referenced(pg)) { \
			nref++; \
		} \
	}

#define	PRINTCOUNT(name)	\
	printf("%s hot=%d, cold=%d, test=%d, speculative=%d, initialref=%d, " \
	    "nref=%d\n", \
	    (name), nhot, ncold, ntest, nspeculative, ninitialref, nref)

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_NEWQ)->q_q, pageq) {
		if (clockpro_getq(pg) != CLOCKPRO_NEWQ) {
			printf("newq corrupt %p\n", pg);
		}
		COUNT(pg)
		newqlen++;
	}
	PRINTCOUNT("newq");

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_COLDQ)->q_q, pageq) {
		if (clockpro_getq(pg) != CLOCKPRO_COLDQ) {
			printf("coldq corrupt %p\n", pg);
		}
		COUNT(pg)
		coldqlen++;
	}
	PRINTCOUNT("coldq");

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_HOTQ)->q_q, pageq) {
		if (clockpro_getq(pg) != CLOCKPRO_HOTQ) {
			printf("hotq corrupt %p\n", pg);
		}
#if defined(LISTQ)
		if ((pg->pqflags & PQ_HOT) == 0) {
			printf("cold page in hotq: %p\n", pg);
		}
#endif /* defined(LISTQ) */
		COUNT(pg)
		hotqlen++;
	}
	PRINTCOUNT("hotq");

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_LISTQ)->q_q, pageq) {
#if !defined(LISTQ)
		printf("listq %p\n", pg);
#endif /* !defined(LISTQ) */
		if (clockpro_getq(pg) != CLOCKPRO_LISTQ) {
			printf("listq corrupt %p\n", pg);
		}
		COUNT(pg)
		listqlen++;
	}
	PRINTCOUNT("listq");

	printf("newqlen=%d/%d, coldqlen=%d/%d, hotqlen=%d/%d, listqlen=%d/%d\n",
	    newqlen, pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)),
	    coldqlen, pageq_len(clockpro_queue(s, CLOCKPRO_COLDQ)),
	    hotqlen, pageq_len(clockpro_queue(s, CLOCKPRO_HOTQ)),
	    listqlen, pageq_len(clockpro_queue(s, CLOCKPRO_LISTQ)));
}

#endif /* defined(DDB) */

#if defined(PDSIM)
#if defined(DEBUG)
static void
pdsim_dumpq(int qidx)
{
	struct clockpro_state * const s = &clockpro;
	pageq_t *q = clockpro_queue(s, qidx);
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &q->q_q, pageq) {
		DPRINTF(" %" PRIu64 "%s%s%s%s%s%s",
		    pg->offset >> PAGE_SHIFT,
		    (pg->pqflags & PQ_HOT) ? "H" : "",
		    (pg->pqflags & PQ_TEST) ? "T" : "",
		    (pg->pqflags & PQ_REFERENCED) ? "R" : "",
		    pmap_is_referenced(pg) ? "r" : "",
		    (pg->pqflags & PQ_INITIALREF) ? "I" : "",
		    (pg->pqflags & PQ_SPECULATIVE) ? "S" : ""
		    );
	}
}
#endif /* defined(DEBUG) */

void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	struct clockpro_state * const s = &clockpro;

	DPRINTF(" %s L(", id);
	pdsim_dumpq(CLOCKPRO_LISTQ);
	DPRINTF(" ) H(");
	pdsim_dumpq(CLOCKPRO_HOTQ);
	DPRINTF(" ) C(");
	pdsim_dumpq(CLOCKPRO_COLDQ);
	DPRINTF(" ) N(");
	pdsim_dumpq(CLOCKPRO_NEWQ);
	DPRINTF(" ) ncold=%d/%d, coldadj=%d\n",
	    s->s_ncold, s->s_coldtarget, coldadj);
#endif /* defined(DEBUG) */
}
#endif /* defined(PDSIM) */
1409