/*	$NetBSD: uvm_pdpolicy_clockpro.c,v 1.16 2011/02/05 13:33:47 yamt Exp $	*/

/*-
 * Copyright (c)2005, 2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CLOCK-Pro replacement policy:
 *	http://www.cs.wm.edu/hpcs/WWW/HTML/publications/abs05-3.html
 *
 * approximation of the list of non-resident pages using hash:
 *	http://linux-mm.org/ClockProApproximation
 */
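
/*
 * overview of the implementation (terms follow the paper):
 *
 * - a resident page is either "hot" (PQ_HOT) or "cold"; a cold page
 *   may additionally be in its "test period" (PQ_TEST).
 * - the hot hand (handhot_advance) demotes hot pages whose reference
 *   bit is clear, so that enough cold pages are available.
 * - the cold hand (handcold_advance) picks eviction victims among the
 *   cold pages and promotes cold pages re-referenced within their
 *   test period.
 * - pages evicted during their test period are remembered as cookies
 *   in the non-resident hash below; a page which faults back in while
 *   still remembered there has a short reuse distance and re-enters
 *   as a hot page.
 */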

/* #define	CLOCKPRO_DEBUG */

#if defined(PDSIM)

#include "pdsim.h"

#else /* defined(PDSIM) */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clockpro.c,v 1.16 2011/02/05 13:33:47 yamt Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/hash.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pdpolicy_impl.h>

#if ((__STDC_VERSION__ - 0) >= 199901L)
#define	DPRINTF(...)	/* nothing */
#define	WARN(...)	printf(__VA_ARGS__)
#else /* ((__STDC_VERSION__ - 0) >= 199901L) */
#define	DPRINTF(a...)	/* nothing */	/* GCC */
#define	WARN(a...)	printf(a)
#endif /* ((__STDC_VERSION__ - 0) >= 199901L) */

#define	dump(a)		/* nothing */

#undef	USEONCE2
#define	LISTQ
#undef	ADAPTIVE

#endif /* defined(PDSIM) */

#if !defined(CLOCKPRO_COLDPCT)
#define	CLOCKPRO_COLDPCT	10
#endif /* !defined(CLOCKPRO_COLDPCT) */

#define	CLOCKPRO_COLDPCTMAX	90

#if !defined(CLOCKPRO_HASHFACTOR)
#define	CLOCKPRO_HASHFACTOR	2
#endif /* !defined(CLOCKPRO_HASHFACTOR) */

#define	CLOCKPRO_NEWQMIN	((1024 * 1024) >> PAGE_SHIFT)	/* XXX */

int clockpro_hashfactor = CLOCKPRO_HASHFACTOR;

PDPOL_EVCNT_DEFINE(nresrecordobj)
PDPOL_EVCNT_DEFINE(nresrecordanon)
PDPOL_EVCNT_DEFINE(nreslookupobj)
PDPOL_EVCNT_DEFINE(nreslookupanon)
PDPOL_EVCNT_DEFINE(nresfoundobj)
PDPOL_EVCNT_DEFINE(nresfoundanon)
PDPOL_EVCNT_DEFINE(nresanonfree)
PDPOL_EVCNT_DEFINE(nresconflict)
PDPOL_EVCNT_DEFINE(nresoverwritten)
PDPOL_EVCNT_DEFINE(nreshandhot)

PDPOL_EVCNT_DEFINE(hhottakeover)
PDPOL_EVCNT_DEFINE(hhotref)
PDPOL_EVCNT_DEFINE(hhotunref)
PDPOL_EVCNT_DEFINE(hhotcold)
PDPOL_EVCNT_DEFINE(hhotcoldtest)

PDPOL_EVCNT_DEFINE(hcoldtakeover)
PDPOL_EVCNT_DEFINE(hcoldref)
PDPOL_EVCNT_DEFINE(hcoldunref)
PDPOL_EVCNT_DEFINE(hcoldreftest)
PDPOL_EVCNT_DEFINE(hcoldunreftest)
PDPOL_EVCNT_DEFINE(hcoldunreftestspeculative)
PDPOL_EVCNT_DEFINE(hcoldhot)

PDPOL_EVCNT_DEFINE(speculativeenqueue)
PDPOL_EVCNT_DEFINE(speculativehit1)
PDPOL_EVCNT_DEFINE(speculativehit2)
PDPOL_EVCNT_DEFINE(speculativemiss)

#define	PQ_REFERENCED	PQ_PRIVATE1
#define	PQ_HOT		PQ_PRIVATE2
#define	PQ_TEST		PQ_PRIVATE3
#define	PQ_INITIALREF	PQ_PRIVATE4
#if PQ_PRIVATE6 != PQ_PRIVATE5 * 2 || PQ_PRIVATE7 != PQ_PRIVATE6 * 2
#error PQ_PRIVATE
#endif
#define	PQ_QMASK	(PQ_PRIVATE5|PQ_PRIVATE6|PQ_PRIVATE7)
#define	PQ_QFACTOR	PQ_PRIVATE5
#define	PQ_SPECULATIVE	PQ_PRIVATE8

#define	CLOCKPRO_NOQUEUE	0
#define	CLOCKPRO_NEWQ		1	/* small queue to clear initial ref. */
#if defined(LISTQ)
#define	CLOCKPRO_COLDQ		2
#define	CLOCKPRO_HOTQ		3
#else /* defined(LISTQ) */
#define	CLOCKPRO_COLDQ		(2 + coldqidx)	/* XXX */
#define	CLOCKPRO_HOTQ		(3 - coldqidx)	/* XXX */
#endif /* defined(LISTQ) */
#define	CLOCKPRO_LISTQ		4
#define	CLOCKPRO_NQUEUE		4

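/*
 * a page's current queue index is encoded in pg->pqflags: the index
 * (CLOCKPRO_NOQUEUE .. CLOCKPRO_NQUEUE) is stored in the PQ_QMASK bits,
 * scaled by PQ_QFACTOR (the lowest bit of the mask).  e.g. a page on
 * CLOCKPRO_HOTQ has (pg->pqflags & PQ_QMASK) == 3 * PQ_QFACTOR.
 */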
static inline void
clockpro_setq(struct vm_page *pg, int qidx)
{
	KASSERT(qidx >= CLOCKPRO_NOQUEUE);
	KASSERT(qidx <= CLOCKPRO_NQUEUE);

	pg->pqflags = (pg->pqflags & ~PQ_QMASK) | (qidx * PQ_QFACTOR);
}

static inline int
clockpro_getq(struct vm_page *pg)
{
	int qidx;

	qidx = (pg->pqflags & PQ_QMASK) / PQ_QFACTOR;
	KASSERT(qidx >= CLOCKPRO_NOQUEUE);
	KASSERT(qidx <= CLOCKPRO_NQUEUE);
	return qidx;
}

typedef struct {
	struct pglist q_q;
	int q_len;
} pageq_t;

struct clockpro_state {
	int s_npages;
	int s_coldtarget;
	int s_ncold;

	int s_newqlenmax;
	pageq_t s_q[CLOCKPRO_NQUEUE];

	struct uvm_pctparam s_coldtargetpct;
};

static pageq_t *
clockpro_queue(struct clockpro_state *s, int qidx)
{

	KASSERT(CLOCKPRO_NOQUEUE < qidx);
	KASSERT(qidx <= CLOCKPRO_NQUEUE);

	return &s->s_q[qidx - 1];
}

#if !defined(LISTQ)

static int coldqidx;

static void
clockpro_switchqueue(void)
{

	coldqidx = 1 - coldqidx;
}

#endif /* !defined(LISTQ) */

static struct clockpro_state clockpro;
static struct clockpro_scanstate {
	int ss_nscanned;
} scanstate;

/* ---------------------------------------- */

static void
pageq_init(pageq_t *q)
{

	TAILQ_INIT(&q->q_q);
	q->q_len = 0;
}

static int
pageq_len(const pageq_t *q)
{

	return q->q_len;
}

static struct vm_page *
pageq_first(const pageq_t *q)
{

	return TAILQ_FIRST(&q->q_q);
}

static void
pageq_insert_tail(pageq_t *q, struct vm_page *pg)
{

	TAILQ_INSERT_TAIL(&q->q_q, pg, pageq.queue);
	q->q_len++;
}

#if defined(LISTQ)
static void
pageq_insert_head(pageq_t *q, struct vm_page *pg)
{

	TAILQ_INSERT_HEAD(&q->q_q, pg, pageq.queue);
	q->q_len++;
}
#endif

static void
pageq_remove(pageq_t *q, struct vm_page *pg)
{

#if 1
	KASSERT(clockpro_queue(&clockpro, clockpro_getq(pg)) == q);
#endif
	KASSERT(q->q_len > 0);
	TAILQ_REMOVE(&q->q_q, pg, pageq.queue);
	q->q_len--;
}

static struct vm_page *
pageq_remove_head(pageq_t *q)
{
	struct vm_page *pg;

	pg = TAILQ_FIRST(&q->q_q);
	if (pg == NULL) {
		KASSERT(q->q_len == 0);
		return NULL;
	}
	pageq_remove(q, pg);
	return pg;
}

/* ---------------------------------------- */

static void
clockpro_insert_tail(struct clockpro_state *s, int qidx, struct vm_page *pg)
{
	pageq_t *q = clockpro_queue(s, qidx);

	clockpro_setq(pg, qidx);
	pageq_insert_tail(q, pg);
}

#if defined(LISTQ)
static void
clockpro_insert_head(struct clockpro_state *s, int qidx, struct vm_page *pg)
{
	pageq_t *q = clockpro_queue(s, qidx);

	clockpro_setq(pg, qidx);
	pageq_insert_head(q, pg);
}

#endif
/* ---------------------------------------- */

typedef uint32_t nonres_cookie_t;
#define	NONRES_COOKIE_INVAL	0

typedef uintptr_t objid_t;

/*
 * XXX maybe these hash functions need reconsideration,
 * given that hash distribution is critical here.
 */

static uint32_t
pageidentityhash1(objid_t obj, off_t idx)
{
	uint32_t hash = HASH32_BUF_INIT;

#if 1
	hash = hash32_buf(&idx, sizeof(idx), hash);
	hash = hash32_buf(&obj, sizeof(obj), hash);
#else
	hash = hash32_buf(&obj, sizeof(obj), hash);
	hash = hash32_buf(&idx, sizeof(idx), hash);
#endif
	return hash;
}

static uint32_t
pageidentityhash2(objid_t obj, off_t idx)
{
	uint32_t hash = HASH32_BUF_INIT;

	hash = hash32_buf(&obj, sizeof(obj), hash);
	hash = hash32_buf(&idx, sizeof(idx), hash);
	return hash;
}

static nonres_cookie_t
calccookie(objid_t obj, off_t idx)
{
	uint32_t hash = pageidentityhash2(obj, idx);
	nonres_cookie_t cookie = hash;

	if (__predict_false(cookie == NONRES_COOKIE_INVAL)) {
		cookie++; /* XXX */
	}
	return cookie;
}

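/*
 * non-resident pages are remembered as 32-bit cookies packed into
 * fixed-size hash buckets.  each bucket is a small clock of its own:
 * b->cur is the bucket's hand and b->cycle records the value of the
 * global cycle_target at the last rotation.  (with 32bit ints and
 * 32bit cookies, 14 entries make a bucket 64 bytes; presumably the
 * value was chosen to match a cache line.)
 */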
#define	BUCKETSIZE	14
struct bucket {
	int cycle;
	int cur;
	nonres_cookie_t pages[BUCKETSIZE];
};
static int cycle_target;
static int cycle_target_frac;

static struct bucket static_bucket;
static struct bucket *buckets = &static_bucket;
static size_t hashsize = 1;

static int coldadj;
#define	COLDTARGET_ADJ(d)	coldadj += (d)

#if defined(PDSIM)

static void *
clockpro_hashalloc(int n)
{
	size_t allocsz = sizeof(*buckets) * n;

	return malloc(allocsz);
}

static void
clockpro_hashfree(void *p, int n)
{

	free(p);
}

#else /* defined(PDSIM) */

static void *
clockpro_hashalloc(int n)
{
	size_t allocsz = round_page(sizeof(*buckets) * n);

	return (void *)uvm_km_alloc(kernel_map, allocsz, 0, UVM_KMF_WIRED);
}

static void
clockpro_hashfree(void *p, int n)
{
	size_t allocsz = round_page(sizeof(*buckets) * n);

	uvm_km_free(kernel_map, (vaddr_t)p, allocsz, UVM_KMF_WIRED);
}

#endif /* defined(PDSIM) */

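/*
 * (re)size the non-resident hash to cover n pages and install it,
 * freeing the old hash.  clockpro_reinit() calls this with
 * uvmexp.npages, so the hash scales with the amount of managed
 * memory.  note that the old non-resident history is discarded
 * rather than rehashed.
 */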
static void
clockpro_hashinit(uint64_t n)
{
	struct bucket *newbuckets;
	struct bucket *oldbuckets;
	size_t sz;
	size_t oldsz;
	int i;

	sz = howmany(n, BUCKETSIZE);
	sz *= clockpro_hashfactor;
	newbuckets = clockpro_hashalloc(sz);
	if (newbuckets == NULL) {
		panic("%s: allocation failure", __func__);
	}
	for (i = 0; i < sz; i++) {
		struct bucket *b = &newbuckets[i];
		int j;

		b->cycle = cycle_target;
		b->cur = 0;
		for (j = 0; j < BUCKETSIZE; j++) {
			b->pages[j] = NONRES_COOKIE_INVAL;
		}
	}
	/* XXX lock */
	oldbuckets = buckets;
	oldsz = hashsize;
	buckets = newbuckets;
	hashsize = sz;
	/* XXX unlock */
	if (oldbuckets != &static_bucket) {
		clockpro_hashfree(oldbuckets, oldsz);
	}
}

static struct bucket *
nonresident_getbucket(objid_t obj, off_t idx)
{
	uint32_t hash;

	hash = pageidentityhash1(obj, idx);
	return &buckets[hash % hashsize];
}

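/*
 * bring a bucket up to date with the global clock (cycle_target),
 * invalidating any cookie the bucket's hand passes over; such entries
 * have outlived their test period.  a lag of 2 * BUCKETSIZE or more
 * is first folded down, since one full turn of the hand already
 * empties the bucket.
 */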
static void
nonresident_rotate(struct bucket *b)
{
	const int target = cycle_target;
	const int cycle = b->cycle;
	int cur;
	int todo;

	todo = target - cycle;
	if (todo >= BUCKETSIZE * 2) {
		todo = (todo % BUCKETSIZE) + BUCKETSIZE;
	}
	cur = b->cur;
	while (todo > 0) {
		if (b->pages[cur] != NONRES_COOKIE_INVAL) {
			PDPOL_EVCNT_INCR(nreshandhot);
			COLDTARGET_ADJ(-1);
		}
		b->pages[cur] = NONRES_COOKIE_INVAL;
		cur++;
		if (cur == BUCKETSIZE) {
			cur = 0;
		}
		todo--;
	}
	b->cycle = target;
	b->cur = cur;
}

static bool
nonresident_lookupremove(objid_t obj, off_t idx)
{
	struct bucket *b = nonresident_getbucket(obj, idx);
	nonres_cookie_t cookie = calccookie(obj, idx);
	int i;

	nonresident_rotate(b);
	for (i = 0; i < BUCKETSIZE; i++) {
		if (b->pages[i] == cookie) {
			b->pages[i] = NONRES_COOKIE_INVAL;
			return true;
		}
	}
	return false;
}

static objid_t
pageobj(struct vm_page *pg)
{
	const void *obj;

	/*
	 * XXX the object pointer is often freed and reused for an
	 * unrelated object.  for vnodes, it would be better to use
	 * something like a hash of fsid/fileid/generation.
	 */

	obj = pg->uobject;
	if (obj == NULL) {
		obj = pg->uanon;
		KASSERT(obj != NULL);
	}
	return (objid_t)obj;
}

static off_t
pageidx(struct vm_page *pg)
{

	KASSERT((pg->offset & PAGE_MASK) == 0);
	return pg->offset >> PAGE_SHIFT;
}

static bool
nonresident_pagelookupremove(struct vm_page *pg)
{
	bool found = nonresident_lookupremove(pageobj(pg), pageidx(pg));

	if (pg->uobject) {
		PDPOL_EVCNT_INCR(nreslookupobj);
	} else {
		PDPOL_EVCNT_INCR(nreslookupanon);
	}
	if (found) {
		if (pg->uobject) {
			PDPOL_EVCNT_INCR(nresfoundobj);
		} else {
			PDPOL_EVCNT_INCR(nresfoundanon);
		}
	}
	return found;
}

static void
nonresident_pagerecord(struct vm_page *pg)
{
	objid_t obj = pageobj(pg);
	off_t idx = pageidx(pg);
	struct bucket *b = nonresident_getbucket(obj, idx);
	nonres_cookie_t cookie = calccookie(obj, idx);

#if defined(DEBUG)
	int i;

	for (i = 0; i < BUCKETSIZE; i++) {
		if (b->pages[i] == cookie) {
			PDPOL_EVCNT_INCR(nresconflict);
		}
	}
#endif /* defined(DEBUG) */

	if (pg->uobject) {
		PDPOL_EVCNT_INCR(nresrecordobj);
	} else {
		PDPOL_EVCNT_INCR(nresrecordanon);
	}
	nonresident_rotate(b);
	if (b->pages[b->cur] != NONRES_COOKIE_INVAL) {
		PDPOL_EVCNT_INCR(nresoverwritten);
		COLDTARGET_ADJ(-1);
	}
	b->pages[b->cur] = cookie;
	b->cur = (b->cur + 1) % BUCKETSIZE;
}

/* ---------------------------------------- */

#if defined(CLOCKPRO_DEBUG)
static void
check_sanity(void)
{
}
#else /* defined(CLOCKPRO_DEBUG) */
#define	check_sanity()	/* nothing */
#endif /* defined(CLOCKPRO_DEBUG) */

static void
clockpro_reinit(void)
{

	clockpro_hashinit(uvmexp.npages);
}

static void
clockpro_init(void)
{
	struct clockpro_state *s = &clockpro;
	int i;

	for (i = 0; i < CLOCKPRO_NQUEUE; i++) {
		pageq_init(&s->s_q[i]);
	}
	s->s_newqlenmax = 1;
	s->s_coldtarget = 1;
	uvm_pctparam_init(&s->s_coldtargetpct, CLOCKPRO_COLDPCT, NULL);
}

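/*
 * recompute s_coldtarget and the new-queue bound.  without ADAPTIVE,
 * the cold target is simply a percentage (coldtargetpct, default
 * CLOCKPRO_COLDPCT) of the pages under the policy; with ADAPTIVE it
 * is steered by coldadj, which the hands adjust as test periods
 * succeed or expire.
 */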
static void
clockpro_tune(void)
{
	struct clockpro_state *s = &clockpro;
	int coldtarget;

#if defined(ADAPTIVE)
	int coldmax = s->s_npages * CLOCKPRO_COLDPCTMAX / 100;
	int coldmin = 1;

	coldtarget = s->s_coldtarget;
	if (coldtarget + coldadj < coldmin) {
		coldadj = coldmin - coldtarget;
	} else if (coldtarget + coldadj > coldmax) {
		coldadj = coldmax - coldtarget;
	}
	coldtarget += coldadj;
#else /* defined(ADAPTIVE) */
	coldtarget = UVM_PCTPARAM_APPLY(&s->s_coldtargetpct, s->s_npages);
	if (coldtarget < 1) {
		coldtarget = 1;
	}
#endif /* defined(ADAPTIVE) */

	s->s_coldtarget = coldtarget;
	s->s_newqlenmax = coldtarget / 4;
	if (s->s_newqlenmax < CLOCKPRO_NEWQMIN) {
		s->s_newqlenmax = CLOCKPRO_NEWQMIN;
	}
}

static void
clockpro_movereferencebit(struct vm_page *pg)
{
	bool referenced;

	referenced = pmap_clear_reference(pg);
	if (referenced) {
		pg->pqflags |= PQ_REFERENCED;
	}
}

static void
clockpro_clearreferencebit(struct vm_page *pg)
{

	clockpro_movereferencebit(pg);
	pg->pqflags &= ~PQ_REFERENCED;
}

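/*
 * shrink the new queue to "len" pages, clearing the initial-reference
 * state of the overflow and moving it to the cold queue's tail.
 * the new queue keeps freshly enqueued pages out of the hands' reach
 * for a while; see the comment in clockpro_pageenqueue() below.
 */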
static void
clockpro___newqrotate(int len)
{
	struct clockpro_state * const s = &clockpro;
	pageq_t * const newq = clockpro_queue(s, CLOCKPRO_NEWQ);
	struct vm_page *pg;

	while (pageq_len(newq) > len) {
		pg = pageq_remove_head(newq);
		KASSERT(pg != NULL);
		KASSERT(clockpro_getq(pg) == CLOCKPRO_NEWQ);
		if ((pg->pqflags & PQ_INITIALREF) != 0) {
			clockpro_clearreferencebit(pg);
			pg->pqflags &= ~PQ_INITIALREF;
		}
		/* place at the list head */
		clockpro_insert_tail(s, CLOCKPRO_COLDQ, pg);
	}
}

static void
clockpro_newqrotate(void)
{
	struct clockpro_state * const s = &clockpro;

	check_sanity();
	clockpro___newqrotate(s->s_newqlenmax);
	check_sanity();
}

static void
clockpro_newqflush(int n)
{

	check_sanity();
	clockpro___newqrotate(n);
	check_sanity();
}

static void
clockpro_newqflushone(void)
{
	struct clockpro_state * const s = &clockpro;

	clockpro_newqflush(
	    MAX(pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)) - 1, 0));
}

/*
 * our "tail" is called "list-head" in the paper.
 */

static void
clockpro___enqueuetail(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;

	KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);

	check_sanity();
#if !defined(USEONCE2)
	clockpro_insert_tail(s, CLOCKPRO_NEWQ, pg);
	clockpro_newqrotate();
#else /* !defined(USEONCE2) */
#if defined(LISTQ)
	KASSERT((pg->pqflags & PQ_REFERENCED) == 0);
#endif /* defined(LISTQ) */
	clockpro_insert_tail(s, CLOCKPRO_COLDQ, pg);
#endif /* !defined(USEONCE2) */
	check_sanity();
}

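/*
 * take a page under the policy.  a page whose identity is still in
 * the non-resident hash was evicted during its test period and has
 * been brought back quickly, so it enters hot; otherwise it enters
 * cold, starting a fresh test period.  speculatively enqueued pages
 * (read-ahead) always enter cold, so that a wrong guess stays cheap
 * to reclaim.
 */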
static void
clockpro_pageenqueue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	bool hot;
	bool speculative = (pg->pqflags & PQ_SPECULATIVE) != 0; /* XXX */

	KASSERT((~pg->pqflags & (PQ_INITIALREF|PQ_SPECULATIVE)) != 0);
	KASSERT(mutex_owned(&uvm_pageqlock));
	check_sanity();
	KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);
	s->s_npages++;
	pg->pqflags &= ~(PQ_HOT|PQ_TEST);
	if (speculative) {
		hot = false;
		PDPOL_EVCNT_INCR(speculativeenqueue);
	} else {
		hot = nonresident_pagelookupremove(pg);
		if (hot) {
			COLDTARGET_ADJ(1);
		}
	}

	/*
	 * consider an mmap'ed file:
	 *
	 * - read-ahead enqueues a page.
	 *
	 * - on the following read-ahead hit, the fault handler activates it.
	 *
	 * - finally, the userland code which caused the above fault
	 *   actually accesses the page, which sets its reference bit.
	 *
	 * we want to count the above as a single access, rather than
	 * three accesses with short reuse distances.
	 */

#if defined(USEONCE2)
	pg->pqflags &= ~PQ_INITIALREF;
	if (hot) {
		pg->pqflags |= PQ_TEST;
	}
	s->s_ncold++;
	clockpro_clearreferencebit(pg);
	clockpro___enqueuetail(pg);
#else /* defined(USEONCE2) */
	if (speculative) {
		s->s_ncold++;
	} else if (hot) {
		pg->pqflags |= PQ_HOT;
	} else {
		pg->pqflags |= PQ_TEST;
		s->s_ncold++;
	}
	clockpro___enqueuetail(pg);
#endif /* defined(USEONCE2) */
	KASSERT(s->s_ncold <= s->s_npages);
}

static pageq_t *
clockpro_pagequeue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	int qidx;

	qidx = clockpro_getq(pg);
	KASSERT(qidx != CLOCKPRO_NOQUEUE);

	return clockpro_queue(s, qidx);
}

static void
clockpro_pagedequeue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	pageq_t *q;

	KASSERT(s->s_npages > 0);
	check_sanity();
	q = clockpro_pagequeue(pg);
	pageq_remove(q, pg);
	check_sanity();
	clockpro_setq(pg, CLOCKPRO_NOQUEUE);
	if ((pg->pqflags & PQ_HOT) == 0) {
		KASSERT(s->s_ncold > 0);
		s->s_ncold--;
	}
	KASSERT(s->s_npages > 0);
	s->s_npages--;
	check_sanity();
}

static void
clockpro_pagerequeue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	int qidx;

	qidx = clockpro_getq(pg);
	KASSERT(qidx == CLOCKPRO_HOTQ || qidx == CLOCKPRO_COLDQ);
	pageq_remove(clockpro_queue(s, qidx), pg);
	check_sanity();
	clockpro_setq(pg, CLOCKPRO_NOQUEUE);

	clockpro___enqueuetail(pg);
}

static void
handhot_endtest(struct vm_page *pg)
{

	KASSERT((pg->pqflags & PQ_HOT) == 0);
	if ((pg->pqflags & PQ_TEST) != 0) {
		PDPOL_EVCNT_INCR(hhotcoldtest);
		COLDTARGET_ADJ(-1);
		pg->pqflags &= ~PQ_TEST;
	} else {
		PDPOL_EVCNT_INCR(hhotcold);
	}
}

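/*
 * the hot hand: run until there are enough cold pages (s_ncold >=
 * s_coldtarget), turning unreferenced hot pages into cold ones and
 * rotating referenced ones back.  as a side effect it advances
 * cycle_target, the global clock used to age the non-resident hash,
 * at a rate of one full bucket rotation per sweep of the hot queue.
 */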
static void
handhot_advance(void)
{
	struct clockpro_state * const s = &clockpro;
	struct vm_page *pg;
	pageq_t *hotq;
	int hotqlen;

	clockpro_tune();

	dump("hot called");
	if (s->s_ncold >= s->s_coldtarget) {
		return;
	}
	hotq = clockpro_queue(s, CLOCKPRO_HOTQ);
again:
	pg = pageq_first(hotq);
	if (pg == NULL) {
		DPRINTF("%s: HHOT TAKEOVER\n", __func__);
		dump("hhottakeover");
		PDPOL_EVCNT_INCR(hhottakeover);
#if defined(LISTQ)
		while (/* CONSTCOND */ 1) {
			pageq_t *coldq = clockpro_queue(s, CLOCKPRO_COLDQ);

			pg = pageq_first(coldq);
			if (pg == NULL) {
				clockpro_newqflushone();
				pg = pageq_first(coldq);
				if (pg == NULL) {
					WARN("hhot: no page?\n");
					return;
				}
			}
			KASSERT(clockpro_pagequeue(pg) == coldq);
			pageq_remove(coldq, pg);
			check_sanity();
			if ((pg->pqflags & PQ_HOT) == 0) {
				handhot_endtest(pg);
				clockpro_insert_tail(s, CLOCKPRO_LISTQ, pg);
			} else {
				clockpro_insert_head(s, CLOCKPRO_HOTQ, pg);
				break;
			}
		}
#else /* defined(LISTQ) */
		clockpro_newqflush(0); /* XXX XXX */
		clockpro_switchqueue();
		hotq = clockpro_queue(s, CLOCKPRO_HOTQ);
		goto again;
#endif /* defined(LISTQ) */
	}

	KASSERT(clockpro_pagequeue(pg) == hotq);

	/*
	 * terminate the test period of non-resident pages by cycling
	 * the hash buckets.
	 */

	cycle_target_frac += BUCKETSIZE;
	hotqlen = pageq_len(hotq);
	while (cycle_target_frac >= hotqlen) {
		cycle_target++;
		cycle_target_frac -= hotqlen;
	}

	if ((pg->pqflags & PQ_HOT) == 0) {
#if defined(LISTQ)
		panic("cold page in hotq: %p", pg);
#else /* defined(LISTQ) */
		handhot_endtest(pg);
		goto next;
#endif /* defined(LISTQ) */
	}
	KASSERT((pg->pqflags & PQ_TEST) == 0);
	KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
	KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);

	/*
	 * once we've met our target, stop at a hot page so that no
	 * cold page in its test period has larger recency than any
	 * hot page.
	 */

	if (s->s_ncold >= s->s_coldtarget) {
		dump("hot done");
		return;
	}
	clockpro_movereferencebit(pg);
	if ((pg->pqflags & PQ_REFERENCED) == 0) {
		PDPOL_EVCNT_INCR(hhotunref);
		uvmexp.pddeact++;
		pg->pqflags &= ~PQ_HOT;
		clockpro.s_ncold++;
		KASSERT(s->s_ncold <= s->s_npages);
	} else {
		PDPOL_EVCNT_INCR(hhotref);
	}
	pg->pqflags &= ~PQ_REFERENCED;
#if !defined(LISTQ)
next:
#endif /* !defined(LISTQ) */
	clockpro_pagerequeue(pg);
	dump("hot");
	goto again;
}

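/*
 * the cold hand: return the next eviction candidate.  an unreferenced
 * cold page is the victim; one which is still in its test period is
 * recorded in the non-resident hash first.  a referenced cold page is
 * rotated instead, starting a test period (PQ_REFERENCED) or, if it
 * was referenced within its test period (PQ_REFERENCED|PQ_TEST),
 * turning hot.
 */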
static struct vm_page *
handcold_advance(void)
{
	struct clockpro_state * const s = &clockpro;
	struct vm_page *pg;

	for (;;) {
#if defined(LISTQ)
		pageq_t *listq = clockpro_queue(s, CLOCKPRO_LISTQ);
#endif /* defined(LISTQ) */
		pageq_t *coldq;

		clockpro_newqrotate();
		handhot_advance();
#if defined(LISTQ)
		pg = pageq_first(listq);
		if (pg != NULL) {
			KASSERT(clockpro_getq(pg) == CLOCKPRO_LISTQ);
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			KASSERT((pg->pqflags & PQ_HOT) == 0);
			KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
			pageq_remove(listq, pg);
			check_sanity();
			clockpro_insert_head(s, CLOCKPRO_COLDQ, pg); /* XXX */
			goto gotcold;
		}
#endif /* defined(LISTQ) */
		check_sanity();
		coldq = clockpro_queue(s, CLOCKPRO_COLDQ);
		pg = pageq_first(coldq);
		if (pg == NULL) {
			clockpro_newqflushone();
			pg = pageq_first(coldq);
		}
		if (pg == NULL) {
			DPRINTF("%s: HCOLD TAKEOVER\n", __func__);
			dump("hcoldtakeover");
			PDPOL_EVCNT_INCR(hcoldtakeover);
			KASSERT(
			    pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)) == 0);
#if defined(LISTQ)
			KASSERT(
			    pageq_len(clockpro_queue(s, CLOCKPRO_HOTQ)) == 0);
#else /* defined(LISTQ) */
			clockpro_switchqueue();
			coldq = clockpro_queue(s, CLOCKPRO_COLDQ);
			pg = pageq_first(coldq);
#endif /* defined(LISTQ) */
		}
		if (pg == NULL) {
			WARN("hcold: no page?\n");
			return NULL;
		}
		KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
		if ((pg->pqflags & PQ_HOT) != 0) {
			PDPOL_EVCNT_INCR(hcoldhot);
			pageq_remove(coldq, pg);
			clockpro_insert_tail(s, CLOCKPRO_HOTQ, pg);
			check_sanity();
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			uvmexp.pdscans++;
			continue;
		}
#if defined(LISTQ)
gotcold:
#endif /* defined(LISTQ) */
		KASSERT((pg->pqflags & PQ_HOT) == 0);
		uvmexp.pdscans++;
		clockpro_movereferencebit(pg);
		if ((pg->pqflags & PQ_SPECULATIVE) != 0) {
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			if ((pg->pqflags & PQ_REFERENCED) != 0) {
				PDPOL_EVCNT_INCR(speculativehit2);
				pg->pqflags &= ~(PQ_SPECULATIVE|PQ_REFERENCED);
				clockpro_pagedequeue(pg);
				clockpro_pageenqueue(pg);
				continue;
			}
			PDPOL_EVCNT_INCR(speculativemiss);
		}
		switch (pg->pqflags & (PQ_REFERENCED|PQ_TEST)) {
		case PQ_TEST:
			PDPOL_EVCNT_INCR(hcoldunreftest);
			nonresident_pagerecord(pg);
			goto gotit;
		case 0:
			PDPOL_EVCNT_INCR(hcoldunref);
gotit:
			KASSERT(s->s_ncold > 0);
			clockpro_pagerequeue(pg); /* XXX */
			dump("cold done");
			/* XXX "pg" is still in queue */
			handhot_advance();
			goto done;

		case PQ_REFERENCED|PQ_TEST:
			PDPOL_EVCNT_INCR(hcoldreftest);
			s->s_ncold--;
			COLDTARGET_ADJ(1);
			pg->pqflags |= PQ_HOT;
			pg->pqflags &= ~PQ_TEST;
			break;

		case PQ_REFERENCED:
			PDPOL_EVCNT_INCR(hcoldref);
			pg->pqflags |= PQ_TEST;
			break;
		}
		pg->pqflags &= ~PQ_REFERENCED;
		uvmexp.pdreact++;
		/* move to the list head */
		clockpro_pagerequeue(pg);
		dump("cold");
	}
done:;
	return pg;
}

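/*
 * uvmpdpol_*: the interface through which the rest of uvm drives this
 * policy.  callers are expected to hold uvm_pageqlock; cf. the
 * KASSERT in clockpro_pageenqueue().
 */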
void
uvmpdpol_pageactivate(struct vm_page *pg)
{

	if (!uvmpdpol_pageisqueued_p(pg)) {
		KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);
		pg->pqflags |= PQ_INITIALREF;
		clockpro_pageenqueue(pg);
	} else if ((pg->pqflags & PQ_SPECULATIVE)) {
		PDPOL_EVCNT_INCR(speculativehit1);
		pg->pqflags &= ~PQ_SPECULATIVE;
		pg->pqflags |= PQ_INITIALREF;
		clockpro_pagedequeue(pg);
		clockpro_pageenqueue(pg);
	}
	pg->pqflags |= PQ_REFERENCED;
}

void
uvmpdpol_pagedeactivate(struct vm_page *pg)
{

	clockpro_clearreferencebit(pg);
}

void
uvmpdpol_pagedequeue(struct vm_page *pg)
{

	if (!uvmpdpol_pageisqueued_p(pg)) {
		return;
	}
	clockpro_pagedequeue(pg);
	pg->pqflags &= ~(PQ_INITIALREF|PQ_SPECULATIVE);
}

void
uvmpdpol_pageenqueue(struct vm_page *pg)
{

#if 1
	if (uvmpdpol_pageisqueued_p(pg)) {
		return;
	}
	clockpro_clearreferencebit(pg);
	pg->pqflags |= PQ_SPECULATIVE;
	clockpro_pageenqueue(pg);
#else
	uvmpdpol_pageactivate(pg);
#endif
}

void
uvmpdpol_anfree(struct vm_anon *an)
{

	KASSERT(an->an_page == NULL);
	if (nonresident_lookupremove((objid_t)an, 0)) {
		PDPOL_EVCNT_INCR(nresanonfree);
	}
}

void
uvmpdpol_init(void)
{

	clockpro_init();
}

void
uvmpdpol_reinit(void)
{

	clockpro_reinit();
}

void
uvmpdpol_estimatepageable(int *active, int *inactive)
{
	struct clockpro_state * const s = &clockpro;

	if (active) {
		*active = s->s_npages - s->s_ncold;
	}
	if (inactive) {
		*inactive = s->s_ncold;
	}
}

bool
uvmpdpol_pageisqueued_p(struct vm_page *pg)
{

	return clockpro_getq(pg) != CLOCKPRO_NOQUEUE;
}

void
uvmpdpol_scaninit(void)
{
	struct clockpro_scanstate * const ss = &scanstate;

	ss->ss_nscanned = 0;
}

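/*
 * called repeatedly by the pagedaemon; each call advances the hands
 * and yields one victim page (or NULL).  ss_nscanned caps the work
 * at roughly one full pass over the pages under the policy.
 */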
struct vm_page *
uvmpdpol_selectvictim(void)
{
	struct clockpro_state * const s = &clockpro;
	struct clockpro_scanstate * const ss = &scanstate;
	struct vm_page *pg;

	if (ss->ss_nscanned > s->s_npages) {
		DPRINTF("scan too much\n");
		return NULL;
	}
	pg = handcold_advance();
	ss->ss_nscanned++;
	return pg;
}

static void
clockpro_dropswap(pageq_t *q, int *todo)
{
	struct vm_page *pg;

	TAILQ_FOREACH_REVERSE(pg, &q->q_q, pglist, pageq.queue) {
		if (*todo <= 0) {
			break;
		}
		if ((pg->pqflags & PQ_HOT) == 0) {
			continue;
		}
		if ((pg->pqflags & PQ_SWAPBACKED) == 0) {
			continue;
		}
		if (uvmpd_trydropswap(pg)) {
			(*todo)--;
		}
	}
}

void
uvmpdpol_balancequeue(int swap_shortage)
{
	struct clockpro_state * const s = &clockpro;
	int todo = swap_shortage;

	if (todo == 0) {
		return;
	}

	/*
	 * reclaim swap slots from hot pages
	 */

	DPRINTF("%s: swap_shortage=%d\n", __func__, swap_shortage);

	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_NEWQ), &todo);
	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_COLDQ), &todo);
	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_HOTQ), &todo);

	DPRINTF("%s: done=%d\n", __func__, swap_shortage - todo);
}

bool
uvmpdpol_needsscan_p(void)
{
	struct clockpro_state * const s = &clockpro;

	if (s->s_ncold < s->s_coldtarget) {
		return true;
	}
	return false;
}

void
uvmpdpol_tune(void)
{

	clockpro_tune();
}

#if !defined(PDSIM)

#include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */

void
uvmpdpol_sysctlsetup(void)
{
#if !defined(ADAPTIVE)
	struct clockpro_state * const s = &clockpro;

	uvm_pctparam_createsysctlnode(&s->s_coldtargetpct, "coldtargetpct",
	    SYSCTL_DESCR("Percentage of the queue to keep as the cold target"));
#endif /* !defined(ADAPTIVE) */
}

#endif /* !defined(PDSIM) */

#if defined(DDB)

void clockpro_dump(void);

void
clockpro_dump(void)
{
	struct clockpro_state * const s = &clockpro;

	struct vm_page *pg;
	int ncold, nhot, ntest, nspeculative, ninitialref, nref;
	int newqlen, coldqlen, hotqlen, listqlen;

	newqlen = coldqlen = hotqlen = listqlen = 0;
	printf("npages=%d, ncold=%d, coldtarget=%d, newqlenmax=%d\n",
	    s->s_npages, s->s_ncold, s->s_coldtarget, s->s_newqlenmax);

#define	INITCOUNT()	\
	ncold = nhot = ntest = nspeculative = ninitialref = nref = 0

#define	COUNT(pg)	\
	if ((pg->pqflags & PQ_HOT) != 0) { \
		nhot++; \
	} else { \
		ncold++; \
		if ((pg->pqflags & PQ_TEST) != 0) { \
			ntest++; \
		} \
		if ((pg->pqflags & PQ_SPECULATIVE) != 0) { \
			nspeculative++; \
		} \
		if ((pg->pqflags & PQ_INITIALREF) != 0) { \
			ninitialref++; \
		} else if ((pg->pqflags & PQ_REFERENCED) != 0 || \
		    pmap_is_referenced(pg)) { \
			nref++; \
		} \
	}

#define	PRINTCOUNT(name)	\
	printf("%s hot=%d, cold=%d, test=%d, speculative=%d, initialref=%d, " \
	    "nref=%d\n", \
	    (name), nhot, ncold, ntest, nspeculative, ninitialref, nref)

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_NEWQ)->q_q, pageq.queue) {
		if (clockpro_getq(pg) != CLOCKPRO_NEWQ) {
			printf("newq corrupt %p\n", pg);
		}
		COUNT(pg)
		newqlen++;
	}
	PRINTCOUNT("newq");

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_COLDQ)->q_q, pageq.queue) {
		if (clockpro_getq(pg) != CLOCKPRO_COLDQ) {
			printf("coldq corrupt %p\n", pg);
		}
		COUNT(pg)
		coldqlen++;
	}
	PRINTCOUNT("coldq");

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_HOTQ)->q_q, pageq.queue) {
		if (clockpro_getq(pg) != CLOCKPRO_HOTQ) {
			printf("hotq corrupt %p\n", pg);
		}
#if defined(LISTQ)
		if ((pg->pqflags & PQ_HOT) == 0) {
			printf("cold page in hotq: %p\n", pg);
		}
#endif /* defined(LISTQ) */
		COUNT(pg)
		hotqlen++;
	}
	PRINTCOUNT("hotq");

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_LISTQ)->q_q, pageq.queue) {
#if !defined(LISTQ)
		printf("listq %p\n", pg);
#endif /* !defined(LISTQ) */
		if (clockpro_getq(pg) != CLOCKPRO_LISTQ) {
			printf("listq corrupt %p\n", pg);
		}
		COUNT(pg)
		listqlen++;
	}
	PRINTCOUNT("listq");

	printf("newqlen=%d/%d, coldqlen=%d/%d, hotqlen=%d/%d, listqlen=%d/%d\n",
	    newqlen, pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)),
	    coldqlen, pageq_len(clockpro_queue(s, CLOCKPRO_COLDQ)),
	    hotqlen, pageq_len(clockpro_queue(s, CLOCKPRO_HOTQ)),
	    listqlen, pageq_len(clockpro_queue(s, CLOCKPRO_LISTQ)));
}

#endif /* defined(DDB) */

#if defined(PDSIM)
#if defined(DEBUG)
static void
pdsim_dumpq(int qidx)
{
	struct clockpro_state * const s = &clockpro;
	pageq_t *q = clockpro_queue(s, qidx);
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &q->q_q, pageq.queue) {
		DPRINTF(" %" PRIu64 "%s%s%s%s%s%s",
		    pg->offset >> PAGE_SHIFT,
		    (pg->pqflags & PQ_HOT) ? "H" : "",
		    (pg->pqflags & PQ_TEST) ? "T" : "",
		    (pg->pqflags & PQ_REFERENCED) ? "R" : "",
		    pmap_is_referenced(pg) ? "r" : "",
		    (pg->pqflags & PQ_INITIALREF) ? "I" : "",
		    (pg->pqflags & PQ_SPECULATIVE) ? "S" : ""
		    );
	}
}
#endif /* defined(DEBUG) */

void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	struct clockpro_state * const s = &clockpro;

	DPRINTF("  %s L(", id);
	pdsim_dumpq(CLOCKPRO_LISTQ);
	DPRINTF(" ) H(");
	pdsim_dumpq(CLOCKPRO_HOTQ);
	DPRINTF(" ) C(");
	pdsim_dumpq(CLOCKPRO_COLDQ);
	DPRINTF(" ) N(");
	pdsim_dumpq(CLOCKPRO_NEWQ);
	DPRINTF(" ) ncold=%d/%d, coldadj=%d\n",
	    s->s_ncold, s->s_coldtarget, coldadj);
#endif /* defined(DEBUG) */
}
#endif /* defined(PDSIM) */