xref: /netbsd-src/sys/uvm/uvm_pdpolicy_clockpro.c (revision 06ddeb9f13adf7283ddbc0ff517ec4b59ab5f739)
1*06ddeb9fSandvar /*	$NetBSD: uvm_pdpolicy_clockpro.c,v 1.27 2022/04/12 20:27:56 andvar Exp $	*/
29d3e3eabSyamt 
39d3e3eabSyamt /*-
49d3e3eabSyamt  * Copyright (c)2005, 2006 YAMAMOTO Takashi,
59d3e3eabSyamt  * All rights reserved.
69d3e3eabSyamt  *
79d3e3eabSyamt  * Redistribution and use in source and binary forms, with or without
89d3e3eabSyamt  * modification, are permitted provided that the following conditions
99d3e3eabSyamt  * are met:
109d3e3eabSyamt  * 1. Redistributions of source code must retain the above copyright
119d3e3eabSyamt  *    notice, this list of conditions and the following disclaimer.
129d3e3eabSyamt  * 2. Redistributions in binary form must reproduce the above copyright
139d3e3eabSyamt  *    notice, this list of conditions and the following disclaimer in the
149d3e3eabSyamt  *    documentation and/or other materials provided with the distribution.
159d3e3eabSyamt  *
169d3e3eabSyamt  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
179d3e3eabSyamt  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
189d3e3eabSyamt  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
199d3e3eabSyamt  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
209d3e3eabSyamt  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
219d3e3eabSyamt  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
229d3e3eabSyamt  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
239d3e3eabSyamt  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
249d3e3eabSyamt  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
259d3e3eabSyamt  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
269d3e3eabSyamt  * SUCH DAMAGE.
279d3e3eabSyamt  */
289d3e3eabSyamt 
299d3e3eabSyamt /*
309d3e3eabSyamt  * CLOCK-Pro replacement policy:
31edf49f7cStsutsui  *	http://web.cse.ohio-state.edu/hpcs/WWW/HTML/publications/abs05-3.html
329d3e3eabSyamt  *
339d3e3eabSyamt  * approximation of the list of non-resident pages using hash:
349d3e3eabSyamt  *	http://linux-mm.org/ClockProApproximation
359d3e3eabSyamt  */
369d3e3eabSyamt 
379d3e3eabSyamt /* #define	CLOCKPRO_DEBUG */
389d3e3eabSyamt 
399d3e3eabSyamt #if defined(PDSIM)
409d3e3eabSyamt 
419d3e3eabSyamt #include "pdsim.h"
429d3e3eabSyamt 
439d3e3eabSyamt #else /* defined(PDSIM) */
449d3e3eabSyamt 
459d3e3eabSyamt #include <sys/cdefs.h>
46*06ddeb9fSandvar __KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clockpro.c,v 1.27 2022/04/12 20:27:56 andvar Exp $");
479d3e3eabSyamt 
489d3e3eabSyamt #include "opt_ddb.h"
499d3e3eabSyamt 
509d3e3eabSyamt #include <sys/param.h>
519d3e3eabSyamt #include <sys/proc.h>
529d3e3eabSyamt #include <sys/systm.h>
539d3e3eabSyamt #include <sys/kernel.h>
549d3e3eabSyamt #include <sys/hash.h>
559d3e3eabSyamt 
569d3e3eabSyamt #include <uvm/uvm.h>
5705d8362dSyamt #include <uvm/uvm_pdaemon.h>	/* for uvmpd_trylockowner */
589d3e3eabSyamt #include <uvm/uvm_pdpolicy.h>
599d3e3eabSyamt #include <uvm/uvm_pdpolicy_impl.h>
609d3e3eabSyamt 
619d3e3eabSyamt #if ((__STDC_VERSION__ - 0) >= 199901L)
629d3e3eabSyamt #define	DPRINTF(...)	/* nothing */
639d3e3eabSyamt #define	WARN(...)	printf(__VA_ARGS__)
649d3e3eabSyamt #else /* ((__STDC_VERSION__ - 0) >= 199901L) */
659d3e3eabSyamt #define	DPRINTF(a...)	/* nothing */	/* GCC */
669d3e3eabSyamt #define	WARN(a...)	printf(a)
679d3e3eabSyamt #endif /* ((__STDC_VERSION__ - 0) >= 199901L) */
689d3e3eabSyamt 
699d3e3eabSyamt #define	dump(a)		/* nothing */
709d3e3eabSyamt 
719d3e3eabSyamt #undef	USEONCE2
729d3e3eabSyamt #define	LISTQ
739d3e3eabSyamt #undef	ADAPTIVE
749d3e3eabSyamt 
759d3e3eabSyamt #endif /* defined(PDSIM) */
769d3e3eabSyamt 
779d3e3eabSyamt #if !defined(CLOCKPRO_COLDPCT)
789d3e3eabSyamt #define	CLOCKPRO_COLDPCT	10
799d3e3eabSyamt #endif /* !defined(CLOCKPRO_COLDPCT) */
809d3e3eabSyamt 
819d3e3eabSyamt #define	CLOCKPRO_COLDPCTMAX	90
829d3e3eabSyamt 
839d3e3eabSyamt #if !defined(CLOCKPRO_HASHFACTOR)
849d3e3eabSyamt #define	CLOCKPRO_HASHFACTOR	2
859d3e3eabSyamt #endif /* !defined(CLOCKPRO_HASHFACTOR) */
869d3e3eabSyamt 
879d3e3eabSyamt #define	CLOCKPRO_NEWQMIN	((1024 * 1024) >> PAGE_SHIFT)	/* XXX */
889d3e3eabSyamt 
899d3e3eabSyamt int clockpro_hashfactor = CLOCKPRO_HASHFACTOR;
909d3e3eabSyamt 
919d3e3eabSyamt PDPOL_EVCNT_DEFINE(nresrecordobj)
929d3e3eabSyamt PDPOL_EVCNT_DEFINE(nresrecordanon)
93603d434bSyamt PDPOL_EVCNT_DEFINE(nreslookupobj)
94603d434bSyamt PDPOL_EVCNT_DEFINE(nreslookupanon)
959d3e3eabSyamt PDPOL_EVCNT_DEFINE(nresfoundobj)
969d3e3eabSyamt PDPOL_EVCNT_DEFINE(nresfoundanon)
979d3e3eabSyamt PDPOL_EVCNT_DEFINE(nresanonfree)
989d3e3eabSyamt PDPOL_EVCNT_DEFINE(nresconflict)
999d3e3eabSyamt PDPOL_EVCNT_DEFINE(nresoverwritten)
1009d3e3eabSyamt PDPOL_EVCNT_DEFINE(nreshandhot)
1019d3e3eabSyamt 
1029d3e3eabSyamt PDPOL_EVCNT_DEFINE(hhottakeover)
1039d3e3eabSyamt PDPOL_EVCNT_DEFINE(hhotref)
1049d3e3eabSyamt PDPOL_EVCNT_DEFINE(hhotunref)
1059d3e3eabSyamt PDPOL_EVCNT_DEFINE(hhotcold)
1069d3e3eabSyamt PDPOL_EVCNT_DEFINE(hhotcoldtest)
1079d3e3eabSyamt 
1089d3e3eabSyamt PDPOL_EVCNT_DEFINE(hcoldtakeover)
1099d3e3eabSyamt PDPOL_EVCNT_DEFINE(hcoldref)
1109d3e3eabSyamt PDPOL_EVCNT_DEFINE(hcoldunref)
1119d3e3eabSyamt PDPOL_EVCNT_DEFINE(hcoldreftest)
1129d3e3eabSyamt PDPOL_EVCNT_DEFINE(hcoldunreftest)
1139d3e3eabSyamt PDPOL_EVCNT_DEFINE(hcoldunreftestspeculative)
1149d3e3eabSyamt PDPOL_EVCNT_DEFINE(hcoldhot)
1159d3e3eabSyamt 
1169d3e3eabSyamt PDPOL_EVCNT_DEFINE(speculativeenqueue)
1179d3e3eabSyamt PDPOL_EVCNT_DEFINE(speculativehit1)
1189d3e3eabSyamt PDPOL_EVCNT_DEFINE(speculativehit2)
1199d3e3eabSyamt PDPOL_EVCNT_DEFINE(speculativemiss)
1209d3e3eabSyamt 
12105d8362dSyamt PDPOL_EVCNT_DEFINE(locksuccess)
12205d8362dSyamt PDPOL_EVCNT_DEFINE(lockfail)
12305d8362dSyamt 
12494843b13Sad #define	PQ_REFERENCED	0x000000010
12594843b13Sad #define	PQ_HOT		0x000000020
12694843b13Sad #define	PQ_TEST		0x000000040
12794843b13Sad #define	PQ_INITIALREF	0x000000080
12894843b13Sad #define	PQ_QMASK	0x000000700
12994843b13Sad #define	PQ_QFACTOR	0x000000100
13094843b13Sad #define	PQ_SPECULATIVE	0x000000800
1319d3e3eabSyamt 
1329d3e3eabSyamt #define	CLOCKPRO_NOQUEUE	0
1339d3e3eabSyamt #define	CLOCKPRO_NEWQ		1	/* small queue to clear initial ref. */
1349d3e3eabSyamt #if defined(LISTQ)
1359d3e3eabSyamt #define	CLOCKPRO_COLDQ		2
1369d3e3eabSyamt #define	CLOCKPRO_HOTQ		3
1379d3e3eabSyamt #else /* defined(LISTQ) */
1389d3e3eabSyamt #define	CLOCKPRO_COLDQ		(2 + coldqidx)	/* XXX */
1399d3e3eabSyamt #define	CLOCKPRO_HOTQ		(3 - coldqidx)	/* XXX */
1409d3e3eabSyamt #endif /* defined(LISTQ) */
1419d3e3eabSyamt #define	CLOCKPRO_LISTQ		4
1429d3e3eabSyamt #define	CLOCKPRO_NQUEUE		4
1439d3e3eabSyamt 
14494843b13Sad static bool	uvmpdpol_pagerealize_locked(struct vm_page *);
14594843b13Sad 
1469d3e3eabSyamt static inline void
clockpro_setq(struct vm_page * pg,int qidx)1479d3e3eabSyamt clockpro_setq(struct vm_page *pg, int qidx)
1489d3e3eabSyamt {
1499d3e3eabSyamt 	KASSERT(qidx >= CLOCKPRO_NOQUEUE);
1509d3e3eabSyamt 	KASSERT(qidx <= CLOCKPRO_NQUEUE);
1519d3e3eabSyamt 
1529d3e3eabSyamt 	pg->pqflags = (pg->pqflags & ~PQ_QMASK) | (qidx * PQ_QFACTOR);
1539d3e3eabSyamt }
1549d3e3eabSyamt 
1559d3e3eabSyamt static inline int
clockpro_getq(struct vm_page * pg)1569d3e3eabSyamt clockpro_getq(struct vm_page *pg)
1579d3e3eabSyamt {
1589d3e3eabSyamt 	int qidx;
1599d3e3eabSyamt 
1609d3e3eabSyamt 	qidx = (pg->pqflags & PQ_QMASK) / PQ_QFACTOR;
1619d3e3eabSyamt 	KASSERT(qidx >= CLOCKPRO_NOQUEUE);
1629d3e3eabSyamt 	KASSERT(qidx <= CLOCKPRO_NQUEUE);
1639d3e3eabSyamt 	return qidx;
1649d3e3eabSyamt }
1659d3e3eabSyamt 
/*
 * pageq_t: a FIFO of pages with a cached length.
 */
typedef struct {
	struct pglist q_q;	/* the page list itself */
	int q_len;		/* number of pages currently on q_q */
} pageq_t;
1709d3e3eabSyamt 
/*
 * clockpro_state: global state of the CLOCK-Pro replacement policy.
 */
struct clockpro_state {
	kmutex_t lock;		/* policy lock; asserted held by the
				 * internal routines below */
	int s_npages;		/* number of pages under management */
	int s_coldtarget;	/* desired number of cold pages */
	int s_ncold;		/* current number of cold pages */

	int s_newqlenmax;	/* length limit for the NEWQ */
	pageq_t s_q[CLOCKPRO_NQUEUE];	/* the clock queues */

	struct uvm_pctparam s_coldtargetpct;	/* cold target as a
						 * percentage of s_npages */
};
1829d3e3eabSyamt 
1839d3e3eabSyamt static pageq_t *
clockpro_queue(struct clockpro_state * s,int qidx)1849d3e3eabSyamt clockpro_queue(struct clockpro_state *s, int qidx)
1859d3e3eabSyamt {
1869d3e3eabSyamt 
1879d3e3eabSyamt 	KASSERT(CLOCKPRO_NOQUEUE < qidx);
1889d3e3eabSyamt 	KASSERT(qidx <= CLOCKPRO_NQUEUE);
1899d3e3eabSyamt 
1909d3e3eabSyamt 	return &s->s_q[qidx - 1];
1919d3e3eabSyamt }
1929d3e3eabSyamt 
1939d3e3eabSyamt #if !defined(LISTQ)
1949d3e3eabSyamt 
/* selects which of the two rotating queues currently acts as cold (0 or 1) */
static int coldqidx;

/*
 * clockpro_switchqueue: exchange the roles of the hot and cold queues.
 */
static void
clockpro_switchqueue(void)
{

	coldqidx ^= 1;
}
2039d3e3eabSyamt 
2049d3e3eabSyamt #endif /* !defined(LISTQ) */
2059d3e3eabSyamt 
/* the single global policy instance; cache-line aligned */
static struct clockpro_state clockpro __cacheline_aligned;
/* progress of the current scan pass */
static struct clockpro_scanstate {
	int ss_nscanned;	/* pages examined so far in this pass */
} scanstate;
2109d3e3eabSyamt 
2119d3e3eabSyamt /* ---------------------------------------- */
2129d3e3eabSyamt 
2139d3e3eabSyamt static void
pageq_init(pageq_t * q)2149d3e3eabSyamt pageq_init(pageq_t *q)
2159d3e3eabSyamt {
2169d3e3eabSyamt 
2179d3e3eabSyamt 	TAILQ_INIT(&q->q_q);
2189d3e3eabSyamt 	q->q_len = 0;
2199d3e3eabSyamt }
2209d3e3eabSyamt 
2219d3e3eabSyamt static int
pageq_len(const pageq_t * q)2229d3e3eabSyamt pageq_len(const pageq_t *q)
2239d3e3eabSyamt {
2249d3e3eabSyamt 
2259d3e3eabSyamt 	return q->q_len;
2269d3e3eabSyamt }
2279d3e3eabSyamt 
2289d3e3eabSyamt static struct vm_page *
pageq_first(const pageq_t * q)2299d3e3eabSyamt pageq_first(const pageq_t *q)
2309d3e3eabSyamt {
2319d3e3eabSyamt 
2329d3e3eabSyamt 	return TAILQ_FIRST(&q->q_q);
2339d3e3eabSyamt }
2349d3e3eabSyamt 
2359d3e3eabSyamt static void
pageq_insert_tail(pageq_t * q,struct vm_page * pg)2369d3e3eabSyamt pageq_insert_tail(pageq_t *q, struct vm_page *pg)
2379d3e3eabSyamt {
2389d3e3eabSyamt 
2396c2dc768Sad 	TAILQ_INSERT_TAIL(&q->q_q, pg, pdqueue);
2409d3e3eabSyamt 	q->q_len++;
2419d3e3eabSyamt }
2429d3e3eabSyamt 
#if defined(LISTQ)
/*
 * pageq_insert_head: prepend pg to the queue and bump the length.
 */
static void
pageq_insert_head(pageq_t *q, struct vm_page *pg)
{

	q->q_len++;
	TAILQ_INSERT_HEAD(&q->q_q, pg, pdqueue);
}
#endif
2529d3e3eabSyamt 
/*
 * pageq_remove: unlink pg from queue q and update the cached length.
 * pg must currently be on q.
 */
static void
pageq_remove(pageq_t *q, struct vm_page *pg)
{

#if 1
	/* cross-check: the queue recorded in pg->pqflags must be q. */
	KASSERT(clockpro_queue(&clockpro, clockpro_getq(pg)) == q);
#endif
	KASSERT(q->q_len > 0);
	TAILQ_REMOVE(&q->q_q, pg, pdqueue);
	q->q_len--;
}
2649d3e3eabSyamt 
2659d3e3eabSyamt static struct vm_page *
pageq_remove_head(pageq_t * q)2669d3e3eabSyamt pageq_remove_head(pageq_t *q)
2679d3e3eabSyamt {
2689d3e3eabSyamt 	struct vm_page *pg;
2699d3e3eabSyamt 
2709d3e3eabSyamt 	pg = TAILQ_FIRST(&q->q_q);
2719d3e3eabSyamt 	if (pg == NULL) {
2729d3e3eabSyamt 		KASSERT(q->q_len == 0);
2739d3e3eabSyamt 		return NULL;
2749d3e3eabSyamt 	}
2759d3e3eabSyamt 	pageq_remove(q, pg);
2769d3e3eabSyamt 	return pg;
2779d3e3eabSyamt }
2789d3e3eabSyamt 
2799d3e3eabSyamt /* ---------------------------------------- */
2809d3e3eabSyamt 
/*
 * clockpro_insert_tail: tag pg with qidx and append it to that queue.
 */
static void
clockpro_insert_tail(struct clockpro_state *s, int qidx, struct vm_page *pg)
{

	clockpro_setq(pg, qidx);
	pageq_insert_tail(clockpro_queue(s, qidx), pg);
}
2899d3e3eabSyamt 
#if defined(LISTQ)
/*
 * clockpro_insert_head: tag pg with qidx and prepend it to that queue.
 */
static void
clockpro_insert_head(struct clockpro_state *s, int qidx, struct vm_page *pg)
{

	clockpro_setq(pg, qidx);
	pageq_insert_head(clockpro_queue(s, qidx), pg);
}

#endif
3019d3e3eabSyamt /* ---------------------------------------- */
3029d3e3eabSyamt 
3039d3e3eabSyamt typedef uint32_t nonres_cookie_t;
3049d3e3eabSyamt #define	NONRES_COOKIE_INVAL	0
3059d3e3eabSyamt 
3069d3e3eabSyamt typedef uintptr_t objid_t;
3079d3e3eabSyamt 
3089d3e3eabSyamt /*
3099d3e3eabSyamt  * XXX maybe these hash functions need reconsideration,
3109d3e3eabSyamt  * given that hash distribution is critical here.
3119d3e3eabSyamt  */
3129d3e3eabSyamt 
/*
 * pageidentityhash1: hash a page identity (owner, offset) to select a
 * bucket in the non-resident table.  mixes in the opposite order from
 * pageidentityhash2.
 */
static uint32_t
pageidentityhash1(objid_t obj, off_t idx)
{
	uint32_t hash = HASH32_BUF_INIT;

#if 1
	/* current order: index first.  the alternative is kept below. */
	hash = hash32_buf(&idx, sizeof(idx), hash);
	hash = hash32_buf(&obj, sizeof(obj), hash);
#else
	hash = hash32_buf(&obj, sizeof(obj), hash);
	hash = hash32_buf(&idx, sizeof(idx), hash);
#endif
	return hash;
}
3279d3e3eabSyamt 
3289d3e3eabSyamt static uint32_t
pageidentityhash2(objid_t obj,off_t idx)3299d3e3eabSyamt pageidentityhash2(objid_t obj, off_t idx)
3309d3e3eabSyamt {
3319d3e3eabSyamt 	uint32_t hash = HASH32_BUF_INIT;
3329d3e3eabSyamt 
3339d3e3eabSyamt 	hash = hash32_buf(&obj, sizeof(obj), hash);
3349d3e3eabSyamt 	hash = hash32_buf(&idx, sizeof(idx), hash);
3359d3e3eabSyamt 	return hash;
3369d3e3eabSyamt }
3379d3e3eabSyamt 
3389d3e3eabSyamt static nonres_cookie_t
calccookie(objid_t obj,off_t idx)3399d3e3eabSyamt calccookie(objid_t obj, off_t idx)
3409d3e3eabSyamt {
3419d3e3eabSyamt 	uint32_t hash = pageidentityhash2(obj, idx);
3429d3e3eabSyamt 	nonres_cookie_t cookie = hash;
3439d3e3eabSyamt 
3449d3e3eabSyamt 	if (__predict_false(cookie == NONRES_COOKIE_INVAL)) {
3459d3e3eabSyamt 		cookie++; /* XXX */
3469d3e3eabSyamt 	}
3479d3e3eabSyamt 	return cookie;
3489d3e3eabSyamt }
3499d3e3eabSyamt 
/* number of non-resident cookies per hash bucket */
#define	BUCKETSIZE	14
struct bucket {
	int cycle;	/* cycle_target value this bucket last caught up to */
	int cur;	/* next slot to be overwritten by the hand */
	nonres_cookie_t pages[BUCKETSIZE];	/* recorded cookies */
};
/* global clock position for the non-resident table */
static int cycle_target;
static int cycle_target_frac;	/* fractional accumulator for cycle_target */

/* one-bucket table used until clockpro_hashinit() allocates the real one */
static struct bucket static_bucket;
static struct bucket *buckets = &static_bucket;
static size_t hashsize = 1;	/* number of buckets in the table */

/* pending adjustment to the cold target (consumed by clockpro_tune) */
static int coldadj;
#define	COLDTARGET_ADJ(d)	coldadj += (d)
3659d3e3eabSyamt 
3669d3e3eabSyamt #if defined(PDSIM)
3679d3e3eabSyamt 
/* pdsim build: back the non-resident hash with plain malloc. */
static void *
clockpro_hashalloc(int n)
{
	size_t allocsz = sizeof(*buckets) * n;

	return malloc(allocsz);
}
3759d3e3eabSyamt 
/* pdsim build: release a table allocated by clockpro_hashalloc(). */
static void
clockpro_hashfree(void *p, int n)
{

	free(p);
}
3829d3e3eabSyamt 
3839d3e3eabSyamt #else /* defined(PDSIM) */
3849d3e3eabSyamt 
/*
 * clockpro_hashalloc: allocate wired kernel memory for n hash buckets,
 * rounded up to whole pages.  the caller checks for NULL.
 */
static void *
clockpro_hashalloc(int n)
{
	size_t allocsz = round_page(sizeof(*buckets) * n);

	return (void *)uvm_km_alloc(kernel_map, allocsz, 0, UVM_KMF_WIRED);
}
3929d3e3eabSyamt 
/*
 * clockpro_hashfree: release a table of n buckets allocated by
 * clockpro_hashalloc().  the size must be recomputed identically.
 */
static void
clockpro_hashfree(void *p, int n)
{
	size_t allocsz = round_page(sizeof(*buckets) * n);

	uvm_km_free(kernel_map, (vaddr_t)p, allocsz, UVM_KMF_WIRED);
}
4009d3e3eabSyamt 
4019d3e3eabSyamt #endif /* defined(PDSIM) */
4029d3e3eabSyamt 
4039d3e3eabSyamt static void
clockpro_hashinit(uint64_t n)4049d3e3eabSyamt clockpro_hashinit(uint64_t n)
4059d3e3eabSyamt {
4069d3e3eabSyamt 	struct bucket *newbuckets;
4079d3e3eabSyamt 	struct bucket *oldbuckets;
4089d3e3eabSyamt 	size_t sz;
4099d3e3eabSyamt 	size_t oldsz;
4109d3e3eabSyamt 	int i;
4119d3e3eabSyamt 
4129d3e3eabSyamt 	sz = howmany(n, BUCKETSIZE);
4139d3e3eabSyamt 	sz *= clockpro_hashfactor;
4149d3e3eabSyamt 	newbuckets = clockpro_hashalloc(sz);
4159d3e3eabSyamt 	if (newbuckets == NULL) {
4169d3e3eabSyamt 		panic("%s: allocation failure", __func__);
4179d3e3eabSyamt 	}
4189d3e3eabSyamt 	for (i = 0; i < sz; i++) {
4199d3e3eabSyamt 		struct bucket *b = &newbuckets[i];
4209d3e3eabSyamt 		int j;
4219d3e3eabSyamt 
4229d3e3eabSyamt 		b->cycle = cycle_target;
4239d3e3eabSyamt 		b->cur = 0;
4249d3e3eabSyamt 		for (j = 0; j < BUCKETSIZE; j++) {
4259d3e3eabSyamt 			b->pages[j] = NONRES_COOKIE_INVAL;
4269d3e3eabSyamt 		}
4279d3e3eabSyamt 	}
4289d3e3eabSyamt 	/* XXX lock */
4299d3e3eabSyamt 	oldbuckets = buckets;
4309d3e3eabSyamt 	oldsz = hashsize;
4319d3e3eabSyamt 	buckets = newbuckets;
4329d3e3eabSyamt 	hashsize = sz;
4339d3e3eabSyamt 	/* XXX unlock */
4349d3e3eabSyamt 	if (oldbuckets != &static_bucket) {
4359d3e3eabSyamt 		clockpro_hashfree(oldbuckets, oldsz);
4369d3e3eabSyamt 	}
4379d3e3eabSyamt }
4389d3e3eabSyamt 
4399d3e3eabSyamt static struct bucket *
nonresident_getbucket(objid_t obj,off_t idx)4409d3e3eabSyamt nonresident_getbucket(objid_t obj, off_t idx)
4419d3e3eabSyamt {
4429d3e3eabSyamt 	uint32_t hash;
4439d3e3eabSyamt 
4449d3e3eabSyamt 	hash = pageidentityhash1(obj, idx);
4459d3e3eabSyamt 	return &buckets[hash % hashsize];
4469d3e3eabSyamt }
4479d3e3eabSyamt 
/*
 * nonresident_rotate: advance this bucket's clock hand until it has
 * caught up with the global cycle_target, invalidating every cookie
 * the hand passes over.  a cookie still valid when the hand reaches
 * it has survived a full lap unreferenced; count it and shrink the
 * cold target accordingly.
 */
static void
nonresident_rotate(struct bucket *b)
{
	const int target = cycle_target;
	const int cycle = b->cycle;
	int cur;
	int todo;

	todo = target - cycle;
	if (todo >= BUCKETSIZE * 2) {
		/*
		 * more than two laps behind: one full lap plus the
		 * remainder clears the same set of slots as doing
		 * them all, so cap the work.
		 */
		todo = (todo % BUCKETSIZE) + BUCKETSIZE;
	}
	cur = b->cur;
	while (todo > 0) {
		if (b->pages[cur] != NONRES_COOKIE_INVAL) {
			PDPOL_EVCNT_INCR(nreshandhot);
			COLDTARGET_ADJ(-1);
		}
		b->pages[cur] = NONRES_COOKIE_INVAL;
		cur++;
		if (cur == BUCKETSIZE) {
			cur = 0;
		}
		todo--;
	}
	b->cycle = target;
	b->cur = cur;
}
4769d3e3eabSyamt 
477712239e3Sthorpej static bool
nonresident_lookupremove(objid_t obj,off_t idx)4789d3e3eabSyamt nonresident_lookupremove(objid_t obj, off_t idx)
4799d3e3eabSyamt {
4809d3e3eabSyamt 	struct bucket *b = nonresident_getbucket(obj, idx);
4819d3e3eabSyamt 	nonres_cookie_t cookie = calccookie(obj, idx);
4829d3e3eabSyamt 	int i;
4839d3e3eabSyamt 
4849d3e3eabSyamt 	nonresident_rotate(b);
4859d3e3eabSyamt 	for (i = 0; i < BUCKETSIZE; i++) {
4869d3e3eabSyamt 		if (b->pages[i] == cookie) {
4879d3e3eabSyamt 			b->pages[i] = NONRES_COOKIE_INVAL;
488b3667adaSthorpej 			return true;
4899d3e3eabSyamt 		}
4909d3e3eabSyamt 	}
491b3667adaSthorpej 	return false;
4929d3e3eabSyamt }
4939d3e3eabSyamt 
/*
 * pageobj: derive an identity for the owner of pg (its uobject, or
 * failing that its anon) for use as a non-resident hash key.
 */
static objid_t
pageobj(struct vm_page *pg)
{
	const void *obj;

	/*
	 * XXX object pointer is often freed and reused for unrelated object.
	 * for vnodes, it would be better to use something like
	 * a hash of fsid/fileid/generation.
	 */

	obj = pg->uobject;
	if (obj == NULL) {
		obj = pg->uanon;
		KASSERT(obj != NULL);
	}
	return (objid_t)obj;
}
5129d3e3eabSyamt 
5139d3e3eabSyamt static off_t
pageidx(struct vm_page * pg)5149d3e3eabSyamt pageidx(struct vm_page *pg)
5159d3e3eabSyamt {
5169d3e3eabSyamt 
5179d3e3eabSyamt 	KASSERT((pg->offset & PAGE_MASK) == 0);
5189d3e3eabSyamt 	return pg->offset >> PAGE_SHIFT;
5199d3e3eabSyamt }
5209d3e3eabSyamt 
521712239e3Sthorpej static bool
nonresident_pagelookupremove(struct vm_page * pg)5229d3e3eabSyamt nonresident_pagelookupremove(struct vm_page *pg)
5239d3e3eabSyamt {
524712239e3Sthorpej 	bool found = nonresident_lookupremove(pageobj(pg), pageidx(pg));
5259d3e3eabSyamt 
526603d434bSyamt 	if (pg->uobject) {
527603d434bSyamt 		PDPOL_EVCNT_INCR(nreslookupobj);
528603d434bSyamt 	} else {
529603d434bSyamt 		PDPOL_EVCNT_INCR(nreslookupanon);
530603d434bSyamt 	}
5319d3e3eabSyamt 	if (found) {
5329d3e3eabSyamt 		if (pg->uobject) {
5339d3e3eabSyamt 			PDPOL_EVCNT_INCR(nresfoundobj);
5349d3e3eabSyamt 		} else {
5359d3e3eabSyamt 			PDPOL_EVCNT_INCR(nresfoundanon);
5369d3e3eabSyamt 		}
5379d3e3eabSyamt 	}
5389d3e3eabSyamt 	return found;
5399d3e3eabSyamt }
5409d3e3eabSyamt 
/*
 * nonresident_pagerecord: remember the identity of pg (about to be
 * evicted) in the non-resident hash, overwriting the slot under the
 * bucket's clock hand.
 */
static void
nonresident_pagerecord(struct vm_page *pg)
{
	objid_t obj = pageobj(pg);
	off_t idx = pageidx(pg);
	struct bucket *b = nonresident_getbucket(obj, idx);
	nonres_cookie_t cookie = calccookie(obj, idx);

#if defined(DEBUG)
	int i;

	/* count cookie collisions already present in this bucket. */
	for (i = 0; i < BUCKETSIZE; i++) {
		if (b->pages[i] == cookie) {
			PDPOL_EVCNT_INCR(nresconflict);
		}
	}
#endif /* defined(DEBUG) */

	if (pg->uobject) {
		PDPOL_EVCNT_INCR(nresrecordobj);
	} else {
		PDPOL_EVCNT_INCR(nresrecordanon);
	}
	nonresident_rotate(b);
	if (b->pages[b->cur] != NONRES_COOKIE_INVAL) {
		/* overwriting a still-valid record; note it and adjust. */
		PDPOL_EVCNT_INCR(nresoverwritten);
		COLDTARGET_ADJ(-1);
	}
	b->pages[b->cur] = cookie;
	b->cur = (b->cur + 1) % BUCKETSIZE;
}
5729d3e3eabSyamt 
5739d3e3eabSyamt /* ---------------------------------------- */
5749d3e3eabSyamt 
#if defined(CLOCKPRO_DEBUG)
/* placeholder for queue-consistency checks in debug builds */
static void
check_sanity(void)
{
}
#else /* defined(CLOCKPRO_DEBUG) */
#define	check_sanity()	/* nothing */
#endif /* defined(CLOCKPRO_DEBUG) */
5839d3e3eabSyamt 
/*
 * clockpro_reinit: resize the non-resident hash table to match the
 * current system page count.  called with the policy lock held.
 */
static void
clockpro_reinit(void)
{

	KASSERT(mutex_owned(&clockpro.lock));

	clockpro_hashinit(uvmexp.npages);
}
5929d3e3eabSyamt 
5939d3e3eabSyamt static void
clockpro_init(void)5949d3e3eabSyamt clockpro_init(void)
5959d3e3eabSyamt {
5969d3e3eabSyamt 	struct clockpro_state *s = &clockpro;
5979d3e3eabSyamt 	int i;
5989d3e3eabSyamt 
5995978ddc6Sad 	mutex_init(&s->lock, MUTEX_DEFAULT, IPL_NONE);
6009d3e3eabSyamt 	for (i = 0; i < CLOCKPRO_NQUEUE; i++) {
6019d3e3eabSyamt 		pageq_init(&s->s_q[i]);
6029d3e3eabSyamt 	}
6039d3e3eabSyamt 	s->s_newqlenmax = 1;
6049d3e3eabSyamt 	s->s_coldtarget = 1;
6059d3e3eabSyamt 	uvm_pctparam_init(&s->s_coldtargetpct, CLOCKPRO_COLDPCT, NULL);
6069d3e3eabSyamt }
6079d3e3eabSyamt 
/*
 * clockpro_tune: recompute the cold target and the NEWQ length limit
 * from the current number of managed pages.
 */
static void
clockpro_tune(void)
{
	struct clockpro_state *s = &clockpro;
	int coldtarget;

	KASSERT(mutex_owned(&s->lock));

#if defined(ADAPTIVE)
	/* clamp the pending adjustment so the target stays in bounds. */
	int coldmax = s->s_npages * CLOCKPRO_COLDPCTMAX / 100;
	int coldmin = 1;

	coldtarget = s->s_coldtarget;
	if (coldtarget + coldadj < coldmin) {
		coldadj = coldmin - coldtarget;
	} else if (coldtarget + coldadj > coldmax) {
		coldadj = coldmax - coldtarget;
	}
	coldtarget += coldadj;
#else /* defined(ADAPTIVE) */
	/* fixed percentage of managed pages, but at least one page. */
	coldtarget = UVM_PCTPARAM_APPLY(&s->s_coldtargetpct, s->s_npages);
	if (coldtarget < 1) {
		coldtarget = 1;
	}
#endif /* defined(ADAPTIVE) */

	s->s_coldtarget = coldtarget;
	s->s_newqlenmax = coldtarget / 4;
	if (s->s_newqlenmax < CLOCKPRO_NEWQMIN) {
		s->s_newqlenmax = CLOCKPRO_NEWQMIN;
	}
}
6409d3e3eabSyamt 
/*
 * clockpro_movereferencebit: move the pmap-level reference bit of pg
 * into the software PQ_REFERENCED flag, clearing the pmap bit.
 *
 * => "locked" is true iff the caller already holds the page owner's
 *    lock (asserted below).  otherwise the owner lock is trylocked
 *    here and the operation is silently skipped on failure.
 */
static void
clockpro_movereferencebit(struct vm_page *pg, bool locked)
{
	kmutex_t *lock;
	bool referenced;

	KASSERT(mutex_owned(&clockpro.lock));
	KASSERT(!locked || uvm_page_owner_locked_p(pg, false));
	if (!locked) {
		/*
		 * acquire interlock to stabilize page identity.
		 * if we have caught the page in a state of flux
		 * and it should be dequeued, abort.  it will be
		 * dequeued later.
		 */
		mutex_enter(&pg->interlock);
		if ((pg->uobject == NULL && pg->uanon == NULL) ||
		    pg->wire_count > 0) {
			mutex_exit(&pg->interlock);
			PDPOL_EVCNT_INCR(lockfail);
			return;
		}
		/*
		 * NOTE(review): the policy lock is dropped around the
		 * owner trylock — presumably for lock ordering; the XXX
		 * markers below flag this as suspect.
		 */
		mutex_exit(&clockpro.lock);	/* XXX */
		lock = uvmpd_trylockowner(pg);
		/* pg->interlock now dropped */
		mutex_enter(&clockpro.lock);	/* XXX */
		if (lock == NULL) {
			/*
			 * XXXuvmplock
			 */
			PDPOL_EVCNT_INCR(lockfail);
			return;
		}
		PDPOL_EVCNT_INCR(locksuccess);
	}
	referenced = pmap_clear_reference(pg);
	if (!locked) {
		mutex_exit(lock);
	}
	if (referenced) {
		pg->pqflags |= PQ_REFERENCED;
	}
}
6849d3e3eabSyamt 
/*
 * clockpro_clearreferencebit: clear both the pmap reference bit and
 * the software PQ_REFERENCED copy for pg.
 */
static void
clockpro_clearreferencebit(struct vm_page *pg, bool locked)
{

	KASSERT(mutex_owned(&clockpro.lock));

	/* movereferencebit may set PQ_REFERENCED; clear it right after. */
	clockpro_movereferencebit(pg, locked);
	pg->pqflags &= ~PQ_REFERENCED;
}
6949d3e3eabSyamt 
6959d3e3eabSyamt static void
clockpro___newqrotate(int len)6969d3e3eabSyamt clockpro___newqrotate(int len)
6979d3e3eabSyamt {
6989d3e3eabSyamt 	struct clockpro_state * const s = &clockpro;
6999d3e3eabSyamt 	pageq_t * const newq = clockpro_queue(s, CLOCKPRO_NEWQ);
7009d3e3eabSyamt 	struct vm_page *pg;
7019d3e3eabSyamt 
7025978ddc6Sad 	KASSERT(mutex_owned(&s->lock));
7035978ddc6Sad 
7049d3e3eabSyamt 	while (pageq_len(newq) > len) {
7059d3e3eabSyamt 		pg = pageq_remove_head(newq);
7069d3e3eabSyamt 		KASSERT(pg != NULL);
7079d3e3eabSyamt 		KASSERT(clockpro_getq(pg) == CLOCKPRO_NEWQ);
7089d3e3eabSyamt 		if ((pg->pqflags & PQ_INITIALREF) != 0) {
70905d8362dSyamt 			clockpro_clearreferencebit(pg, false);
7109d3e3eabSyamt 			pg->pqflags &= ~PQ_INITIALREF;
7119d3e3eabSyamt 		}
7129d3e3eabSyamt 		/* place at the list head */
7139d3e3eabSyamt 		clockpro_insert_tail(s, CLOCKPRO_COLDQ, pg);
7149d3e3eabSyamt 	}
7159d3e3eabSyamt }
7169d3e3eabSyamt 
7179d3e3eabSyamt static void
clockpro_newqrotate(void)7189d3e3eabSyamt clockpro_newqrotate(void)
7199d3e3eabSyamt {
7209d3e3eabSyamt 	struct clockpro_state * const s = &clockpro;
7219d3e3eabSyamt 
7225978ddc6Sad 	KASSERT(mutex_owned(&s->lock));
7235978ddc6Sad 
7249d3e3eabSyamt 	check_sanity();
7259d3e3eabSyamt 	clockpro___newqrotate(s->s_newqlenmax);
7269d3e3eabSyamt 	check_sanity();
7279d3e3eabSyamt }
7289d3e3eabSyamt 
7299d3e3eabSyamt static void
clockpro_newqflush(int n)7309d3e3eabSyamt clockpro_newqflush(int n)
7319d3e3eabSyamt {
7329d3e3eabSyamt 
7335978ddc6Sad 	KASSERT(mutex_owned(&clockpro.lock));
7345978ddc6Sad 
7359d3e3eabSyamt 	check_sanity();
7369d3e3eabSyamt 	clockpro___newqrotate(n);
7379d3e3eabSyamt 	check_sanity();
7389d3e3eabSyamt }
7399d3e3eabSyamt 
7409d3e3eabSyamt static void
clockpro_newqflushone(void)7419d3e3eabSyamt clockpro_newqflushone(void)
7429d3e3eabSyamt {
7439d3e3eabSyamt 	struct clockpro_state * const s = &clockpro;
7449d3e3eabSyamt 
7455978ddc6Sad 	KASSERT(mutex_owned(&s->lock));
7465978ddc6Sad 
7479d3e3eabSyamt 	clockpro_newqflush(
7489d3e3eabSyamt 	    MAX(pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)) - 1, 0));
7499d3e3eabSyamt }
7509d3e3eabSyamt 
7519d3e3eabSyamt /*
7529d3e3eabSyamt  * our "tail" is called "list-head" in the paper.
7539d3e3eabSyamt  */
7549d3e3eabSyamt 
/*
 * clockpro___enqueuetail: append pg at the tail of the clock.  in the
 * default configuration the page first enters the NEWQ, whose overflow
 * spills onto the cold queue; with USEONCE2 it goes to the cold queue
 * directly.
 */
static void
clockpro___enqueuetail(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;

	KASSERT(mutex_owned(&s->lock));
	KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);

	check_sanity();
#if !defined(USEONCE2)
	clockpro_insert_tail(s, CLOCKPRO_NEWQ, pg);
	clockpro_newqrotate();
#else /* !defined(USEONCE2) */
#if defined(LISTQ)
	KASSERT((pg->pqflags & PQ_REFERENCED) == 0);
#endif /* defined(LISTQ) */
	clockpro_insert_tail(s, CLOCKPRO_COLDQ, pg);
#endif /* !defined(USEONCE2) */
	check_sanity();
}
7759d3e3eabSyamt 
/*
 * clockpro_pageenqueue: bring pg under the policy's management.
 * whether it enters hot, or cold in its "test" period, depends on
 * whether a non-resident record of it survives (i.e. it was evicted
 * recently).
 */
static void
clockpro_pageenqueue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	bool hot;
	bool speculative = (pg->pqflags & PQ_SPECULATIVE) != 0; /* XXX */

	KASSERT((~pg->pqflags & (PQ_INITIALREF|PQ_SPECULATIVE)) != 0);
	KASSERT(mutex_owned(&s->lock));
	check_sanity();
	KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);
	s->s_npages++;
	pg->pqflags &= ~(PQ_HOT|PQ_TEST);
	if (speculative) {
		/* speculative pages skip the non-resident lookup. */
		hot = false;
		PDPOL_EVCNT_INCR(speculativeenqueue);
	} else {
		/*
		 * a surviving non-resident record means a short reuse
		 * distance, so the page re-enters as hot.
		 */
		hot = nonresident_pagelookupremove(pg);
		if (hot) {
			COLDTARGET_ADJ(1);
		}
	}

	/*
	 * consider mmap'ed file:
	 *
	 * - read-ahead enqueues a page.
	 *
	 * - on the following read-ahead hit, the fault handler activates it.
	 *
	 * - finally, the userland code which caused the above fault
	 *   actually accesses the page.  it makes its reference bit set.
	 *
	 * we want to count the above as a single access, rather than
	 * three accesses with short reuse distances.
	 */

#if defined(USEONCE2)
	pg->pqflags &= ~PQ_INITIALREF;
	if (hot) {
		pg->pqflags |= PQ_TEST;
	}
	s->s_ncold++;
	clockpro_clearreferencebit(pg, false);
	clockpro___enqueuetail(pg);
#else /* defined(USEONCE2) */
	if (speculative) {
		s->s_ncold++;
	} else if (hot) {
		pg->pqflags |= PQ_HOT;
	} else {
		/* cold page in its test period. */
		pg->pqflags |= PQ_TEST;
		s->s_ncold++;
	}
	clockpro___enqueuetail(pg);
#endif /* defined(USEONCE2) */
	KASSERT(s->s_ncold <= s->s_npages);
}
8349d3e3eabSyamt 
8359d3e3eabSyamt static pageq_t *
clockpro_pagequeue(struct vm_page * pg)8369d3e3eabSyamt clockpro_pagequeue(struct vm_page *pg)
8379d3e3eabSyamt {
8389d3e3eabSyamt 	struct clockpro_state * const s = &clockpro;
8399d3e3eabSyamt 	int qidx;
8409d3e3eabSyamt 
8415978ddc6Sad 	KASSERT(mutex_owned(&s->lock));
8425978ddc6Sad 
8439d3e3eabSyamt 	qidx = clockpro_getq(pg);
8449d3e3eabSyamt 	KASSERT(qidx != CLOCKPRO_NOQUEUE);
8459d3e3eabSyamt 
8469d3e3eabSyamt 	return clockpro_queue(s, qidx);
8479d3e3eabSyamt }
8489d3e3eabSyamt 
8499d3e3eabSyamt static void
clockpro_pagedequeue(struct vm_page * pg)8509d3e3eabSyamt clockpro_pagedequeue(struct vm_page *pg)
8519d3e3eabSyamt {
8529d3e3eabSyamt 	struct clockpro_state * const s = &clockpro;
8539d3e3eabSyamt 	pageq_t *q;
8549d3e3eabSyamt 
8555978ddc6Sad 	KASSERT(mutex_owned(&s->lock));
8565978ddc6Sad 
8579d3e3eabSyamt 	KASSERT(s->s_npages > 0);
8589d3e3eabSyamt 	check_sanity();
8599d3e3eabSyamt 	q = clockpro_pagequeue(pg);
8609d3e3eabSyamt 	pageq_remove(q, pg);
8619d3e3eabSyamt 	check_sanity();
8629d3e3eabSyamt 	clockpro_setq(pg, CLOCKPRO_NOQUEUE);
8639d3e3eabSyamt 	if ((pg->pqflags & PQ_HOT) == 0) {
8649d3e3eabSyamt 		KASSERT(s->s_ncold > 0);
8659d3e3eabSyamt 		s->s_ncold--;
8669d3e3eabSyamt 	}
8679d3e3eabSyamt 	KASSERT(s->s_npages > 0);
8689d3e3eabSyamt 	s->s_npages--;
8699d3e3eabSyamt 	check_sanity();
8709d3e3eabSyamt }
8719d3e3eabSyamt 
8729d3e3eabSyamt static void
clockpro_pagerequeue(struct vm_page * pg)8739d3e3eabSyamt clockpro_pagerequeue(struct vm_page *pg)
8749d3e3eabSyamt {
8759d3e3eabSyamt 	struct clockpro_state * const s = &clockpro;
8769d3e3eabSyamt 	int qidx;
8779d3e3eabSyamt 
8785978ddc6Sad 	KASSERT(mutex_owned(&s->lock));
8795978ddc6Sad 
8809d3e3eabSyamt 	qidx = clockpro_getq(pg);
8819d3e3eabSyamt 	KASSERT(qidx == CLOCKPRO_HOTQ || qidx == CLOCKPRO_COLDQ);
8829d3e3eabSyamt 	pageq_remove(clockpro_queue(s, qidx), pg);
8839d3e3eabSyamt 	check_sanity();
8849d3e3eabSyamt 	clockpro_setq(pg, CLOCKPRO_NOQUEUE);
8859d3e3eabSyamt 
8869d3e3eabSyamt 	clockpro___enqueuetail(pg);
8879d3e3eabSyamt }
8889d3e3eabSyamt 
8899d3e3eabSyamt static void
handhot_endtest(struct vm_page * pg)8909d3e3eabSyamt handhot_endtest(struct vm_page *pg)
8919d3e3eabSyamt {
8929d3e3eabSyamt 
8935978ddc6Sad 	KASSERT(mutex_owned(&clockpro.lock));
8945978ddc6Sad 
8959d3e3eabSyamt 	KASSERT((pg->pqflags & PQ_HOT) == 0);
8969d3e3eabSyamt 	if ((pg->pqflags & PQ_TEST) != 0) {
8979d3e3eabSyamt 		PDPOL_EVCNT_INCR(hhotcoldtest);
8989d3e3eabSyamt 		COLDTARGET_ADJ(-1);
8999d3e3eabSyamt 		pg->pqflags &= ~PQ_TEST;
9009d3e3eabSyamt 	} else {
9019d3e3eabSyamt 		PDPOL_EVCNT_INCR(hhotcold);
9029d3e3eabSyamt 	}
9039d3e3eabSyamt }
9049d3e3eabSyamt 
/*
 * handhot_advance: advance the "hot hand" of the clock until enough
 * cold pages exist (s_ncold >= s_coldtarget).  hot pages found with
 * their reference bit clear are demoted to cold; it also advances
 * cycle_target so that test periods of nonresident pages expire.
 *
 * => the clockpro lock must be held.
 */
static void
handhot_advance(void)
{
	struct clockpro_state * const s = &clockpro;
	struct vm_page *pg;
	pageq_t *hotq;
	int hotqlen;

	KASSERT(mutex_owned(&s->lock));

	clockpro_tune();

	dump("hot called");
	if (s->s_ncold >= s->s_coldtarget) {
		/* already have enough cold pages; nothing to do. */
		return;
	}
	hotq = clockpro_queue(s, CLOCKPRO_HOTQ);
again:
	pg = pageq_first(hotq);
	if (pg == NULL) {
		/* hot queue is empty: refill it from the cold queue. */
		DPRINTF("%s: HHOT TAKEOVER\n", __func__);
		dump("hhottakeover");
		PDPOL_EVCNT_INCR(hhottakeover);
#if defined(LISTQ)
		/*
		 * drain cold pages to LISTQ (ending their test periods)
		 * until the first hot page surfaces; it seeds HOTQ.
		 */
		while (/* CONSTCOND */ 1) {
			pageq_t *coldq = clockpro_queue(s, CLOCKPRO_COLDQ);

			pg = pageq_first(coldq);
			if (pg == NULL) {
				clockpro_newqflushone();
				pg = pageq_first(coldq);
				if (pg == NULL) {
					WARN("hhot: no page?\n");
					return;
				}
			}
			KASSERT(clockpro_pagequeue(pg) == coldq);
			pageq_remove(coldq, pg);
			check_sanity();
			if ((pg->pqflags & PQ_HOT) == 0) {
				handhot_endtest(pg);
				clockpro_insert_tail(s, CLOCKPRO_LISTQ, pg);
			} else {
				clockpro_insert_head(s, CLOCKPRO_HOTQ, pg);
				break;
			}
		}
#else /* defined(LISTQ) */
		clockpro_newqflush(0); /* XXX XXX */
		clockpro_switchqueue();
		hotq = clockpro_queue(s, CLOCKPRO_HOTQ);
		goto again;
#endif /* defined(LISTQ) */
	}

	KASSERT(clockpro_pagequeue(pg) == hotq);

	/*
	 * terminate test period of nonresident pages by cycling them.
	 */

	cycle_target_frac += BUCKETSIZE;
	hotqlen = pageq_len(hotq);
	while (cycle_target_frac >= hotqlen) {
		cycle_target++;
		cycle_target_frac -= hotqlen;
	}

	if ((pg->pqflags & PQ_HOT) == 0) {
#if defined(LISTQ)
		/* with LISTQ, cold pages never live on HOTQ. */
		panic("cold page in hotq: %p", pg);
#else /* defined(LISTQ) */
		handhot_endtest(pg);
		goto next;
#endif /* defined(LISTQ) */
	}
	KASSERT((pg->pqflags & PQ_TEST) == 0);
	KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
	KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);

	/*
	 * once we met our target,
	 * stop at a hot page so that no cold pages in test period
	 * have larger recency than any hot pages.
	 */

	if (s->s_ncold >= s->s_coldtarget) {
		dump("hot done");
		return;
	}
	clockpro_movereferencebit(pg, false);
	if ((pg->pqflags & PQ_REFERENCED) == 0) {
		/* unreferenced hot page: demote it to cold. */
		PDPOL_EVCNT_INCR(hhotunref);
		uvmexp.pddeact++;
		pg->pqflags &= ~PQ_HOT;
		clockpro.s_ncold++;
		KASSERT(s->s_ncold <= s->s_npages);
	} else {
		PDPOL_EVCNT_INCR(hhotref);
	}
	pg->pqflags &= ~PQ_REFERENCED;
#if !defined(LISTQ)
next:
#endif /* !defined(LISTQ) */
	clockpro_pagerequeue(pg);
	dump("hot");
	goto again;
}
10139d3e3eabSyamt 
/*
 * handcold_advance: advance the "cold hand" and return the next
 * eviction candidate: a cold, unreferenced page.  returns NULL if no
 * candidate can be found.  referenced cold pages are promoted to hot
 * or (re-)enter their test period instead of being returned.
 *
 * => the clockpro lock must be held.
 * => the returned page is still on a queue; see the XXX below.
 */
static struct vm_page *
handcold_advance(void)
{
	struct clockpro_state * const s = &clockpro;
	struct vm_page *pg;

	KASSERT(mutex_owned(&s->lock));

	for (;;) {
#if defined(LISTQ)
		pageq_t *listq = clockpro_queue(s, CLOCKPRO_LISTQ);
#endif /* defined(LISTQ) */
		pageq_t *coldq;

		clockpro_newqrotate();
		handhot_advance();
#if defined(LISTQ)
		/* pages parked on LISTQ by the hot hand go first. */
		pg = pageq_first(listq);
		if (pg != NULL) {
			KASSERT(clockpro_getq(pg) == CLOCKPRO_LISTQ);
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			KASSERT((pg->pqflags & PQ_HOT) == 0);
			KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
			pageq_remove(listq, pg);
			check_sanity();
			clockpro_insert_head(s, CLOCKPRO_COLDQ, pg); /* XXX */
			goto gotcold;
		}
#endif /* defined(LISTQ) */
		check_sanity();
		coldq = clockpro_queue(s, CLOCKPRO_COLDQ);
		pg = pageq_first(coldq);
		if (pg == NULL) {
			clockpro_newqflushone();
			pg = pageq_first(coldq);
		}
		if (pg == NULL) {
			/* cold queue exhausted. */
			DPRINTF("%s: HCOLD TAKEOVER\n", __func__);
			dump("hcoldtakeover");
			PDPOL_EVCNT_INCR(hcoldtakeover);
			KASSERT(
			    pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)) == 0);
#if defined(LISTQ)
			KASSERT(
			    pageq_len(clockpro_queue(s, CLOCKPRO_HOTQ)) == 0);
#else /* defined(LISTQ) */
			clockpro_switchqueue();
			coldq = clockpro_queue(s, CLOCKPRO_COLDQ);
			pg = pageq_first(coldq);
#endif /* defined(LISTQ) */
		}
		if (pg == NULL) {
			WARN("hcold: no page?\n");
			return NULL;
		}
		KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
		if ((pg->pqflags & PQ_HOT) != 0) {
			/* hot page on the cold queue: hand it to HOTQ. */
			PDPOL_EVCNT_INCR(hcoldhot);
			pageq_remove(coldq, pg);
			clockpro_insert_tail(s, CLOCKPRO_HOTQ, pg);
			check_sanity();
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			uvmexp.pdscans++;
			continue;
		}
#if defined(LISTQ)
gotcold:
#endif /* defined(LISTQ) */
		KASSERT((pg->pqflags & PQ_HOT) == 0);
		uvmexp.pdscans++;
		clockpro_movereferencebit(pg, false);
		if ((pg->pqflags & PQ_SPECULATIVE) != 0) {
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			if ((pg->pqflags & PQ_REFERENCED) != 0) {
				/* the read-ahead guess proved right. */
				PDPOL_EVCNT_INCR(speculativehit2);
				pg->pqflags &= ~(PQ_SPECULATIVE|PQ_REFERENCED);
				clockpro_pagedequeue(pg);
				clockpro_pageenqueue(pg);
				continue;
			}
			PDPOL_EVCNT_INCR(speculativemiss);
		}
		switch (pg->pqflags & (PQ_REFERENCED|PQ_TEST)) {
		case PQ_TEST:
			/* unreferenced page in test: remember and evict. */
			PDPOL_EVCNT_INCR(hcoldunreftest);
			nonresident_pagerecord(pg);
			goto gotit;
		case 0:
			PDPOL_EVCNT_INCR(hcoldunref);
gotit:
			KASSERT(s->s_ncold > 0);
			clockpro_pagerequeue(pg); /* XXX */
			dump("cold done");
			/* XXX "pg" is still in queue */
			handhot_advance();
			goto done;

		case PQ_REFERENCED|PQ_TEST:
			/* referenced within its test period: promote. */
			PDPOL_EVCNT_INCR(hcoldreftest);
			s->s_ncold--;
			COLDTARGET_ADJ(1);
			pg->pqflags |= PQ_HOT;
			pg->pqflags &= ~PQ_TEST;
			break;

		case PQ_REFERENCED:
			/* referenced: start a (new) test period. */
			PDPOL_EVCNT_INCR(hcoldref);
			pg->pqflags |= PQ_TEST;
			break;
		}
		pg->pqflags &= ~PQ_REFERENCED;
		uvmexp.pdreact++;
		/* move to the list head */
		clockpro_pagerequeue(pg);
		dump("cold");
	}
done:;
	return pg;
}
11339d3e3eabSyamt 
113494843b13Sad static void
uvmpdpol_pageactivate_locked(struct vm_page * pg)113594843b13Sad uvmpdpol_pageactivate_locked(struct vm_page *pg)
11369d3e3eabSyamt {
11379d3e3eabSyamt 
11389d3e3eabSyamt 	if (!uvmpdpol_pageisqueued_p(pg)) {
11399d3e3eabSyamt 		KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);
11409d3e3eabSyamt 		pg->pqflags |= PQ_INITIALREF;
11419d3e3eabSyamt 		clockpro_pageenqueue(pg);
11429d3e3eabSyamt 	} else if ((pg->pqflags & PQ_SPECULATIVE)) {
11439d3e3eabSyamt 		PDPOL_EVCNT_INCR(speculativehit1);
11449d3e3eabSyamt 		pg->pqflags &= ~PQ_SPECULATIVE;
11459d3e3eabSyamt 		pg->pqflags |= PQ_INITIALREF;
11469d3e3eabSyamt 		clockpro_pagedequeue(pg);
11479d3e3eabSyamt 		clockpro_pageenqueue(pg);
11489d3e3eabSyamt 	}
11499d3e3eabSyamt 	pg->pqflags |= PQ_REFERENCED;
115094843b13Sad }
115194843b13Sad 
void
uvmpdpol_pageactivate(struct vm_page *pg)
{

	/* record the intent; realized later under the clockpro lock. */
	uvmpdpol_set_intent(pg, PQ_INTENT_A);
}
115894843b13Sad 
/*
 * uvmpdpol_pagedeactivate_locked: deactivation just clears the page's
 * reference state; the clock hands do the rest.
 */
static void
uvmpdpol_pagedeactivate_locked(struct vm_page *pg)
{

	clockpro_clearreferencebit(pg, true);
}
11659d3e3eabSyamt 
void
uvmpdpol_pagedeactivate(struct vm_page *pg)
{

	/* record the intent; realized later under the clockpro lock. */
	uvmpdpol_set_intent(pg, PQ_INTENT_I);
}
117294843b13Sad 
117394843b13Sad static void
uvmpdpol_pagedequeue_locked(struct vm_page * pg)117494843b13Sad uvmpdpol_pagedequeue_locked(struct vm_page *pg)
117594843b13Sad {
117694843b13Sad 
117794843b13Sad 	if (!uvmpdpol_pageisqueued_p(pg)) {
117894843b13Sad 		return;
117994843b13Sad 	}
118094843b13Sad 	clockpro_pagedequeue(pg);
118194843b13Sad 	pg->pqflags &= ~(PQ_INITIALREF|PQ_SPECULATIVE);
11829d3e3eabSyamt }
11839d3e3eabSyamt 
void
uvmpdpol_pagedequeue(struct vm_page *pg)
{

	/* record the intent; realized later under the clockpro lock. */
	uvmpdpol_set_intent(pg, PQ_INTENT_D);
}
119094843b13Sad 
/*
 * uvmpdpol_pageenqueue_locked: put an unreferenced page into the clock
 * as a speculative page (e.g. read-ahead: expected, but not yet proven,
 * to be useful).
 */
static void
uvmpdpol_pageenqueue_locked(struct vm_page *pg)
{

#if 1
	if (uvmpdpol_pageisqueued_p(pg)) {
		return;
	}
	clockpro_clearreferencebit(pg, true);
	pg->pqflags |= PQ_SPECULATIVE;
	clockpro_pageenqueue(pg);
#else
	/* alternative: treat enqueue as a full activation. */
	uvmpdpol_pageactivate_locked(pg);
#endif
}
12069d3e3eabSyamt 
12079d3e3eabSyamt void
uvmpdpol_pageenqueue(struct vm_page * pg)12089d3e3eabSyamt uvmpdpol_pageenqueue(struct vm_page *pg)
12099d3e3eabSyamt {
12109d3e3eabSyamt 
121194843b13Sad 	uvmpdpol_set_intent(pg, PQ_INTENT_D);
121294843b13Sad }
121394843b13Sad 
121494843b13Sad static bool
uvmpdpol_pagerealize_locked(struct vm_page * pg)121594843b13Sad uvmpdpol_pagerealize_locked(struct vm_page *pg)
121694843b13Sad {
121794843b13Sad 	uint32_t pqflags;
121894843b13Sad 
121994843b13Sad 	KASSERT(mutex_owned(&clockpro.lock));
122094843b13Sad 	KASSERT(mutex_owned(&pg->interlock));
122194843b13Sad 
122294843b13Sad 	/* XXX this needs to be called from elsewhere, like uvmpdpol_clock. */
122394843b13Sad 
122494843b13Sad 	pqflags = pg->pqflags;
122594843b13Sad 	pq->pqflags &= ~(PQ_INTENT_SET | PQ_INTENT_QUEUED);
122694843b13Sad 	switch (pqflags & (PQ_INTENT_MASK | PQ_INTENT_SET)) {
122794843b13Sad 	case PQ_INTENT_A | PQ_INTENT_SET:
122894843b13Sad 		uvmpdpol_pageactivate_locked(pg);
122994843b13Sad 		return true;
123094843b13Sad 	case PQ_INTENT_E | PQ_INTENT_SET:
123194843b13Sad 		uvmpdpol_pageenqueue_locked(pg);
123294843b13Sad 		return true;
123394843b13Sad 	case PQ_INTENT_I | PQ_INTENT_SET:
123494843b13Sad 		uvmpdpol_pagedeactivate_locked(pg);
123594843b13Sad 		return true;
123694843b13Sad 	case PQ_INTENT_D | PQ_INTENT_SET:
123794843b13Sad 		uvmpdpol_pagedequeue_locked(pg);
123894843b13Sad 		return true;
123994843b13Sad 	default:
124094843b13Sad 		return false;
124194843b13Sad 	}
124294843b13Sad }
124394843b13Sad 
124494843b13Sad void
uvmpdpol_pagerealize(struct vm_page * pg)124594843b13Sad uvmpdpol_pagerealize(struct vm_page *pg)
124694843b13Sad {
12475978ddc6Sad 	struct clockpro_state * const s = &clockpro;
12485978ddc6Sad 
12495978ddc6Sad 	mutex_enter(&s->lock);
125094843b13Sad 	uvmpdpol_pagerealize_locked(pg);
12515978ddc6Sad 	mutex_exit(&s->lock);
12529d3e3eabSyamt }
12539d3e3eabSyamt 
12549d3e3eabSyamt void
uvmpdpol_anfree(struct vm_anon * an)12559d3e3eabSyamt uvmpdpol_anfree(struct vm_anon *an)
12569d3e3eabSyamt {
12575978ddc6Sad 	struct clockpro_state * const s = &clockpro;
12589d3e3eabSyamt 
12599d3e3eabSyamt 	KASSERT(an->an_page == NULL);
12605978ddc6Sad 	mutex_enter(&s->lock);
12619d3e3eabSyamt 	if (nonresident_lookupremove((objid_t)an, 0)) {
12629d3e3eabSyamt 		PDPOL_EVCNT_INCR(nresanonfree);
12639d3e3eabSyamt 	}
12645978ddc6Sad 	mutex_exit(&s->lock);
12659d3e3eabSyamt }
12669d3e3eabSyamt 
/*
 * uvmpdpol_init: one-time initialization of the clockpro policy.
 */
void
uvmpdpol_init(void)
{

	clockpro_init();
}
12739d3e3eabSyamt 
/*
 * uvmpdpol_reinit: re-initialize the policy state under the lock.
 */
void
uvmpdpol_reinit(void)
{
	struct clockpro_state * const s = &clockpro;

	mutex_enter(&s->lock);
	clockpro_reinit();
	mutex_exit(&s->lock);
}
12839d3e3eabSyamt 
12849d3e3eabSyamt void
uvmpdpol_estimatepageable(int * active,int * inactive)12859d3e3eabSyamt uvmpdpol_estimatepageable(int *active, int *inactive)
12869d3e3eabSyamt {
12879d3e3eabSyamt 	struct clockpro_state * const s = &clockpro;
12889d3e3eabSyamt 
1289da84a45cSad 	/*
1290da84a45cSad 	 * Don't take any locks here.  This can be called from DDB, and in
1291da84a45cSad 	 * any case the numbers are stale the instant the lock is dropped,
1292da84a45cSad 	 * so it just doesn't matter.
1293da84a45cSad 	 */
12949d3e3eabSyamt 	if (active) {
12959d3e3eabSyamt 		*active = s->s_npages - s->s_ncold;
12969d3e3eabSyamt 	}
12979d3e3eabSyamt 	if (inactive) {
12989d3e3eabSyamt 		*inactive = s->s_ncold;
12999d3e3eabSyamt 	}
13009d3e3eabSyamt }
13019d3e3eabSyamt 
/*
 * uvmpdpol_pageisqueued_p: true iff the page is on one of the
 * clockpro queues.
 */
bool
uvmpdpol_pageisqueued_p(struct vm_page *pg)
{

	/* Unlocked check OK due to page lifecycle. */
	return clockpro_getq(pg) != CLOCKPRO_NOQUEUE;
}
13099d3e3eabSyamt 
/*
 * uvmpdpol_pageactivate_p: should an activation for this page be
 * recorded at all?
 */
bool
uvmpdpol_pageactivate_p(struct vm_page *pg)
{

	/* For now, no heuristic, always receive activations. */
	return true;
}
1317ff872804Sad 
/*
 * uvmpdpol_scaninit: prepare for a pagedaemon scan pass by resetting
 * the per-pass scan counter.
 */
void
uvmpdpol_scaninit(void)
{
	struct clockpro_state * const s = &clockpro;
	struct clockpro_scanstate * const ss = &scanstate;

	mutex_enter(&s->lock);
	ss->ss_nscanned = 0;
	mutex_exit(&s->lock);
}
13289d3e3eabSyamt 
/*
 * uvmpdpol_scanfini: end of a scan pass; no teardown is needed here.
 */
void
uvmpdpol_scanfini(void)
{

}
13349344a595Sad 
13359d3e3eabSyamt struct vm_page *
uvmpdpol_selectvictim(kmutex_t ** plock)13365978ddc6Sad uvmpdpol_selectvictim(kmutex_t **plock)
13379d3e3eabSyamt {
13389d3e3eabSyamt 	struct clockpro_state * const s = &clockpro;
13399d3e3eabSyamt 	struct clockpro_scanstate * const ss = &scanstate;
13409d3e3eabSyamt 	struct vm_page *pg;
13415978ddc6Sad 	kmutex_t *lock = NULL;
13429d3e3eabSyamt 
13435978ddc6Sad 	do {
13445978ddc6Sad 		mutex_enter(&s->lock);
13459d3e3eabSyamt 		if (ss->ss_nscanned > s->s_npages) {
13469d3e3eabSyamt 			DPRINTF("scan too much\n");
13475978ddc6Sad 			mutex_exit(&s->lock);
13489d3e3eabSyamt 			return NULL;
13499d3e3eabSyamt 		}
13509d3e3eabSyamt 		pg = handcold_advance();
13515978ddc6Sad 		if (pg == NULL) {
13525978ddc6Sad 			mutex_exit(&s->lock);
13535978ddc6Sad 			break;
13545978ddc6Sad 		}
13559d3e3eabSyamt 		ss->ss_nscanned++;
13565978ddc6Sad 		/*
1357*06ddeb9fSandvar 		 * acquire interlock to stabilize page identity.
13585978ddc6Sad 		 * if we have caught the page in a state of flux
13595978ddc6Sad 		 * and it should be dequeued, do it now and then
13605978ddc6Sad 		 * move on to the next.
13615978ddc6Sad 		 */
13625978ddc6Sad 		mutex_enter(&pg->interlock);
13635978ddc6Sad 	        if ((pg->uobject == NULL && pg->uanon == NULL) ||
13645978ddc6Sad 	            pg->wire_count > 0) {
13655978ddc6Sad 	            	mutex_exit(&pg->interlock);
13665978ddc6Sad 			clockpro_pagedequeue(pg);
13675978ddc6Sad 			pg->pqflags &= ~(PQ_INITIALREF|PQ_SPECULATIVE);
13685978ddc6Sad 	            	continue;
13695978ddc6Sad 		}
13705978ddc6Sad 		mutex_exit(&s->lock);
13715978ddc6Sad 		lock = uvmpd_trylockowner(pg);
13725978ddc6Sad 		/* pg->interlock now dropped */
13735978ddc6Sad 	} while (lock == NULL);
13745978ddc6Sad 	*plock = lock;
13759d3e3eabSyamt 	return pg;
13769d3e3eabSyamt }
13779d3e3eabSyamt 
/*
 * clockpro_dropswap: walk queue "q" backwards trying to reclaim swap
 * slots from hot, swap-backed pages, until *todo reaches zero.  the
 * clockpro lock is dropped and re-taken around locking a page's owner.
 *
 * => the clockpro lock must be held on entry and is held on return.
 * => NOTE(review): the existing XXXAD comment below flags that the
 *    queue may change while the lock is dropped mid-iteration.
 */
static void
clockpro_dropswap(pageq_t *q, int *todo)
{
	struct vm_page *pg;
	kmutex_t *lock;

	KASSERT(mutex_owned(&clockpro.lock));

	TAILQ_FOREACH_REVERSE(pg, &q->q_q, pglist, pdqueue) {
		if (*todo <= 0) {
			break;
		}
		if ((pg->pqflags & PQ_HOT) == 0) {
			/* only hot pages are interesting here. */
			continue;
		}
		mutex_enter(&pg->interlock);
		if ((pg->flags & PG_SWAPBACKED) == 0) {
			mutex_exit(&pg->interlock);
			continue;
		}

		/*
		 * try to lock the object that owns the page.
	         */
	        mutex_exit(&clockpro.lock);
        	lock = uvmpd_trylockowner(pg);
        	/* pg->interlock now released */
        	mutex_enter(&clockpro.lock);
		if (lock == NULL) {
			/* didn't get it - try the next page. */
			/* XXXAD lost position in queue */
			continue;
		}

		/*
		 * if there's a shortage of swap slots, try to free it.
		 */
		if ((pg->flags & PG_SWAPBACKED) != 0 &&
		    (pg->flags & PG_BUSY) == 0) {
			if (uvmpd_dropswap(pg)) {
				(*todo)--;
			}
		}
		mutex_exit(lock);
	}
}
14249d3e3eabSyamt 
14259d3e3eabSyamt void
uvmpdpol_balancequeue(int swap_shortage)14269d3e3eabSyamt uvmpdpol_balancequeue(int swap_shortage)
14279d3e3eabSyamt {
14289d3e3eabSyamt 	struct clockpro_state * const s = &clockpro;
14299d3e3eabSyamt 	int todo = swap_shortage;
14309d3e3eabSyamt 
14319d3e3eabSyamt 	if (todo == 0) {
14329d3e3eabSyamt 		return;
14339d3e3eabSyamt 	}
14349d3e3eabSyamt 
14359d3e3eabSyamt 	/*
14369d3e3eabSyamt 	 * reclaim swap slots from hot pages
14379d3e3eabSyamt 	 */
14389d3e3eabSyamt 
14399d3e3eabSyamt 	DPRINTF("%s: swap_shortage=%d\n", __func__, swap_shortage);
14409d3e3eabSyamt 
14415978ddc6Sad 	mutex_enter(&s->lock);
14429d3e3eabSyamt 	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_NEWQ), &todo);
14439d3e3eabSyamt 	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_COLDQ), &todo);
14449d3e3eabSyamt 	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_HOTQ), &todo);
14455978ddc6Sad 	mutex_exit(&s->lock);
14469d3e3eabSyamt 
14479d3e3eabSyamt 	DPRINTF("%s: done=%d\n", __func__, swap_shortage - todo);
14489d3e3eabSyamt }
14499d3e3eabSyamt 
/*
 * uvmpdpol_needsscan_p: true if the cold page count has dropped below
 * the target, i.e. the hands need to advance.
 */
bool
uvmpdpol_needsscan_p(void)
{
	struct clockpro_state * const s = &clockpro;

	/* This must be an unlocked check: can be called from interrupt. */
	return s->s_ncold < s->s_coldtarget;
}
14589d3e3eabSyamt 
/*
 * uvmpdpol_tune: recompute policy parameters under the lock.
 */
void
uvmpdpol_tune(void)
{
	struct clockpro_state * const s = &clockpro;

	mutex_enter(&s->lock);
	clockpro_tune();
	mutex_exit(&s->lock);
}
14689d3e3eabSyamt 
/*
 * uvmpdpol_idle: idle-time hook; this policy does no idle-time work.
 */
void
uvmpdpol_idle(void)
{

}
147494843b13Sad 
14759d3e3eabSyamt #if !defined(PDSIM)
14769d3e3eabSyamt 
14779d3e3eabSyamt #include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */
14789d3e3eabSyamt 
/*
 * uvmpdpol_sysctlsetup: export tunables via sysctl.  with ADAPTIVE the
 * cold target adjusts itself, so the knob exists only without it.
 */
void
uvmpdpol_sysctlsetup(void)
{
#if !defined(ADAPTIVE)
	struct clockpro_state * const s = &clockpro;

	uvm_pctparam_createsysctlnode(&s->s_coldtargetpct, "coldtargetpct",
	    SYSCTL_DESCR("Percentage cold target queue of the entire queue"));
#endif /* !defined(ADAPTIVE) */
}
14899d3e3eabSyamt 
14909d3e3eabSyamt #endif /* !defined(PDSIM) */
14919d3e3eabSyamt 
14929d3e3eabSyamt #if defined(DDB)
14939d3e3eabSyamt 
149405d8362dSyamt #if 0 /* XXXuvmplock */
149505d8362dSyamt #define	_pmap_is_referenced(pg)	pmap_is_referenced(pg)
149605d8362dSyamt #else
149705d8362dSyamt #define	_pmap_is_referenced(pg)	false
149805d8362dSyamt #endif
149905d8362dSyamt 
void clockpro_dump(void);

/*
 * clockpro_dump: DDB helper.  walk every queue, count pages by state
 * and cross-check each page's recorded queue index and the counted
 * queue lengths against pageq_len().  printf-only; takes no locks.
 */
void
clockpro_dump(void)
{
	struct clockpro_state * const s = &clockpro;

	struct vm_page *pg;
	int ncold, nhot, ntest, nspeculative, ninitialref, nref;
	int newqlen, coldqlen, hotqlen, listqlen;

	newqlen = coldqlen = hotqlen = listqlen = 0;
	printf("npages=%d, ncold=%d, coldtarget=%d, newqlenmax=%d\n",
	    s->s_npages, s->s_ncold, s->s_coldtarget, s->s_newqlenmax);

/* reset the per-queue counters. */
#define	INITCOUNT()	\
	ncold = nhot = ntest = nspeculative = ninitialref = nref = 0

/* classify one page into the counters above. */
#define	COUNT(pg)	\
	if ((pg->pqflags & PQ_HOT) != 0) { \
		nhot++; \
	} else { \
		ncold++; \
		if ((pg->pqflags & PQ_TEST) != 0) { \
			ntest++; \
		} \
		if ((pg->pqflags & PQ_SPECULATIVE) != 0) { \
			nspeculative++; \
		} \
		if ((pg->pqflags & PQ_INITIALREF) != 0) { \
			ninitialref++; \
		} else if ((pg->pqflags & PQ_REFERENCED) != 0 || \
		    _pmap_is_referenced(pg)) { \
			nref++; \
		} \
	}

#define	PRINTCOUNT(name)	\
	printf("%s hot=%d, cold=%d, test=%d, speculative=%d, initialref=%d, " \
	    "nref=%d\n", \
	    (name), nhot, ncold, ntest, nspeculative, ninitialref, nref)

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_NEWQ)->q_q, pdqueue) {
		if (clockpro_getq(pg) != CLOCKPRO_NEWQ) {
			printf("newq corrupt %p\n", pg);
		}
		COUNT(pg)
		newqlen++;
	}
	PRINTCOUNT("newq");

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_COLDQ)->q_q, pdqueue) {
		if (clockpro_getq(pg) != CLOCKPRO_COLDQ) {
			printf("coldq corrupt %p\n", pg);
		}
		COUNT(pg)
		coldqlen++;
	}
	PRINTCOUNT("coldq");

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_HOTQ)->q_q, pdqueue) {
		if (clockpro_getq(pg) != CLOCKPRO_HOTQ) {
			printf("hotq corrupt %p\n", pg);
		}
#if defined(LISTQ)
		/* with LISTQ, cold pages should never be on HOTQ. */
		if ((pg->pqflags & PQ_HOT) == 0) {
			printf("cold page in hotq: %p\n", pg);
		}
#endif /* defined(LISTQ) */
		COUNT(pg)
		hotqlen++;
	}
	PRINTCOUNT("hotq");

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_LISTQ)->q_q, pdqueue) {
#if !defined(LISTQ)
		/* without LISTQ this queue should be empty. */
		printf("listq %p\n", pg);
#endif /* !defined(LISTQ) */
		if (clockpro_getq(pg) != CLOCKPRO_LISTQ) {
			printf("listq corrupt %p\n", pg);
		}
		COUNT(pg)
		listqlen++;
	}
	PRINTCOUNT("listq");

	printf("newqlen=%d/%d, coldqlen=%d/%d, hotqlen=%d/%d, listqlen=%d/%d\n",
	    newqlen, pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)),
	    coldqlen, pageq_len(clockpro_queue(s, CLOCKPRO_COLDQ)),
	    hotqlen, pageq_len(clockpro_queue(s, CLOCKPRO_HOTQ)),
	    listqlen, pageq_len(clockpro_queue(s, CLOCKPRO_LISTQ)));
}
15969d3e3eabSyamt 
15979d3e3eabSyamt #endif /* defined(DDB) */
15989d3e3eabSyamt 
15999d3e3eabSyamt #if defined(PDSIM)
160086004aaeSyamt #if defined(DEBUG)
16019d3e3eabSyamt static void
pdsim_dumpq(int qidx)16029d3e3eabSyamt pdsim_dumpq(int qidx)
16039d3e3eabSyamt {
16049d3e3eabSyamt 	struct clockpro_state * const s = &clockpro;
16059d3e3eabSyamt 	pageq_t *q = clockpro_queue(s, qidx);
16069d3e3eabSyamt 	struct vm_page *pg;
16079d3e3eabSyamt 
16086c2dc768Sad 	TAILQ_FOREACH(pg, &q->q_q, pdqueue) {
16099d3e3eabSyamt 		DPRINTF(" %" PRIu64 "%s%s%s%s%s%s",
16109d3e3eabSyamt 		    pg->offset >> PAGE_SHIFT,
16119d3e3eabSyamt 		    (pg->pqflags & PQ_HOT) ? "H" : "",
16129d3e3eabSyamt 		    (pg->pqflags & PQ_TEST) ? "T" : "",
16139d3e3eabSyamt 		    (pg->pqflags & PQ_REFERENCED) ? "R" : "",
161405d8362dSyamt 		    _pmap_is_referenced(pg) ? "r" : "",
16159d3e3eabSyamt 		    (pg->pqflags & PQ_INITIALREF) ? "I" : "",
16169d3e3eabSyamt 		    (pg->pqflags & PQ_SPECULATIVE) ? "S" : ""
16179d3e3eabSyamt 		    );
16189d3e3eabSyamt 	}
16199d3e3eabSyamt }
162086004aaeSyamt #endif /* defined(DEBUG) */
16219d3e3eabSyamt 
/*
 * pdsim_dump: emit a one-line snapshot of the clockpro state for the
 * pagedaemon simulator: the contents of the list, hot, cold and new
 * queues (via pdsim_dumpq) followed by the cold-page bookkeeping
 * (s_ncold vs. s_coldtarget, and coldadj).  Compiles to a no-op
 * unless DEBUG is defined.
 */
void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	struct clockpro_state * const cs = &clockpro;
	/* Queue labels and indices, in the order they are printed. */
	static const struct {
		const char *label;
		int qidx;
	} dumporder[] = {
		{ " L(",   CLOCKPRO_LISTQ },
		{ " ) H(", CLOCKPRO_HOTQ },
		{ " ) C(", CLOCKPRO_COLDQ },
		{ " ) N(", CLOCKPRO_NEWQ },
	};
	unsigned int i;

	DPRINTF("  %s", id);
	for (i = 0; i < sizeof(dumporder) / sizeof(dumporder[0]); i++) {
		DPRINTF("%s", dumporder[i].label);
		pdsim_dumpq(dumporder[i].qidx);
	}
	DPRINTF(" ) ncold=%d/%d, coldadj=%d\n",
	    cs->s_ncold, cs->s_coldtarget, coldadj);
#endif /* defined(DEBUG) */
}
16409d3e3eabSyamt #endif /* defined(PDSIM) */
1641