/*	$NetBSD: uvm_pdpolicy_clockpro.c,v 1.17 2011/06/20 23:18:58 yamt Exp $	*/

/*-
 * Copyright (c)2005, 2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CLOCK-Pro replacement policy:
 *	http://www.cs.wm.edu/hpcs/WWW/HTML/publications/abs05-3.html
 *
 * approximation of the list of non-resident pages using hash:
 *	http://linux-mm.org/ClockProApproximation
 */
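
/*
 * a rough summary of the structure below (informal; see the paper for
 * the authoritative description):
 *
 * - resident pages are classified as hot or cold (PQ_HOT) and kept on
 *   clock queues.  the hot hand (handhot_advance) demotes unreferenced
 *   hot pages to cold; the cold hand (handcold_advance) evicts
 *   unreferenced cold pages and promotes cold pages which were
 *   referenced during their test period (PQ_TEST).
 * - pages evicted in their test period are remembered as cookies in a
 *   hashed table of small ring buckets.  a refault which finds its
 *   cookie there is treated as a hot access and grows the cold target.
 */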

/* #define	CLOCKPRO_DEBUG */

#if defined(PDSIM)

#include "pdsim.h"

#else /* defined(PDSIM) */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clockpro.c,v 1.17 2011/06/20 23:18:58 yamt Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/hash.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdaemon.h>	/* for uvmpd_trylockowner */
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pdpolicy_impl.h>

#if ((__STDC_VERSION__ - 0) >= 199901L)
#define	DPRINTF(...)	/* nothing */
#define	WARN(...)	printf(__VA_ARGS__)
#else /* ((__STDC_VERSION__ - 0) >= 199901L) */
#define	DPRINTF(a...)	/* nothing */	/* GCC */
#define	WARN(a...)	printf(a)
#endif /* ((__STDC_VERSION__ - 0) >= 199901L) */

#define	dump(a)		/* nothing */

#undef	USEONCE2
#define	LISTQ
#undef	ADAPTIVE

#endif /* defined(PDSIM) */

#if !defined(CLOCKPRO_COLDPCT)
#define	CLOCKPRO_COLDPCT	10
#endif /* !defined(CLOCKPRO_COLDPCT) */

#define	CLOCKPRO_COLDPCTMAX	90

#if !defined(CLOCKPRO_HASHFACTOR)
#define	CLOCKPRO_HASHFACTOR	2
#endif /* !defined(CLOCKPRO_HASHFACTOR) */

#define	CLOCKPRO_NEWQMIN	((1024 * 1024) >> PAGE_SHIFT)	/* XXX */

int clockpro_hashfactor = CLOCKPRO_HASHFACTOR;

PDPOL_EVCNT_DEFINE(nresrecordobj)
PDPOL_EVCNT_DEFINE(nresrecordanon)
PDPOL_EVCNT_DEFINE(nreslookupobj)
PDPOL_EVCNT_DEFINE(nreslookupanon)
PDPOL_EVCNT_DEFINE(nresfoundobj)
PDPOL_EVCNT_DEFINE(nresfoundanon)
PDPOL_EVCNT_DEFINE(nresanonfree)
PDPOL_EVCNT_DEFINE(nresconflict)
PDPOL_EVCNT_DEFINE(nresoverwritten)
PDPOL_EVCNT_DEFINE(nreshandhot)

PDPOL_EVCNT_DEFINE(hhottakeover)
PDPOL_EVCNT_DEFINE(hhotref)
PDPOL_EVCNT_DEFINE(hhotunref)
PDPOL_EVCNT_DEFINE(hhotcold)
PDPOL_EVCNT_DEFINE(hhotcoldtest)

PDPOL_EVCNT_DEFINE(hcoldtakeover)
PDPOL_EVCNT_DEFINE(hcoldref)
PDPOL_EVCNT_DEFINE(hcoldunref)
PDPOL_EVCNT_DEFINE(hcoldreftest)
PDPOL_EVCNT_DEFINE(hcoldunreftest)
PDPOL_EVCNT_DEFINE(hcoldunreftestspeculative)
PDPOL_EVCNT_DEFINE(hcoldhot)

PDPOL_EVCNT_DEFINE(speculativeenqueue)
PDPOL_EVCNT_DEFINE(speculativehit1)
PDPOL_EVCNT_DEFINE(speculativehit2)
PDPOL_EVCNT_DEFINE(speculativemiss)

PDPOL_EVCNT_DEFINE(locksuccess)
PDPOL_EVCNT_DEFINE(lockfail)

#define	PQ_REFERENCED	PQ_PRIVATE1
#define	PQ_HOT		PQ_PRIVATE2
#define	PQ_TEST		PQ_PRIVATE3
#define	PQ_INITIALREF	PQ_PRIVATE4
#if PQ_PRIVATE6 != PQ_PRIVATE5 * 2 || PQ_PRIVATE7 != PQ_PRIVATE6 * 2
#error PQ_PRIVATE
#endif
#define	PQ_QMASK	(PQ_PRIVATE5|PQ_PRIVATE6|PQ_PRIVATE7)
#define	PQ_QFACTOR	PQ_PRIVATE5
#define	PQ_SPECULATIVE	PQ_PRIVATE8

#define	CLOCKPRO_NOQUEUE	0
#define	CLOCKPRO_NEWQ		1	/* small queue to clear initial ref. */
#if defined(LISTQ)
#define	CLOCKPRO_COLDQ		2
#define	CLOCKPRO_HOTQ		3
#else /* defined(LISTQ) */
#define	CLOCKPRO_COLDQ		(2 + coldqidx)	/* XXX */
#define	CLOCKPRO_HOTQ		(3 - coldqidx)	/* XXX */
#endif /* defined(LISTQ) */
#define	CLOCKPRO_LISTQ		4
#define	CLOCKPRO_NQUEUE		4

static inline void
clockpro_setq(struct vm_page *pg, int qidx)
{
	KASSERT(qidx >= CLOCKPRO_NOQUEUE);
	KASSERT(qidx <= CLOCKPRO_NQUEUE);

	pg->pqflags = (pg->pqflags & ~PQ_QMASK) | (qidx * PQ_QFACTOR);
}

static inline int
clockpro_getq(struct vm_page *pg)
{
	int qidx;

	qidx = (pg->pqflags & PQ_QMASK) / PQ_QFACTOR;
	KASSERT(qidx >= CLOCKPRO_NOQUEUE);
	KASSERT(qidx <= CLOCKPRO_NQUEUE);
	return qidx;
}
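
/*
 * an illustrative example of the queue-index encoding above, assuming
 * PQ_PRIVATE5 == 0x0100 (so PQ_QMASK == 0x0700):
 *
 *	clockpro_setq(pg, CLOCKPRO_HOTQ)  stores 3 * 0x0100 into pqflags;
 *	clockpro_getq(pg)                 yields (pqflags & 0x0700) / 0x0100 == 3.
 *
 * the #error check above guarantees that PQ_PRIVATE5..7 are three
 * consecutive bits, so that the multiplication and division by
 * PQ_QFACTOR amount to shifts into and out of the field.
 */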

typedef struct {
	struct pglist q_q;
	int q_len;
} pageq_t;

struct clockpro_state {
	int s_npages;
	int s_coldtarget;
	int s_ncold;

	int s_newqlenmax;
	pageq_t s_q[CLOCKPRO_NQUEUE];

	struct uvm_pctparam s_coldtargetpct;
};

static pageq_t *
clockpro_queue(struct clockpro_state *s, int qidx)
{

	KASSERT(CLOCKPRO_NOQUEUE < qidx);
	KASSERT(qidx <= CLOCKPRO_NQUEUE);

	return &s->s_q[qidx - 1];
}

#if !defined(LISTQ)

static int coldqidx;

static void
clockpro_switchqueue(void)
{

	coldqidx = 1 - coldqidx;
}

#endif /* !defined(LISTQ) */

static struct clockpro_state clockpro;
static struct clockpro_scanstate {
	int ss_nscanned;
} scanstate;

/* ---------------------------------------- */

static void
pageq_init(pageq_t *q)
{

	TAILQ_INIT(&q->q_q);
	q->q_len = 0;
}

static int
pageq_len(const pageq_t *q)
{

	return q->q_len;
}

static struct vm_page *
pageq_first(const pageq_t *q)
{

	return TAILQ_FIRST(&q->q_q);
}

static void
pageq_insert_tail(pageq_t *q, struct vm_page *pg)
{

	TAILQ_INSERT_TAIL(&q->q_q, pg, pageq.queue);
	q->q_len++;
}

#if defined(LISTQ)
static void
pageq_insert_head(pageq_t *q, struct vm_page *pg)
{

	TAILQ_INSERT_HEAD(&q->q_q, pg, pageq.queue);
	q->q_len++;
}
#endif

static void
pageq_remove(pageq_t *q, struct vm_page *pg)
{

#if 1
	KASSERT(clockpro_queue(&clockpro, clockpro_getq(pg)) == q);
#endif
	KASSERT(q->q_len > 0);
	TAILQ_REMOVE(&q->q_q, pg, pageq.queue);
	q->q_len--;
}

static struct vm_page *
pageq_remove_head(pageq_t *q)
{
	struct vm_page *pg;

	pg = TAILQ_FIRST(&q->q_q);
	if (pg == NULL) {
		KASSERT(q->q_len == 0);
		return NULL;
	}
	pageq_remove(q, pg);
	return pg;
}

/* ---------------------------------------- */

static void
clockpro_insert_tail(struct clockpro_state *s, int qidx, struct vm_page *pg)
{
	pageq_t *q = clockpro_queue(s, qidx);

	clockpro_setq(pg, qidx);
	pageq_insert_tail(q, pg);
}

#if defined(LISTQ)
static void
clockpro_insert_head(struct clockpro_state *s, int qidx, struct vm_page *pg)
{
	pageq_t *q = clockpro_queue(s, qidx);

	clockpro_setq(pg, qidx);
	pageq_insert_head(q, pg);
}

#endif
/* ---------------------------------------- */

typedef uint32_t nonres_cookie_t;
#define	NONRES_COOKIE_INVAL	0

typedef uintptr_t objid_t;

/*
 * XXX maybe these hash functions need reconsideration,
 * given that hash distribution is critical here.
 */

static uint32_t
pageidentityhash1(objid_t obj, off_t idx)
{
	uint32_t hash = HASH32_BUF_INIT;

#if 1
	hash = hash32_buf(&idx, sizeof(idx), hash);
	hash = hash32_buf(&obj, sizeof(obj), hash);
#else
	hash = hash32_buf(&obj, sizeof(obj), hash);
	hash = hash32_buf(&idx, sizeof(idx), hash);
#endif
	return hash;
}

static uint32_t
pageidentityhash2(objid_t obj, off_t idx)
{
	uint32_t hash = HASH32_BUF_INIT;

	hash = hash32_buf(&obj, sizeof(obj), hash);
	hash = hash32_buf(&idx, sizeof(idx), hash);
	return hash;
}

static nonres_cookie_t
calccookie(objid_t obj, off_t idx)
{
	uint32_t hash = pageidentityhash2(obj, idx);
	nonres_cookie_t cookie = hash;

	if (__predict_false(cookie == NONRES_COOKIE_INVAL)) {
		cookie++; /* XXX */
	}
	return cookie;
}
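
/*
 * a rough sizing note (illustrative): clockpro_hashinit() below allocates
 * howmany(npages, BUCKETSIZE) * clockpro_hashfactor buckets, i.e. about
 * npages * CLOCKPRO_HASHFACTOR cookie slots in total, at the cost of one
 * 32-bit cookie per slot.  distinct pages can hash to the same cookie
 * (counted as nresconflict); the approximation tolerates this, since a
 * collision can at worst mistake a first access for a refault.
 */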

#define	BUCKETSIZE	14
struct bucket {
	int cycle;
	int cur;
	nonres_cookie_t pages[BUCKETSIZE];
};
static int cycle_target;
static int cycle_target_frac;

static struct bucket static_bucket;
static struct bucket *buckets = &static_bucket;
static size_t hashsize = 1;

static int coldadj;
#define	COLDTARGET_ADJ(d)	coldadj += (d)

#if defined(PDSIM)

static void *
clockpro_hashalloc(int n)
{
	size_t allocsz = sizeof(*buckets) * n;

	return malloc(allocsz);
}

static void
clockpro_hashfree(void *p, int n)
{

	free(p);
}

#else /* defined(PDSIM) */

static void *
clockpro_hashalloc(int n)
{
	size_t allocsz = round_page(sizeof(*buckets) * n);

	return (void *)uvm_km_alloc(kernel_map, allocsz, 0, UVM_KMF_WIRED);
}

static void
clockpro_hashfree(void *p, int n)
{
	size_t allocsz = round_page(sizeof(*buckets) * n);

	uvm_km_free(kernel_map, (vaddr_t)p, allocsz, UVM_KMF_WIRED);
}

#endif /* defined(PDSIM) */

static void
clockpro_hashinit(uint64_t n)
{
	struct bucket *newbuckets;
	struct bucket *oldbuckets;
	size_t sz;
	size_t oldsz;
	int i;

	sz = howmany(n, BUCKETSIZE);
	sz *= clockpro_hashfactor;
	newbuckets = clockpro_hashalloc(sz);
	if (newbuckets == NULL) {
		panic("%s: allocation failure", __func__);
	}
	for (i = 0; i < sz; i++) {
		struct bucket *b = &newbuckets[i];
		int j;

		b->cycle = cycle_target;
		b->cur = 0;
		for (j = 0; j < BUCKETSIZE; j++) {
			b->pages[j] = NONRES_COOKIE_INVAL;
		}
	}
	/* XXX lock */
	oldbuckets = buckets;
	oldsz = hashsize;
	buckets = newbuckets;
	hashsize = sz;
	/* XXX unlock */
	if (oldbuckets != &static_bucket) {
		clockpro_hashfree(oldbuckets, oldsz);
	}
}

static struct bucket *
nonresident_getbucket(objid_t obj, off_t idx)
{
	uint32_t hash;

	hash = pageidentityhash1(obj, idx);
	return &buckets[hash % hashsize];
}

static void
nonresident_rotate(struct bucket *b)
{
	const int target = cycle_target;
	const int cycle = b->cycle;
	int cur;
	int todo;

	todo = target - cycle;
	if (todo >= BUCKETSIZE * 2) {
		todo = (todo % BUCKETSIZE) + BUCKETSIZE;
	}
	cur = b->cur;
	while (todo > 0) {
		if (b->pages[cur] != NONRES_COOKIE_INVAL) {
			PDPOL_EVCNT_INCR(nreshandhot);
			COLDTARGET_ADJ(-1);
		}
		b->pages[cur] = NONRES_COOKIE_INVAL;
		cur++;
		if (cur == BUCKETSIZE) {
			cur = 0;
		}
		todo--;
	}
	b->cycle = target;
	b->cur = cur;
}
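
/*
 * a worked example of the clamp in nonresident_rotate() above (with
 * BUCKETSIZE == 14): a bucket which is 31 cycles behind the target gets
 * todo = 31 % 14 + 14 = 17.  the clamp bounds the work per call to less
 * than 2 * BUCKETSIZE slot clears while still performing at least one
 * full sweep, which is equivalent to any longer rotation because a full
 * sweep already invalidates every cookie in the ring.
 */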

static bool
nonresident_lookupremove(objid_t obj, off_t idx)
{
	struct bucket *b = nonresident_getbucket(obj, idx);
	nonres_cookie_t cookie = calccookie(obj, idx);
	int i;

	nonresident_rotate(b);
	for (i = 0; i < BUCKETSIZE; i++) {
		if (b->pages[i] == cookie) {
			b->pages[i] = NONRES_COOKIE_INVAL;
			return true;
		}
	}
	return false;
}

static objid_t
pageobj(struct vm_page *pg)
{
	const void *obj;

	/*
	 * XXX the object pointer is often freed and reused for an
	 * unrelated object.  for vnodes, it would be better to use
	 * something like a hash of fsid/fileid/generation.
	 */

	obj = pg->uobject;
	if (obj == NULL) {
		obj = pg->uanon;
		KASSERT(obj != NULL);
	}
	return (objid_t)obj;
}

static off_t
pageidx(struct vm_page *pg)
{

	KASSERT((pg->offset & PAGE_MASK) == 0);
	return pg->offset >> PAGE_SHIFT;
}

static bool
nonresident_pagelookupremove(struct vm_page *pg)
{
	bool found = nonresident_lookupremove(pageobj(pg), pageidx(pg));

	if (pg->uobject) {
		PDPOL_EVCNT_INCR(nreslookupobj);
	} else {
		PDPOL_EVCNT_INCR(nreslookupanon);
	}
	if (found) {
		if (pg->uobject) {
			PDPOL_EVCNT_INCR(nresfoundobj);
		} else {
			PDPOL_EVCNT_INCR(nresfoundanon);
		}
	}
	return found;
}

static void
nonresident_pagerecord(struct vm_page *pg)
{
	objid_t obj = pageobj(pg);
	off_t idx = pageidx(pg);
	struct bucket *b = nonresident_getbucket(obj, idx);
	nonres_cookie_t cookie = calccookie(obj, idx);

#if defined(DEBUG)
	int i;

	for (i = 0; i < BUCKETSIZE; i++) {
		if (b->pages[i] == cookie) {
			PDPOL_EVCNT_INCR(nresconflict);
		}
	}
#endif /* defined(DEBUG) */

	if (pg->uobject) {
		PDPOL_EVCNT_INCR(nresrecordobj);
	} else {
		PDPOL_EVCNT_INCR(nresrecordanon);
	}
	nonresident_rotate(b);
	if (b->pages[b->cur] != NONRES_COOKIE_INVAL) {
		PDPOL_EVCNT_INCR(nresoverwritten);
		COLDTARGET_ADJ(-1);
	}
	b->pages[b->cur] = cookie;
	b->cur = (b->cur + 1) % BUCKETSIZE;
}

/* ---------------------------------------- */

#if defined(CLOCKPRO_DEBUG)
static void
check_sanity(void)
{
}
#else /* defined(CLOCKPRO_DEBUG) */
#define	check_sanity()	/* nothing */
#endif /* defined(CLOCKPRO_DEBUG) */

static void
clockpro_reinit(void)
{

	clockpro_hashinit(uvmexp.npages);
}

static void
clockpro_init(void)
{
	struct clockpro_state *s = &clockpro;
	int i;

	for (i = 0; i < CLOCKPRO_NQUEUE; i++) {
		pageq_init(&s->s_q[i]);
	}
	s->s_newqlenmax = 1;
	s->s_coldtarget = 1;
	uvm_pctparam_init(&s->s_coldtargetpct, CLOCKPRO_COLDPCT, NULL);
}

static void
clockpro_tune(void)
{
	struct clockpro_state *s = &clockpro;
	int coldtarget;

#if defined(ADAPTIVE)
	int coldmax = s->s_npages * CLOCKPRO_COLDPCTMAX / 100;
	int coldmin = 1;

	coldtarget = s->s_coldtarget;
	if (coldtarget + coldadj < coldmin) {
		coldadj = coldmin - coldtarget;
	} else if (coldtarget + coldadj > coldmax) {
		coldadj = coldmax - coldtarget;
	}
	coldtarget += coldadj;
#else /* defined(ADAPTIVE) */
	coldtarget = UVM_PCTPARAM_APPLY(&s->s_coldtargetpct, s->s_npages);
	if (coldtarget < 1) {
		coldtarget = 1;
	}
#endif /* defined(ADAPTIVE) */

	s->s_coldtarget = coldtarget;
	s->s_newqlenmax = coldtarget / 4;
	if (s->s_newqlenmax < CLOCKPRO_NEWQMIN) {
		s->s_newqlenmax = CLOCKPRO_NEWQMIN;
	}
}
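
/*
 * a worked example of the tuning above (illustrative, assuming 4KB pages
 * and s_npages == 100000): with the default CLOCKPRO_COLDPCT of 10,
 * coldtarget = 10000 pages and s_newqlenmax = 10000 / 4 = 2500, well
 * above CLOCKPRO_NEWQMIN (1MB worth of pages, i.e. 256).  on very small
 * systems the CLOCKPRO_NEWQMIN floor dominates instead.
 */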

static void
clockpro_movereferencebit(struct vm_page *pg, bool locked)
{
	kmutex_t *lock;
	bool referenced;

	KASSERT(!locked || uvm_page_locked_p(pg));
	if (!locked) {
		lock = uvmpd_trylockowner(pg);
		if (lock == NULL) {
			/*
			 * XXXuvmplock
			 */
			PDPOL_EVCNT_INCR(lockfail);
			return;
		}
		PDPOL_EVCNT_INCR(locksuccess);
	}
	referenced = pmap_clear_reference(pg);
	if (!locked) {
		mutex_exit(lock);
	}
	if (referenced) {
		pg->pqflags |= PQ_REFERENCED;
	}
}

static void
clockpro_clearreferencebit(struct vm_page *pg, bool locked)
{

	clockpro_movereferencebit(pg, locked);
	pg->pqflags &= ~PQ_REFERENCED;
}

static void
clockpro___newqrotate(int len)
{
	struct clockpro_state * const s = &clockpro;
	pageq_t * const newq = clockpro_queue(s, CLOCKPRO_NEWQ);
	struct vm_page *pg;

	while (pageq_len(newq) > len) {
		pg = pageq_remove_head(newq);
		KASSERT(pg != NULL);
		KASSERT(clockpro_getq(pg) == CLOCKPRO_NEWQ);
		if ((pg->pqflags & PQ_INITIALREF) != 0) {
			clockpro_clearreferencebit(pg, false);
			pg->pqflags &= ~PQ_INITIALREF;
		}
		/* place at the list head */
		clockpro_insert_tail(s, CLOCKPRO_COLDQ, pg);
	}
}

static void
clockpro_newqrotate(void)
{
	struct clockpro_state * const s = &clockpro;

	check_sanity();
	clockpro___newqrotate(s->s_newqlenmax);
	check_sanity();
}

static void
clockpro_newqflush(int n)
{

	check_sanity();
	clockpro___newqrotate(n);
	check_sanity();
}

static void
clockpro_newqflushone(void)
{
	struct clockpro_state * const s = &clockpro;

	clockpro_newqflush(
	    MAX(pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)) - 1, 0));
}

/*
 * our "tail" is called "list-head" in the paper.
 */

static void
clockpro___enqueuetail(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;

	KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);

	check_sanity();
#if !defined(USEONCE2)
	clockpro_insert_tail(s, CLOCKPRO_NEWQ, pg);
	clockpro_newqrotate();
#else /* !defined(USEONCE2) */
#if defined(LISTQ)
	KASSERT((pg->pqflags & PQ_REFERENCED) == 0);
#endif /* defined(LISTQ) */
	clockpro_insert_tail(s, CLOCKPRO_COLDQ, pg);
#endif /* !defined(USEONCE2) */
	check_sanity();
}

static void
clockpro_pageenqueue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	bool hot;
	bool speculative = (pg->pqflags & PQ_SPECULATIVE) != 0; /* XXX */

	KASSERT((~pg->pqflags & (PQ_INITIALREF|PQ_SPECULATIVE)) != 0);
	KASSERT(mutex_owned(&uvm_pageqlock));
	check_sanity();
	KASSERT(clockpro_getq(pg) == CLOCKPRO_NOQUEUE);
	s->s_npages++;
	pg->pqflags &= ~(PQ_HOT|PQ_TEST);
	if (speculative) {
		hot = false;
		PDPOL_EVCNT_INCR(speculativeenqueue);
	} else {
		hot = nonresident_pagelookupremove(pg);
		if (hot) {
			COLDTARGET_ADJ(1);
		}
	}

	/*
	 * consider mmap'ed file:
	 *
	 * - read-ahead enqueues a page.
	 *
	 * - on the following read-ahead hit, the fault handler activates it.
	 *
	 * - finally, the userland code which caused the above fault
	 *   actually accesses the page.  this sets its reference bit.
	 *
	 * we want to count the above as a single access, rather than
	 * three accesses with short reuse distances.
	 */
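
	/*
	 * an illustrative timeline of the above (hypothetical events):
	 *
	 *	t0: read-ahead enqueues the page	-> PQ_SPECULATIVE
	 *	t1: fault handler activates it		-> PQ_INITIALREF
	 *	t2: userland touches the page		-> pmap reference bit
	 *
	 * because the NEWQ defers clearing the initial reference until
	 * the page is rotated out of it, the burst t0..t2 collapses
	 * into a single recorded access.
	 */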

#if defined(USEONCE2)
	pg->pqflags &= ~PQ_INITIALREF;
	if (hot) {
		pg->pqflags |= PQ_TEST;
	}
	s->s_ncold++;
	clockpro_clearreferencebit(pg, false);
	clockpro___enqueuetail(pg);
#else /* defined(USEONCE2) */
	if (speculative) {
		s->s_ncold++;
	} else if (hot) {
		pg->pqflags |= PQ_HOT;
	} else {
		pg->pqflags |= PQ_TEST;
		s->s_ncold++;
	}
	clockpro___enqueuetail(pg);
#endif /* defined(USEONCE2) */
	KASSERT(s->s_ncold <= s->s_npages);
}

static pageq_t *
clockpro_pagequeue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	int qidx;

	qidx = clockpro_getq(pg);
	KASSERT(qidx != CLOCKPRO_NOQUEUE);

	return clockpro_queue(s, qidx);
}

static void
clockpro_pagedequeue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	pageq_t *q;

	KASSERT(s->s_npages > 0);
	check_sanity();
	q = clockpro_pagequeue(pg);
	pageq_remove(q, pg);
	check_sanity();
	clockpro_setq(pg, CLOCKPRO_NOQUEUE);
	if ((pg->pqflags & PQ_HOT) == 0) {
		KASSERT(s->s_ncold > 0);
		s->s_ncold--;
	}
	KASSERT(s->s_npages > 0);
	s->s_npages--;
	check_sanity();
}

static void
clockpro_pagerequeue(struct vm_page *pg)
{
	struct clockpro_state * const s = &clockpro;
	int qidx;

	qidx = clockpro_getq(pg);
	KASSERT(qidx == CLOCKPRO_HOTQ || qidx == CLOCKPRO_COLDQ);
	pageq_remove(clockpro_queue(s, qidx), pg);
	check_sanity();
	clockpro_setq(pg, CLOCKPRO_NOQUEUE);

	clockpro___enqueuetail(pg);
}

static void
handhot_endtest(struct vm_page *pg)
{

	KASSERT((pg->pqflags & PQ_HOT) == 0);
	if ((pg->pqflags & PQ_TEST) != 0) {
		PDPOL_EVCNT_INCR(hhotcoldtest);
		COLDTARGET_ADJ(-1);
		pg->pqflags &= ~PQ_TEST;
	} else {
		PDPOL_EVCNT_INCR(hhotcold);
	}
}

static void
handhot_advance(void)
{
	struct clockpro_state * const s = &clockpro;
	struct vm_page *pg;
	pageq_t *hotq;
	int hotqlen;

	clockpro_tune();

	dump("hot called");
	if (s->s_ncold >= s->s_coldtarget) {
		return;
	}
	hotq = clockpro_queue(s, CLOCKPRO_HOTQ);
again:
	pg = pageq_first(hotq);
	if (pg == NULL) {
		DPRINTF("%s: HHOT TAKEOVER\n", __func__);
		dump("hhottakeover");
		PDPOL_EVCNT_INCR(hhottakeover);
#if defined(LISTQ)
		while (/* CONSTCOND */ 1) {
			pageq_t *coldq = clockpro_queue(s, CLOCKPRO_COLDQ);

			pg = pageq_first(coldq);
			if (pg == NULL) {
				clockpro_newqflushone();
				pg = pageq_first(coldq);
				if (pg == NULL) {
					WARN("hhot: no page?\n");
					return;
				}
			}
			KASSERT(clockpro_pagequeue(pg) == coldq);
			pageq_remove(coldq, pg);
			check_sanity();
			if ((pg->pqflags & PQ_HOT) == 0) {
				handhot_endtest(pg);
				clockpro_insert_tail(s, CLOCKPRO_LISTQ, pg);
			} else {
				clockpro_insert_head(s, CLOCKPRO_HOTQ, pg);
				break;
			}
		}
#else /* defined(LISTQ) */
		clockpro_newqflush(0); /* XXX XXX */
		clockpro_switchqueue();
		hotq = clockpro_queue(s, CLOCKPRO_HOTQ);
		goto again;
#endif /* defined(LISTQ) */
	}

	KASSERT(clockpro_pagequeue(pg) == hotq);

	/*
	 * terminate test period of nonresident pages by cycling them.
	 */

	cycle_target_frac += BUCKETSIZE;
	hotqlen = pageq_len(hotq);
	while (cycle_target_frac >= hotqlen) {
		cycle_target++;
		cycle_target_frac -= hotqlen;
	}
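
	/*
	 * an illustrative rate check of the above: each pass here adds
	 * BUCKETSIZE to cycle_target_frac, so cycle_target advances by
	 * BUCKETSIZE per full lap of the hot queue.  nonresident_rotate()
	 * clears one slot per cycle, hence every bucket's ring makes
	 * exactly one full turn per hot-hand lap; non-resident pages are
	 * forgotten at the speed with which the hot hand revisits
	 * resident pages.
	 */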

	if ((pg->pqflags & PQ_HOT) == 0) {
#if defined(LISTQ)
		panic("cold page in hotq: %p", pg);
#else /* defined(LISTQ) */
		handhot_endtest(pg);
		goto next;
#endif /* defined(LISTQ) */
	}
	KASSERT((pg->pqflags & PQ_TEST) == 0);
	KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
	KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);

	/*
	 * once we meet our target, stop at a hot page so that no cold
	 * pages in their test period have larger recency than any hot
	 * pages.
	 */

	if (s->s_ncold >= s->s_coldtarget) {
		dump("hot done");
		return;
	}
	clockpro_movereferencebit(pg, false);
	if ((pg->pqflags & PQ_REFERENCED) == 0) {
		PDPOL_EVCNT_INCR(hhotunref);
		uvmexp.pddeact++;
		pg->pqflags &= ~PQ_HOT;
		clockpro.s_ncold++;
		KASSERT(s->s_ncold <= s->s_npages);
	} else {
		PDPOL_EVCNT_INCR(hhotref);
	}
	pg->pqflags &= ~PQ_REFERENCED;
#if !defined(LISTQ)
next:
#endif /* !defined(LISTQ) */
	clockpro_pagerequeue(pg);
	dump("hot");
	goto again;
}

static struct vm_page *
handcold_advance(void)
{
	struct clockpro_state * const s = &clockpro;
	struct vm_page *pg;

	for (;;) {
#if defined(LISTQ)
		pageq_t *listq = clockpro_queue(s, CLOCKPRO_LISTQ);
#endif /* defined(LISTQ) */
		pageq_t *coldq;

		clockpro_newqrotate();
		handhot_advance();
#if defined(LISTQ)
		pg = pageq_first(listq);
		if (pg != NULL) {
			KASSERT(clockpro_getq(pg) == CLOCKPRO_LISTQ);
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			KASSERT((pg->pqflags & PQ_HOT) == 0);
			KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
			pageq_remove(listq, pg);
			check_sanity();
			clockpro_insert_head(s, CLOCKPRO_COLDQ, pg); /* XXX */
			goto gotcold;
		}
#endif /* defined(LISTQ) */
		check_sanity();
		coldq = clockpro_queue(s, CLOCKPRO_COLDQ);
		pg = pageq_first(coldq);
		if (pg == NULL) {
			clockpro_newqflushone();
			pg = pageq_first(coldq);
		}
		if (pg == NULL) {
			DPRINTF("%s: HCOLD TAKEOVER\n", __func__);
			dump("hcoldtakeover");
			PDPOL_EVCNT_INCR(hcoldtakeover);
			KASSERT(
			    pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)) == 0);
#if defined(LISTQ)
			KASSERT(
			    pageq_len(clockpro_queue(s, CLOCKPRO_HOTQ)) == 0);
#else /* defined(LISTQ) */
			clockpro_switchqueue();
			coldq = clockpro_queue(s, CLOCKPRO_COLDQ);
			pg = pageq_first(coldq);
#endif /* defined(LISTQ) */
		}
		if (pg == NULL) {
			WARN("hcold: no page?\n");
			return NULL;
		}
		KASSERT((pg->pqflags & PQ_INITIALREF) == 0);
		if ((pg->pqflags & PQ_HOT) != 0) {
			PDPOL_EVCNT_INCR(hcoldhot);
			pageq_remove(coldq, pg);
			clockpro_insert_tail(s, CLOCKPRO_HOTQ, pg);
			check_sanity();
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			uvmexp.pdscans++;
			continue;
		}
#if defined(LISTQ)
gotcold:
#endif /* defined(LISTQ) */
		KASSERT((pg->pqflags & PQ_HOT) == 0);
		uvmexp.pdscans++;
		clockpro_movereferencebit(pg, false);
		if ((pg->pqflags & PQ_SPECULATIVE) != 0) {
			KASSERT((pg->pqflags & PQ_TEST) == 0);
			if ((pg->pqflags & PQ_REFERENCED) != 0) {
				PDPOL_EVCNT_INCR(speculativehit2);
				pg->pqflags &= ~(PQ_SPECULATIVE|PQ_REFERENCED);
				clockpro_pagedequeue(pg);
				clockpro_pageenqueue(pg);
				continue;
			}
			PDPOL_EVCNT_INCR(speculativemiss);
		}
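
		/*
		 * decision table for the checks below (an informal
		 * summary of the switch statement that follows):
		 *
		 *	REFERENCED  TEST   action
		 *	    no       no    evict
		 *	    no      yes    evict, remember as non-resident
		 *	   yes       no    stay cold, start test period
		 *	   yes      yes    promote to hot, grow cold target
		 */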

		switch (pg->pqflags & (PQ_REFERENCED|PQ_TEST)) {
		case PQ_TEST:
			PDPOL_EVCNT_INCR(hcoldunreftest);
			nonresident_pagerecord(pg);
			goto gotit;
		case 0:
			PDPOL_EVCNT_INCR(hcoldunref);
gotit:
			KASSERT(s->s_ncold > 0);
			clockpro_pagerequeue(pg); /* XXX */
			dump("cold done");
			/* XXX "pg" is still in queue */
			handhot_advance();
			goto done;

		case PQ_REFERENCED|PQ_TEST:
			PDPOL_EVCNT_INCR(hcoldreftest);
			s->s_ncold--;
			COLDTARGET_ADJ(1);
			pg->pqflags |= PQ_HOT;
			pg->pqflags &= ~PQ_TEST;
			break;

		case PQ_REFERENCED:
			PDPOL_EVCNT_INCR(hcoldref);
			pg->pqflags |= PQ_TEST;
			break;
		}
		pg->pqflags &= ~PQ_REFERENCED;
		uvmexp.pdreact++;
		/* move to the list head */
		clockpro_pagerequeue(pg);
		dump("cold");
	}
done:;
	return pg;
}

void
uvmpdpol_pageactivate(struct vm_page *pg)
{

	if (!uvmpdpol_pageisqueued_p(pg)) {
		KASSERT((pg->pqflags & PQ_SPECULATIVE) == 0);
		pg->pqflags |= PQ_INITIALREF;
		clockpro_pageenqueue(pg);
	} else if ((pg->pqflags & PQ_SPECULATIVE)) {
		PDPOL_EVCNT_INCR(speculativehit1);
		pg->pqflags &= ~PQ_SPECULATIVE;
		pg->pqflags |= PQ_INITIALREF;
		clockpro_pagedequeue(pg);
		clockpro_pageenqueue(pg);
	}
	pg->pqflags |= PQ_REFERENCED;
}

void
uvmpdpol_pagedeactivate(struct vm_page *pg)
{

	clockpro_clearreferencebit(pg, true);
}

void
uvmpdpol_pagedequeue(struct vm_page *pg)
{

	if (!uvmpdpol_pageisqueued_p(pg)) {
		return;
	}
	clockpro_pagedequeue(pg);
	pg->pqflags &= ~(PQ_INITIALREF|PQ_SPECULATIVE);
}

void
uvmpdpol_pageenqueue(struct vm_page *pg)
{

#if 1
	if (uvmpdpol_pageisqueued_p(pg)) {
		return;
	}
	clockpro_clearreferencebit(pg, true);
	pg->pqflags |= PQ_SPECULATIVE;
	clockpro_pageenqueue(pg);
#else
	uvmpdpol_pageactivate(pg);
#endif
}

void
uvmpdpol_anfree(struct vm_anon *an)
{

	KASSERT(an->an_page == NULL);
	if (nonresident_lookupremove((objid_t)an, 0)) {
		PDPOL_EVCNT_INCR(nresanonfree);
	}
}

void
uvmpdpol_init(void)
{

	clockpro_init();
}

void
uvmpdpol_reinit(void)
{

	clockpro_reinit();
}

void
uvmpdpol_estimatepageable(int *active, int *inactive)
{
	struct clockpro_state * const s = &clockpro;

	if (active) {
		*active = s->s_npages - s->s_ncold;
	}
	if (inactive) {
		*inactive = s->s_ncold;
	}
}

bool
uvmpdpol_pageisqueued_p(struct vm_page *pg)
{

	return clockpro_getq(pg) != CLOCKPRO_NOQUEUE;
}
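
/*
 * a rough sketch of how the page daemon is expected to drive the
 * scan-time entry points below (not the authoritative caller; see
 * uvm_pdaemon.c):
 *
 *	uvmpdpol_scaninit();
 *	while (free memory is short) {
 *		pg = uvmpdpol_selectvictim();
 *		if (pg == NULL)
 *			break;
 *		... try to lock, clean and free pg ...
 *	}
 */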

void
uvmpdpol_scaninit(void)
{
	struct clockpro_scanstate * const ss = &scanstate;

	ss->ss_nscanned = 0;
}

struct vm_page *
uvmpdpol_selectvictim(void)
{
	struct clockpro_state * const s = &clockpro;
	struct clockpro_scanstate * const ss = &scanstate;
	struct vm_page *pg;

	if (ss->ss_nscanned > s->s_npages) {
		DPRINTF("scan too much\n");
		return NULL;
	}
	pg = handcold_advance();
	ss->ss_nscanned++;
	return pg;
}

static void
clockpro_dropswap(pageq_t *q, int *todo)
{
	struct vm_page *pg;

	TAILQ_FOREACH_REVERSE(pg, &q->q_q, pglist, pageq.queue) {
		if (*todo <= 0) {
			break;
		}
		if ((pg->pqflags & PQ_HOT) == 0) {
			continue;
		}
		if ((pg->pqflags & PQ_SWAPBACKED) == 0) {
			continue;
		}
		if (uvmpd_trydropswap(pg)) {
			(*todo)--;
		}
	}
}
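
/*
 * note on uvmpdpol_balancequeue() below: swap slots are reclaimed from
 * hot pages only, and each queue is walked tail-first
 * (TAILQ_FOREACH_REVERSE), so the most recently (re)queued pages are
 * tried first.  the queues are visited in the order NEWQ, COLDQ, HOTQ.
 */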

void
uvmpdpol_balancequeue(int swap_shortage)
{
	struct clockpro_state * const s = &clockpro;
	int todo = swap_shortage;

	if (todo == 0) {
		return;
	}

	/*
	 * reclaim swap slots from hot pages
	 */

	DPRINTF("%s: swap_shortage=%d\n", __func__, swap_shortage);

	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_NEWQ), &todo);
	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_COLDQ), &todo);
	clockpro_dropswap(clockpro_queue(s, CLOCKPRO_HOTQ), &todo);

	DPRINTF("%s: done=%d\n", __func__, swap_shortage - todo);
}

bool
uvmpdpol_needsscan_p(void)
{
	struct clockpro_state * const s = &clockpro;

	if (s->s_ncold < s->s_coldtarget) {
		return true;
	}
	return false;
}

void
uvmpdpol_tune(void)
{

	clockpro_tune();
}

#if !defined(PDSIM)

#include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */

void
uvmpdpol_sysctlsetup(void)
{
#if !defined(ADAPTIVE)
	struct clockpro_state * const s = &clockpro;

	uvm_pctparam_createsysctlnode(&s->s_coldtargetpct, "coldtargetpct",
	    SYSCTL_DESCR("Percentage of the entire queue to keep as the "
	    "cold target queue"));
#endif /* !defined(ADAPTIVE) */
}

#endif /* !defined(PDSIM) */

#if defined(DDB)

#if 0 /* XXXuvmplock */
#define	_pmap_is_referenced(pg)	pmap_is_referenced(pg)
#else
#define	_pmap_is_referenced(pg)	false
#endif

void clockpro_dump(void);

void
clockpro_dump(void)
{
	struct clockpro_state * const s = &clockpro;

	struct vm_page *pg;
	int ncold, nhot, ntest, nspeculative, ninitialref, nref;
	int newqlen, coldqlen, hotqlen, listqlen;

	newqlen = coldqlen = hotqlen = listqlen = 0;
	printf("npages=%d, ncold=%d, coldtarget=%d, newqlenmax=%d\n",
	    s->s_npages, s->s_ncold, s->s_coldtarget, s->s_newqlenmax);

#define	INITCOUNT()	\
	ncold = nhot = ntest = nspeculative = ninitialref = nref = 0

#define	COUNT(pg)	\
	if ((pg->pqflags & PQ_HOT) != 0) { \
		nhot++; \
	} else { \
		ncold++; \
		if ((pg->pqflags & PQ_TEST) != 0) { \
			ntest++; \
		} \
		if ((pg->pqflags & PQ_SPECULATIVE) != 0) { \
			nspeculative++; \
		} \
		if ((pg->pqflags & PQ_INITIALREF) != 0) { \
			ninitialref++; \
		} else if ((pg->pqflags & PQ_REFERENCED) != 0 || \
		    _pmap_is_referenced(pg)) { \
			nref++; \
		} \
	}

#define	PRINTCOUNT(name)	\
	printf("%s hot=%d, cold=%d, test=%d, speculative=%d, initialref=%d, " \
	    "nref=%d\n", \
	    (name), nhot, ncold, ntest, nspeculative, ninitialref, nref)

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_NEWQ)->q_q, pageq.queue) {
		if (clockpro_getq(pg) != CLOCKPRO_NEWQ) {
			printf("newq corrupt %p\n", pg);
		}
		COUNT(pg)
		newqlen++;
	}
	PRINTCOUNT("newq");

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_COLDQ)->q_q, pageq.queue) {
		if (clockpro_getq(pg) != CLOCKPRO_COLDQ) {
			printf("coldq corrupt %p\n", pg);
		}
		COUNT(pg)
		coldqlen++;
	}
	PRINTCOUNT("coldq");

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_HOTQ)->q_q, pageq.queue) {
		if (clockpro_getq(pg) != CLOCKPRO_HOTQ) {
			printf("hotq corrupt %p\n", pg);
		}
#if defined(LISTQ)
		if ((pg->pqflags & PQ_HOT) == 0) {
			printf("cold page in hotq: %p\n", pg);
		}
#endif /* defined(LISTQ) */
		COUNT(pg)
		hotqlen++;
	}
	PRINTCOUNT("hotq");

	INITCOUNT();
	TAILQ_FOREACH(pg, &clockpro_queue(s, CLOCKPRO_LISTQ)->q_q, pageq.queue) {
#if !defined(LISTQ)
		printf("listq %p\n", pg);
#endif /* !defined(LISTQ) */
		if (clockpro_getq(pg) != CLOCKPRO_LISTQ) {
			printf("listq corrupt %p\n", pg);
		}
		COUNT(pg)
		listqlen++;
	}
	PRINTCOUNT("listq");

	printf("newqlen=%d/%d, coldqlen=%d/%d, hotqlen=%d/%d, listqlen=%d/%d\n",
	    newqlen, pageq_len(clockpro_queue(s, CLOCKPRO_NEWQ)),
	    coldqlen, pageq_len(clockpro_queue(s, CLOCKPRO_COLDQ)),
	    hotqlen, pageq_len(clockpro_queue(s, CLOCKPRO_HOTQ)),
	    listqlen, pageq_len(clockpro_queue(s, CLOCKPRO_LISTQ)));
}

#endif /* defined(DDB) */

#if defined(PDSIM)
#if defined(DEBUG)
static void
pdsim_dumpq(int qidx)
{
	struct clockpro_state * const s = &clockpro;
	pageq_t *q = clockpro_queue(s, qidx);
	struct vm_page *pg;

	TAILQ_FOREACH(pg, &q->q_q, pageq.queue) {
		DPRINTF(" %" PRIu64 "%s%s%s%s%s%s",
		    pg->offset >> PAGE_SHIFT,
		    (pg->pqflags & PQ_HOT) ? "H" : "",
		    (pg->pqflags & PQ_TEST) ? "T" : "",
		    (pg->pqflags & PQ_REFERENCED) ? "R" : "",
		    _pmap_is_referenced(pg) ? "r" : "",
		    (pg->pqflags & PQ_INITIALREF) ? "I" : "",
		    (pg->pqflags & PQ_SPECULATIVE) ? "S" : ""
		    );
	}
}
#endif /* defined(DEBUG) */

void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	struct clockpro_state * const s = &clockpro;

	DPRINTF(" %s L(", id);
	pdsim_dumpq(CLOCKPRO_LISTQ);
	DPRINTF(" ) H(");
	pdsim_dumpq(CLOCKPRO_HOTQ);
	DPRINTF(" ) C(");
	pdsim_dumpq(CLOCKPRO_COLDQ);
	DPRINTF(" ) N(");
	pdsim_dumpq(CLOCKPRO_NEWQ);
	DPRINTF(" ) ncold=%d/%d, coldadj=%d\n",
	    s->s_ncold, s->s_coldtarget, coldadj);
#endif /* defined(DEBUG) */
}
#endif /* defined(PDSIM) */