/*	$NetBSD: uvm_page.c,v 1.175 2011/06/15 19:46:11 rmind Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.175 2011/06/15 19:46:11 rmind Exp $");

#include "opt_ddb.h"
#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/atomic.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */
#define	vm_nphysmem	vm_nphysseg

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to enable it from machdep code here.
 */
bool vm_page_zero_enable = false;

/*
 * number of pages per-CPU to reserve for the kernel.
 */
int vm_page_reserve_kernel = 5;

/*
 * physical memory size;
 */
int physmem;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t virtual_space_start;
static vaddr_t virtual_space_end;

/*
 * we allocate an initial number of page colors in uvm_page_init(),
 * and remember them.  We may re-color pages as cache sizes are
 * discovered during the autoconfiguration phase.  But we can never
 * free the initial set of buckets, since they are allocated using
 * uvm_pageboot_alloc().
 */

static bool have_recolored_pages /* = false */;

MALLOC_DEFINE(M_VMPAGE, "VM page", "VM page");

#ifdef DEBUG
vaddr_t uvm_zerocheckkva;
#endif /* DEBUG */

/*
 * local prototypes
 */

static void uvm_pageinsert(struct uvm_object *, struct vm_page *);
static void uvm_pageremove(struct uvm_object *, struct vm_page *);

/*
 * per-object tree of pages
 */

static signed int
uvm_page_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const struct vm_page *pg1 = n1;
	const struct vm_page *pg2 = n2;
	const voff_t a = pg1->offset;
	const voff_t b = pg2->offset;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static signed int
uvm_page_compare_key(void *ctx, const void *n, const void *key)
{
	const struct vm_page *pg = n;
	const voff_t a = pg->offset;
	const voff_t b = *(const voff_t *)key;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

const rb_tree_ops_t uvm_page_tree_ops = {
	.rbto_compare_nodes = uvm_page_compare_nodes,
	.rbto_compare_key = uvm_page_compare_key,
	.rbto_node_offset = offsetof(struct vm_page, rb_node),
	.rbto_context = NULL
};
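
/*
 * uvm_pagelookup() below walks this tree with rb_tree_find_node(),
 * keyed by the page's offset within its owning object.
 */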

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object.
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

static inline void
uvm_pageinsert_list(struct uvm_object *uobj, struct vm_page *pg,
    struct vm_page *where)
{

	KASSERT(uobj == pg->uobject);
	KASSERT(mutex_owned(uobj->vmobjlock));
	KASSERT((pg->flags & PG_TABLED) == 0);
	KASSERT(where == NULL || (where->flags & PG_TABLED));
	KASSERT(where == NULL || (where->uobject == uobj));

	if (UVM_OBJ_IS_VNODE(uobj)) {
		if (uobj->uo_npages == 0) {
			struct vnode *vp = (struct vnode *)uobj;

			vholdl(vp);
		}
		if (UVM_OBJ_IS_VTEXT(uobj)) {
			atomic_inc_uint(&uvmexp.execpages);
		} else {
			atomic_inc_uint(&uvmexp.filepages);
		}
	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
		atomic_inc_uint(&uvmexp.anonpages);
	}

	if (where)
		TAILQ_INSERT_AFTER(&uobj->memq, where, pg, listq.queue);
	else
		TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
	pg->flags |= PG_TABLED;
	uobj->uo_npages++;
}


static inline void
uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
{
	struct vm_page *ret;

	KASSERT(uobj == pg->uobject);
	ret = rb_tree_insert_node(&uobj->rb_tree, pg);
	KASSERT(ret == pg);
}

static inline void
uvm_pageinsert(struct uvm_object *uobj, struct vm_page *pg)
{

	KDASSERT(uobj != NULL);
	uvm_pageinsert_tree(uobj, pg);
	uvm_pageinsert_list(uobj, pg, NULL);
}

/*
 * uvm_page_remove: remove page from object.
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static inline void
uvm_pageremove_list(struct uvm_object *uobj, struct vm_page *pg)
{

	KASSERT(uobj == pg->uobject);
	KASSERT(mutex_owned(uobj->vmobjlock));
	KASSERT(pg->flags & PG_TABLED);

	if (UVM_OBJ_IS_VNODE(uobj)) {
		if (uobj->uo_npages == 1) {
			struct vnode *vp = (struct vnode *)uobj;

			holdrelel(vp);
		}
		if (UVM_OBJ_IS_VTEXT(uobj)) {
			atomic_dec_uint(&uvmexp.execpages);
		} else {
			atomic_dec_uint(&uvmexp.filepages);
		}
	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
		atomic_dec_uint(&uvmexp.anonpages);
	}

	/* object should be locked */
	uobj->uo_npages--;
	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
	pg->flags &= ~PG_TABLED;
	pg->uobject = NULL;
}

static inline void
uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg)
{

	KASSERT(uobj == pg->uobject);
	rb_tree_remove_node(&uobj->rb_tree, pg);
}

static inline void
uvm_pageremove(struct uvm_object *uobj, struct vm_page *pg)
{

	KDASSERT(uobj != NULL);
	uvm_pageremove_tree(uobj, pg);
	uvm_pageremove_list(uobj, pg);
}

static void
uvm_page_init_buckets(struct pgfreelist *pgfl)
{
	int color, i;

	for (color = 0; color < uvmexp.ncolors; color++) {
		for (i = 0; i < PGFL_NQUEUES; i++) {
			LIST_INIT(&pgfl->pgfl_buckets[color].pgfl_queues[i]);
		}
	}
}

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
	static struct uvm_cpu boot_cpu;
	psize_t freepages, pagecount, bucketcount, n;
	struct pgflbucket *bucketarray, *cpuarray;
	struct vm_physseg *seg;
	struct vm_page *pagearray;
	int lcv;
	u_int i;
	paddr_t paddr;

	KASSERT(ncpu <= 1);
	CTASSERT(sizeof(pagearray->offset) >= sizeof(struct uvm_cpu *));

	/*
	 * init the page queues and page queue locks, except the free
	 * list; we allocate that later (with the initial vm_page
	 * structures).
	 */

	uvm.cpus[0] = &boot_cpu;
	curcpu()->ci_data.cpu_uvm = &boot_cpu;
	uvm_reclaim_init();
	uvmpdpol_init();
	mutex_init(&uvm_pageqlock, MUTEX_DRIVER, IPL_NONE);
	mutex_init(&uvm_fpageqlock, MUTEX_DRIVER, IPL_VM);

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysmem == 0)
		panic("uvm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
		seg = VM_PHYSMEM_PTR(lcv);
		freepages += (seg->end - seg->start);
	}

	/*
	 * Let MD code initialize the number of colors, or default
	 * to 1 color if MD code doesn't care.
	 */
	if (uvmexp.ncolors == 0)
		uvmexp.ncolors = 1;
	uvmexp.colormask = uvmexp.ncolors - 1;

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

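	/*
	 * For a sense of scale (illustrative numbers only): with 4 KiB
	 * pages and a hypothetical 96-byte struct vm_page, each managed
	 * page costs 4096 + 96 bytes, so roughly 2.3% of the registered
	 * RAM ends up holding vm_page structures.
	 */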
	bucketcount = uvmexp.ncolors * VM_NFREELIST;
	pagecount = ((freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));

	bucketarray = (void *)uvm_pageboot_alloc((bucketcount *
	    sizeof(struct pgflbucket) * 2) + (pagecount *
	    sizeof(struct vm_page)));
	cpuarray = bucketarray + bucketcount;
	pagearray = (struct vm_page *)(bucketarray + bucketcount * 2);

	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		uvm.page_free[lcv].pgfl_buckets =
		    (bucketarray + (lcv * uvmexp.ncolors));
		uvm_page_init_buckets(&uvm.page_free[lcv]);
		uvm.cpus[0]->page_free[lcv].pgfl_buckets =
		    (cpuarray + (lcv * uvmexp.ncolors));
		uvm_page_init_buckets(&uvm.cpus[0]->page_free[lcv]);
	}
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
		seg = VM_PHYSMEM_PTR(lcv);
		n = seg->end - seg->start;

		/* set up page array pointers */
		seg->pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		seg->lastpg = seg->pgs + n;

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ctob(seg->start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			seg->pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&seg->pgs[i]);
#endif
			if (atop(paddr) >= seg->avail_start &&
			    atop(paddr) < seg->avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&seg->pgs[i]);
			}
		}
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);
#ifdef DEBUG
	/*
	 * steal kva for uvm_pagezerocheck().
	 */
	uvm_zerocheckkva = *kvm_startp;
	*kvm_startp += PAGE_SIZE;
#endif /* DEBUG */

	/*
	 * init various thresholds.
	 */

	uvmexp.reserve_pagedaemon = 1;
	uvmexp.reserve_kernel = vm_page_reserve_kernel;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.cpus[0]->page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = true;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize(void)
{

	/*
	 * If uvmexp.pagesize is 0 at this point, we expect PAGE_SIZE
	 * to be a constant (indicated by being a non-zero value).
	 */
	if (uvmexp.pagesize == 0) {
		if (PAGE_SIZE == 0)
			panic("uvm_setpagesize: uvmexp.pagesize not set");
		uvmexp.pagesize = PAGE_SIZE;
	}
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size %u (%#x) not a power of two",
		    uvmexp.pagesize, uvmexp.pagesize);
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
	static bool initialized = false;
	vaddr_t addr;
#if !defined(PMAP_STEAL_MEMORY)
	vaddr_t vaddr;
	paddr_t paddr;
#endif

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == false) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = true;
	}

	/* round to page size */
	size = round_page(size);

#if defined(PMAP_STEAL_MEMORY)

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should adjust
	 * virtual_space_start/virtual_space_end if necessary.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE, 0);
	}
	pmap_update(pmap_kernel());
	return(addr);
#endif /* PMAP_STEAL_MEMORY */
}

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
static bool uvm_page_physget_freelist(paddr_t *, int);

static bool
uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
{
	struct vm_physseg *seg;
	int lcv, x;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
#endif
	{
		seg = VM_PHYSMEM_PTR(lcv);

		if (uvm.page_init_done == true)
			panic("uvm_page_physget: called _after_ bootstrap");

		if (seg->free_list != freelist)
			continue;

		/* try from front */
		if (seg->avail_start == seg->start &&
		    seg->avail_start < seg->avail_end) {
			*paddrp = ctob(seg->avail_start);
			seg->avail_start++;
			seg->start++;
			/* nothing left?   nuke it */
			if (seg->avail_start == seg->end) {
				if (vm_nphysmem == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysmem--;
				for (x = lcv ; x < vm_nphysmem ; x++)
					/* structure copy */
					VM_PHYSMEM_PTR_SWAP(x, x + 1);
			}
			return (true);
		}

		/* try from rear */
		if (seg->avail_end == seg->end &&
		    seg->avail_start < seg->avail_end) {
			*paddrp = ctob(seg->avail_end - 1);
			seg->avail_end--;
			seg->end--;
			/* nothing left?   nuke it */
			if (seg->avail_end == seg->start) {
				if (vm_nphysmem == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysmem--;
				for (x = lcv ; x < vm_nphysmem ; x++)
					/* structure copy */
					VM_PHYSMEM_PTR_SWAP(x, x + 1);
			}
			return (true);
		}
	}

	/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
#endif
	{
		seg = VM_PHYSMEM_PTR(lcv);

		/* any room in this bank? */
		if (seg->avail_start >= seg->avail_end)
			continue;  /* nope */

		*paddrp = ctob(seg->avail_start);
		seg->avail_start++;
		/* truncate! */
		seg->start = seg->avail_start;

		/* nothing left?   nuke it */
		if (seg->avail_start == seg->end) {
			if (vm_nphysmem == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysmem--;
			for (x = lcv ; x < vm_nphysmem ; x++)
				/* structure copy */
				VM_PHYSMEM_PTR_SWAP(x, x + 1);
		}
		return (true);
	}

	return (false);        /* whoops! */
}

bool
uvm_page_physget(paddr_t *paddrp)
{
	int i;

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == true)
			return (true);
	return (false);
}
#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int free_list)
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");
	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d", free_list);
	if (start >= end)
		panic("uvm_page_physload: start >= end");

	/*
	 * do we have room?
	 */

	if (vm_nphysmem == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so malloc is not available).
	 */

	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
		if (VM_PHYSMEM_PTR(lcv)->pgs)
			break;
	}
	preload = (lcv == vm_nphysmem);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */

	if (!preload) {
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
	} else {
		pgs = NULL;
		npages = 0;
	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
	/* random: put it at the end (easy!) */
	ps = VM_PHYSMEM_PTR(vm_nphysmem);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if (start < VM_PHYSMEM_PTR(lcv)->start)
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if ((end - start) >
			    (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#else
	panic("uvm_page_physload: unknown physseg strategy selected!");
#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages;
	}
	ps->free_list = free_list;
	vm_nphysmem++;

	if (!preload) {
		uvmpdpol_reinit();
	}
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX == 1
static inline int vm_physseg_find_contig(struct vm_physseg *, int, paddr_t, int *);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
static inline int vm_physseg_find_bsearch(struct vm_physseg *, int, paddr_t, int *);
#else
static inline int vm_physseg_find_linear(struct vm_physseg *, int, paddr_t, int *);
#endif

/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
int
vm_physseg_find(paddr_t pframe, int *offp)
{

#if VM_PHYSSEG_MAX == 1
	return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
#else
	return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
#endif
}

#if VM_PHYSSEG_MAX == 1
static inline int
vm_physseg_find_contig(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
{

	/* 'contig' case */
	if (pframe >= segs[0].start && pframe < segs[0].end) {
		if (offp)
			*offp = pframe - segs[0].start;
		return(0);
	}
	return(-1);
}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

static inline int
vm_physseg_find_bsearch(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
{
	/* binary search for it */
	u_int start, len, try;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

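	/*
	 * e.g. with len == 7 the probe is start + 3: a miss to the right
	 * leaves the 3 entries above "try" (the "adjust" plus the loop
	 * update make len trunc((7 - 1) / 2) == 3), while a miss to the
	 * left leaves the 3 entries below it (len becomes 7 / 2 == 3).
	 */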
	for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= segs[try].start) {
			/* was try correct? */
			if (pframe < segs[try].end) {
				if (offp)
					*offp = pframe - segs[try].start;
				return(try);            /* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);
}

#else

static inline int
vm_physseg_find_linear(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
{
	/* linear search for it */
	int lcv;

	for (lcv = 0; lcv < nsegs; lcv++) {
		if (pframe >= segs[lcv].start &&
		    pframe < segs[lcv].end) {
			if (offp)
				*offp = pframe - segs[lcv].start;
			return(lcv);		   /* got it */
		}
	}
	return(-1);
}
#endif

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).   used in some MD code as well.
 */
struct vm_page *
uvm_phys_to_vm_page(paddr_t pa)
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);
	if (psi != -1)
		return(&VM_PHYSMEM_PTR(psi)->pgs[off]);
	return(NULL);
}

paddr_t
uvm_vm_page_to_phys(const struct vm_page *pg)
{

	return pg->phys_addr;
}

/*
 * uvm_page_recolor: Recolor the pages if the new bucket count is
 * larger than the old one.
 */

void
uvm_page_recolor(int newncolors)
{
	struct pgflbucket *bucketarray, *cpuarray, *oldbucketarray;
	struct pgfreelist gpgfl, pgfl;
	struct vm_page *pg;
	vsize_t bucketcount;
	int lcv, color, i, ocolors;
	struct uvm_cpu *ucpu;

	if (newncolors <= uvmexp.ncolors)
		return;

	if (uvm.page_init_done == false) {
		uvmexp.ncolors = newncolors;
		return;
	}

	bucketcount = newncolors * VM_NFREELIST;
	bucketarray = malloc(bucketcount * sizeof(struct pgflbucket) * 2,
	    M_VMPAGE, M_NOWAIT);
	cpuarray = bucketarray + bucketcount;
	if (bucketarray == NULL) {
		printf("WARNING: unable to allocate %ld page color buckets\n",
		    (long) bucketcount);
		return;
	}

	mutex_spin_enter(&uvm_fpageqlock);

	/* Make sure we should still do this. */
	if (newncolors <= uvmexp.ncolors) {
		mutex_spin_exit(&uvm_fpageqlock);
		free(bucketarray, M_VMPAGE);
		return;
	}

	oldbucketarray = uvm.page_free[0].pgfl_buckets;
	ocolors = uvmexp.ncolors;

	uvmexp.ncolors = newncolors;
	uvmexp.colormask = uvmexp.ncolors - 1;

	ucpu = curcpu()->ci_data.cpu_uvm;
	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		gpgfl.pgfl_buckets = (bucketarray + (lcv * newncolors));
		pgfl.pgfl_buckets = (cpuarray + (lcv * uvmexp.ncolors));
		uvm_page_init_buckets(&gpgfl);
		uvm_page_init_buckets(&pgfl);
		for (color = 0; color < ocolors; color++) {
			for (i = 0; i < PGFL_NQUEUES; i++) {
				while ((pg = LIST_FIRST(&uvm.page_free[
				    lcv].pgfl_buckets[color].pgfl_queues[i]))
				    != NULL) {
					LIST_REMOVE(pg, pageq.list); /* global */
					LIST_REMOVE(pg, listq.list); /* cpu */
					LIST_INSERT_HEAD(&gpgfl.pgfl_buckets[
					    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
					    i], pg, pageq.list);
					LIST_INSERT_HEAD(&pgfl.pgfl_buckets[
					    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
					    i], pg, listq.list);
				}
			}
		}
		uvm.page_free[lcv].pgfl_buckets = gpgfl.pgfl_buckets;
		ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
	}

	if (have_recolored_pages) {
		mutex_spin_exit(&uvm_fpageqlock);
		free(oldbucketarray, M_VMPAGE);
		return;
	}

	have_recolored_pages = true;
	mutex_spin_exit(&uvm_fpageqlock);
}

/*
 * uvm_cpu_attach: initialize per-CPU data structures.
 */

void
uvm_cpu_attach(struct cpu_info *ci)
{
	struct pgflbucket *bucketarray;
	struct pgfreelist pgfl;
	struct uvm_cpu *ucpu;
	vsize_t bucketcount;
	int lcv;

	if (CPU_IS_PRIMARY(ci)) {
		/* Already done in uvm_page_init(). */
		return;
	}

	/* Add more reserve pages for this CPU. */
	uvmexp.reserve_kernel += vm_page_reserve_kernel;
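	/*
	 * (with the default vm_page_reserve_kernel of 5, each additional
	 * CPU therefore grows the kernel reserve by five pages.)
	 */
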
	/* Configure this CPU's free lists. */
	bucketcount = uvmexp.ncolors * VM_NFREELIST;
	bucketarray = malloc(bucketcount * sizeof(struct pgflbucket),
	    M_VMPAGE, M_WAITOK);
	ucpu = kmem_zalloc(sizeof(*ucpu), KM_SLEEP);
	uvm.cpus[cpu_index(ci)] = ucpu;
	ci->ci_data.cpu_uvm = ucpu;
	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		pgfl.pgfl_buckets = (bucketarray + (lcv * uvmexp.ncolors));
		uvm_page_init_buckets(&pgfl);
		ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
	}
}

/*
 * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat
 */

static struct vm_page *
uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int flist, int try1, int try2,
    int *trycolorp)
{
	struct pgflist *freeq;
	struct vm_page *pg;
	int color, trycolor = *trycolorp;
	struct pgfreelist *gpgfl, *pgfl;

	KASSERT(mutex_owned(&uvm_fpageqlock));

	color = trycolor;
	pgfl = &ucpu->page_free[flist];
	gpgfl = &uvm.page_free[flist];
	do {
		/* cpu, try1 */
		if ((pg = LIST_FIRST((freeq =
		    &pgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) {
			VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
			uvmexp.cpuhit++;
			goto gotit;
		}
		/* global, try1 */
		if ((pg = LIST_FIRST((freeq =
		    &gpgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) {
			VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
			uvmexp.cpumiss++;
			goto gotit;
		}
		/* cpu, try2 */
		if ((pg = LIST_FIRST((freeq =
		    &pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) {
			VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
			uvmexp.cpuhit++;
			goto gotit;
		}
		/* global, try2 */
		if ((pg = LIST_FIRST((freeq =
		    &gpgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) {
			VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
			uvmexp.cpumiss++;
			goto gotit;
		}
		color = (color + 1) & uvmexp.colormask;
	} while (color != trycolor);

	return (NULL);

 gotit:
	LIST_REMOVE(pg, pageq.list);	/* global list */
	LIST_REMOVE(pg, listq.list);	/* per-cpu list */
	uvmexp.free--;

	/* update zero'd page count */
	if (pg->flags & PG_ZERO)
		uvmexp.zeropages--;

	if (color == trycolor)
		uvmexp.colorhit++;
	else {
		uvmexp.colormiss++;
		*trycolorp = color;
	}

	return (pg);
}

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in obj's tree)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 * => policy decision: it is more important to pull a page off of the
 *	appropriate priority free list than it is to get a zero'd or
 *	unknown contents page.  This is because we live with the
 *	consequences of a bad free list decision for the entire
 *	lifetime of the page, e.g. if the page comes from memory that
 *	is slower to access.
 */

struct vm_page *
uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
    int flags, int strat, int free_list)
{
	int lcv, try1, try2, zeroit = 0, color;
	struct uvm_cpu *ucpu;
	struct vm_page *pg;
	lwp_t *l;

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(anon == NULL || (flags & UVM_FLAG_COLORMATCH) || off == 0);
	KASSERT(off == trunc_page(off));
	KASSERT(obj == NULL || mutex_owned(obj->vmobjlock));
	KASSERT(anon == NULL || anon->an_lock == NULL ||
	    mutex_owned(anon->an_lock));

	mutex_spin_enter(&uvm_fpageqlock);

	/*
	 * This implements a global round-robin page coloring
	 * algorithm.
	 */

	ucpu = curcpu()->ci_data.cpu_uvm;
	if (flags & UVM_FLAG_COLORMATCH) {
		color = atop(off) & uvmexp.colormask;
	} else {
		color = ucpu->page_free_nextcolor;
	}

	/*
	 * check to see if we need to generate some free pages waking
	 * the pagedaemon.
	 */

	uvm_kick_pdaemon();

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        reserved pages have not been requested.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 * we make kernel reserve pages available if called by a
	 * kernel thread or a realtime thread.
	 */
	l = curlwp;
	if (__predict_true(l != NULL) && lwp_eprio(l) >= PRI_KTHREAD) {
		flags |= UVM_PGA_USERESERVE;
	}
	if ((uvmexp.free <= uvmexp.reserve_kernel &&
	    (flags & UVM_PGA_USERESERVE) == 0) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	    curlwp != uvm.pagedaemon_lwp))
		goto fail;

#if PGFL_NQUEUES != 2
#error uvm_pagealloc_strat needs to be updated
#endif

	/*
	 * If we want a zero'd page, try the ZEROS queue first, otherwise
	 * we try the UNKNOWN queue first.
	 */
	if (flags & UVM_PGA_ZERO) {
		try1 = PGFL_ZEROS;
		try2 = PGFL_UNKNOWN;
	} else {
		try1 = PGFL_UNKNOWN;
		try2 = PGFL_ZEROS;
	}

 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check freelists: descending priority (ascending id) order */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			pg = uvm_pagealloc_pgfl(ucpu, lcv,
			    try1, try2, &color);
			if (pg != NULL)
				goto gotit;
		}

		/* No pages free! */
		goto fail;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
		pg = uvm_pagealloc_pgfl(ucpu, free_list,
		    try1, try2, &color);
		if (pg != NULL)
			goto gotit;

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free! */
		goto fail;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	/*
	 * We now know which color we actually allocated from; set
	 * the next color accordingly.
	 */

	ucpu->page_free_nextcolor = (color + 1) & uvmexp.colormask;

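	/*
	 * (e.g. with uvmexp.ncolors == 8, colormask is 0x7 and the per-CPU
	 * starting color simply cycles 0 through 7 and wraps around.)
	 */
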
	/*
	 * update allocation statistics and remember if we have to
	 * zero the page
	 */

	if (flags & UVM_PGA_ZERO) {
		if (pg->flags & PG_ZERO) {
			uvmexp.pga_zerohit++;
			zeroit = 0;
		} else {
			uvmexp.pga_zeromiss++;
			zeroit = 1;
		}
		if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
			ucpu->page_idle_zero = vm_page_zero_enable;
		}
	}
	KASSERT(pg->pqflags == PQ_FREE);

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	if (anon) {
		anon->an_page = pg;
		pg->pqflags = PQ_ANON;
		atomic_inc_uint(&uvmexp.anonpages);
	} else {
		if (obj) {
			uvm_pageinsert(obj, pg);
		}
		pg->pqflags = 0;
	}
	mutex_spin_exit(&uvm_fpageqlock);

#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	if (flags & UVM_PGA_ZERO) {
		/*
		 * A zero'd page is not clean.  If we got a page not already
		 * zero'd, then we have to zero it ourselves.
		 */
		pg->flags &= ~PG_CLEAN;
		if (zeroit)
			pmap_zero_page(VM_PAGE_TO_PHYS(pg));
	}

	return(pg);

 fail:
	mutex_spin_exit(&uvm_fpageqlock);
	return (NULL);
}

/*
 * uvm_pagereplace: replace a page with another
 *
 * => object must be locked
 */

void
uvm_pagereplace(struct vm_page *oldpg, struct vm_page *newpg)
{
	struct uvm_object *uobj = oldpg->uobject;

	KASSERT((oldpg->flags & PG_TABLED) != 0);
	KASSERT(uobj != NULL);
	KASSERT((newpg->flags & PG_TABLED) == 0);
	KASSERT(newpg->uobject == NULL);
	KASSERT(mutex_owned(uobj->vmobjlock));

	newpg->uobject = uobj;
	newpg->offset = oldpg->offset;

	uvm_pageremove_tree(uobj, oldpg);
	uvm_pageinsert_tree(uobj, newpg);
	uvm_pageinsert_list(uobj, newpg, oldpg);
	uvm_pageremove_list(uobj, oldpg);
}

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
{
	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg->uobject, pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		uvm_pageinsert(newobj, pg);
	}
}

#ifdef DEBUG
/*
 * check if page is zero-filled
 *
 *  - called with free page queue lock held.
 */
void
uvm_pagezerocheck(struct vm_page *pg)
{
	int *p, *ep;

	KASSERT(uvm_zerocheckkva != 0);
	KASSERT(mutex_owned(&uvm_fpageqlock));

	/*
	 * XXX assuming pmap_kenter_pa and pmap_kremove never call
	 * uvm page allocator.
	 *
	 * it might be better to have "CPU-local temporary map" pmap interface.
	 */
	pmap_kenter_pa(uvm_zerocheckkva, VM_PAGE_TO_PHYS(pg), VM_PROT_READ, 0);
	p = (int *)uvm_zerocheckkva;
	ep = (int *)((char *)p + PAGE_SIZE);
	pmap_update(pmap_kernel());
	while (p < ep) {
		if (*p != 0)
			panic("PG_ZERO page isn't zero-filled");
		p++;
	}
	pmap_kremove(uvm_zerocheckkva, PAGE_SIZE);
	/*
	 * pmap_update() is not necessary here because no one except us
	 * uses this VA.
	 */
}
#endif /* DEBUG */

/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(struct vm_page *pg)
{
	struct pgflist *pgfl;
	struct uvm_cpu *ucpu;
	int index, color, queue;
	bool iszero;

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p", pg);
	}
#endif /* DEBUG */

	KASSERT((pg->flags & PG_PAGEOUT) == 0);
	KASSERT(!(pg->pqflags & PQ_FREE));
	KASSERT(mutex_owned(&uvm_pageqlock) || !uvmpdpol_pageisqueued_p(pg));
	KASSERT(pg->uobject == NULL || mutex_owned(pg->uobject->vmobjlock));
	KASSERT(pg->uobject != NULL || pg->uanon == NULL ||
	    mutex_owned(pg->uanon->an_lock));

	/*
	 * if the page is loaned, resolve the loan instead of freeing.
	 */

	if (pg->loan_count) {
		KASSERT(pg->wire_count == 0);

		/*
		 * if the page is owned by an anon then we just want to
		 * drop anon ownership.  the kernel will free the page when
		 * it is done with it.  if the page is owned by an object,
		 * remove it from the object and mark it dirty for the benefit
		 * of possible anon owners.
		 *
		 * regardless of previous ownership, wakeup any waiters,
		 * unbusy the page, and we're done.
		 */

		if (pg->uobject != NULL) {
			uvm_pageremove(pg->uobject, pg);
			pg->flags &= ~PG_CLEAN;
		} else if (pg->uanon != NULL) {
			if ((pg->pqflags & PQ_ANON) == 0) {
				pg->loan_count--;
			} else {
				pg->pqflags &= ~PQ_ANON;
				atomic_dec_uint(&uvmexp.anonpages);
			}
			pg->uanon->an_page = NULL;
			pg->uanon = NULL;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		pg->flags &= ~(PG_WANTED|PG_BUSY|PG_RELEASED|PG_PAGER1);
#ifdef UVM_PAGE_TRKOWN
		pg->owner_tag = NULL;
#endif
		if (pg->loan_count) {
			KASSERT(pg->uobject == NULL);
			if (pg->uanon == NULL) {
				uvm_pagedequeue(pg);
			}
			return;
		}
	}

	/*
	 * remove page from its object or anon.
	 */

	if (pg->uobject != NULL) {
		uvm_pageremove(pg->uobject, pg);
	} else if (pg->uanon != NULL) {
		pg->uanon->an_page = NULL;
		atomic_dec_uint(&uvmexp.anonpages);
	}

	/*
	 * now remove the page from the queues.
	 */

	uvm_pagedequeue(pg);

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}

	/*
	 * and put on free queue
	 */

	iszero = (pg->flags & PG_ZERO);
	index = uvm_page_lookup_freelist(pg);
	color = VM_PGCOLOR_BUCKET(pg);
	queue = (iszero ? PGFL_ZEROS : PGFL_UNKNOWN);

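	/*
	 * the page goes onto exactly one list, picked by its free list
	 * index, its color bucket, and whether its contents are known to
	 * be zeroed (PGFL_ZEROS) or unknown (PGFL_UNKNOWN).
	 */
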
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif

	mutex_spin_enter(&uvm_fpageqlock);
	pg->pqflags = PQ_FREE;

#ifdef DEBUG
	if (iszero)
		uvm_pagezerocheck(pg);
#endif /* DEBUG */


	/* global list */
	pgfl = &uvm.page_free[index].pgfl_buckets[color].pgfl_queues[queue];
	LIST_INSERT_HEAD(pgfl, pg, pageq.list);
	uvmexp.free++;
	if (iszero) {
		uvmexp.zeropages++;
	}

	/* per-cpu list */
	ucpu = curcpu()->ci_data.cpu_uvm;
	pg->offset = (uintptr_t)ucpu;
	pgfl = &ucpu->page_free[index].pgfl_buckets[color].pgfl_queues[queue];
	LIST_INSERT_HEAD(pgfl, pg, listq.list);
	ucpu->pages[queue]++;
	if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
		ucpu->page_idle_zero = vm_page_zero_enable;
	}

	mutex_spin_exit(&uvm_fpageqlock);
}

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are object-owned, object must be locked.
 * => if pages are anon-owned, anons must be locked.
 * => caller must lock page queues if pages may be released.
 * => caller must make sure that anon-owned pages are not PG_RELEASED.
 */

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	int i;
	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}

		KASSERT(pg->uobject == NULL ||
		    mutex_owned(pg->uobject->vmobjlock));
		KASSERT(pg->uobject != NULL ||
		    (pg->uanon != NULL && mutex_owned(pg->uanon->an_lock)));

		KASSERT(pg->flags & PG_BUSY);
		KASSERT((pg->flags & PG_PAGEOUT) == 0);
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->flags & PG_RELEASED) {
			UVMHIST_LOG(ubchist, "releasing pg %p", pg,0,0,0);
			KASSERT(pg->uobject != NULL ||
			    (pg->uanon != NULL && pg->uanon->an_ref > 0));
			pg->flags &= ~PG_RELEASED;
			uvm_pagefree(pg);
		} else {
			UVMHIST_LOG(ubchist, "unbusying pg %p", pg,0,0,0);
			KASSERT((pg->flags & PG_FAKE) == 0);
			pg->flags &= ~(PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.   it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(struct vm_page *pg, const char *tag)
{
	struct uvm_object *uobj;
	struct vm_anon *anon;

	KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);

	uobj = pg->uobject;
	anon = pg->uanon;
	if (uobj != NULL) {
		KASSERT(mutex_owned(uobj->vmobjlock));
	} else if (anon != NULL) {
		KASSERT(mutex_owned(anon->an_lock));
	}

	KASSERT((pg->flags & PG_WANTED) == 0);

	/* gain ownership? */
	if (tag) {
		KASSERT((pg->flags & PG_BUSY) != 0);
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->lowner = (curlwp) ? curlwp->l_lid : (lwpid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	KASSERT((pg->flags & PG_BUSY) == 0);
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	if (!uvmpdpol_pageisqueued_p(pg)) {
		KASSERT((pg->uanon == NULL && pg->uobject == NULL) ||
		    pg->wire_count > 0);
	} else {
		KASSERT(pg->wire_count == 0);
	}
	pg->owner_tag = NULL;
}
#endif

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => try to complete one color bucket at a time, to reduce our impact
 *	on the CPU cache.
 * => we loop until we either reach the target or there is a lwp ready
 *	to run, or MD code detects a reason to break early.
 */
void
uvm_pageidlezero(void)
{
	struct vm_page *pg;
	struct pgfreelist *pgfl, *gpgfl;
	struct uvm_cpu *ucpu;
	int free_list, firstbucket, nextbucket;
	bool lcont = false;

	ucpu = curcpu()->ci_data.cpu_uvm;
	if (!ucpu->page_idle_zero ||
	    ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) {
		ucpu->page_idle_zero = false;
		return;
	}
	if (!mutex_tryenter(&uvm_fpageqlock)) {
		/* Contention: let other CPUs use the lock. */
		return;
	}
	firstbucket = ucpu->page_free_nextcolor;
	nextbucket = firstbucket;
	do {
		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			if (sched_curcpu_runnable_p()) {
				goto quit;
			}
			pgfl = &ucpu->page_free[free_list];
			gpgfl = &uvm.page_free[free_list];
			while ((pg = LIST_FIRST(&pgfl->pgfl_buckets[
			    nextbucket].pgfl_queues[PGFL_UNKNOWN])) != NULL) {
				if (lcont || sched_curcpu_runnable_p()) {
					goto quit;
				}
				LIST_REMOVE(pg, pageq.list); /* global list */
				LIST_REMOVE(pg, listq.list); /* per-cpu list */
				ucpu->pages[PGFL_UNKNOWN]--;
				uvmexp.free--;
				KASSERT(pg->pqflags == PQ_FREE);
				pg->pqflags = 0;
				mutex_spin_exit(&uvm_fpageqlock);
#ifdef PMAP_PAGEIDLEZERO
				if (!PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg))) {

					/*
					 * The machine-dependent code detected
					 * some reason for us to abort zeroing
					 * pages, probably because there is a
					 * process now ready to run.
					 */

					mutex_spin_enter(&uvm_fpageqlock);
					pg->pqflags = PQ_FREE;
					LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[
					    nextbucket].pgfl_queues[
					    PGFL_UNKNOWN], pg, pageq.list);
					LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
					    nextbucket].pgfl_queues[
					    PGFL_UNKNOWN], pg, listq.list);
					ucpu->pages[PGFL_UNKNOWN]++;
					uvmexp.free++;
					uvmexp.zeroaborts++;
					goto quit;
				}
#else
				pmap_zero_page(VM_PAGE_TO_PHYS(pg));
#endif /* PMAP_PAGEIDLEZERO */
				pg->flags |= PG_ZERO;

				if (!mutex_tryenter(&uvm_fpageqlock)) {
					lcont = true;
					mutex_spin_enter(&uvm_fpageqlock);
				} else {
					lcont = false;
				}
				pg->pqflags = PQ_FREE;
				LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[
				    nextbucket].pgfl_queues[PGFL_ZEROS],
				    pg, pageq.list);
				LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
				    nextbucket].pgfl_queues[PGFL_ZEROS],
				    pg, listq.list);
				ucpu->pages[PGFL_ZEROS]++;
				uvmexp.free++;
				uvmexp.zeropages++;
			}
		}
		if (ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) {
			break;
		}
		nextbucket = (nextbucket + 1) & uvmexp.colormask;
	} while (nextbucket != firstbucket);
	ucpu->page_idle_zero = false;
 quit:
	mutex_spin_exit(&uvm_fpageqlock);
}

/*
 * uvm_pagelookup: look up a page
 *
 * => caller should lock object to keep someone from pulling the page
 *	out from under it
 */

struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
	struct vm_page *pg;

	KASSERT(mutex_owned(obj->vmobjlock));

	pg = rb_tree_find_node(&obj->rb_tree, &off);

	KASSERT(pg == NULL || obj->uo_npages != 0);
	KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		(pg->flags & PG_BUSY) != 0);
	return pg;
}

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */

void
uvm_pagewire(struct vm_page *pg)
{
	KASSERT(mutex_owned(&uvm_pageqlock));
#if defined(READAHEAD_STATS)
	if ((pg->pqflags & PQ_READAHEAD) != 0) {
		uvm_ra_hit.ev_count++;
		pg->pqflags &= ~PQ_READAHEAD;
	}
#endif /* defined(READAHEAD_STATS) */
	if (pg->wire_count == 0) {
		uvm_pagedequeue(pg);
		uvmexp.wired++;
	}
	pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */

void
uvm_pageunwire(struct vm_page *pg)
{
	KASSERT(mutex_owned(&uvm_pageqlock));
	pg->wire_count--;
	if (pg->wire_count == 0) {
		uvm_pageactivate(pg);
		uvmexp.wired--;
	}
}

/*
 * uvm_pagedeactivate: deactivate page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 * => caller must clear the reference on the page before calling
 */

void
uvm_pagedeactivate(struct vm_page *pg)
{

	KASSERT(mutex_owned(&uvm_pageqlock));
	KASSERT(uvm_page_locked_p(pg));
	KASSERT(pg->wire_count != 0 || uvmpdpol_pageisqueued_p(pg));
	uvmpdpol_pagedeactivate(pg);
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */

void
uvm_pageactivate(struct vm_page *pg)
{

	KASSERT(mutex_owned(&uvm_pageqlock));
	KASSERT(uvm_page_locked_p(pg));
#if defined(READAHEAD_STATS)
	if ((pg->pqflags & PQ_READAHEAD) != 0) {
		uvm_ra_hit.ev_count++;
		pg->pqflags &= ~PQ_READAHEAD;
	}
#endif /* defined(READAHEAD_STATS) */
	if (pg->wire_count != 0) {
		return;
	}
	uvmpdpol_pageactivate(pg);
}

/*
 * uvm_pagedequeue: remove a page from any paging queue
 */

void
uvm_pagedequeue(struct vm_page *pg)
{

	if (uvmpdpol_pageisqueued_p(pg)) {
		KASSERT(mutex_owned(&uvm_pageqlock));
	}

	uvmpdpol_pagedequeue(pg);
}

/*
 * uvm_pageenqueue: add a page to a paging queue without activating.
 * used where a page is not really demanded (yet).  eg. read-ahead
 */

void
uvm_pageenqueue(struct vm_page *pg)
{

	KASSERT(mutex_owned(&uvm_pageqlock));
	if (pg->wire_count != 0) {
		return;
	}
	uvmpdpol_pageenqueue(pg);
}

/*
 * uvm_pagezero: zero fill a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */

void
uvm_pagezero(struct vm_page *pg)
{
	pg->flags &= ~PG_CLEAN;
	pmap_zero_page(VM_PAGE_TO_PHYS(pg));
}

/*
 * uvm_pagecopy: copy a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */

void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{

	dst->flags &= ~PG_CLEAN;
	pmap_copy_page(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
}

/*
 * uvm_pageismanaged: test to see that a page (specified by PA) is managed.
 */

bool
uvm_pageismanaged(paddr_t pa)
{

	return (vm_physseg_find(atop(pa), NULL) != -1);
}

/*
 * uvm_page_lookup_freelist: look up the free list for the specified page
 */

int
uvm_page_lookup_freelist(struct vm_page *pg)
{
	int lcv;

	lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
	KASSERT(lcv != -1);
	return (VM_PHYSMEM_PTR(lcv)->free_list);
}

/*
 * uvm_page_locked_p: return true if object associated with page is
 * locked.  this is a weak check for runtime assertions only.
 */

bool
uvm_page_locked_p(struct vm_page *pg)
{

	if (pg->uobject != NULL) {
		return mutex_owned(pg->uobject->vmobjlock);
	}
	if (pg->uanon != NULL) {
		return mutex_owned(pg->uanon->an_lock);
	}
	return true;
}

#if defined(DDB) || defined(DEBUGPRINT)

/*
 * uvm_page_printit: actually print the page
 */

static const char page_flagbits[] = UVM_PGFLAGBITS;
static const char page_pqflagbits[] = UVM_PQFLAGBITS;

void
uvm_page_printit(struct vm_page *pg, bool full,
    void (*pr)(const char *, ...))
{
	struct vm_page *tpg;
	struct uvm_object *uobj;
	struct pgflist *pgl;
	char pgbuf[128];
	char pqbuf[128];

	(*pr)("PAGE %p:\n", pg);
	snprintb(pgbuf, sizeof(pgbuf), page_flagbits, pg->flags);
	snprintb(pqbuf, sizeof(pqbuf), page_pqflagbits, pg->pqflags);
	(*pr)("  flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n",
	    pgbuf, pqbuf, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg));
	(*pr)("  uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
	    pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
#if defined(UVM_PAGE_TRKOWN)
	if (pg->flags & PG_BUSY)
		(*pr)("  owning process = %d, tag=%s\n",
		    pg->owner, pg->owner_tag);
	else
		(*pr)("  page not busy, no owner\n");
#else
	(*pr)("  [page ownership tracking disabled]\n");
#endif

	if (!full)
		return;

	/* cross-verify object/anon */
	if ((pg->pqflags & PQ_FREE) == 0) {
		if (pg->pqflags & PQ_ANON) {
			if (pg->uanon == NULL || pg->uanon->an_page != pg)
			    (*pr)("  >>> ANON DOES NOT POINT HERE <<< (%p)\n",
				(pg->uanon) ? pg->uanon->an_page : NULL);
			else
				(*pr)("  anon backpointer is OK\n");
		} else {
			uobj = pg->uobject;
			if (uobj) {
				(*pr)("  checking object list\n");
				TAILQ_FOREACH(tpg, &uobj->memq, listq.queue) {
					if (tpg == pg) {
						break;
					}
				}
				if (tpg)
					(*pr)("  page found on object list\n");
				else
					(*pr)("  >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
			}
		}
	}

	/* cross-verify page queue */
	if (pg->pqflags & PQ_FREE) {
		int fl = uvm_page_lookup_freelist(pg);
		int color = VM_PGCOLOR_BUCKET(pg);
		pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
		    ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
	} else {
		pgl = NULL;
	}

	if (pgl) {
		(*pr)("  checking pageq list\n");
		LIST_FOREACH(tpg, pgl, pageq.list) {
			if (tpg == pg) {
				break;
			}
		}
		if (tpg)
			(*pr)("  page found on pageq list\n");
		else
			(*pr)("  >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
	}
}

/*
 * uvm_page_printall - print a summary of all managed pages
 */

void
uvm_page_printall(void (*pr)(const char *, ...))
{
	unsigned i;
	struct vm_page *pg;

	(*pr)("%18s %4s %4s %18s %18s"
#ifdef UVM_PAGE_TRKOWN
	    " OWNER"
#endif
	    "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
	for (i = 0; i < vm_nphysmem; i++) {
		for (pg = VM_PHYSMEM_PTR(i)->pgs; pg < VM_PHYSMEM_PTR(i)->lastpg; pg++) {
			(*pr)("%18p %04x %04x %18p %18p",
			    pg, pg->flags, pg->pqflags, pg->uobject,
			    pg->uanon);
#ifdef UVM_PAGE_TRKOWN
			if (pg->flags & PG_BUSY)
				(*pr)(" %d [%s]", pg->owner, pg->owner_tag);
#endif
			(*pr)("\n");
		}
	}
}

#endif /* DDB || DEBUGPRINT */