1 /* $NetBSD: uvm_page.c,v 1.184 2014/04/21 16:33:48 chs Exp $ */ 2 3 /* 4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 5 * Copyright (c) 1991, 1993, The Regents of the University of California. 6 * 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * The Mach Operating System project at Carnegie-Mellon University. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * @(#)vm_page.c 8.3 (Berkeley) 3/21/94 37 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp 38 * 39 * 40 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 41 * All rights reserved. 42 * 43 * Permission to use, copy, modify and distribute this software and 44 * its documentation is hereby granted, provided that both the copyright 45 * notice and this permission notice appear in all copies of the 46 * software, derivative works or modified versions, and any portions 47 * thereof, and that both notices appear in supporting documentation. 48 * 49 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 50 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 51 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 52 * 53 * Carnegie Mellon requests users of this software to return to 54 * 55 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 56 * School of Computer Science 57 * Carnegie Mellon University 58 * Pittsburgh PA 15213-3890 59 * 60 * any improvements or extensions that they make and grant Carnegie the 61 * rights to redistribute these changes. 62 */ 63 64 /* 65 * uvm_page.c: page ops. 
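 *
 * This file implements the machine-independent page allocator: bootstrap
 * of the vm_page array and free lists (uvm_page_init), the global and
 * per-CPU color-bucketed free lists, page allocation and freeing, and
 * the helpers used for wiring, paging queues and debug printing.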
66 */ 67 68 #include <sys/cdefs.h> 69 __KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.184 2014/04/21 16:33:48 chs Exp $"); 70 71 #include "opt_ddb.h" 72 #include "opt_uvmhist.h" 73 #include "opt_readahead.h" 74 75 #include <sys/param.h> 76 #include <sys/systm.h> 77 #include <sys/sched.h> 78 #include <sys/kernel.h> 79 #include <sys/vnode.h> 80 #include <sys/proc.h> 81 #include <sys/atomic.h> 82 #include <sys/cpu.h> 83 84 #include <uvm/uvm.h> 85 #include <uvm/uvm_ddb.h> 86 #include <uvm/uvm_pdpolicy.h> 87 88 /* 89 * global vars... XXXCDC: move to uvm. structure. 90 */ 91 92 /* 93 * physical memory config is stored in vm_physmem. 94 */ 95 96 struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */ 97 int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */ 98 #define vm_nphysmem vm_nphysseg 99 100 /* 101 * Some supported CPUs in a given architecture don't support all 102 * of the things necessary to do idle page zero'ing efficiently. 103 * We therefore provide a way to enable it from machdep code here. 104 */ 105 bool vm_page_zero_enable = false; 106 107 /* 108 * number of pages per-CPU to reserve for the kernel. 109 */ 110 int vm_page_reserve_kernel = 5; 111 112 /* 113 * physical memory size; 114 */ 115 int physmem; 116 117 /* 118 * local variables 119 */ 120 121 /* 122 * these variables record the values returned by vm_page_bootstrap, 123 * for debugging purposes. The implementation of uvm_pageboot_alloc 124 * and pmap_startup here also uses them internally. 125 */ 126 127 static vaddr_t virtual_space_start; 128 static vaddr_t virtual_space_end; 129 130 /* 131 * we allocate an initial number of page colors in uvm_page_init(), 132 * and remember them. We may re-color pages as cache sizes are 133 * discovered during the autoconfiguration phase. But we can never 134 * free the initial set of buckets, since they are allocated using 135 * uvm_pageboot_alloc(). 136 */ 137 138 static size_t recolored_pages_memsize /* = 0 */; 139 140 #ifdef DEBUG 141 vaddr_t uvm_zerocheckkva; 142 #endif /* DEBUG */ 143 144 /* 145 * local prototypes 146 */ 147 148 static void uvm_pageinsert(struct uvm_object *, struct vm_page *); 149 static void uvm_pageremove(struct uvm_object *, struct vm_page *); 150 151 /* 152 * per-object tree of pages 153 */ 154 155 static signed int 156 uvm_page_compare_nodes(void *ctx, const void *n1, const void *n2) 157 { 158 const struct vm_page *pg1 = n1; 159 const struct vm_page *pg2 = n2; 160 const voff_t a = pg1->offset; 161 const voff_t b = pg2->offset; 162 163 if (a < b) 164 return -1; 165 if (a > b) 166 return 1; 167 return 0; 168 } 169 170 static signed int 171 uvm_page_compare_key(void *ctx, const void *n, const void *key) 172 { 173 const struct vm_page *pg = n; 174 const voff_t a = pg->offset; 175 const voff_t b = *(const voff_t *)key; 176 177 if (a < b) 178 return -1; 179 if (a > b) 180 return 1; 181 return 0; 182 } 183 184 const rb_tree_ops_t uvm_page_tree_ops = { 185 .rbto_compare_nodes = uvm_page_compare_nodes, 186 .rbto_compare_key = uvm_page_compare_key, 187 .rbto_node_offset = offsetof(struct vm_page, rb_node), 188 .rbto_context = NULL 189 }; 190 191 /* 192 * inline functions 193 */ 194 195 /* 196 * uvm_pageinsert: insert a page in the object. 
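 *
 * The page is keyed by pg->offset in the object's rb_tree and appended
 * to uobj->memq; vnode hold counts and the exec/file/anon page counters
 * are updated as a side effect.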
197 * 198 * => caller must lock object 199 * => caller must lock page queues 200 * => call should have already set pg's object and offset pointers 201 * and bumped the version counter 202 */ 203 204 static inline void 205 uvm_pageinsert_list(struct uvm_object *uobj, struct vm_page *pg, 206 struct vm_page *where) 207 { 208 209 KASSERT(uobj == pg->uobject); 210 KASSERT(mutex_owned(uobj->vmobjlock)); 211 KASSERT((pg->flags & PG_TABLED) == 0); 212 KASSERT(where == NULL || (where->flags & PG_TABLED)); 213 KASSERT(where == NULL || (where->uobject == uobj)); 214 215 if (UVM_OBJ_IS_VNODE(uobj)) { 216 if (uobj->uo_npages == 0) { 217 struct vnode *vp = (struct vnode *)uobj; 218 219 vholdl(vp); 220 } 221 if (UVM_OBJ_IS_VTEXT(uobj)) { 222 atomic_inc_uint(&uvmexp.execpages); 223 } else { 224 atomic_inc_uint(&uvmexp.filepages); 225 } 226 } else if (UVM_OBJ_IS_AOBJ(uobj)) { 227 atomic_inc_uint(&uvmexp.anonpages); 228 } 229 230 if (where) 231 TAILQ_INSERT_AFTER(&uobj->memq, where, pg, listq.queue); 232 else 233 TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue); 234 pg->flags |= PG_TABLED; 235 uobj->uo_npages++; 236 } 237 238 239 static inline void 240 uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg) 241 { 242 struct vm_page *ret __diagused; 243 244 KASSERT(uobj == pg->uobject); 245 ret = rb_tree_insert_node(&uobj->rb_tree, pg); 246 KASSERT(ret == pg); 247 } 248 249 static inline void 250 uvm_pageinsert(struct uvm_object *uobj, struct vm_page *pg) 251 { 252 253 KDASSERT(uobj != NULL); 254 uvm_pageinsert_tree(uobj, pg); 255 uvm_pageinsert_list(uobj, pg, NULL); 256 } 257 258 /* 259 * uvm_page_remove: remove page from object. 260 * 261 * => caller must lock object 262 * => caller must lock page queues 263 */ 264 265 static inline void 266 uvm_pageremove_list(struct uvm_object *uobj, struct vm_page *pg) 267 { 268 269 KASSERT(uobj == pg->uobject); 270 KASSERT(mutex_owned(uobj->vmobjlock)); 271 KASSERT(pg->flags & PG_TABLED); 272 273 if (UVM_OBJ_IS_VNODE(uobj)) { 274 if (uobj->uo_npages == 1) { 275 struct vnode *vp = (struct vnode *)uobj; 276 277 holdrelel(vp); 278 } 279 if (UVM_OBJ_IS_VTEXT(uobj)) { 280 atomic_dec_uint(&uvmexp.execpages); 281 } else { 282 atomic_dec_uint(&uvmexp.filepages); 283 } 284 } else if (UVM_OBJ_IS_AOBJ(uobj)) { 285 atomic_dec_uint(&uvmexp.anonpages); 286 } 287 288 /* object should be locked */ 289 uobj->uo_npages--; 290 TAILQ_REMOVE(&uobj->memq, pg, listq.queue); 291 pg->flags &= ~PG_TABLED; 292 pg->uobject = NULL; 293 } 294 295 static inline void 296 uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg) 297 { 298 299 KASSERT(uobj == pg->uobject); 300 rb_tree_remove_node(&uobj->rb_tree, pg); 301 } 302 303 static inline void 304 uvm_pageremove(struct uvm_object *uobj, struct vm_page *pg) 305 { 306 307 KDASSERT(uobj != NULL); 308 uvm_pageremove_tree(uobj, pg); 309 uvm_pageremove_list(uobj, pg); 310 } 311 312 static void 313 uvm_page_init_buckets(struct pgfreelist *pgfl) 314 { 315 int color, i; 316 317 for (color = 0; color < uvmexp.ncolors; color++) { 318 for (i = 0; i < PGFL_NQUEUES; i++) { 319 LIST_INIT(&pgfl->pgfl_buckets[color].pgfl_queues[i]); 320 } 321 } 322 } 323 324 /* 325 * uvm_page_init: init the page system. called from uvm_init(). 
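 *
 * Illustrative bootstrap ordering only (hypothetical MD code, not part
 * of this file): a port is expected to have done roughly
 *
 *	uvm_setpagesize();
 *	uvm_page_physload(atop(seg_start), atop(seg_end),
 *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
 *	...
 *	uvm_init();			(which calls uvm_page_init())
 *
 * before we get here; uvm_page_physload() is what registers the RAM
 * segments walked below.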
326 * 327 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp 328 */ 329 330 void 331 uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp) 332 { 333 static struct uvm_cpu boot_cpu; 334 psize_t freepages, pagecount, bucketcount, n; 335 struct pgflbucket *bucketarray, *cpuarray; 336 struct vm_physseg *seg; 337 struct vm_page *pagearray; 338 int lcv; 339 u_int i; 340 paddr_t paddr; 341 342 KASSERT(ncpu <= 1); 343 CTASSERT(sizeof(pagearray->offset) >= sizeof(struct uvm_cpu *)); 344 345 /* 346 * init the page queues and page queue locks, except the free 347 * list; we allocate that later (with the initial vm_page 348 * structures). 349 */ 350 351 uvm.cpus[0] = &boot_cpu; 352 curcpu()->ci_data.cpu_uvm = &boot_cpu; 353 uvmpdpol_init(); 354 mutex_init(&uvm_pageqlock, MUTEX_DRIVER, IPL_NONE); 355 mutex_init(&uvm_fpageqlock, MUTEX_DRIVER, IPL_VM); 356 357 /* 358 * allocate vm_page structures. 359 */ 360 361 /* 362 * sanity check: 363 * before calling this function the MD code is expected to register 364 * some free RAM with the uvm_page_physload() function. our job 365 * now is to allocate vm_page structures for this memory. 366 */ 367 368 if (vm_nphysmem == 0) 369 panic("uvm_page_bootstrap: no memory pre-allocated"); 370 371 /* 372 * first calculate the number of free pages... 373 * 374 * note that we use start/end rather than avail_start/avail_end. 375 * this allows us to allocate extra vm_page structures in case we 376 * want to return some memory to the pool after booting. 377 */ 378 379 freepages = 0; 380 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) { 381 seg = VM_PHYSMEM_PTR(lcv); 382 freepages += (seg->end - seg->start); 383 } 384 385 /* 386 * Let MD code initialize the number of colors, or default 387 * to 1 color if MD code doesn't care. 388 */ 389 if (uvmexp.ncolors == 0) 390 uvmexp.ncolors = 1; 391 uvmexp.colormask = uvmexp.ncolors - 1; 392 KASSERT((uvmexp.colormask & uvmexp.ncolors) == 0); 393 394 /* 395 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can 396 * use. for each page of memory we use we need a vm_page structure. 397 * thus, the total number of pages we can use is the total size of 398 * the memory divided by the PAGE_SIZE plus the size of the vm_page 399 * structure. we add one to freepages as a fudge factor to avoid 400 * truncation errors (since we can only allocate in terms of whole 401 * pages). 402 */ 403 404 bucketcount = uvmexp.ncolors * VM_NFREELIST; 405 pagecount = ((freepages + 1) << PAGE_SHIFT) / 406 (PAGE_SIZE + sizeof(struct vm_page)); 407 408 bucketarray = (void *)uvm_pageboot_alloc((bucketcount * 409 sizeof(struct pgflbucket) * 2) + (pagecount * 410 sizeof(struct vm_page))); 411 cpuarray = bucketarray + bucketcount; 412 pagearray = (struct vm_page *)(bucketarray + bucketcount * 2); 413 414 for (lcv = 0; lcv < VM_NFREELIST; lcv++) { 415 uvm.page_free[lcv].pgfl_buckets = 416 (bucketarray + (lcv * uvmexp.ncolors)); 417 uvm_page_init_buckets(&uvm.page_free[lcv]); 418 uvm.cpus[0]->page_free[lcv].pgfl_buckets = 419 (cpuarray + (lcv * uvmexp.ncolors)); 420 uvm_page_init_buckets(&uvm.cpus[0]->page_free[lcv]); 421 } 422 memset(pagearray, 0, pagecount * sizeof(struct vm_page)); 423 424 /* 425 * init the vm_page structures and put them in the correct place. 
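	 *
	 * (each segment gets n = seg->end - seg->start vm_page structures
	 * carved out of pagearray; only the frames inside
	 * [avail_start, avail_end) are handed to uvm_pagefree() and so
	 * become allocatable)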
426 */ 427 428 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) { 429 seg = VM_PHYSMEM_PTR(lcv); 430 n = seg->end - seg->start; 431 432 /* set up page array pointers */ 433 seg->pgs = pagearray; 434 pagearray += n; 435 pagecount -= n; 436 seg->lastpg = seg->pgs + n; 437 438 /* init and free vm_pages (we've already zeroed them) */ 439 paddr = ctob(seg->start); 440 for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) { 441 seg->pgs[i].phys_addr = paddr; 442 #ifdef __HAVE_VM_PAGE_MD 443 VM_MDPAGE_INIT(&seg->pgs[i]); 444 #endif 445 if (atop(paddr) >= seg->avail_start && 446 atop(paddr) < seg->avail_end) { 447 uvmexp.npages++; 448 /* add page to free pool */ 449 uvm_pagefree(&seg->pgs[i]); 450 } 451 } 452 } 453 454 /* 455 * pass up the values of virtual_space_start and 456 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper 457 * layers of the VM. 458 */ 459 460 *kvm_startp = round_page(virtual_space_start); 461 *kvm_endp = trunc_page(virtual_space_end); 462 #ifdef DEBUG 463 /* 464 * steal kva for uvm_pagezerocheck(). 465 */ 466 uvm_zerocheckkva = *kvm_startp; 467 *kvm_startp += PAGE_SIZE; 468 #endif /* DEBUG */ 469 470 /* 471 * init various thresholds. 472 */ 473 474 uvmexp.reserve_pagedaemon = 1; 475 uvmexp.reserve_kernel = vm_page_reserve_kernel; 476 477 /* 478 * determine if we should zero pages in the idle loop. 479 */ 480 481 uvm.cpus[0]->page_idle_zero = vm_page_zero_enable; 482 483 /* 484 * done! 485 */ 486 487 uvm.page_init_done = true; 488 } 489 490 /* 491 * uvm_setpagesize: set the page size 492 * 493 * => sets page_shift and page_mask from uvmexp.pagesize. 494 */ 495 496 void 497 uvm_setpagesize(void) 498 { 499 500 /* 501 * If uvmexp.pagesize is 0 at this point, we expect PAGE_SIZE 502 * to be a constant (indicated by being a non-zero value). 503 */ 504 if (uvmexp.pagesize == 0) { 505 if (PAGE_SIZE == 0) 506 panic("uvm_setpagesize: uvmexp.pagesize not set"); 507 uvmexp.pagesize = PAGE_SIZE; 508 } 509 uvmexp.pagemask = uvmexp.pagesize - 1; 510 if ((uvmexp.pagemask & uvmexp.pagesize) != 0) 511 panic("uvm_setpagesize: page size %u (%#x) not a power of two", 512 uvmexp.pagesize, uvmexp.pagesize); 513 for (uvmexp.pageshift = 0; ; uvmexp.pageshift++) 514 if ((1 << uvmexp.pageshift) == uvmexp.pagesize) 515 break; 516 } 517 518 /* 519 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping 520 */ 521 522 vaddr_t 523 uvm_pageboot_alloc(vsize_t size) 524 { 525 static bool initialized = false; 526 vaddr_t addr; 527 #if !defined(PMAP_STEAL_MEMORY) 528 vaddr_t vaddr; 529 paddr_t paddr; 530 #endif 531 532 /* 533 * on first call to this function, initialize ourselves. 534 */ 535 if (initialized == false) { 536 pmap_virtual_space(&virtual_space_start, &virtual_space_end); 537 538 /* round it the way we like it */ 539 virtual_space_start = round_page(virtual_space_start); 540 virtual_space_end = trunc_page(virtual_space_end); 541 542 initialized = true; 543 } 544 545 /* round to page size */ 546 size = round_page(size); 547 548 #if defined(PMAP_STEAL_MEMORY) 549 550 /* 551 * defer bootstrap allocation to MD code (it may want to allocate 552 * from a direct-mapped segment). pmap_steal_memory should adjust 553 * virtual_space_start/virtual_space_end if necessary. 
554 */ 555 556 addr = pmap_steal_memory(size, &virtual_space_start, 557 &virtual_space_end); 558 559 return(addr); 560 561 #else /* !PMAP_STEAL_MEMORY */ 562 563 /* 564 * allocate virtual memory for this request 565 */ 566 if (virtual_space_start == virtual_space_end || 567 (virtual_space_end - virtual_space_start) < size) 568 panic("uvm_pageboot_alloc: out of virtual space"); 569 570 addr = virtual_space_start; 571 572 #ifdef PMAP_GROWKERNEL 573 /* 574 * If the kernel pmap can't map the requested space, 575 * then allocate more resources for it. 576 */ 577 if (uvm_maxkaddr < (addr + size)) { 578 uvm_maxkaddr = pmap_growkernel(addr + size); 579 if (uvm_maxkaddr < (addr + size)) 580 panic("uvm_pageboot_alloc: pmap_growkernel() failed"); 581 } 582 #endif 583 584 virtual_space_start += size; 585 586 /* 587 * allocate and mapin physical pages to back new virtual pages 588 */ 589 590 for (vaddr = round_page(addr) ; vaddr < addr + size ; 591 vaddr += PAGE_SIZE) { 592 593 if (!uvm_page_physget(&paddr)) 594 panic("uvm_pageboot_alloc: out of memory"); 595 596 /* 597 * Note this memory is no longer managed, so using 598 * pmap_kenter is safe. 599 */ 600 pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE, 0); 601 } 602 pmap_update(pmap_kernel()); 603 return(addr); 604 #endif /* PMAP_STEAL_MEMORY */ 605 } 606 607 #if !defined(PMAP_STEAL_MEMORY) 608 /* 609 * uvm_page_physget: "steal" one page from the vm_physmem structure. 610 * 611 * => attempt to allocate it off the end of a segment in which the "avail" 612 * values match the start/end values. if we can't do that, then we 613 * will advance both values (making them equal, and removing some 614 * vm_page structures from the non-avail area). 615 * => return false if out of memory. 616 */ 617 618 /* subroutine: try to allocate from memory chunks on the specified freelist */ 619 static bool uvm_page_physget_freelist(paddr_t *, int); 620 621 static bool 622 uvm_page_physget_freelist(paddr_t *paddrp, int freelist) 623 { 624 struct vm_physseg *seg; 625 int lcv, x; 626 627 /* pass 1: try allocating from a matching end */ 628 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) 629 for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--) 630 #else 631 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) 632 #endif 633 { 634 seg = VM_PHYSMEM_PTR(lcv); 635 636 if (uvm.page_init_done == true) 637 panic("uvm_page_physget: called _after_ bootstrap"); 638 639 if (seg->free_list != freelist) 640 continue; 641 642 /* try from front */ 643 if (seg->avail_start == seg->start && 644 seg->avail_start < seg->avail_end) { 645 *paddrp = ctob(seg->avail_start); 646 seg->avail_start++; 647 seg->start++; 648 /* nothing left? nuke it */ 649 if (seg->avail_start == seg->end) { 650 if (vm_nphysmem == 1) 651 panic("uvm_page_physget: out of memory!"); 652 vm_nphysmem--; 653 for (x = lcv ; x < vm_nphysmem ; x++) 654 /* structure copy */ 655 VM_PHYSMEM_PTR_SWAP(x, x + 1); 656 } 657 return (true); 658 } 659 660 /* try from rear */ 661 if (seg->avail_end == seg->end && 662 seg->avail_start < seg->avail_end) { 663 *paddrp = ctob(seg->avail_end - 1); 664 seg->avail_end--; 665 seg->end--; 666 /* nothing left? 
nuke it */ 667 if (seg->avail_end == seg->start) { 668 if (vm_nphysmem == 1) 669 panic("uvm_page_physget: out of memory!"); 670 vm_nphysmem--; 671 for (x = lcv ; x < vm_nphysmem ; x++) 672 /* structure copy */ 673 VM_PHYSMEM_PTR_SWAP(x, x + 1); 674 } 675 return (true); 676 } 677 } 678 679 /* pass2: forget about matching ends, just allocate something */ 680 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) 681 for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--) 682 #else 683 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) 684 #endif 685 { 686 seg = VM_PHYSMEM_PTR(lcv); 687 688 /* any room in this bank? */ 689 if (seg->avail_start >= seg->avail_end) 690 continue; /* nope */ 691 692 *paddrp = ctob(seg->avail_start); 693 seg->avail_start++; 694 /* truncate! */ 695 seg->start = seg->avail_start; 696 697 /* nothing left? nuke it */ 698 if (seg->avail_start == seg->end) { 699 if (vm_nphysmem == 1) 700 panic("uvm_page_physget: out of memory!"); 701 vm_nphysmem--; 702 for (x = lcv ; x < vm_nphysmem ; x++) 703 /* structure copy */ 704 VM_PHYSMEM_PTR_SWAP(x, x + 1); 705 } 706 return (true); 707 } 708 709 return (false); /* whoops! */ 710 } 711 712 bool 713 uvm_page_physget(paddr_t *paddrp) 714 { 715 int i; 716 717 /* try in the order of freelist preference */ 718 for (i = 0; i < VM_NFREELIST; i++) 719 if (uvm_page_physget_freelist(paddrp, i) == true) 720 return (true); 721 return (false); 722 } 723 #endif /* PMAP_STEAL_MEMORY */ 724 725 /* 726 * uvm_page_physload: load physical memory into VM system 727 * 728 * => all args are PFs 729 * => all pages in start/end get vm_page structures 730 * => areas marked by avail_start/avail_end get added to the free page pool 731 * => we are limited to VM_PHYSSEG_MAX physical memory segments 732 */ 733 734 void 735 uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start, 736 paddr_t avail_end, int free_list) 737 { 738 int preload, lcv; 739 psize_t npages; 740 struct vm_page *pgs; 741 struct vm_physseg *ps; 742 743 if (uvmexp.pagesize == 0) 744 panic("uvm_page_physload: page size not set!"); 745 if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT) 746 panic("uvm_page_physload: bad free list %d", free_list); 747 if (start >= end) 748 panic("uvm_page_physload: start >= end"); 749 750 /* 751 * do we have room? 752 */ 753 754 if (vm_nphysmem == VM_PHYSSEG_MAX) { 755 printf("uvm_page_physload: unable to load physical memory " 756 "segment\n"); 757 printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n", 758 VM_PHYSSEG_MAX, (long long)start, (long long)end); 759 printf("\tincrease VM_PHYSSEG_MAX\n"); 760 return; 761 } 762 763 /* 764 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been 765 * called yet, so kmem is not available). 766 */ 767 768 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) { 769 if (VM_PHYSMEM_PTR(lcv)->pgs) 770 break; 771 } 772 preload = (lcv == vm_nphysmem); 773 774 /* 775 * if VM is already running, attempt to kmem_alloc vm_page structures 776 */ 777 778 if (!preload) { 779 panic("uvm_page_physload: tried to add RAM after vm_mem_init"); 780 } else { 781 pgs = NULL; 782 npages = 0; 783 } 784 785 /* 786 * now insert us in the proper place in vm_physmem[] 787 */ 788 789 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM) 790 /* random: put it at the end (easy!) 
*/ 791 ps = VM_PHYSMEM_PTR(vm_nphysmem); 792 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH) 793 { 794 int x; 795 /* sort by address for binary search */ 796 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) 797 if (start < VM_PHYSMEM_PTR(lcv)->start) 798 break; 799 ps = VM_PHYSMEM_PTR(lcv); 800 /* move back other entries, if necessary ... */ 801 for (x = vm_nphysmem ; x > lcv ; x--) 802 /* structure copy */ 803 VM_PHYSMEM_PTR_SWAP(x, x - 1); 804 } 805 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) 806 { 807 int x; 808 /* sort by largest segment first */ 809 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) 810 if ((end - start) > 811 (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start)) 812 break; 813 ps = VM_PHYSMEM_PTR(lcv); 814 /* move back other entries, if necessary ... */ 815 for (x = vm_nphysmem ; x > lcv ; x--) 816 /* structure copy */ 817 VM_PHYSMEM_PTR_SWAP(x, x - 1); 818 } 819 #else 820 panic("uvm_page_physload: unknown physseg strategy selected!"); 821 #endif 822 823 ps->start = start; 824 ps->end = end; 825 ps->avail_start = avail_start; 826 ps->avail_end = avail_end; 827 if (preload) { 828 ps->pgs = NULL; 829 } else { 830 ps->pgs = pgs; 831 ps->lastpg = pgs + npages; 832 } 833 ps->free_list = free_list; 834 vm_nphysmem++; 835 836 if (!preload) { 837 uvmpdpol_reinit(); 838 } 839 } 840 841 /* 842 * when VM_PHYSSEG_MAX is 1, we can simplify these functions 843 */ 844 845 #if VM_PHYSSEG_MAX == 1 846 static inline int vm_physseg_find_contig(struct vm_physseg *, int, paddr_t, int *); 847 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH) 848 static inline int vm_physseg_find_bsearch(struct vm_physseg *, int, paddr_t, int *); 849 #else 850 static inline int vm_physseg_find_linear(struct vm_physseg *, int, paddr_t, int *); 851 #endif 852 853 /* 854 * vm_physseg_find: find vm_physseg structure that belongs to a PA 855 */ 856 int 857 vm_physseg_find(paddr_t pframe, int *offp) 858 { 859 860 #if VM_PHYSSEG_MAX == 1 861 return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp); 862 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH) 863 return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp); 864 #else 865 return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp); 866 #endif 867 } 868 869 #if VM_PHYSSEG_MAX == 1 870 static inline int 871 vm_physseg_find_contig(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp) 872 { 873 874 /* 'contig' case */ 875 if (pframe >= segs[0].start && pframe < segs[0].end) { 876 if (offp) 877 *offp = pframe - segs[0].start; 878 return(0); 879 } 880 return(-1); 881 } 882 883 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH) 884 885 static inline int 886 vm_physseg_find_bsearch(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp) 887 { 888 /* binary search for it */ 889 u_int start, len, try; 890 891 /* 892 * if try is too large (thus target is less than try) we reduce 893 * the length to trunc(len/2) [i.e. everything smaller than "try"] 894 * 895 * if the try is too small (thus target is greater than try) then 896 * we set the new start to be (try + 1). this means we need to 897 * reduce the length to (round(len/2) - 1). 898 * 899 * note "adjust" below which takes advantage of the fact that 900 * (round(len/2) - 1) == trunc((len - 1) / 2) 901 * for any value of len we may have 902 */ 903 904 for (start = 0, len = nsegs ; len != 0 ; len = len / 2) { 905 try = start + (len / 2); /* try in the middle */ 906 907 /* start past our try? */ 908 if (pframe >= segs[try].start) { 909 /* was try correct? 
*/ 910 if (pframe < segs[try].end) { 911 if (offp) 912 *offp = pframe - segs[try].start; 913 return(try); /* got it */ 914 } 915 start = try + 1; /* next time, start here */ 916 len--; /* "adjust" */ 917 } else { 918 /* 919 * pframe before try, just reduce length of 920 * region, done in "for" loop 921 */ 922 } 923 } 924 return(-1); 925 } 926 927 #else 928 929 static inline int 930 vm_physseg_find_linear(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp) 931 { 932 /* linear search for it */ 933 int lcv; 934 935 for (lcv = 0; lcv < nsegs; lcv++) { 936 if (pframe >= segs[lcv].start && 937 pframe < segs[lcv].end) { 938 if (offp) 939 *offp = pframe - segs[lcv].start; 940 return(lcv); /* got it */ 941 } 942 } 943 return(-1); 944 } 945 #endif 946 947 /* 948 * PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages 949 * back from an I/O mapping (ugh!). used in some MD code as well. 950 */ 951 struct vm_page * 952 uvm_phys_to_vm_page(paddr_t pa) 953 { 954 paddr_t pf = atop(pa); 955 int off; 956 int psi; 957 958 psi = vm_physseg_find(pf, &off); 959 if (psi != -1) 960 return(&VM_PHYSMEM_PTR(psi)->pgs[off]); 961 return(NULL); 962 } 963 964 paddr_t 965 uvm_vm_page_to_phys(const struct vm_page *pg) 966 { 967 968 return pg->phys_addr; 969 } 970 971 /* 972 * uvm_page_recolor: Recolor the pages if the new bucket count is 973 * larger than the old one. 974 */ 975 976 void 977 uvm_page_recolor(int newncolors) 978 { 979 struct pgflbucket *bucketarray, *cpuarray, *oldbucketarray; 980 struct pgfreelist gpgfl, pgfl; 981 struct vm_page *pg; 982 vsize_t bucketcount; 983 size_t bucketmemsize, oldbucketmemsize; 984 int lcv, color, i, ocolors; 985 struct uvm_cpu *ucpu; 986 987 KASSERT(((newncolors - 1) & newncolors) == 0); 988 989 if (newncolors <= uvmexp.ncolors) 990 return; 991 992 if (uvm.page_init_done == false) { 993 uvmexp.ncolors = newncolors; 994 return; 995 } 996 997 bucketcount = newncolors * VM_NFREELIST; 998 bucketmemsize = bucketcount * sizeof(struct pgflbucket) * 2; 999 bucketarray = kmem_alloc(bucketmemsize, KM_SLEEP); 1000 cpuarray = bucketarray + bucketcount; 1001 if (bucketarray == NULL) { 1002 printf("WARNING: unable to allocate %ld page color buckets\n", 1003 (long) bucketcount); 1004 return; 1005 } 1006 1007 mutex_spin_enter(&uvm_fpageqlock); 1008 1009 /* Make sure we should still do this. 
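	 * kmem_alloc() can sleep, so another thread may already have
	 * recolored to at least newncolors while we were blocked.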
*/ 1010 if (newncolors <= uvmexp.ncolors) { 1011 mutex_spin_exit(&uvm_fpageqlock); 1012 kmem_free(bucketarray, bucketmemsize); 1013 return; 1014 } 1015 1016 oldbucketarray = uvm.page_free[0].pgfl_buckets; 1017 ocolors = uvmexp.ncolors; 1018 1019 uvmexp.ncolors = newncolors; 1020 uvmexp.colormask = uvmexp.ncolors - 1; 1021 1022 ucpu = curcpu()->ci_data.cpu_uvm; 1023 for (lcv = 0; lcv < VM_NFREELIST; lcv++) { 1024 gpgfl.pgfl_buckets = (bucketarray + (lcv * newncolors)); 1025 pgfl.pgfl_buckets = (cpuarray + (lcv * uvmexp.ncolors)); 1026 uvm_page_init_buckets(&gpgfl); 1027 uvm_page_init_buckets(&pgfl); 1028 for (color = 0; color < ocolors; color++) { 1029 for (i = 0; i < PGFL_NQUEUES; i++) { 1030 while ((pg = LIST_FIRST(&uvm.page_free[ 1031 lcv].pgfl_buckets[color].pgfl_queues[i])) 1032 != NULL) { 1033 LIST_REMOVE(pg, pageq.list); /* global */ 1034 LIST_REMOVE(pg, listq.list); /* cpu */ 1035 LIST_INSERT_HEAD(&gpgfl.pgfl_buckets[ 1036 VM_PGCOLOR_BUCKET(pg)].pgfl_queues[ 1037 i], pg, pageq.list); 1038 LIST_INSERT_HEAD(&pgfl.pgfl_buckets[ 1039 VM_PGCOLOR_BUCKET(pg)].pgfl_queues[ 1040 i], pg, listq.list); 1041 } 1042 } 1043 } 1044 uvm.page_free[lcv].pgfl_buckets = gpgfl.pgfl_buckets; 1045 ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets; 1046 } 1047 1048 oldbucketmemsize = recolored_pages_memsize; 1049 1050 recolored_pages_memsize = bucketmemsize; 1051 mutex_spin_exit(&uvm_fpageqlock); 1052 1053 if (oldbucketmemsize) { 1054 kmem_free(oldbucketarray, recolored_pages_memsize); 1055 } 1056 1057 /* 1058 * this calls uvm_km_alloc() which may want to hold 1059 * uvm_fpageqlock. 1060 */ 1061 uvm_pager_realloc_emerg(); 1062 } 1063 1064 /* 1065 * uvm_cpu_attach: initialize per-CPU data structures. 1066 */ 1067 1068 void 1069 uvm_cpu_attach(struct cpu_info *ci) 1070 { 1071 struct pgflbucket *bucketarray; 1072 struct pgfreelist pgfl; 1073 struct uvm_cpu *ucpu; 1074 vsize_t bucketcount; 1075 int lcv; 1076 1077 if (CPU_IS_PRIMARY(ci)) { 1078 /* Already done in uvm_page_init(). */ 1079 goto attachrnd; 1080 } 1081 1082 /* Add more reserve pages for this CPU. */ 1083 uvmexp.reserve_kernel += vm_page_reserve_kernel; 1084 1085 /* Configure this CPU's free lists. 
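	 * Each CPU gets its own bucket array; uvm_pagealloc_pgfl() checks
	 * the per-CPU buckets before the global ones, so pages tend to be
	 * handed back out on the CPU that last freed them.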
*/ 1086 bucketcount = uvmexp.ncolors * VM_NFREELIST; 1087 bucketarray = kmem_alloc(bucketcount * sizeof(struct pgflbucket), 1088 KM_SLEEP); 1089 ucpu = kmem_zalloc(sizeof(*ucpu), KM_SLEEP); 1090 uvm.cpus[cpu_index(ci)] = ucpu; 1091 ci->ci_data.cpu_uvm = ucpu; 1092 for (lcv = 0; lcv < VM_NFREELIST; lcv++) { 1093 pgfl.pgfl_buckets = (bucketarray + (lcv * uvmexp.ncolors)); 1094 uvm_page_init_buckets(&pgfl); 1095 ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets; 1096 } 1097 1098 attachrnd: 1099 /* 1100 * Attach RNG source for this CPU's VM events 1101 */ 1102 rnd_attach_source(&uvm.cpus[cpu_index(ci)]->rs, 1103 ci->ci_data.cpu_name, RND_TYPE_VM, 0); 1104 1105 } 1106 1107 /* 1108 * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat 1109 */ 1110 1111 static struct vm_page * 1112 uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int flist, int try1, int try2, 1113 int *trycolorp) 1114 { 1115 struct pgflist *freeq; 1116 struct vm_page *pg; 1117 int color, trycolor = *trycolorp; 1118 struct pgfreelist *gpgfl, *pgfl; 1119 1120 KASSERT(mutex_owned(&uvm_fpageqlock)); 1121 1122 color = trycolor; 1123 pgfl = &ucpu->page_free[flist]; 1124 gpgfl = &uvm.page_free[flist]; 1125 do { 1126 /* cpu, try1 */ 1127 if ((pg = LIST_FIRST((freeq = 1128 &pgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) { 1129 KASSERT(pg->pqflags & PQ_FREE); 1130 KASSERT(try1 == PGFL_ZEROS || !(pg->flags & PG_ZERO)); 1131 KASSERT(try1 == PGFL_UNKNOWN || (pg->flags & PG_ZERO)); 1132 KASSERT(ucpu == VM_FREE_PAGE_TO_CPU(pg)); 1133 VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--; 1134 uvmexp.cpuhit++; 1135 goto gotit; 1136 } 1137 /* global, try1 */ 1138 if ((pg = LIST_FIRST((freeq = 1139 &gpgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) { 1140 KASSERT(pg->pqflags & PQ_FREE); 1141 KASSERT(try1 == PGFL_ZEROS || !(pg->flags & PG_ZERO)); 1142 KASSERT(try1 == PGFL_UNKNOWN || (pg->flags & PG_ZERO)); 1143 KASSERT(ucpu != VM_FREE_PAGE_TO_CPU(pg)); 1144 VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--; 1145 uvmexp.cpumiss++; 1146 goto gotit; 1147 } 1148 /* cpu, try2 */ 1149 if ((pg = LIST_FIRST((freeq = 1150 &pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) { 1151 KASSERT(pg->pqflags & PQ_FREE); 1152 KASSERT(try2 == PGFL_ZEROS || !(pg->flags & PG_ZERO)); 1153 KASSERT(try2 == PGFL_UNKNOWN || (pg->flags & PG_ZERO)); 1154 KASSERT(ucpu == VM_FREE_PAGE_TO_CPU(pg)); 1155 VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--; 1156 uvmexp.cpuhit++; 1157 goto gotit; 1158 } 1159 /* global, try2 */ 1160 if ((pg = LIST_FIRST((freeq = 1161 &gpgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) { 1162 KASSERT(pg->pqflags & PQ_FREE); 1163 KASSERT(try2 == PGFL_ZEROS || !(pg->flags & PG_ZERO)); 1164 KASSERT(try2 == PGFL_UNKNOWN || (pg->flags & PG_ZERO)); 1165 KASSERT(ucpu != VM_FREE_PAGE_TO_CPU(pg)); 1166 VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--; 1167 uvmexp.cpumiss++; 1168 goto gotit; 1169 } 1170 color = (color + 1) & uvmexp.colormask; 1171 } while (color != trycolor); 1172 1173 return (NULL); 1174 1175 gotit: 1176 LIST_REMOVE(pg, pageq.list); /* global list */ 1177 LIST_REMOVE(pg, listq.list); /* per-cpu list */ 1178 uvmexp.free--; 1179 1180 /* update zero'd page count */ 1181 if (pg->flags & PG_ZERO) 1182 uvmexp.zeropages--; 1183 1184 if (color == trycolor) 1185 uvmexp.colorhit++; 1186 else { 1187 uvmexp.colormiss++; 1188 *trycolorp = color; 1189 } 1190 1191 return (pg); 1192 } 1193 1194 /* 1195 * uvm_pagealloc_strat: allocate vm_page from a particular free list. 
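 *
 * Illustrative call only (hypothetical caller; most code goes through
 * the uvm_pagealloc() wrapper, which passes UVM_PGA_STRAT_NORMAL and
 * free list 0):
 *
 *	pg = uvm_pagealloc_strat(uobj, off, NULL, UVM_PGA_ZERO,
 *	    UVM_PGA_STRAT_NORMAL, 0);
 *	if (pg == NULL)
 *		uvm_wait("pgalloc");	(hypothetical wait channel)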
1196 * 1197 * => return null if no pages free 1198 * => wake up pagedaemon if number of free pages drops below low water mark 1199 * => if obj != NULL, obj must be locked (to put in obj's tree) 1200 * => if anon != NULL, anon must be locked (to put in anon) 1201 * => only one of obj or anon can be non-null 1202 * => caller must activate/deactivate page if it is not wired. 1203 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL. 1204 * => policy decision: it is more important to pull a page off of the 1205 * appropriate priority free list than it is to get a zero'd or 1206 * unknown contents page. This is because we live with the 1207 * consequences of a bad free list decision for the entire 1208 * lifetime of the page, e.g. if the page comes from memory that 1209 * is slower to access. 1210 */ 1211 1212 struct vm_page * 1213 uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon, 1214 int flags, int strat, int free_list) 1215 { 1216 int lcv, try1, try2, zeroit = 0, color; 1217 struct uvm_cpu *ucpu; 1218 struct vm_page *pg; 1219 lwp_t *l; 1220 1221 KASSERT(obj == NULL || anon == NULL); 1222 KASSERT(anon == NULL || (flags & UVM_FLAG_COLORMATCH) || off == 0); 1223 KASSERT(off == trunc_page(off)); 1224 KASSERT(obj == NULL || mutex_owned(obj->vmobjlock)); 1225 KASSERT(anon == NULL || anon->an_lock == NULL || 1226 mutex_owned(anon->an_lock)); 1227 1228 mutex_spin_enter(&uvm_fpageqlock); 1229 1230 /* 1231 * This implements a global round-robin page coloring 1232 * algorithm. 1233 */ 1234 1235 ucpu = curcpu()->ci_data.cpu_uvm; 1236 if (flags & UVM_FLAG_COLORMATCH) { 1237 color = atop(off) & uvmexp.colormask; 1238 } else { 1239 color = ucpu->page_free_nextcolor; 1240 } 1241 1242 /* 1243 * check to see if we need to generate some free pages waking 1244 * the pagedaemon. 1245 */ 1246 1247 uvm_kick_pdaemon(); 1248 1249 /* 1250 * fail if any of these conditions is true: 1251 * [1] there really are no free pages, or 1252 * [2] only kernel "reserved" pages remain and 1253 * reserved pages have not been requested. 1254 * [3] only pagedaemon "reserved" pages remain and 1255 * the requestor isn't the pagedaemon. 1256 * we make kernel reserve pages available if called by a 1257 * kernel thread or a realtime thread. 1258 */ 1259 l = curlwp; 1260 if (__predict_true(l != NULL) && lwp_eprio(l) >= PRI_KTHREAD) { 1261 flags |= UVM_PGA_USERESERVE; 1262 } 1263 if ((uvmexp.free <= uvmexp.reserve_kernel && 1264 (flags & UVM_PGA_USERESERVE) == 0) || 1265 (uvmexp.free <= uvmexp.reserve_pagedaemon && 1266 curlwp != uvm.pagedaemon_lwp)) 1267 goto fail; 1268 1269 #if PGFL_NQUEUES != 2 1270 #error uvm_pagealloc_strat needs to be updated 1271 #endif 1272 1273 /* 1274 * If we want a zero'd page, try the ZEROS queue first, otherwise 1275 * we try the UNKNOWN queue first. 1276 */ 1277 if (flags & UVM_PGA_ZERO) { 1278 try1 = PGFL_ZEROS; 1279 try2 = PGFL_UNKNOWN; 1280 } else { 1281 try1 = PGFL_UNKNOWN; 1282 try2 = PGFL_ZEROS; 1283 } 1284 1285 again: 1286 switch (strat) { 1287 case UVM_PGA_STRAT_NORMAL: 1288 /* Check freelists: descending priority (ascending id) order */ 1289 for (lcv = 0; lcv < VM_NFREELIST; lcv++) { 1290 pg = uvm_pagealloc_pgfl(ucpu, lcv, 1291 try1, try2, &color); 1292 if (pg != NULL) 1293 goto gotit; 1294 } 1295 1296 /* No pages free! */ 1297 goto fail; 1298 1299 case UVM_PGA_STRAT_ONLY: 1300 case UVM_PGA_STRAT_FALLBACK: 1301 /* Attempt to allocate from the specified free list. 
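		 * (UVM_PGA_STRAT_ONLY gives up if that list is empty;
		 * UVM_PGA_STRAT_FALLBACK retries below with the normal
		 * round-robin strategy.)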
*/ 1302 KASSERT(free_list >= 0 && free_list < VM_NFREELIST); 1303 pg = uvm_pagealloc_pgfl(ucpu, free_list, 1304 try1, try2, &color); 1305 if (pg != NULL) 1306 goto gotit; 1307 1308 /* Fall back, if possible. */ 1309 if (strat == UVM_PGA_STRAT_FALLBACK) { 1310 strat = UVM_PGA_STRAT_NORMAL; 1311 goto again; 1312 } 1313 1314 /* No pages free! */ 1315 goto fail; 1316 1317 default: 1318 panic("uvm_pagealloc_strat: bad strat %d", strat); 1319 /* NOTREACHED */ 1320 } 1321 1322 gotit: 1323 /* 1324 * We now know which color we actually allocated from; set 1325 * the next color accordingly. 1326 */ 1327 1328 ucpu->page_free_nextcolor = (color + 1) & uvmexp.colormask; 1329 1330 /* 1331 * update allocation statistics and remember if we have to 1332 * zero the page 1333 */ 1334 1335 if (flags & UVM_PGA_ZERO) { 1336 if (pg->flags & PG_ZERO) { 1337 uvmexp.pga_zerohit++; 1338 zeroit = 0; 1339 } else { 1340 uvmexp.pga_zeromiss++; 1341 zeroit = 1; 1342 } 1343 if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) { 1344 ucpu->page_idle_zero = vm_page_zero_enable; 1345 } 1346 } 1347 KASSERT(pg->pqflags == PQ_FREE); 1348 1349 pg->offset = off; 1350 pg->uobject = obj; 1351 pg->uanon = anon; 1352 pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE; 1353 if (anon) { 1354 anon->an_page = pg; 1355 pg->pqflags = PQ_ANON; 1356 atomic_inc_uint(&uvmexp.anonpages); 1357 } else { 1358 if (obj) { 1359 uvm_pageinsert(obj, pg); 1360 } 1361 pg->pqflags = 0; 1362 } 1363 mutex_spin_exit(&uvm_fpageqlock); 1364 1365 #if defined(UVM_PAGE_TRKOWN) 1366 pg->owner_tag = NULL; 1367 #endif 1368 UVM_PAGE_OWN(pg, "new alloc"); 1369 1370 if (flags & UVM_PGA_ZERO) { 1371 /* 1372 * A zero'd page is not clean. If we got a page not already 1373 * zero'd, then we have to zero it ourselves. 1374 */ 1375 pg->flags &= ~PG_CLEAN; 1376 if (zeroit) 1377 pmap_zero_page(VM_PAGE_TO_PHYS(pg)); 1378 } 1379 1380 return(pg); 1381 1382 fail: 1383 mutex_spin_exit(&uvm_fpageqlock); 1384 return (NULL); 1385 } 1386 1387 /* 1388 * uvm_pagereplace: replace a page with another 1389 * 1390 * => object must be locked 1391 */ 1392 1393 void 1394 uvm_pagereplace(struct vm_page *oldpg, struct vm_page *newpg) 1395 { 1396 struct uvm_object *uobj = oldpg->uobject; 1397 1398 KASSERT((oldpg->flags & PG_TABLED) != 0); 1399 KASSERT(uobj != NULL); 1400 KASSERT((newpg->flags & PG_TABLED) == 0); 1401 KASSERT(newpg->uobject == NULL); 1402 KASSERT(mutex_owned(uobj->vmobjlock)); 1403 1404 newpg->uobject = uobj; 1405 newpg->offset = oldpg->offset; 1406 1407 uvm_pageremove_tree(uobj, oldpg); 1408 uvm_pageinsert_tree(uobj, newpg); 1409 uvm_pageinsert_list(uobj, newpg, oldpg); 1410 uvm_pageremove_list(uobj, oldpg); 1411 } 1412 1413 /* 1414 * uvm_pagerealloc: reallocate a page from one object to another 1415 * 1416 * => both objects must be locked 1417 */ 1418 1419 void 1420 uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff) 1421 { 1422 /* 1423 * remove it from the old object 1424 */ 1425 1426 if (pg->uobject) { 1427 uvm_pageremove(pg->uobject, pg); 1428 } 1429 1430 /* 1431 * put it in the new object 1432 */ 1433 1434 if (newobj) { 1435 pg->uobject = newobj; 1436 pg->offset = newoff; 1437 uvm_pageinsert(newobj, pg); 1438 } 1439 } 1440 1441 #ifdef DEBUG 1442 /* 1443 * check if page is zero-filled 1444 * 1445 * - called with free page queue lock held. 
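 * - maps the page read-only at uvm_zerocheckkva, scans it one word at
 *   a time and panics on the first non-zero word.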
1446 */ 1447 void 1448 uvm_pagezerocheck(struct vm_page *pg) 1449 { 1450 int *p, *ep; 1451 1452 KASSERT(uvm_zerocheckkva != 0); 1453 KASSERT(mutex_owned(&uvm_fpageqlock)); 1454 1455 /* 1456 * XXX assuming pmap_kenter_pa and pmap_kremove never call 1457 * uvm page allocator. 1458 * 1459 * it might be better to have "CPU-local temporary map" pmap interface. 1460 */ 1461 pmap_kenter_pa(uvm_zerocheckkva, VM_PAGE_TO_PHYS(pg), VM_PROT_READ, 0); 1462 p = (int *)uvm_zerocheckkva; 1463 ep = (int *)((char *)p + PAGE_SIZE); 1464 pmap_update(pmap_kernel()); 1465 while (p < ep) { 1466 if (*p != 0) 1467 panic("PG_ZERO page isn't zero-filled"); 1468 p++; 1469 } 1470 pmap_kremove(uvm_zerocheckkva, PAGE_SIZE); 1471 /* 1472 * pmap_update() is not necessary here because no one except us 1473 * uses this VA. 1474 */ 1475 } 1476 #endif /* DEBUG */ 1477 1478 /* 1479 * uvm_pagefree: free page 1480 * 1481 * => erase page's identity (i.e. remove from object) 1482 * => put page on free list 1483 * => caller must lock owning object (either anon or uvm_object) 1484 * => caller must lock page queues 1485 * => assumes all valid mappings of pg are gone 1486 */ 1487 1488 void 1489 uvm_pagefree(struct vm_page *pg) 1490 { 1491 struct pgflist *pgfl; 1492 struct uvm_cpu *ucpu; 1493 int index, color, queue; 1494 bool iszero; 1495 1496 #ifdef DEBUG 1497 if (pg->uobject == (void *)0xdeadbeef && 1498 pg->uanon == (void *)0xdeadbeef) { 1499 panic("uvm_pagefree: freeing free page %p", pg); 1500 } 1501 #endif /* DEBUG */ 1502 1503 KASSERT((pg->flags & PG_PAGEOUT) == 0); 1504 KASSERT(!(pg->pqflags & PQ_FREE)); 1505 //KASSERT(mutex_owned(&uvm_pageqlock) || !uvmpdpol_pageisqueued_p(pg)); 1506 KASSERT(pg->uobject == NULL || mutex_owned(pg->uobject->vmobjlock)); 1507 KASSERT(pg->uobject != NULL || pg->uanon == NULL || 1508 mutex_owned(pg->uanon->an_lock)); 1509 1510 /* 1511 * if the page is loaned, resolve the loan instead of freeing. 1512 */ 1513 1514 if (pg->loan_count) { 1515 KASSERT(pg->wire_count == 0); 1516 1517 /* 1518 * if the page is owned by an anon then we just want to 1519 * drop anon ownership. the kernel will free the page when 1520 * it is done with it. if the page is owned by an object, 1521 * remove it from the object and mark it dirty for the benefit 1522 * of possible anon owners. 1523 * 1524 * regardless of previous ownership, wakeup any waiters, 1525 * unbusy the page, and we're done. 1526 */ 1527 1528 if (pg->uobject != NULL) { 1529 uvm_pageremove(pg->uobject, pg); 1530 pg->flags &= ~PG_CLEAN; 1531 } else if (pg->uanon != NULL) { 1532 if ((pg->pqflags & PQ_ANON) == 0) { 1533 pg->loan_count--; 1534 } else { 1535 pg->pqflags &= ~PQ_ANON; 1536 atomic_dec_uint(&uvmexp.anonpages); 1537 } 1538 pg->uanon->an_page = NULL; 1539 pg->uanon = NULL; 1540 } 1541 if (pg->flags & PG_WANTED) { 1542 wakeup(pg); 1543 } 1544 pg->flags &= ~(PG_WANTED|PG_BUSY|PG_RELEASED|PG_PAGER1); 1545 #ifdef UVM_PAGE_TRKOWN 1546 pg->owner_tag = NULL; 1547 #endif 1548 if (pg->loan_count) { 1549 KASSERT(pg->uobject == NULL); 1550 if (pg->uanon == NULL) { 1551 KASSERT(mutex_owned(&uvm_pageqlock)); 1552 uvm_pagedequeue(pg); 1553 } 1554 return; 1555 } 1556 } 1557 1558 /* 1559 * remove page from its object or anon. 1560 */ 1561 1562 if (pg->uobject != NULL) { 1563 uvm_pageremove(pg->uobject, pg); 1564 } else if (pg->uanon != NULL) { 1565 pg->uanon->an_page = NULL; 1566 atomic_dec_uint(&uvmexp.anonpages); 1567 } 1568 1569 /* 1570 * now remove the page from the queues. 
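	 * (the pdpolicy module owns active/inactive queueing; only
	 * dequeue if it reports the page as actually being queued)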
1571 */ 1572 if (uvmpdpol_pageisqueued_p(pg)) { 1573 KASSERT(mutex_owned(&uvm_pageqlock)); 1574 uvm_pagedequeue(pg); 1575 } 1576 1577 /* 1578 * if the page was wired, unwire it now. 1579 */ 1580 1581 if (pg->wire_count) { 1582 pg->wire_count = 0; 1583 uvmexp.wired--; 1584 } 1585 1586 /* 1587 * and put on free queue 1588 */ 1589 1590 iszero = (pg->flags & PG_ZERO); 1591 index = uvm_page_lookup_freelist(pg); 1592 color = VM_PGCOLOR_BUCKET(pg); 1593 queue = (iszero ? PGFL_ZEROS : PGFL_UNKNOWN); 1594 1595 #ifdef DEBUG 1596 pg->uobject = (void *)0xdeadbeef; 1597 pg->uanon = (void *)0xdeadbeef; 1598 #endif 1599 1600 mutex_spin_enter(&uvm_fpageqlock); 1601 pg->pqflags = PQ_FREE; 1602 1603 #ifdef DEBUG 1604 if (iszero) 1605 uvm_pagezerocheck(pg); 1606 #endif /* DEBUG */ 1607 1608 1609 /* global list */ 1610 pgfl = &uvm.page_free[index].pgfl_buckets[color].pgfl_queues[queue]; 1611 LIST_INSERT_HEAD(pgfl, pg, pageq.list); 1612 uvmexp.free++; 1613 if (iszero) { 1614 uvmexp.zeropages++; 1615 } 1616 1617 /* per-cpu list */ 1618 ucpu = curcpu()->ci_data.cpu_uvm; 1619 pg->offset = (uintptr_t)ucpu; 1620 pgfl = &ucpu->page_free[index].pgfl_buckets[color].pgfl_queues[queue]; 1621 LIST_INSERT_HEAD(pgfl, pg, listq.list); 1622 ucpu->pages[queue]++; 1623 if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) { 1624 ucpu->page_idle_zero = vm_page_zero_enable; 1625 } 1626 1627 mutex_spin_exit(&uvm_fpageqlock); 1628 } 1629 1630 /* 1631 * uvm_page_unbusy: unbusy an array of pages. 1632 * 1633 * => pages must either all belong to the same object, or all belong to anons. 1634 * => if pages are object-owned, object must be locked. 1635 * => if pages are anon-owned, anons must be locked. 1636 * => caller must lock page queues if pages may be released. 1637 * => caller must make sure that anon-owned pages are not PG_RELEASED. 1638 */ 1639 1640 void 1641 uvm_page_unbusy(struct vm_page **pgs, int npgs) 1642 { 1643 struct vm_page *pg; 1644 int i; 1645 UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist); 1646 1647 for (i = 0; i < npgs; i++) { 1648 pg = pgs[i]; 1649 if (pg == NULL || pg == PGO_DONTCARE) { 1650 continue; 1651 } 1652 1653 KASSERT(uvm_page_locked_p(pg)); 1654 KASSERT(pg->flags & PG_BUSY); 1655 KASSERT((pg->flags & PG_PAGEOUT) == 0); 1656 if (pg->flags & PG_WANTED) { 1657 wakeup(pg); 1658 } 1659 if (pg->flags & PG_RELEASED) { 1660 UVMHIST_LOG(ubchist, "releasing pg %p", pg,0,0,0); 1661 KASSERT(pg->uobject != NULL || 1662 (pg->uanon != NULL && pg->uanon->an_ref > 0)); 1663 pg->flags &= ~PG_RELEASED; 1664 uvm_pagefree(pg); 1665 } else { 1666 UVMHIST_LOG(ubchist, "unbusying pg %p", pg,0,0,0); 1667 KASSERT((pg->flags & PG_FAKE) == 0); 1668 pg->flags &= ~(PG_WANTED|PG_BUSY); 1669 UVM_PAGE_OWN(pg, NULL); 1670 } 1671 } 1672 } 1673 1674 #if defined(UVM_PAGE_TRKOWN) 1675 /* 1676 * uvm_page_own: set or release page ownership 1677 * 1678 * => this is a debugging function that keeps track of who sets PG_BUSY 1679 * and where they do it. it can be used to track down problems 1680 * such a process setting "PG_BUSY" and never releasing it. 1681 * => page's object [if any] must be locked 1682 * => if "tag" is NULL then we are releasing page ownership 1683 */ 1684 void 1685 uvm_page_own(struct vm_page *pg, const char *tag) 1686 { 1687 1688 KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0); 1689 KASSERT((pg->flags & PG_WANTED) == 0); 1690 KASSERT(uvm_page_locked_p(pg)); 1691 1692 /* gain ownership? 
*/ 1693 if (tag) { 1694 KASSERT((pg->flags & PG_BUSY) != 0); 1695 if (pg->owner_tag) { 1696 printf("uvm_page_own: page %p already owned " 1697 "by proc %d [%s]\n", pg, 1698 pg->owner, pg->owner_tag); 1699 panic("uvm_page_own"); 1700 } 1701 pg->owner = curproc->p_pid; 1702 pg->lowner = curlwp->l_lid; 1703 pg->owner_tag = tag; 1704 return; 1705 } 1706 1707 /* drop ownership */ 1708 KASSERT((pg->flags & PG_BUSY) == 0); 1709 if (pg->owner_tag == NULL) { 1710 printf("uvm_page_own: dropping ownership of a non-owned " 1711 "page (%p)\n", pg); 1712 panic("uvm_page_own"); 1713 } 1714 if (!uvmpdpol_pageisqueued_p(pg)) { 1715 KASSERT((pg->uanon == NULL && pg->uobject == NULL) || 1716 pg->wire_count > 0); 1717 } else { 1718 KASSERT(pg->wire_count == 0); 1719 } 1720 pg->owner_tag = NULL; 1721 } 1722 #endif 1723 1724 /* 1725 * uvm_pageidlezero: zero free pages while the system is idle. 1726 * 1727 * => try to complete one color bucket at a time, to reduce our impact 1728 * on the CPU cache. 1729 * => we loop until we either reach the target or there is a lwp ready 1730 * to run, or MD code detects a reason to break early. 1731 */ 1732 void 1733 uvm_pageidlezero(void) 1734 { 1735 struct vm_page *pg; 1736 struct pgfreelist *pgfl, *gpgfl; 1737 struct uvm_cpu *ucpu; 1738 int free_list, firstbucket, nextbucket; 1739 bool lcont = false; 1740 1741 ucpu = curcpu()->ci_data.cpu_uvm; 1742 if (!ucpu->page_idle_zero || 1743 ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) { 1744 ucpu->page_idle_zero = false; 1745 return; 1746 } 1747 if (!mutex_tryenter(&uvm_fpageqlock)) { 1748 /* Contention: let other CPUs use the lock. */ 1749 return; 1750 } 1751 firstbucket = ucpu->page_free_nextcolor; 1752 nextbucket = firstbucket; 1753 do { 1754 for (free_list = 0; free_list < VM_NFREELIST; free_list++) { 1755 if (sched_curcpu_runnable_p()) { 1756 goto quit; 1757 } 1758 pgfl = &ucpu->page_free[free_list]; 1759 gpgfl = &uvm.page_free[free_list]; 1760 while ((pg = LIST_FIRST(&pgfl->pgfl_buckets[ 1761 nextbucket].pgfl_queues[PGFL_UNKNOWN])) != NULL) { 1762 if (lcont || sched_curcpu_runnable_p()) { 1763 goto quit; 1764 } 1765 LIST_REMOVE(pg, pageq.list); /* global list */ 1766 LIST_REMOVE(pg, listq.list); /* per-cpu list */ 1767 ucpu->pages[PGFL_UNKNOWN]--; 1768 uvmexp.free--; 1769 KASSERT(pg->pqflags == PQ_FREE); 1770 pg->pqflags = 0; 1771 mutex_spin_exit(&uvm_fpageqlock); 1772 #ifdef PMAP_PAGEIDLEZERO 1773 if (!PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg))) { 1774 1775 /* 1776 * The machine-dependent code detected 1777 * some reason for us to abort zeroing 1778 * pages, probably because there is a 1779 * process now ready to run.
1780 */ 1781 1782 mutex_spin_enter(&uvm_fpageqlock); 1783 pg->pqflags = PQ_FREE; 1784 LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[ 1785 nextbucket].pgfl_queues[ 1786 PGFL_UNKNOWN], pg, pageq.list); 1787 LIST_INSERT_HEAD(&pgfl->pgfl_buckets[ 1788 nextbucket].pgfl_queues[ 1789 PGFL_UNKNOWN], pg, listq.list); 1790 ucpu->pages[PGFL_UNKNOWN]++; 1791 uvmexp.free++; 1792 uvmexp.zeroaborts++; 1793 goto quit; 1794 } 1795 #else 1796 pmap_zero_page(VM_PAGE_TO_PHYS(pg)); 1797 #endif /* PMAP_PAGEIDLEZERO */ 1798 pg->flags |= PG_ZERO; 1799 1800 if (!mutex_tryenter(&uvm_fpageqlock)) { 1801 lcont = true; 1802 mutex_spin_enter(&uvm_fpageqlock); 1803 } else { 1804 lcont = false; 1805 } 1806 pg->pqflags = PQ_FREE; 1807 LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[ 1808 nextbucket].pgfl_queues[PGFL_ZEROS], 1809 pg, pageq.list); 1810 LIST_INSERT_HEAD(&pgfl->pgfl_buckets[ 1811 nextbucket].pgfl_queues[PGFL_ZEROS], 1812 pg, listq.list); 1813 ucpu->pages[PGFL_ZEROS]++; 1814 uvmexp.free++; 1815 uvmexp.zeropages++; 1816 } 1817 } 1818 if (ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) { 1819 break; 1820 } 1821 nextbucket = (nextbucket + 1) & uvmexp.colormask; 1822 } while (nextbucket != firstbucket); 1823 ucpu->page_idle_zero = false; 1824 quit: 1825 mutex_spin_exit(&uvm_fpageqlock); 1826 } 1827 1828 /* 1829 * uvm_pagelookup: look up a page 1830 * 1831 * => caller should lock object to keep someone from pulling the page 1832 * out from under it 1833 */ 1834 1835 struct vm_page * 1836 uvm_pagelookup(struct uvm_object *obj, voff_t off) 1837 { 1838 struct vm_page *pg; 1839 1840 KASSERT(mutex_owned(obj->vmobjlock)); 1841 1842 pg = rb_tree_find_node(&obj->rb_tree, &off); 1843 1844 KASSERT(pg == NULL || obj->uo_npages != 0); 1845 KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 || 1846 (pg->flags & PG_BUSY) != 0); 1847 return pg; 1848 } 1849 1850 /* 1851 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp 1852 * 1853 * => caller must lock page queues 1854 */ 1855 1856 void 1857 uvm_pagewire(struct vm_page *pg) 1858 { 1859 KASSERT(mutex_owned(&uvm_pageqlock)); 1860 #if defined(READAHEAD_STATS) 1861 if ((pg->pqflags & PQ_READAHEAD) != 0) { 1862 uvm_ra_hit.ev_count++; 1863 pg->pqflags &= ~PQ_READAHEAD; 1864 } 1865 #endif /* defined(READAHEAD_STATS) */ 1866 if (pg->wire_count == 0) { 1867 uvm_pagedequeue(pg); 1868 uvmexp.wired++; 1869 } 1870 pg->wire_count++; 1871 } 1872 1873 /* 1874 * uvm_pageunwire: unwire the page. 1875 * 1876 * => activate if wire count goes to zero. 
1877 * => caller must lock page queues 1878 */ 1879 1880 void 1881 uvm_pageunwire(struct vm_page *pg) 1882 { 1883 KASSERT(mutex_owned(&uvm_pageqlock)); 1884 pg->wire_count--; 1885 if (pg->wire_count == 0) { 1886 uvm_pageactivate(pg); 1887 uvmexp.wired--; 1888 } 1889 } 1890 1891 /* 1892 * uvm_pagedeactivate: deactivate page 1893 * 1894 * => caller must lock page queues 1895 * => caller must check to make sure page is not wired 1896 * => object that page belongs to must be locked (so we can adjust pg->flags) 1897 * => caller must clear the reference on the page before calling 1898 */ 1899 1900 void 1901 uvm_pagedeactivate(struct vm_page *pg) 1902 { 1903 1904 KASSERT(mutex_owned(&uvm_pageqlock)); 1905 KASSERT(uvm_page_locked_p(pg)); 1906 KASSERT(pg->wire_count != 0 || uvmpdpol_pageisqueued_p(pg)); 1907 uvmpdpol_pagedeactivate(pg); 1908 } 1909 1910 /* 1911 * uvm_pageactivate: activate page 1912 * 1913 * => caller must lock page queues 1914 */ 1915 1916 void 1917 uvm_pageactivate(struct vm_page *pg) 1918 { 1919 1920 KASSERT(mutex_owned(&uvm_pageqlock)); 1921 KASSERT(uvm_page_locked_p(pg)); 1922 #if defined(READAHEAD_STATS) 1923 if ((pg->pqflags & PQ_READAHEAD) != 0) { 1924 uvm_ra_hit.ev_count++; 1925 pg->pqflags &= ~PQ_READAHEAD; 1926 } 1927 #endif /* defined(READAHEAD_STATS) */ 1928 if (pg->wire_count != 0) { 1929 return; 1930 } 1931 uvmpdpol_pageactivate(pg); 1932 } 1933 1934 /* 1935 * uvm_pagedequeue: remove a page from any paging queue 1936 */ 1937 1938 void 1939 uvm_pagedequeue(struct vm_page *pg) 1940 { 1941 1942 if (uvmpdpol_pageisqueued_p(pg)) { 1943 KASSERT(mutex_owned(&uvm_pageqlock)); 1944 } 1945 1946 uvmpdpol_pagedequeue(pg); 1947 } 1948 1949 /* 1950 * uvm_pageenqueue: add a page to a paging queue without activating. 1951 * used where a page is not really demanded (yet). eg. read-ahead 1952 */ 1953 1954 void 1955 uvm_pageenqueue(struct vm_page *pg) 1956 { 1957 1958 KASSERT(mutex_owned(&uvm_pageqlock)); 1959 if (pg->wire_count != 0) { 1960 return; 1961 } 1962 uvmpdpol_pageenqueue(pg); 1963 } 1964 1965 /* 1966 * uvm_pagezero: zero fill a page 1967 * 1968 * => if page is part of an object then the object should be locked 1969 * to protect pg->flags. 1970 */ 1971 1972 void 1973 uvm_pagezero(struct vm_page *pg) 1974 { 1975 pg->flags &= ~PG_CLEAN; 1976 pmap_zero_page(VM_PAGE_TO_PHYS(pg)); 1977 } 1978 1979 /* 1980 * uvm_pagecopy: copy a page 1981 * 1982 * => if page is part of an object then the object should be locked 1983 * to protect pg->flags. 1984 */ 1985 1986 void 1987 uvm_pagecopy(struct vm_page *src, struct vm_page *dst) 1988 { 1989 1990 dst->flags &= ~PG_CLEAN; 1991 pmap_copy_page(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst)); 1992 } 1993 1994 /* 1995 * uvm_pageismanaged: test it see that a page (specified by PA) is managed. 1996 */ 1997 1998 bool 1999 uvm_pageismanaged(paddr_t pa) 2000 { 2001 2002 return (vm_physseg_find(atop(pa), NULL) != -1); 2003 } 2004 2005 /* 2006 * uvm_page_lookup_freelist: look up the free list for the specified page 2007 */ 2008 2009 int 2010 uvm_page_lookup_freelist(struct vm_page *pg) 2011 { 2012 int lcv; 2013 2014 lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL); 2015 KASSERT(lcv != -1); 2016 return (VM_PHYSMEM_PTR(lcv)->free_list); 2017 } 2018 2019 /* 2020 * uvm_page_locked_p: return true if object associated with page is 2021 * locked. this is a weak check for runtime assertions only. 
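 * (a page that has neither a uobject nor a uanon trivially reports
 * true)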
2022 */ 2023 2024 bool 2025 uvm_page_locked_p(struct vm_page *pg) 2026 { 2027 2028 if (pg->uobject != NULL) { 2029 return mutex_owned(pg->uobject->vmobjlock); 2030 } 2031 if (pg->uanon != NULL) { 2032 return mutex_owned(pg->uanon->an_lock); 2033 } 2034 return true; 2035 } 2036 2037 #if defined(DDB) || defined(DEBUGPRINT) 2038 2039 /* 2040 * uvm_page_printit: actually print the page 2041 */ 2042 2043 static const char page_flagbits[] = UVM_PGFLAGBITS; 2044 static const char page_pqflagbits[] = UVM_PQFLAGBITS; 2045 2046 void 2047 uvm_page_printit(struct vm_page *pg, bool full, 2048 void (*pr)(const char *, ...)) 2049 { 2050 struct vm_page *tpg; 2051 struct uvm_object *uobj; 2052 struct pgflist *pgl; 2053 char pgbuf[128]; 2054 char pqbuf[128]; 2055 2056 (*pr)("PAGE %p:\n", pg); 2057 snprintb(pgbuf, sizeof(pgbuf), page_flagbits, pg->flags); 2058 snprintb(pqbuf, sizeof(pqbuf), page_pqflagbits, pg->pqflags); 2059 (*pr)(" flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n", 2060 pgbuf, pqbuf, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg)); 2061 (*pr)(" uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n", 2062 pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count); 2063 #if defined(UVM_PAGE_TRKOWN) 2064 if (pg->flags & PG_BUSY) 2065 (*pr)(" owning process = %d, tag=%s\n", 2066 pg->owner, pg->owner_tag); 2067 else 2068 (*pr)(" page not busy, no owner\n"); 2069 #else 2070 (*pr)(" [page ownership tracking disabled]\n"); 2071 #endif 2072 2073 if (!full) 2074 return; 2075 2076 /* cross-verify object/anon */ 2077 if ((pg->pqflags & PQ_FREE) == 0) { 2078 if (pg->pqflags & PQ_ANON) { 2079 if (pg->uanon == NULL || pg->uanon->an_page != pg) 2080 (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n", 2081 (pg->uanon) ? pg->uanon->an_page : NULL); 2082 else 2083 (*pr)(" anon backpointer is OK\n"); 2084 } else { 2085 uobj = pg->uobject; 2086 if (uobj) { 2087 (*pr)(" checking object list\n"); 2088 TAILQ_FOREACH(tpg, &uobj->memq, listq.queue) { 2089 if (tpg == pg) { 2090 break; 2091 } 2092 } 2093 if (tpg) 2094 (*pr)(" page found on object list\n"); 2095 else 2096 (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n"); 2097 } 2098 } 2099 } 2100 2101 /* cross-verify page queue */ 2102 if (pg->pqflags & PQ_FREE) { 2103 int fl = uvm_page_lookup_freelist(pg); 2104 int color = VM_PGCOLOR_BUCKET(pg); 2105 pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[ 2106 ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN]; 2107 } else { 2108 pgl = NULL; 2109 } 2110 2111 if (pgl) { 2112 (*pr)(" checking pageq list\n"); 2113 LIST_FOREACH(tpg, pgl, pageq.list) { 2114 if (tpg == pg) { 2115 break; 2116 } 2117 } 2118 if (tpg) 2119 (*pr)(" page found on pageq list\n"); 2120 else 2121 (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n"); 2122 } 2123 } 2124 2125 /* 2126 * uvm_pages_printthem - print a summary of all managed pages 2127 */ 2128 2129 void 2130 uvm_page_printall(void (*pr)(const char *, ...)) 2131 { 2132 unsigned i; 2133 struct vm_page *pg; 2134 2135 (*pr)("%18s %4s %4s %18s %18s" 2136 #ifdef UVM_PAGE_TRKOWN 2137 " OWNER" 2138 #endif 2139 "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON"); 2140 for (i = 0; i < vm_nphysmem; i++) { 2141 for (pg = VM_PHYSMEM_PTR(i)->pgs; pg < VM_PHYSMEM_PTR(i)->lastpg; pg++) { 2142 (*pr)("%18p %04x %04x %18p %18p", 2143 pg, pg->flags, pg->pqflags, pg->uobject, 2144 pg->uanon); 2145 #ifdef UVM_PAGE_TRKOWN 2146 if (pg->flags & PG_BUSY) 2147 (*pr)(" %d [%s]", pg->owner, pg->owner_tag); 2148 #endif 2149 (*pr)("\n"); 2150 } 2151 } 2152 } 2153 2154 #endif /* DDB || DEBUGPRINT */ 2155