1 /* $NetBSD: uvm_page.c,v 1.155 2010/04/25 15:54:14 ad Exp $ */ 2 3 /* 4 * Copyright (c) 2010 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 * POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* 30 * Copyright (c) 1997 Charles D. Cranor and Washington University. 31 * Copyright (c) 1991, 1993, The Regents of the University of California. 32 * 33 * All rights reserved. 34 * 35 * This code is derived from software contributed to Berkeley by 36 * The Mach Operating System project at Carnegie-Mellon University. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 3. All advertising materials mentioning features or use of this software 47 * must display the following acknowledgement: 48 * This product includes software developed by Charles D. Cranor, 49 * Washington University, the University of California, Berkeley and 50 * its contributors. 51 * 4. Neither the name of the University nor the names of its contributors 52 * may be used to endorse or promote products derived from this software 53 * without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 58 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 65 * SUCH DAMAGE. 66 * 67 * @(#)vm_page.c 8.3 (Berkeley) 3/21/94 68 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp 69 * 70 * 71 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 72 * All rights reserved. 73 * 74 * Permission to use, copy, modify and distribute this software and 75 * its documentation is hereby granted, provided that both the copyright 76 * notice and this permission notice appear in all copies of the 77 * software, derivative works or modified versions, and any portions 78 * thereof, and that both notices appear in supporting documentation. 79 * 80 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 81 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 82 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 83 * 84 * Carnegie Mellon requests users of this software to return to 85 * 86 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 87 * School of Computer Science 88 * Carnegie Mellon University 89 * Pittsburgh PA 15213-3890 90 * 91 * any improvements or extensions that they make and grant Carnegie the 92 * rights to redistribute these changes. 93 */ 94 95 /* 96 * uvm_page.c: page ops. 97 */ 98 99 #include <sys/cdefs.h> 100 __KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.155 2010/04/25 15:54:14 ad Exp $"); 101 102 #include "opt_ddb.h" 103 #include "opt_uvmhist.h" 104 #include "opt_readahead.h" 105 106 #include <sys/param.h> 107 #include <sys/systm.h> 108 #include <sys/malloc.h> 109 #include <sys/sched.h> 110 #include <sys/kernel.h> 111 #include <sys/vnode.h> 112 #include <sys/proc.h> 113 #include <sys/atomic.h> 114 #include <sys/cpu.h> 115 116 #include <uvm/uvm.h> 117 #include <uvm/uvm_ddb.h> 118 #include <uvm/uvm_pdpolicy.h> 119 120 /* 121 * global vars... XXXCDC: move to uvm. structure. 122 */ 123 124 /* 125 * physical memory config is stored in vm_physmem. 126 */ 127 128 struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */ 129 int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */ 130 131 /* 132 * Some supported CPUs in a given architecture don't support all 133 * of the things necessary to do idle page zero'ing efficiently. 134 * We therefore provide a way to enable it from machdep code here. 135 */ 136 bool vm_page_zero_enable = false; 137 138 /* 139 * number of pages per-CPU to reserve for the kernel. 140 */ 141 int vm_page_reserve_kernel = 5; 142 143 /* 144 * physical memory size; 145 */ 146 int physmem; 147 148 /* 149 * local variables 150 */ 151 152 /* 153 * these variables record the values returned by vm_page_bootstrap, 154 * for debugging purposes. The implementation of uvm_pageboot_alloc 155 * and pmap_startup here also uses them internally. 156 */ 157 158 static vaddr_t virtual_space_start; 159 static vaddr_t virtual_space_end; 160 161 /* 162 * we allocate an initial number of page colors in uvm_page_init(), 163 * and remember them. We may re-color pages as cache sizes are 164 * discovered during the autoconfiguration phase. 
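 * (For illustration: a page's color is derived from its physical
 * address; with the VM_PGCOLOR_BUCKET() definition assumed here that
 * is atop(VM_PAGE_TO_PHYS(pg)) & uvmexp.colormask, so the allocator
 * can prefer a particular cache color when handing out pages.)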
But we can never 165 * free the initial set of buckets, since they are allocated using 166 * uvm_pageboot_alloc(). 167 */ 168 169 static bool have_recolored_pages /* = false */; 170 171 MALLOC_DEFINE(M_VMPAGE, "VM page", "VM page"); 172 173 #ifdef DEBUG 174 vaddr_t uvm_zerocheckkva; 175 #endif /* DEBUG */ 176 177 /* 178 * local prototypes 179 */ 180 181 static void uvm_pageinsert(struct uvm_object *, struct vm_page *); 182 static void uvm_pageremove(struct uvm_object *, struct vm_page *); 183 184 /* 185 * per-object tree of pages 186 */ 187 188 static signed int 189 uvm_page_compare_nodes(const struct rb_node *n1, const struct rb_node *n2) 190 { 191 const struct vm_page *pg1 = (const void *)n1; 192 const struct vm_page *pg2 = (const void *)n2; 193 const voff_t a = pg1->offset; 194 const voff_t b = pg2->offset; 195 196 if (a < b) 197 return 1; 198 if (a > b) 199 return -1; 200 return 0; 201 } 202 203 static signed int 204 uvm_page_compare_key(const struct rb_node *n, const void *key) 205 { 206 const struct vm_page *pg = (const void *)n; 207 const voff_t a = pg->offset; 208 const voff_t b = *(const voff_t *)key; 209 210 if (a < b) 211 return 1; 212 if (a > b) 213 return -1; 214 return 0; 215 } 216 217 const struct rb_tree_ops uvm_page_tree_ops = { 218 .rbto_compare_nodes = uvm_page_compare_nodes, 219 .rbto_compare_key = uvm_page_compare_key, 220 }; 221 222 /* 223 * inline functions 224 */ 225 226 /* 227 * uvm_pageinsert: insert a page in the object. 228 * 229 * => caller must lock object 230 * => caller must lock page queues 231 * => call should have already set pg's object and offset pointers 232 * and bumped the version counter 233 */ 234 235 static inline void 236 uvm_pageinsert_list(struct uvm_object *uobj, struct vm_page *pg, 237 struct vm_page *where) 238 { 239 240 KASSERT(uobj == pg->uobject); 241 KASSERT(mutex_owned(&uobj->vmobjlock)); 242 KASSERT((pg->flags & PG_TABLED) == 0); 243 KASSERT(where == NULL || (where->flags & PG_TABLED)); 244 KASSERT(where == NULL || (where->uobject == uobj)); 245 246 if (UVM_OBJ_IS_VNODE(uobj)) { 247 if (uobj->uo_npages == 0) { 248 struct vnode *vp = (struct vnode *)uobj; 249 250 vholdl(vp); 251 } 252 if (UVM_OBJ_IS_VTEXT(uobj)) { 253 atomic_inc_uint(&uvmexp.execpages); 254 } else { 255 atomic_inc_uint(&uvmexp.filepages); 256 } 257 } else if (UVM_OBJ_IS_AOBJ(uobj)) { 258 atomic_inc_uint(&uvmexp.anonpages); 259 } 260 261 if (where) 262 TAILQ_INSERT_AFTER(&uobj->memq, where, pg, listq.queue); 263 else 264 TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue); 265 pg->flags |= PG_TABLED; 266 uobj->uo_npages++; 267 } 268 269 270 static inline void 271 uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg) 272 { 273 bool success; 274 275 KASSERT(uobj == pg->uobject); 276 success = rb_tree_insert_node(&uobj->rb_tree, &pg->rb_node); 277 KASSERT(success); 278 } 279 280 static inline void 281 uvm_pageinsert(struct uvm_object *uobj, struct vm_page *pg) 282 { 283 284 KDASSERT(uobj != NULL); 285 uvm_pageinsert_tree(uobj, pg); 286 uvm_pageinsert_list(uobj, pg, NULL); 287 } 288 289 /* 290 * uvm_page_remove: remove page from object. 
291 * 292 * => caller must lock object 293 * => caller must lock page queues 294 */ 295 296 static inline void 297 uvm_pageremove_list(struct uvm_object *uobj, struct vm_page *pg) 298 { 299 300 KASSERT(uobj == pg->uobject); 301 KASSERT(mutex_owned(&uobj->vmobjlock)); 302 KASSERT(pg->flags & PG_TABLED); 303 304 if (UVM_OBJ_IS_VNODE(uobj)) { 305 if (uobj->uo_npages == 1) { 306 struct vnode *vp = (struct vnode *)uobj; 307 308 holdrelel(vp); 309 } 310 if (UVM_OBJ_IS_VTEXT(uobj)) { 311 atomic_dec_uint(&uvmexp.execpages); 312 } else { 313 atomic_dec_uint(&uvmexp.filepages); 314 } 315 } else if (UVM_OBJ_IS_AOBJ(uobj)) { 316 atomic_dec_uint(&uvmexp.anonpages); 317 } 318 319 /* object should be locked */ 320 uobj->uo_npages--; 321 TAILQ_REMOVE(&uobj->memq, pg, listq.queue); 322 pg->flags &= ~PG_TABLED; 323 pg->uobject = NULL; 324 } 325 326 static inline void 327 uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg) 328 { 329 330 KASSERT(uobj == pg->uobject); 331 rb_tree_remove_node(&uobj->rb_tree, &pg->rb_node); 332 } 333 334 static inline void 335 uvm_pageremove(struct uvm_object *uobj, struct vm_page *pg) 336 { 337 338 KDASSERT(uobj != NULL); 339 uvm_pageremove_tree(uobj, pg); 340 uvm_pageremove_list(uobj, pg); 341 } 342 343 static void 344 uvm_page_init_buckets(struct pgfreelist *pgfl) 345 { 346 int color, i; 347 348 for (color = 0; color < uvmexp.ncolors; color++) { 349 for (i = 0; i < PGFL_NQUEUES; i++) { 350 LIST_INIT(&pgfl->pgfl_buckets[color].pgfl_queues[i]); 351 } 352 } 353 } 354 355 /* 356 * uvm_page_init: init the page system. called from uvm_init(). 357 * 358 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp 359 */ 360 361 void 362 uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp) 363 { 364 static struct uvm_cpu boot_cpu; 365 psize_t freepages, pagecount, bucketcount, n; 366 struct pgflbucket *bucketarray, *cpuarray; 367 struct vm_page *pagearray; 368 int lcv; 369 u_int i; 370 paddr_t paddr; 371 372 KASSERT(ncpu <= 1); 373 CTASSERT(sizeof(pagearray->offset) >= sizeof(struct uvm_cpu *)); 374 375 /* 376 * init the page queues and page queue locks, except the free 377 * list; we allocate that later (with the initial vm_page 378 * structures). 379 */ 380 381 uvm.cpus[0] = &boot_cpu; 382 curcpu()->ci_data.cpu_uvm = &boot_cpu; 383 uvm_reclaim_init(); 384 uvmpdpol_init(); 385 mutex_init(&uvm_pageqlock, MUTEX_DRIVER, IPL_NONE); 386 mutex_init(&uvm_fpageqlock, MUTEX_DRIVER, IPL_VM); 387 388 /* 389 * allocate vm_page structures. 390 */ 391 392 /* 393 * sanity check: 394 * before calling this function the MD code is expected to register 395 * some free RAM with the uvm_page_physload() function. our job 396 * now is to allocate vm_page structures for this memory. 397 */ 398 399 if (vm_nphysseg == 0) 400 panic("uvm_page_bootstrap: no memory pre-allocated"); 401 402 /* 403 * first calculate the number of free pages... 404 * 405 * note that we use start/end rather than avail_start/avail_end. 406 * this allows us to allocate extra vm_page structures in case we 407 * want to return some memory to the pool after booting. 408 */ 409 410 freepages = 0; 411 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 412 freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start); 413 414 /* 415 * Let MD code initialize the number of colors, or default 416 * to 1 color if MD code doesn't care. 417 */ 418 if (uvmexp.ncolors == 0) 419 uvmexp.ncolors = 1; 420 uvmexp.colormask = uvmexp.ncolors - 1; 421 422 /* 423 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can 424 * use. 
for each page of memory we use we need a vm_page structure. 425 * thus, the total number of pages we can use is the total size of 426 * the memory divided by the PAGE_SIZE plus the size of the vm_page 427 * structure. we add one to freepages as a fudge factor to avoid 428 * truncation errors (since we can only allocate in terms of whole 429 * pages). 430 */ 431 432 bucketcount = uvmexp.ncolors * VM_NFREELIST; 433 pagecount = ((freepages + 1) << PAGE_SHIFT) / 434 (PAGE_SIZE + sizeof(struct vm_page)); 435 436 bucketarray = (void *)uvm_pageboot_alloc((bucketcount * 437 sizeof(struct pgflbucket) * 2) + (pagecount * 438 sizeof(struct vm_page))); 439 cpuarray = bucketarray + bucketcount; 440 pagearray = (struct vm_page *)(bucketarray + bucketcount * 2); 441 442 for (lcv = 0; lcv < VM_NFREELIST; lcv++) { 443 uvm.page_free[lcv].pgfl_buckets = 444 (bucketarray + (lcv * uvmexp.ncolors)); 445 uvm_page_init_buckets(&uvm.page_free[lcv]); 446 uvm.cpus[0]->page_free[lcv].pgfl_buckets = 447 (cpuarray + (lcv * uvmexp.ncolors)); 448 uvm_page_init_buckets(&uvm.cpus[0]->page_free[lcv]); 449 } 450 memset(pagearray, 0, pagecount * sizeof(struct vm_page)); 451 452 /* 453 * init the vm_page structures and put them in the correct place. 454 */ 455 456 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) { 457 n = vm_physmem[lcv].end - vm_physmem[lcv].start; 458 459 /* set up page array pointers */ 460 vm_physmem[lcv].pgs = pagearray; 461 pagearray += n; 462 pagecount -= n; 463 vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1); 464 465 /* init and free vm_pages (we've already zeroed them) */ 466 paddr = ctob(vm_physmem[lcv].start); 467 for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) { 468 vm_physmem[lcv].pgs[i].phys_addr = paddr; 469 #ifdef __HAVE_VM_PAGE_MD 470 VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]); 471 #endif 472 if (atop(paddr) >= vm_physmem[lcv].avail_start && 473 atop(paddr) <= vm_physmem[lcv].avail_end) { 474 uvmexp.npages++; 475 /* add page to free pool */ 476 uvm_pagefree(&vm_physmem[lcv].pgs[i]); 477 } 478 } 479 } 480 481 /* 482 * pass up the values of virtual_space_start and 483 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper 484 * layers of the VM. 485 */ 486 487 *kvm_startp = round_page(virtual_space_start); 488 *kvm_endp = trunc_page(virtual_space_end); 489 #ifdef DEBUG 490 /* 491 * steal kva for uvm_pagezerocheck(). 492 */ 493 uvm_zerocheckkva = *kvm_startp; 494 *kvm_startp += PAGE_SIZE; 495 #endif /* DEBUG */ 496 497 /* 498 * init various thresholds. 499 */ 500 501 uvmexp.reserve_pagedaemon = 1; 502 uvmexp.reserve_kernel = vm_page_reserve_kernel; 503 504 /* 505 * determine if we should zero pages in the idle loop. 506 */ 507 508 uvm.cpus[0]->page_idle_zero = vm_page_zero_enable; 509 510 /* 511 * done! 512 */ 513 514 uvm.page_init_done = true; 515 } 516 517 /* 518 * uvm_setpagesize: set the page size 519 * 520 * => sets page_shift and page_mask from uvmexp.pagesize. 521 */ 522 523 void 524 uvm_setpagesize(void) 525 { 526 527 /* 528 * If uvmexp.pagesize is 0 at this point, we expect PAGE_SIZE 529 * to be a constant (indicated by being a non-zero value). 
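 * As a worked example (illustrative only, assuming a 4096-byte page):
 * pagemask becomes 0xfff, the power-of-two check below passes because
 * 0xfff & 0x1000 == 0, and the loop stops with pageshift == 12 since
 * 1 << 12 == 4096.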
530 */ 531 if (uvmexp.pagesize == 0) { 532 if (PAGE_SIZE == 0) 533 panic("uvm_setpagesize: uvmexp.pagesize not set"); 534 uvmexp.pagesize = PAGE_SIZE; 535 } 536 uvmexp.pagemask = uvmexp.pagesize - 1; 537 if ((uvmexp.pagemask & uvmexp.pagesize) != 0) 538 panic("uvm_setpagesize: page size not a power of two"); 539 for (uvmexp.pageshift = 0; ; uvmexp.pageshift++) 540 if ((1 << uvmexp.pageshift) == uvmexp.pagesize) 541 break; 542 } 543 544 /* 545 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping 546 */ 547 548 vaddr_t 549 uvm_pageboot_alloc(vsize_t size) 550 { 551 static bool initialized = false; 552 vaddr_t addr; 553 #if !defined(PMAP_STEAL_MEMORY) 554 vaddr_t vaddr; 555 paddr_t paddr; 556 #endif 557 558 /* 559 * on first call to this function, initialize ourselves. 560 */ 561 if (initialized == false) { 562 pmap_virtual_space(&virtual_space_start, &virtual_space_end); 563 564 /* round it the way we like it */ 565 virtual_space_start = round_page(virtual_space_start); 566 virtual_space_end = trunc_page(virtual_space_end); 567 568 initialized = true; 569 } 570 571 /* round to page size */ 572 size = round_page(size); 573 574 #if defined(PMAP_STEAL_MEMORY) 575 576 /* 577 * defer bootstrap allocation to MD code (it may want to allocate 578 * from a direct-mapped segment). pmap_steal_memory should adjust 579 * virtual_space_start/virtual_space_end if necessary. 580 */ 581 582 addr = pmap_steal_memory(size, &virtual_space_start, 583 &virtual_space_end); 584 585 return(addr); 586 587 #else /* !PMAP_STEAL_MEMORY */ 588 589 /* 590 * allocate virtual memory for this request 591 */ 592 if (virtual_space_start == virtual_space_end || 593 (virtual_space_end - virtual_space_start) < size) 594 panic("uvm_pageboot_alloc: out of virtual space"); 595 596 addr = virtual_space_start; 597 598 #ifdef PMAP_GROWKERNEL 599 /* 600 * If the kernel pmap can't map the requested space, 601 * then allocate more resources for it. 602 */ 603 if (uvm_maxkaddr < (addr + size)) { 604 uvm_maxkaddr = pmap_growkernel(addr + size); 605 if (uvm_maxkaddr < (addr + size)) 606 panic("uvm_pageboot_alloc: pmap_growkernel() failed"); 607 } 608 #endif 609 610 virtual_space_start += size; 611 612 /* 613 * allocate and mapin physical pages to back new virtual pages 614 */ 615 616 for (vaddr = round_page(addr) ; vaddr < addr + size ; 617 vaddr += PAGE_SIZE) { 618 619 if (!uvm_page_physget(&paddr)) 620 panic("uvm_pageboot_alloc: out of memory"); 621 622 /* 623 * Note this memory is no longer managed, so using 624 * pmap_kenter is safe. 625 */ 626 pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE, 0); 627 } 628 pmap_update(pmap_kernel()); 629 return(addr); 630 #endif /* PMAP_STEAL_MEMORY */ 631 } 632 633 #if !defined(PMAP_STEAL_MEMORY) 634 /* 635 * uvm_page_physget: "steal" one page from the vm_physmem structure. 636 * 637 * => attempt to allocate it off the end of a segment in which the "avail" 638 * values match the start/end values. if we can't do that, then we 639 * will advance both values (making them equal, and removing some 640 * vm_page structures from the non-avail area). 641 * => return false if out of memory. 
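 * => an illustrative bootstrap-time caller (a sketch of what
 *    uvm_pageboot_alloc() above already does, with "va" standing in
 *    for whatever kernel virtual address is being backed):
 *
 *	paddr_t pa;
 *
 *	if (!uvm_page_physget(&pa))
 *		panic("out of memory");
 *	pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE, 0);
 *	pmap_update(pmap_kernel());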
642 */ 643 644 /* subroutine: try to allocate from memory chunks on the specified freelist */ 645 static bool uvm_page_physget_freelist(paddr_t *, int); 646 647 static bool 648 uvm_page_physget_freelist(paddr_t *paddrp, int freelist) 649 { 650 int lcv, x; 651 652 /* pass 1: try allocating from a matching end */ 653 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) 654 for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--) 655 #else 656 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 657 #endif 658 { 659 660 if (uvm.page_init_done == true) 661 panic("uvm_page_physget: called _after_ bootstrap"); 662 663 if (vm_physmem[lcv].free_list != freelist) 664 continue; 665 666 /* try from front */ 667 if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start && 668 vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) { 669 *paddrp = ctob(vm_physmem[lcv].avail_start); 670 vm_physmem[lcv].avail_start++; 671 vm_physmem[lcv].start++; 672 /* nothing left? nuke it */ 673 if (vm_physmem[lcv].avail_start == 674 vm_physmem[lcv].end) { 675 if (vm_nphysseg == 1) 676 panic("uvm_page_physget: out of memory!"); 677 vm_nphysseg--; 678 for (x = lcv ; x < vm_nphysseg ; x++) 679 /* structure copy */ 680 vm_physmem[x] = vm_physmem[x+1]; 681 } 682 return (true); 683 } 684 685 /* try from rear */ 686 if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end && 687 vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) { 688 *paddrp = ctob(vm_physmem[lcv].avail_end - 1); 689 vm_physmem[lcv].avail_end--; 690 vm_physmem[lcv].end--; 691 /* nothing left? nuke it */ 692 if (vm_physmem[lcv].avail_end == 693 vm_physmem[lcv].start) { 694 if (vm_nphysseg == 1) 695 panic("uvm_page_physget: out of memory!"); 696 vm_nphysseg--; 697 for (x = lcv ; x < vm_nphysseg ; x++) 698 /* structure copy */ 699 vm_physmem[x] = vm_physmem[x+1]; 700 } 701 return (true); 702 } 703 } 704 705 /* pass2: forget about matching ends, just allocate something */ 706 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) 707 for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--) 708 #else 709 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 710 #endif 711 { 712 713 /* any room in this bank? */ 714 if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end) 715 continue; /* nope */ 716 717 *paddrp = ctob(vm_physmem[lcv].avail_start); 718 vm_physmem[lcv].avail_start++; 719 /* truncate! */ 720 vm_physmem[lcv].start = vm_physmem[lcv].avail_start; 721 722 /* nothing left? nuke it */ 723 if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) { 724 if (vm_nphysseg == 1) 725 panic("uvm_page_physget: out of memory!"); 726 vm_nphysseg--; 727 for (x = lcv ; x < vm_nphysseg ; x++) 728 /* structure copy */ 729 vm_physmem[x] = vm_physmem[x+1]; 730 } 731 return (true); 732 } 733 734 return (false); /* whoops! 
*/ 735 } 736 737 bool 738 uvm_page_physget(paddr_t *paddrp) 739 { 740 int i; 741 742 /* try in the order of freelist preference */ 743 for (i = 0; i < VM_NFREELIST; i++) 744 if (uvm_page_physget_freelist(paddrp, i) == true) 745 return (true); 746 return (false); 747 } 748 #endif /* PMAP_STEAL_MEMORY */ 749 750 /* 751 * uvm_page_physload: load physical memory into VM system 752 * 753 * => all args are PFs 754 * => all pages in start/end get vm_page structures 755 * => areas marked by avail_start/avail_end get added to the free page pool 756 * => we are limited to VM_PHYSSEG_MAX physical memory segments 757 */ 758 759 void 760 uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start, 761 paddr_t avail_end, int free_list) 762 { 763 int preload, lcv; 764 psize_t npages; 765 struct vm_page *pgs; 766 struct vm_physseg *ps; 767 768 if (uvmexp.pagesize == 0) 769 panic("uvm_page_physload: page size not set!"); 770 if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT) 771 panic("uvm_page_physload: bad free list %d", free_list); 772 if (start >= end) 773 panic("uvm_page_physload: start >= end"); 774 775 /* 776 * do we have room? 777 */ 778 779 if (vm_nphysseg == VM_PHYSSEG_MAX) { 780 printf("uvm_page_physload: unable to load physical memory " 781 "segment\n"); 782 printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n", 783 VM_PHYSSEG_MAX, (long long)start, (long long)end); 784 printf("\tincrease VM_PHYSSEG_MAX\n"); 785 return; 786 } 787 788 /* 789 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been 790 * called yet, so malloc is not available). 791 */ 792 793 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) { 794 if (vm_physmem[lcv].pgs) 795 break; 796 } 797 preload = (lcv == vm_nphysseg); 798 799 /* 800 * if VM is already running, attempt to malloc() vm_page structures 801 */ 802 803 if (!preload) { 804 #if defined(VM_PHYSSEG_NOADD) 805 panic("uvm_page_physload: tried to add RAM after vm_mem_init"); 806 #else 807 /* XXXCDC: need some sort of lockout for this case */ 808 paddr_t paddr; 809 npages = end - start; /* # of pages */ 810 pgs = malloc(sizeof(struct vm_page) * npages, 811 M_VMPAGE, M_NOWAIT); 812 if (pgs == NULL) { 813 printf("uvm_page_physload: can not malloc vm_page " 814 "structs for segment\n"); 815 printf("\tignoring 0x%lx -> 0x%lx\n", start, end); 816 return; 817 } 818 /* zero data, init phys_addr and free_list, and free pages */ 819 memset(pgs, 0, sizeof(struct vm_page) * npages); 820 for (lcv = 0, paddr = ctob(start) ; 821 lcv < npages ; lcv++, paddr += PAGE_SIZE) { 822 pgs[lcv].phys_addr = paddr; 823 pgs[lcv].free_list = free_list; 824 if (atop(paddr) >= avail_start && 825 atop(paddr) <= avail_end) 826 uvm_pagefree(&pgs[lcv]); 827 } 828 /* XXXCDC: incomplete: need to update uvmexp.free, what else? */ 829 /* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */ 830 #endif 831 } else { 832 pgs = NULL; 833 npages = 0; 834 } 835 836 /* 837 * now insert us in the proper place in vm_physmem[] 838 */ 839 840 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM) 841 /* random: put it at the end (easy!) */ 842 ps = &vm_physmem[vm_nphysseg]; 843 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH) 844 { 845 int x; 846 /* sort by address for binary search */ 847 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 848 if (start < vm_physmem[lcv].start) 849 break; 850 ps = &vm_physmem[lcv]; 851 /* move back other entries, if necessary ... 
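	 * (each vm_physmem[] entry from index lcv onward is copied up
	 * one slot so the array stays sorted by start address; the new
	 * segment is then written into slot lcv via "ps" below)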
*/ 852 for (x = vm_nphysseg ; x > lcv ; x--) 853 /* structure copy */ 854 vm_physmem[x] = vm_physmem[x - 1]; 855 } 856 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) 857 { 858 int x; 859 /* sort by largest segment first */ 860 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 861 if ((end - start) > 862 (vm_physmem[lcv].end - vm_physmem[lcv].start)) 863 break; 864 ps = &vm_physmem[lcv]; 865 /* move back other entries, if necessary ... */ 866 for (x = vm_nphysseg ; x > lcv ; x--) 867 /* structure copy */ 868 vm_physmem[x] = vm_physmem[x - 1]; 869 } 870 #else 871 panic("uvm_page_physload: unknown physseg strategy selected!"); 872 #endif 873 874 ps->start = start; 875 ps->end = end; 876 ps->avail_start = avail_start; 877 ps->avail_end = avail_end; 878 if (preload) { 879 ps->pgs = NULL; 880 } else { 881 ps->pgs = pgs; 882 ps->lastpg = pgs + npages - 1; 883 } 884 ps->free_list = free_list; 885 vm_nphysseg++; 886 887 if (!preload) { 888 uvmpdpol_reinit(); 889 } 890 } 891 892 /* 893 * uvm_page_recolor: Recolor the pages if the new bucket count is 894 * larger than the old one. 895 */ 896 897 void 898 uvm_page_recolor(int newncolors) 899 { 900 struct pgflbucket *bucketarray, *cpuarray, *oldbucketarray; 901 struct pgfreelist gpgfl, pgfl; 902 struct vm_page *pg; 903 vsize_t bucketcount; 904 int lcv, color, i, ocolors; 905 struct uvm_cpu *ucpu; 906 907 if (newncolors <= uvmexp.ncolors) 908 return; 909 910 if (uvm.page_init_done == false) { 911 uvmexp.ncolors = newncolors; 912 return; 913 } 914 915 bucketcount = newncolors * VM_NFREELIST; 916 bucketarray = malloc(bucketcount * sizeof(struct pgflbucket) * 2, 917 M_VMPAGE, M_NOWAIT); 918 cpuarray = bucketarray + bucketcount; 919 if (bucketarray == NULL) { 920 printf("WARNING: unable to allocate %ld page color buckets\n", 921 (long) bucketcount); 922 return; 923 } 924 925 mutex_spin_enter(&uvm_fpageqlock); 926 927 /* Make sure we should still do this. */ 928 if (newncolors <= uvmexp.ncolors) { 929 mutex_spin_exit(&uvm_fpageqlock); 930 free(bucketarray, M_VMPAGE); 931 return; 932 } 933 934 oldbucketarray = uvm.page_free[0].pgfl_buckets; 935 ocolors = uvmexp.ncolors; 936 937 uvmexp.ncolors = newncolors; 938 uvmexp.colormask = uvmexp.ncolors - 1; 939 940 ucpu = curcpu()->ci_data.cpu_uvm; 941 for (lcv = 0; lcv < VM_NFREELIST; lcv++) { 942 gpgfl.pgfl_buckets = (bucketarray + (lcv * newncolors)); 943 pgfl.pgfl_buckets = (cpuarray + (lcv * uvmexp.ncolors)); 944 uvm_page_init_buckets(&gpgfl); 945 uvm_page_init_buckets(&pgfl); 946 for (color = 0; color < ocolors; color++) { 947 for (i = 0; i < PGFL_NQUEUES; i++) { 948 while ((pg = LIST_FIRST(&uvm.page_free[ 949 lcv].pgfl_buckets[color].pgfl_queues[i])) 950 != NULL) { 951 LIST_REMOVE(pg, pageq.list); /* global */ 952 LIST_REMOVE(pg, listq.list); /* cpu */ 953 LIST_INSERT_HEAD(&gpgfl.pgfl_buckets[ 954 VM_PGCOLOR_BUCKET(pg)].pgfl_queues[ 955 i], pg, pageq.list); 956 LIST_INSERT_HEAD(&pgfl.pgfl_buckets[ 957 VM_PGCOLOR_BUCKET(pg)].pgfl_queues[ 958 i], pg, listq.list); 959 } 960 } 961 } 962 uvm.page_free[lcv].pgfl_buckets = gpgfl.pgfl_buckets; 963 ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets; 964 } 965 966 if (have_recolored_pages) { 967 mutex_spin_exit(&uvm_fpageqlock); 968 free(oldbucketarray, M_VMPAGE); 969 return; 970 } 971 972 have_recolored_pages = true; 973 mutex_spin_exit(&uvm_fpageqlock); 974 } 975 976 /* 977 * uvm_cpu_attach: initialize per-CPU data structures. 
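 *
 * => the boot CPU was already set up in uvm_page_init(); the
 *    CPU_IS_PRIMARY() check below returns early in that case, so this
 *    routine only adds kernel reserve pages and free-list buckets for
 *    secondary CPUs (it is assumed to run once per CPU at attach time;
 *    nothing here enforces that).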
978 */ 979 980 void 981 uvm_cpu_attach(struct cpu_info *ci) 982 { 983 struct pgflbucket *bucketarray; 984 struct pgfreelist pgfl; 985 struct uvm_cpu *ucpu; 986 vsize_t bucketcount; 987 int lcv; 988 989 if (CPU_IS_PRIMARY(ci)) { 990 /* Already done in uvm_page_init(). */ 991 return; 992 } 993 994 /* Add more reserve pages for this CPU. */ 995 uvmexp.reserve_kernel += vm_page_reserve_kernel; 996 997 /* Configure this CPU's free lists. */ 998 bucketcount = uvmexp.ncolors * VM_NFREELIST; 999 bucketarray = malloc(bucketcount * sizeof(struct pgflbucket), 1000 M_VMPAGE, M_WAITOK); 1001 ucpu = kmem_zalloc(sizeof(*ucpu), KM_SLEEP); 1002 uvm.cpus[cpu_index(ci)] = ucpu; 1003 ci->ci_data.cpu_uvm = ucpu; 1004 for (lcv = 0; lcv < VM_NFREELIST; lcv++) { 1005 pgfl.pgfl_buckets = (bucketarray + (lcv * uvmexp.ncolors)); 1006 uvm_page_init_buckets(&pgfl); 1007 ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets; 1008 } 1009 } 1010 1011 /* 1012 * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat 1013 */ 1014 1015 static struct vm_page * 1016 uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int flist, int try1, int try2, 1017 int *trycolorp) 1018 { 1019 struct pgflist *freeq; 1020 struct vm_page *pg; 1021 int color, trycolor = *trycolorp; 1022 struct pgfreelist *gpgfl, *pgfl; 1023 1024 KASSERT(mutex_owned(&uvm_fpageqlock)); 1025 1026 color = trycolor; 1027 pgfl = &ucpu->page_free[flist]; 1028 gpgfl = &uvm.page_free[flist]; 1029 do { 1030 /* cpu, try1 */ 1031 if ((pg = LIST_FIRST((freeq = 1032 &pgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) { 1033 VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--; 1034 uvmexp.cpuhit++; 1035 goto gotit; 1036 } 1037 /* global, try1 */ 1038 if ((pg = LIST_FIRST((freeq = 1039 &gpgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) { 1040 VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--; 1041 uvmexp.cpumiss++; 1042 goto gotit; 1043 } 1044 /* cpu, try2 */ 1045 if ((pg = LIST_FIRST((freeq = 1046 &pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) { 1047 VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--; 1048 uvmexp.cpuhit++; 1049 goto gotit; 1050 } 1051 /* global, try2 */ 1052 if ((pg = LIST_FIRST((freeq = 1053 &gpgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) { 1054 VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--; 1055 uvmexp.cpumiss++; 1056 goto gotit; 1057 } 1058 color = (color + 1) & uvmexp.colormask; 1059 } while (color != trycolor); 1060 1061 return (NULL); 1062 1063 gotit: 1064 LIST_REMOVE(pg, pageq.list); /* global list */ 1065 LIST_REMOVE(pg, listq.list); /* per-cpu list */ 1066 uvmexp.free--; 1067 1068 /* update zero'd page count */ 1069 if (pg->flags & PG_ZERO) 1070 uvmexp.zeropages--; 1071 1072 if (color == trycolor) 1073 uvmexp.colorhit++; 1074 else { 1075 uvmexp.colormiss++; 1076 *trycolorp = color; 1077 } 1078 1079 return (pg); 1080 } 1081 1082 /* 1083 * uvm_pagealloc_strat: allocate vm_page from a particular free list. 1084 * 1085 * => return null if no pages free 1086 * => wake up pagedaemon if number of free pages drops below low water mark 1087 * => if obj != NULL, obj must be locked (to put in obj's tree) 1088 * => if anon != NULL, anon must be locked (to put in anon) 1089 * => only one of obj or anon can be non-null 1090 * => caller must activate/deactivate page if it is not wired. 1091 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL. 1092 * => policy decision: it is more important to pull a page off of the 1093 * appropriate priority free list than it is to get a zero'd or 1094 * unknown contents page. 
This is because we live with the 1095 * consequences of a bad free list decision for the entire 1096 * lifetime of the page, e.g. if the page comes from memory that 1097 * is slower to access. 1098 */ 1099 1100 struct vm_page * 1101 uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon, 1102 int flags, int strat, int free_list) 1103 { 1104 int lcv, try1, try2, zeroit = 0, color; 1105 struct uvm_cpu *ucpu; 1106 struct vm_page *pg; 1107 lwp_t *l; 1108 1109 KASSERT(obj == NULL || anon == NULL); 1110 KASSERT(anon == NULL || off == 0); 1111 KASSERT(off == trunc_page(off)); 1112 KASSERT(obj == NULL || mutex_owned(&obj->vmobjlock)); 1113 KASSERT(anon == NULL || mutex_owned(&anon->an_lock)); 1114 1115 mutex_spin_enter(&uvm_fpageqlock); 1116 1117 /* 1118 * This implements a global round-robin page coloring 1119 * algorithm. 1120 * 1121 * XXXJRT: What about virtually-indexed caches? 1122 */ 1123 1124 ucpu = curcpu()->ci_data.cpu_uvm; 1125 color = ucpu->page_free_nextcolor; 1126 1127 /* 1128 * check to see if we need to generate some free pages waking 1129 * the pagedaemon. 1130 */ 1131 1132 uvm_kick_pdaemon(); 1133 1134 /* 1135 * fail if any of these conditions is true: 1136 * [1] there really are no free pages, or 1137 * [2] only kernel "reserved" pages remain and 1138 * reserved pages have not been requested. 1139 * [3] only pagedaemon "reserved" pages remain and 1140 * the requestor isn't the pagedaemon. 1141 * we make kernel reserve pages available if called by a 1142 * kernel thread or a realtime thread. 1143 */ 1144 l = curlwp; 1145 if (__predict_true(l != NULL) && lwp_eprio(l) >= PRI_KTHREAD) { 1146 flags |= UVM_PGA_USERESERVE; 1147 } 1148 if ((uvmexp.free <= uvmexp.reserve_kernel && 1149 (flags & UVM_PGA_USERESERVE) == 0) || 1150 (uvmexp.free <= uvmexp.reserve_pagedaemon && 1151 curlwp != uvm.pagedaemon_lwp)) 1152 goto fail; 1153 1154 #if PGFL_NQUEUES != 2 1155 #error uvm_pagealloc_strat needs to be updated 1156 #endif 1157 1158 /* 1159 * If we want a zero'd page, try the ZEROS queue first, otherwise 1160 * we try the UNKNOWN queue first. 1161 */ 1162 if (flags & UVM_PGA_ZERO) { 1163 try1 = PGFL_ZEROS; 1164 try2 = PGFL_UNKNOWN; 1165 } else { 1166 try1 = PGFL_UNKNOWN; 1167 try2 = PGFL_ZEROS; 1168 } 1169 1170 again: 1171 switch (strat) { 1172 case UVM_PGA_STRAT_NORMAL: 1173 /* Check freelists: descending priority (ascending id) order */ 1174 for (lcv = 0; lcv < VM_NFREELIST; lcv++) { 1175 pg = uvm_pagealloc_pgfl(ucpu, lcv, 1176 try1, try2, &color); 1177 if (pg != NULL) 1178 goto gotit; 1179 } 1180 1181 /* No pages free! */ 1182 goto fail; 1183 1184 case UVM_PGA_STRAT_ONLY: 1185 case UVM_PGA_STRAT_FALLBACK: 1186 /* Attempt to allocate from the specified free list. */ 1187 KASSERT(free_list >= 0 && free_list < VM_NFREELIST); 1188 pg = uvm_pagealloc_pgfl(ucpu, free_list, 1189 try1, try2, &color); 1190 if (pg != NULL) 1191 goto gotit; 1192 1193 /* Fall back, if possible. */ 1194 if (strat == UVM_PGA_STRAT_FALLBACK) { 1195 strat = UVM_PGA_STRAT_NORMAL; 1196 goto again; 1197 } 1198 1199 /* No pages free! */ 1200 goto fail; 1201 1202 default: 1203 panic("uvm_pagealloc_strat: bad strat %d", strat); 1204 /* NOTREACHED */ 1205 } 1206 1207 gotit: 1208 /* 1209 * We now know which color we actually allocated from; set 1210 * the next color accordingly. 
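 * (For example, with uvmexp.ncolors == 4, repeated allocations that
 * keep succeeding at their preferred color advance it 0 -> 1 -> 2 ->
 * 3 -> 0, spreading allocations over the cache-color buckets.)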
1211 */ 1212 1213 ucpu->page_free_nextcolor = (color + 1) & uvmexp.colormask; 1214 1215 /* 1216 * update allocation statistics and remember if we have to 1217 * zero the page 1218 */ 1219 1220 if (flags & UVM_PGA_ZERO) { 1221 if (pg->flags & PG_ZERO) { 1222 uvmexp.pga_zerohit++; 1223 zeroit = 0; 1224 } else { 1225 uvmexp.pga_zeromiss++; 1226 zeroit = 1; 1227 } 1228 if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) { 1229 ucpu->page_idle_zero = vm_page_zero_enable; 1230 } 1231 } 1232 KASSERT(pg->pqflags == PQ_FREE); 1233 1234 pg->offset = off; 1235 pg->uobject = obj; 1236 pg->uanon = anon; 1237 pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE; 1238 if (anon) { 1239 anon->an_page = pg; 1240 pg->pqflags = PQ_ANON; 1241 atomic_inc_uint(&uvmexp.anonpages); 1242 } else { 1243 if (obj) { 1244 uvm_pageinsert(obj, pg); 1245 } 1246 pg->pqflags = 0; 1247 } 1248 mutex_spin_exit(&uvm_fpageqlock); 1249 1250 #if defined(UVM_PAGE_TRKOWN) 1251 pg->owner_tag = NULL; 1252 #endif 1253 UVM_PAGE_OWN(pg, "new alloc"); 1254 1255 if (flags & UVM_PGA_ZERO) { 1256 /* 1257 * A zero'd page is not clean. If we got a page not already 1258 * zero'd, then we have to zero it ourselves. 1259 */ 1260 pg->flags &= ~PG_CLEAN; 1261 if (zeroit) 1262 pmap_zero_page(VM_PAGE_TO_PHYS(pg)); 1263 } 1264 1265 return(pg); 1266 1267 fail: 1268 mutex_spin_exit(&uvm_fpageqlock); 1269 return (NULL); 1270 } 1271 1272 /* 1273 * uvm_pagereplace: replace a page with another 1274 * 1275 * => object must be locked 1276 */ 1277 1278 void 1279 uvm_pagereplace(struct vm_page *oldpg, struct vm_page *newpg) 1280 { 1281 struct uvm_object *uobj = oldpg->uobject; 1282 1283 KASSERT((oldpg->flags & PG_TABLED) != 0); 1284 KASSERT(uobj != NULL); 1285 KASSERT((newpg->flags & PG_TABLED) == 0); 1286 KASSERT(newpg->uobject == NULL); 1287 KASSERT(mutex_owned(&uobj->vmobjlock)); 1288 1289 newpg->uobject = uobj; 1290 newpg->offset = oldpg->offset; 1291 1292 uvm_pageremove_tree(uobj, oldpg); 1293 uvm_pageinsert_tree(uobj, newpg); 1294 uvm_pageinsert_list(uobj, newpg, oldpg); 1295 uvm_pageremove_list(uobj, oldpg); 1296 } 1297 1298 /* 1299 * uvm_pagerealloc: reallocate a page from one object to another 1300 * 1301 * => both objects must be locked 1302 */ 1303 1304 void 1305 uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff) 1306 { 1307 /* 1308 * remove it from the old object 1309 */ 1310 1311 if (pg->uobject) { 1312 uvm_pageremove(pg->uobject, pg); 1313 } 1314 1315 /* 1316 * put it in the new object 1317 */ 1318 1319 if (newobj) { 1320 pg->uobject = newobj; 1321 pg->offset = newoff; 1322 uvm_pageinsert(newobj, pg); 1323 } 1324 } 1325 1326 #ifdef DEBUG 1327 /* 1328 * check if page is zero-filled 1329 * 1330 * - called with free page queue lock held. 1331 */ 1332 void 1333 uvm_pagezerocheck(struct vm_page *pg) 1334 { 1335 int *p, *ep; 1336 1337 KASSERT(uvm_zerocheckkva != 0); 1338 KASSERT(mutex_owned(&uvm_fpageqlock)); 1339 1340 /* 1341 * XXX assuming pmap_kenter_pa and pmap_kremove never call 1342 * uvm page allocator. 1343 * 1344 * it might be better to have "CPU-local temporary map" pmap interface. 1345 */ 1346 pmap_kenter_pa(uvm_zerocheckkva, VM_PAGE_TO_PHYS(pg), VM_PROT_READ, 0); 1347 p = (int *)uvm_zerocheckkva; 1348 ep = (int *)((char *)p + PAGE_SIZE); 1349 pmap_update(pmap_kernel()); 1350 while (p < ep) { 1351 if (*p != 0) 1352 panic("PG_ZERO page isn't zero-filled"); 1353 p++; 1354 } 1355 pmap_kremove(uvm_zerocheckkva, PAGE_SIZE); 1356 /* 1357 * pmap_update() is not necessary here because no one except us 1358 * uses this VA. 
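 * (the next caller re-enters a fresh mapping with pmap_kenter_pa()
 * and does its own pmap_update() before touching the page.)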
1359 */ 1360 } 1361 #endif /* DEBUG */ 1362 1363 /* 1364 * uvm_pagefree: free page 1365 * 1366 * => erase page's identity (i.e. remove from object) 1367 * => put page on free list 1368 * => caller must lock owning object (either anon or uvm_object) 1369 * => caller must lock page queues 1370 * => assumes all valid mappings of pg are gone 1371 */ 1372 1373 void 1374 uvm_pagefree(struct vm_page *pg) 1375 { 1376 struct pgflist *pgfl; 1377 struct uvm_cpu *ucpu; 1378 int index, color, queue; 1379 bool iszero; 1380 1381 #ifdef DEBUG 1382 if (pg->uobject == (void *)0xdeadbeef && 1383 pg->uanon == (void *)0xdeadbeef) { 1384 panic("uvm_pagefree: freeing free page %p", pg); 1385 } 1386 #endif /* DEBUG */ 1387 1388 KASSERT((pg->flags & PG_PAGEOUT) == 0); 1389 KASSERT(!(pg->pqflags & PQ_FREE)); 1390 KASSERT(mutex_owned(&uvm_pageqlock) || !uvmpdpol_pageisqueued_p(pg)); 1391 KASSERT(pg->uobject == NULL || mutex_owned(&pg->uobject->vmobjlock)); 1392 KASSERT(pg->uobject != NULL || pg->uanon == NULL || 1393 mutex_owned(&pg->uanon->an_lock)); 1394 1395 /* 1396 * if the page is loaned, resolve the loan instead of freeing. 1397 */ 1398 1399 if (pg->loan_count) { 1400 KASSERT(pg->wire_count == 0); 1401 1402 /* 1403 * if the page is owned by an anon then we just want to 1404 * drop anon ownership. the kernel will free the page when 1405 * it is done with it. if the page is owned by an object, 1406 * remove it from the object and mark it dirty for the benefit 1407 * of possible anon owners. 1408 * 1409 * regardless of previous ownership, wakeup any waiters, 1410 * unbusy the page, and we're done. 1411 */ 1412 1413 if (pg->uobject != NULL) { 1414 uvm_pageremove(pg->uobject, pg); 1415 pg->flags &= ~PG_CLEAN; 1416 } else if (pg->uanon != NULL) { 1417 if ((pg->pqflags & PQ_ANON) == 0) { 1418 pg->loan_count--; 1419 } else { 1420 pg->pqflags &= ~PQ_ANON; 1421 atomic_dec_uint(&uvmexp.anonpages); 1422 } 1423 pg->uanon->an_page = NULL; 1424 pg->uanon = NULL; 1425 } 1426 if (pg->flags & PG_WANTED) { 1427 wakeup(pg); 1428 } 1429 pg->flags &= ~(PG_WANTED|PG_BUSY|PG_RELEASED|PG_PAGER1); 1430 #ifdef UVM_PAGE_TRKOWN 1431 pg->owner_tag = NULL; 1432 #endif 1433 if (pg->loan_count) { 1434 KASSERT(pg->uobject == NULL); 1435 if (pg->uanon == NULL) { 1436 uvm_pagedequeue(pg); 1437 } 1438 return; 1439 } 1440 } 1441 1442 /* 1443 * remove page from its object or anon. 1444 */ 1445 1446 if (pg->uobject != NULL) { 1447 uvm_pageremove(pg->uobject, pg); 1448 } else if (pg->uanon != NULL) { 1449 pg->uanon->an_page = NULL; 1450 atomic_dec_uint(&uvmexp.anonpages); 1451 } 1452 1453 /* 1454 * now remove the page from the queues. 1455 */ 1456 1457 uvm_pagedequeue(pg); 1458 1459 /* 1460 * if the page was wired, unwire it now. 1461 */ 1462 1463 if (pg->wire_count) { 1464 pg->wire_count = 0; 1465 uvmexp.wired--; 1466 } 1467 1468 /* 1469 * and put on free queue 1470 */ 1471 1472 iszero = (pg->flags & PG_ZERO); 1473 index = uvm_page_lookup_freelist(pg); 1474 color = VM_PGCOLOR_BUCKET(pg); 1475 queue = (iszero ? 
PGFL_ZEROS : PGFL_UNKNOWN); 1476 1477 #ifdef DEBUG 1478 pg->uobject = (void *)0xdeadbeef; 1479 pg->uanon = (void *)0xdeadbeef; 1480 #endif 1481 1482 mutex_spin_enter(&uvm_fpageqlock); 1483 pg->pqflags = PQ_FREE; 1484 1485 #ifdef DEBUG 1486 if (iszero) 1487 uvm_pagezerocheck(pg); 1488 #endif /* DEBUG */ 1489 1490 1491 /* global list */ 1492 pgfl = &uvm.page_free[index].pgfl_buckets[color].pgfl_queues[queue]; 1493 LIST_INSERT_HEAD(pgfl, pg, pageq.list); 1494 uvmexp.free++; 1495 if (iszero) { 1496 uvmexp.zeropages++; 1497 } 1498 1499 /* per-cpu list */ 1500 ucpu = curcpu()->ci_data.cpu_uvm; 1501 pg->offset = (uintptr_t)ucpu; 1502 pgfl = &ucpu->page_free[index].pgfl_buckets[color].pgfl_queues[queue]; 1503 LIST_INSERT_HEAD(pgfl, pg, listq.list); 1504 ucpu->pages[queue]++; 1505 if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) { 1506 ucpu->page_idle_zero = vm_page_zero_enable; 1507 } 1508 1509 mutex_spin_exit(&uvm_fpageqlock); 1510 } 1511 1512 /* 1513 * uvm_page_unbusy: unbusy an array of pages. 1514 * 1515 * => pages must either all belong to the same object, or all belong to anons. 1516 * => if pages are object-owned, object must be locked. 1517 * => if pages are anon-owned, anons must be locked. 1518 * => caller must lock page queues if pages may be released. 1519 * => caller must make sure that anon-owned pages are not PG_RELEASED. 1520 */ 1521 1522 void 1523 uvm_page_unbusy(struct vm_page **pgs, int npgs) 1524 { 1525 struct vm_page *pg; 1526 int i; 1527 UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist); 1528 1529 for (i = 0; i < npgs; i++) { 1530 pg = pgs[i]; 1531 if (pg == NULL || pg == PGO_DONTCARE) { 1532 continue; 1533 } 1534 1535 KASSERT(pg->uobject == NULL || 1536 mutex_owned(&pg->uobject->vmobjlock)); 1537 KASSERT(pg->uobject != NULL || 1538 (pg->uanon != NULL && mutex_owned(&pg->uanon->an_lock))); 1539 1540 KASSERT(pg->flags & PG_BUSY); 1541 KASSERT((pg->flags & PG_PAGEOUT) == 0); 1542 if (pg->flags & PG_WANTED) { 1543 wakeup(pg); 1544 } 1545 if (pg->flags & PG_RELEASED) { 1546 UVMHIST_LOG(ubchist, "releasing pg %p", pg,0,0,0); 1547 KASSERT(pg->uobject != NULL || 1548 (pg->uanon != NULL && pg->uanon->an_ref > 0)); 1549 pg->flags &= ~PG_RELEASED; 1550 uvm_pagefree(pg); 1551 } else { 1552 UVMHIST_LOG(ubchist, "unbusying pg %p", pg,0,0,0); 1553 KASSERT((pg->flags & PG_FAKE) == 0); 1554 pg->flags &= ~(PG_WANTED|PG_BUSY); 1555 UVM_PAGE_OWN(pg, NULL); 1556 } 1557 } 1558 } 1559 1560 #if defined(UVM_PAGE_TRKOWN) 1561 /* 1562 * uvm_page_own: set or release page ownership 1563 * 1564 * => this is a debugging function that keeps track of who sets PG_BUSY 1565 * and where they do it. it can be used to track down problems 1566 * such a process setting "PG_BUSY" and never releasing it. 1567 * => page's object [if any] must be locked 1568 * => if "tag" is NULL then we are releasing page ownership 1569 */ 1570 void 1571 uvm_page_own(struct vm_page *pg, const char *tag) 1572 { 1573 struct uvm_object *uobj; 1574 struct vm_anon *anon; 1575 1576 KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0); 1577 1578 uobj = pg->uobject; 1579 anon = pg->uanon; 1580 if (uobj != NULL) { 1581 KASSERT(mutex_owned(&uobj->vmobjlock)); 1582 } else if (anon != NULL) { 1583 KASSERT(mutex_owned(&anon->an_lock)); 1584 } 1585 1586 KASSERT((pg->flags & PG_WANTED) == 0); 1587 1588 /* gain ownership? 
*/ 1589 if (tag) { 1590 KASSERT((pg->flags & PG_BUSY) != 0); 1591 if (pg->owner_tag) { 1592 printf("uvm_page_own: page %p already owned " 1593 "by proc %d [%s]\n", pg, 1594 pg->owner, pg->owner_tag); 1595 panic("uvm_page_own"); 1596 } 1597 pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1; 1598 pg->lowner = (curlwp) ? curlwp->l_lid : (lwpid_t) -1; 1599 pg->owner_tag = tag; 1600 return; 1601 } 1602 1603 /* drop ownership */ 1604 KASSERT((pg->flags & PG_BUSY) == 0); 1605 if (pg->owner_tag == NULL) { 1606 printf("uvm_page_own: dropping ownership of an non-owned " 1607 "page (%p)\n", pg); 1608 panic("uvm_page_own"); 1609 } 1610 if (!uvmpdpol_pageisqueued_p(pg)) { 1611 KASSERT((pg->uanon == NULL && pg->uobject == NULL) || 1612 pg->wire_count > 0); 1613 } else { 1614 KASSERT(pg->wire_count == 0); 1615 } 1616 pg->owner_tag = NULL; 1617 } 1618 #endif 1619 1620 /* 1621 * uvm_pageidlezero: zero free pages while the system is idle. 1622 * 1623 * => try to complete one color bucket at a time, to reduce our impact 1624 * on the CPU cache. 1625 * => we loop until we either reach the target or there is a lwp ready 1626 * to run, or MD code detects a reason to break early. 1627 */ 1628 void 1629 uvm_pageidlezero(void) 1630 { 1631 struct vm_page *pg; 1632 struct pgfreelist *pgfl, *gpgfl; 1633 struct uvm_cpu *ucpu; 1634 int free_list, firstbucket, nextbucket; 1635 1636 ucpu = curcpu()->ci_data.cpu_uvm; 1637 if (!ucpu->page_idle_zero || 1638 ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) { 1639 ucpu->page_idle_zero = false; 1640 return; 1641 } 1642 mutex_enter(&uvm_fpageqlock); 1643 firstbucket = ucpu->page_free_nextcolor; 1644 nextbucket = firstbucket; 1645 do { 1646 for (free_list = 0; free_list < VM_NFREELIST; free_list++) { 1647 if (sched_curcpu_runnable_p()) { 1648 goto quit; 1649 } 1650 pgfl = &ucpu->page_free[free_list]; 1651 gpgfl = &uvm.page_free[free_list]; 1652 while ((pg = LIST_FIRST(&pgfl->pgfl_buckets[ 1653 nextbucket].pgfl_queues[PGFL_UNKNOWN])) != NULL) { 1654 if (sched_curcpu_runnable_p()) { 1655 goto quit; 1656 } 1657 LIST_REMOVE(pg, pageq.list); /* global list */ 1658 LIST_REMOVE(pg, listq.list); /* per-cpu list */ 1659 ucpu->pages[PGFL_UNKNOWN]--; 1660 uvmexp.free--; 1661 KASSERT(pg->pqflags == PQ_FREE); 1662 pg->pqflags = 0; 1663 mutex_spin_exit(&uvm_fpageqlock); 1664 #ifdef PMAP_PAGEIDLEZERO 1665 if (!PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg))) { 1666 1667 /* 1668 * The machine-dependent code detected 1669 * some reason for us to abort zeroing 1670 * pages, probably because there is a 1671 * process now ready to run. 
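					 * The page goes back on the
					 * PGFL_UNKNOWN queues below,
					 * still not zeroed,
					 * uvmexp.zeroaborts is bumped,
					 * and we give up until the
					 * next idle period.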
1672 */ 1673 1674 mutex_spin_enter(&uvm_fpageqlock); 1675 pg->pqflags = PQ_FREE; 1676 LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[ 1677 nextbucket].pgfl_queues[ 1678 PGFL_UNKNOWN], pg, pageq.list); 1679 LIST_INSERT_HEAD(&pgfl->pgfl_buckets[ 1680 nextbucket].pgfl_queues[ 1681 PGFL_UNKNOWN], pg, listq.list); 1682 ucpu->pages[PGFL_UNKNOWN]++; 1683 uvmexp.free++; 1684 uvmexp.zeroaborts++; 1685 goto quit; 1686 } 1687 #else 1688 pmap_zero_page(VM_PAGE_TO_PHYS(pg)); 1689 #endif /* PMAP_PAGEIDLEZERO */ 1690 pg->flags |= PG_ZERO; 1691 1692 mutex_spin_enter(&uvm_fpageqlock); 1693 pg->pqflags = PQ_FREE; 1694 LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[ 1695 nextbucket].pgfl_queues[PGFL_ZEROS], 1696 pg, pageq.list); 1697 LIST_INSERT_HEAD(&pgfl->pgfl_buckets[ 1698 nextbucket].pgfl_queues[PGFL_ZEROS], 1699 pg, listq.list); 1700 ucpu->pages[PGFL_ZEROS]++; 1701 uvmexp.free++; 1702 uvmexp.zeropages++; 1703 } 1704 } 1705 if (ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) { 1706 break; 1707 } 1708 nextbucket = (nextbucket + 1) & uvmexp.colormask; 1709 } while (nextbucket != firstbucket); 1710 ucpu->page_idle_zero = false; 1711 quit: 1712 mutex_spin_exit(&uvm_fpageqlock); 1713 } 1714 1715 /* 1716 * uvm_pagelookup: look up a page 1717 * 1718 * => caller should lock object to keep someone from pulling the page 1719 * out from under it 1720 */ 1721 1722 struct vm_page * 1723 uvm_pagelookup(struct uvm_object *obj, voff_t off) 1724 { 1725 struct vm_page *pg; 1726 1727 KASSERT(mutex_owned(&obj->vmobjlock)); 1728 1729 pg = (struct vm_page *)rb_tree_find_node(&obj->rb_tree, &off); 1730 1731 KASSERT(pg == NULL || obj->uo_npages != 0); 1732 KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 || 1733 (pg->flags & PG_BUSY) != 0); 1734 return(pg); 1735 } 1736 1737 /* 1738 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp 1739 * 1740 * => caller must lock page queues 1741 */ 1742 1743 void 1744 uvm_pagewire(struct vm_page *pg) 1745 { 1746 KASSERT(mutex_owned(&uvm_pageqlock)); 1747 #if defined(READAHEAD_STATS) 1748 if ((pg->pqflags & PQ_READAHEAD) != 0) { 1749 uvm_ra_hit.ev_count++; 1750 pg->pqflags &= ~PQ_READAHEAD; 1751 } 1752 #endif /* defined(READAHEAD_STATS) */ 1753 if (pg->wire_count == 0) { 1754 uvm_pagedequeue(pg); 1755 uvmexp.wired++; 1756 } 1757 pg->wire_count++; 1758 } 1759 1760 /* 1761 * uvm_pageunwire: unwire the page. 1762 * 1763 * => activate if wire count goes to zero. 
1764 * => caller must lock page queues 1765 */ 1766 1767 void 1768 uvm_pageunwire(struct vm_page *pg) 1769 { 1770 KASSERT(mutex_owned(&uvm_pageqlock)); 1771 pg->wire_count--; 1772 if (pg->wire_count == 0) { 1773 uvm_pageactivate(pg); 1774 uvmexp.wired--; 1775 } 1776 } 1777 1778 /* 1779 * uvm_pagedeactivate: deactivate page 1780 * 1781 * => caller must lock page queues 1782 * => caller must check to make sure page is not wired 1783 * => object that page belongs to must be locked (so we can adjust pg->flags) 1784 * => caller must clear the reference on the page before calling 1785 */ 1786 1787 void 1788 uvm_pagedeactivate(struct vm_page *pg) 1789 { 1790 1791 KASSERT(mutex_owned(&uvm_pageqlock)); 1792 KASSERT(pg->wire_count != 0 || uvmpdpol_pageisqueued_p(pg)); 1793 uvmpdpol_pagedeactivate(pg); 1794 } 1795 1796 /* 1797 * uvm_pageactivate: activate page 1798 * 1799 * => caller must lock page queues 1800 */ 1801 1802 void 1803 uvm_pageactivate(struct vm_page *pg) 1804 { 1805 1806 KASSERT(mutex_owned(&uvm_pageqlock)); 1807 #if defined(READAHEAD_STATS) 1808 if ((pg->pqflags & PQ_READAHEAD) != 0) { 1809 uvm_ra_hit.ev_count++; 1810 pg->pqflags &= ~PQ_READAHEAD; 1811 } 1812 #endif /* defined(READAHEAD_STATS) */ 1813 if (pg->wire_count != 0) { 1814 return; 1815 } 1816 uvmpdpol_pageactivate(pg); 1817 } 1818 1819 /* 1820 * uvm_pagedequeue: remove a page from any paging queue 1821 */ 1822 1823 void 1824 uvm_pagedequeue(struct vm_page *pg) 1825 { 1826 1827 if (uvmpdpol_pageisqueued_p(pg)) { 1828 KASSERT(mutex_owned(&uvm_pageqlock)); 1829 } 1830 1831 uvmpdpol_pagedequeue(pg); 1832 } 1833 1834 /* 1835 * uvm_pageenqueue: add a page to a paging queue without activating. 1836 * used where a page is not really demanded (yet). eg. read-ahead 1837 */ 1838 1839 void 1840 uvm_pageenqueue(struct vm_page *pg) 1841 { 1842 1843 KASSERT(mutex_owned(&uvm_pageqlock)); 1844 if (pg->wire_count != 0) { 1845 return; 1846 } 1847 uvmpdpol_pageenqueue(pg); 1848 } 1849 1850 /* 1851 * uvm_pagezero: zero fill a page 1852 * 1853 * => if page is part of an object then the object should be locked 1854 * to protect pg->flags. 1855 */ 1856 1857 void 1858 uvm_pagezero(struct vm_page *pg) 1859 { 1860 pg->flags &= ~PG_CLEAN; 1861 pmap_zero_page(VM_PAGE_TO_PHYS(pg)); 1862 } 1863 1864 /* 1865 * uvm_pagecopy: copy a page 1866 * 1867 * => if page is part of an object then the object should be locked 1868 * to protect pg->flags. 1869 */ 1870 1871 void 1872 uvm_pagecopy(struct vm_page *src, struct vm_page *dst) 1873 { 1874 1875 dst->flags &= ~PG_CLEAN; 1876 pmap_copy_page(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst)); 1877 } 1878 1879 /* 1880 * uvm_pageismanaged: test it see that a page (specified by PA) is managed. 
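 * (i.e. whether atop(pa) falls within one of the vm_physmem[]
 * segments registered with uvm_page_physload(), which is what the
 * vm_physseg_find() call below checks.)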
1881 */ 1882 1883 bool 1884 uvm_pageismanaged(paddr_t pa) 1885 { 1886 1887 return (vm_physseg_find(atop(pa), NULL) != -1); 1888 } 1889 1890 /* 1891 * uvm_page_lookup_freelist: look up the free list for the specified page 1892 */ 1893 1894 int 1895 uvm_page_lookup_freelist(struct vm_page *pg) 1896 { 1897 int lcv; 1898 1899 lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL); 1900 KASSERT(lcv != -1); 1901 return (vm_physmem[lcv].free_list); 1902 } 1903 1904 #if defined(DDB) || defined(DEBUGPRINT) 1905 1906 /* 1907 * uvm_page_printit: actually print the page 1908 */ 1909 1910 static const char page_flagbits[] = UVM_PGFLAGBITS; 1911 static const char page_pqflagbits[] = UVM_PQFLAGBITS; 1912 1913 void 1914 uvm_page_printit(struct vm_page *pg, bool full, 1915 void (*pr)(const char *, ...)) 1916 { 1917 struct vm_page *tpg; 1918 struct uvm_object *uobj; 1919 struct pgflist *pgl; 1920 char pgbuf[128]; 1921 char pqbuf[128]; 1922 1923 (*pr)("PAGE %p:\n", pg); 1924 snprintb(pgbuf, sizeof(pgbuf), page_flagbits, pg->flags); 1925 snprintb(pqbuf, sizeof(pqbuf), page_pqflagbits, pg->pqflags); 1926 (*pr)(" flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n", 1927 pgbuf, pqbuf, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg)); 1928 (*pr)(" uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n", 1929 pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count); 1930 #if defined(UVM_PAGE_TRKOWN) 1931 if (pg->flags & PG_BUSY) 1932 (*pr)(" owning process = %d, tag=%s\n", 1933 pg->owner, pg->owner_tag); 1934 else 1935 (*pr)(" page not busy, no owner\n"); 1936 #else 1937 (*pr)(" [page ownership tracking disabled]\n"); 1938 #endif 1939 1940 if (!full) 1941 return; 1942 1943 /* cross-verify object/anon */ 1944 if ((pg->pqflags & PQ_FREE) == 0) { 1945 if (pg->pqflags & PQ_ANON) { 1946 if (pg->uanon == NULL || pg->uanon->an_page != pg) 1947 (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n", 1948 (pg->uanon) ? pg->uanon->an_page : NULL); 1949 else 1950 (*pr)(" anon backpointer is OK\n"); 1951 } else { 1952 uobj = pg->uobject; 1953 if (uobj) { 1954 (*pr)(" checking object list\n"); 1955 TAILQ_FOREACH(tpg, &uobj->memq, listq.queue) { 1956 if (tpg == pg) { 1957 break; 1958 } 1959 } 1960 if (tpg) 1961 (*pr)(" page found on object list\n"); 1962 else 1963 (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n"); 1964 } 1965 } 1966 } 1967 1968 /* cross-verify page queue */ 1969 if (pg->pqflags & PQ_FREE) { 1970 int fl = uvm_page_lookup_freelist(pg); 1971 int color = VM_PGCOLOR_BUCKET(pg); 1972 pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[ 1973 ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN]; 1974 } else { 1975 pgl = NULL; 1976 } 1977 1978 if (pgl) { 1979 (*pr)(" checking pageq list\n"); 1980 LIST_FOREACH(tpg, pgl, pageq.list) { 1981 if (tpg == pg) { 1982 break; 1983 } 1984 } 1985 if (tpg) 1986 (*pr)(" page found on pageq list\n"); 1987 else 1988 (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! 
<<<\n"); 1989 } 1990 } 1991 1992 /* 1993 * uvm_page_printall - print a summary of all managed pages 1994 */ 1995 1996 void 1997 uvm_page_printall(void (*pr)(const char *, ...)) 1998 { 1999 unsigned i; 2000 struct vm_page *pg; 2001 2002 (*pr)("%18s %4s %4s %18s %18s" 2003 #ifdef UVM_PAGE_TRKOWN 2004 " OWNER" 2005 #endif 2006 "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON"); 2007 for (i = 0; i < vm_nphysseg; i++) { 2008 for (pg = vm_physmem[i].pgs; pg <= vm_physmem[i].lastpg; pg++) { 2009 (*pr)("%18p %04x %04x %18p %18p", 2010 pg, pg->flags, pg->pqflags, pg->uobject, 2011 pg->uanon); 2012 #ifdef UVM_PAGE_TRKOWN 2013 if (pg->flags & PG_BUSY) 2014 (*pr)(" %d [%s]", pg->owner, pg->owner_tag); 2015 #endif 2016 (*pr)("\n"); 2017 } 2018 } 2019 } 2020 2021 #endif /* DDB || DEBUGPRINT */ 2022
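/*
 * Illustrative usage sketch (not part of the interfaces defined in this
 * file): a typical object-backed allocation and release built on the
 * primitives above.  uvm_pagealloc() is assumed here to be the usual
 * uvm_page.h wrapper around uvm_pagealloc_strat() with
 * UVM_PGA_STRAT_NORMAL, and "uobj"/"off" are hypothetical caller state.
 * The page comes back PG_BUSY|PG_FAKE and, with UVM_PGA_ZERO, already
 * zero-filled (and not PG_CLEAN, as the comment in uvm_pagealloc_strat()
 * explains).
 *
 *	struct vm_page *pg;
 *
 *	mutex_enter(&uobj->vmobjlock);
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO)) == NULL) {
 *		mutex_exit(&uobj->vmobjlock);
 *		uvm_wait("pgsketch");
 *		mutex_enter(&uobj->vmobjlock);
 *	}
 *
 *	... use the busy page, then either keep it (uvm_pageactivate()
 *	under uvm_pageqlock and clear PG_BUSY|PG_FAKE) or discard it:
 *
 *	mutex_enter(&uvm_pageqlock);
 *	uvm_pagefree(pg);
 *	mutex_exit(&uvm_pageqlock);
 *	mutex_exit(&uobj->vmobjlock);
 */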