/*	$OpenBSD: uvm_page.c,v 1.73 2009/04/06 17:03:51 oga Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t virtual_space_start;
static vaddr_t virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once the allocator is ready.
 * we static allocate the one bootstrap bucket below...
 */

static struct pglist uvm_bootbucket;

/*
 * History
 */
UVMHIST_DECL(pghist);

/*
 * local prototypes
 */

static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(struct vm_page *pg)
{
	struct pglist *buck;
	UVMHIST_FUNC("uvm_pageinsert"); UVMHIST_CALLED(pghist);

	KASSERT((pg->pg_flags & PG_TABLED) == 0);
	mtx_enter(&uvm.hashlock);
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	TAILQ_INSERT_TAIL(buck, pg, hashq);	/* put in hash */
	mtx_leave(&uvm.hashlock);

	TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(struct vm_page *pg)
{
	struct pglist *buck;
	UVMHIST_FUNC("uvm_pageremove"); UVMHIST_CALLED(pghist);

	KASSERT(pg->pg_flags & PG_TABLED);
	mtx_enter(&uvm.hashlock);
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	TAILQ_REMOVE(buck, pg, hashq);
	mtx_leave(&uvm.hashlock);

#ifdef UBC
	if (pg->uobject->pgops == &uvm_vnodeops) {
		uvm_pgcnt_vnode--;
	}
#endif

	/* object should be locked */
	TAILQ_REMOVE(&pg->uobject->memq, pg, listq);

	atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->pg_version++;
}
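
/*
 * Illustration of the uvm_pageinsert() contract above (this mirrors
 * what uvm_pagerealloc() further below actually does): the caller sets
 * the page's identity and bumps the version counter before inserting:
 *
 *	pg->uobject = newobj;
 *	pg->offset = newoff;
 *	pg->pg_version++;
 *	uvm_pageinsert(pg);
 *
 * uvm_pageremove() undoes this, clearing pg->uobject and bumping
 * pg_version again.
 */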

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray;
	int lcv, i;
	paddr_t paddr;
#if defined(UVMHIST)
	static struct uvm_history_ent pghistbuf[100];
#endif

	UVMHIST_FUNC("uvm_page_init");
	UVMHIST_INIT_STATIC(pghist, pghistbuf);
	UVMHIST_CALLED(pghist);

	/*
	 * init the page queues and page queue locks
	 */

	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		for (i = 0; i < PGFL_NQUEUES; i++)
			TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]);
	}
	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	mtx_init(&uvm.fpageqlock, IPL_VM);

	/*
	 * init the <obj,offset> => <page> hash table.  for now
	 * we just have one bucket (the bootstrap bucket).  later on we
	 * will allocate new buckets as we dynamically resize the hash table.
	 */

	uvm.page_nhash = 1;			/* 1 bucket */
	uvm.page_hashmask = 0;			/* mask for hash function */
	uvm.page_hash = &uvm_bootbucket;	/* install bootstrap bucket */
	TAILQ_INIT(uvm.page_hash);		/* init hash table */
	mtx_init(&uvm.hashlock, IPL_VM);	/* init hash table lock */

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.  our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %ld page(s) in init\n",
			    (long)(n - pagecount));
			panic("uvm_page_init");	/* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
#endif
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init locks for kernel threads
	 */

	simple_lock_init(&uvm.pagedaemon_lock);
	mtx_init(&uvm.aiodoned_lock, IPL_BIO);

	/*
	 * init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 4;
	uvmexp.reserve_kernel = 6;
	uvmexp.anonminpct = 10;
	uvmexp.vnodeminpct = 10;
	uvmexp.vtextminpct = 5;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize(void)
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}
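
/*
 * Example use of uvm_pageboot_alloc() (illustration only; "va" and
 * "npages" are hypothetical locals): machine-dependent bootstrap code
 * that needs wired kernel memory before uvm_page_init() has run can do
 *
 *	vaddr_t va = uvm_pageboot_alloc(npages << PAGE_SHIFT);
 *
 * the size is rounded up to a page boundary, the memory is mapped with
 * pmap_kenter_pa() (or handed out by pmap_steal_memory()), and it is
 * never returned to the free page pool.
 */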

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
static boolean_t uvm_page_physget_freelist(paddr_t *, int);

static boolean_t
uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
{
	int lcv, x;
	UVMHIST_FUNC("uvm_page_physget_freelist"); UVMHIST_CALLED(pghist);

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		if (vm_physmem[lcv].free_list != freelist)
			continue;

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass 2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}

boolean_t
uvm_page_physget(paddr_t *paddrp)
{
	int i;
	UVMHIST_FUNC("uvm_page_physget"); UVMHIST_CALLED(pghist);

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == TRUE)
			return (TRUE);
	return (FALSE);
}
#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int free_list)
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d", free_list);

	if (start >= end)
		panic("uvm_page_physload: start >= end");

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;
		npages = end - start;  /* # of pages */
		pgs = (struct vm_page *)uvm_km_alloc(kernel_map,
		    sizeof(struct vm_page) * npages);
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* zero data, init phys_addr and free_list, and free pages */
		memset(pgs, 0, sizeof(struct vm_page) * npages);
		for (lcv = 0, paddr = ptoa(start) ;
		    lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	/*
	 * done!
	 */

	if (!preload)
		uvm_page_rehash();

	return;
}
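
/*
 * Example of the uvm_page_physload() interface above (hypothetical MD
 * bootstrap code, illustration only; seg_start, seg_end, avail_start
 * and avail_end stand for byte addresses of a RAM segment):
 *
 *	uvm_page_physload(atop(seg_start), atop(seg_end),
 *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
 *
 * all arguments are page frame numbers (hence atop()); pages in the
 * avail range end up on the free list, the rest only get vm_page
 * structures, and at most VM_PHYSSEG_MAX segments can be loaded.
 */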

/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */

void
uvm_page_rehash(void)
{
	int freepages, lcv, bucketcount, oldcount;
	struct pglist *newbuckets, *oldbuckets;
	struct vm_page *pg;
	size_t newsize, oldsize;

	/*
	 * compute number of pages that can go in the free pool
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages +=
		    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

	/*
	 * compute number of buckets needed for this number of pages
	 */

	bucketcount = 1;
	while (bucketcount < freepages)
		bucketcount = bucketcount * 2;

	/*
	 * compute the size of the current table and new table.
	 */

	oldbuckets = uvm.page_hash;
	oldcount = uvm.page_nhash;
	oldsize = round_page(sizeof(struct pglist) * oldcount);
	newsize = round_page(sizeof(struct pglist) * bucketcount);

	/*
	 * allocate the new buckets
	 */

	newbuckets = (struct pglist *) uvm_km_alloc(kernel_map, newsize);
	if (newbuckets == NULL) {
		printf("uvm_page_physrehash: WARNING: could not grow page "
		    "hash table\n");
		return;
	}
	for (lcv = 0 ; lcv < bucketcount ; lcv++)
		TAILQ_INIT(&newbuckets[lcv]);

	/*
	 * now replace the old buckets with the new ones and rehash everything
	 */

	mtx_enter(&uvm.hashlock);
	uvm.page_hash = newbuckets;
	uvm.page_nhash = bucketcount;
	uvm.page_hashmask = bucketcount - 1;	/* power of 2 */

	/* ... and rehash */
	for (lcv = 0 ; lcv < oldcount ; lcv++) {
		while ((pg = TAILQ_FIRST(&oldbuckets[lcv])) != NULL) {
			TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
			TAILQ_INSERT_TAIL(
			    &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
			    pg, hashq);
		}
	}
	mtx_leave(&uvm.hashlock);

	/*
	 * free old bucket array if it is not the boot-time table
	 */

	if (oldbuckets != &uvm_bootbucket)
		uvm_km_free(kernel_map, (vaddr_t) oldbuckets, oldsize);

	/*
	 * done
	 */
	return;
}


#ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump(void); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump(void)
{
	int lcv;

	printf("rehash: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
		    (long long)vm_physmem[lcv].start,
		    (long long)vm_physmem[lcv].end,
		    (long long)vm_physmem[lcv].avail_start,
		    (long long)vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
	printf("number of buckets = %d\n", uvm.page_nhash);
}
#endif

void
uvm_shutdown(void)
{
#ifdef UVM_SWAP_ENCRYPT
	uvm_swap_finicrypt_all();
#endif
}

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 * => policy decision: it is more important to pull a page off of the
 *	appropriate priority free list than it is to get a zero'd or
 *	unknown contents page.  This is because we live with the
 *	consequences of a bad free list decision for the entire
 *	lifetime of the page, e.g. if the page comes from memory that
 *	is slower to access.
 */

struct vm_page *
uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
    int flags, int strat, int free_list)
{
	int lcv, try1, try2, zeroit = 0;
	struct vm_page *pg;
	struct pglist *freeq;
	struct pgfreelist *pgfl;
	boolean_t use_reserve;
	UVMHIST_FUNC("uvm_pagealloc_strat"); UVMHIST_CALLED(pghist);

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(off == trunc_page(off));

	uvm_lock_fpageq();

	/*
	 * check to see if we need to generate some free pages waking
	 * the pagedaemon.
	 */
	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
	    (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	    !((curproc == uvm.pagedaemon_proc) ||
	    (curproc == syncerproc))))
		goto fail;

#if PGFL_NQUEUES != 2
#error uvm_pagealloc_strat needs to be updated
#endif

	/*
	 * If we want a zero'd page, try the ZEROS queue first, otherwise
	 * we try the UNKNOWN queue first.
	 */
	if (flags & UVM_PGA_ZERO) {
		try1 = PGFL_ZEROS;
		try2 = PGFL_UNKNOWN;
	} else {
		try1 = PGFL_UNKNOWN;
		try2 = PGFL_ZEROS;
	}

	UVMHIST_LOG(pghist, "obj=%p off=%lx anon=%p flags=%lx",
	    obj, (u_long)off, anon, flags);
	UVMHIST_LOG(pghist, "strat=%ld free_list=%ld", strat, free_list, 0, 0);
 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check all freelists in descending priority order. */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			pgfl = &uvm.page_free[lcv];
			if ((pg = TAILQ_FIRST((freeq =
			    &pgfl->pgfl_queues[try1]))) != NULL ||
			    (pg = TAILQ_FIRST((freeq =
			    &pgfl->pgfl_queues[try2]))) != NULL)
				goto gotit;
		}

		/* No pages free! */
		goto fail;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
		pgfl = &uvm.page_free[free_list];
		if ((pg = TAILQ_FIRST((freeq =
		    &pgfl->pgfl_queues[try1]))) != NULL ||
		    (pg = TAILQ_FIRST((freeq =
		    &pgfl->pgfl_queues[try2]))) != NULL)
			goto gotit;

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free! */
		goto fail;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	TAILQ_REMOVE(freeq, pg, pageq);
	uvmexp.free--;

	/* update zero'd page count */
	if (pg->pg_flags & PG_ZERO)
		uvmexp.zeropages--;

	/*
	 * update allocation statistics and remember if we have to
	 * zero the page
	 */
	if (flags & UVM_PGA_ZERO) {
		if (pg->pg_flags & PG_ZERO) {
			uvmexp.pga_zerohit++;
			zeroit = 0;
		} else {
			uvmexp.pga_zeromiss++;
			zeroit = 1;
		}
	}

	uvm_unlock_fpageq();		/* unlock free page queue */

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->pg_flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	pg->pg_version++;
	if (anon) {
		anon->an_page = pg;
		atomic_setbits_int(&pg->pg_flags, PQ_ANON);
#ifdef UBC
		uvm_pgcnt_anon++;
#endif
	} else {
		if (obj)
			uvm_pageinsert(pg);
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	if (flags & UVM_PGA_ZERO) {
		/*
		 * A zero'd page is not clean.  If we got a page not already
		 * zero'd, then we have to zero it ourselves.
		 */
		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		if (zeroit)
			pmap_zero_page(pg);
	}

	UVMHIST_LOG(pghist, "allocated pg %p/%lx", pg,
	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);
	return(pg);

 fail:
	uvm_unlock_fpageq();
	UVMHIST_LOG(pghist, "failed!", 0, 0, 0, 0);
	return (NULL);
}

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
{

	UVMHIST_FUNC("uvm_pagerealloc"); UVMHIST_CALLED(pghist);

	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->pg_version++;
		uvm_pageinsert(pg);
	}
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(struct vm_page *pg)
{
	int saved_loan_count = pg->loan_count;
	UVMHIST_FUNC("uvm_pagefree"); UVMHIST_CALLED(pghist);

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p", pg);
	}
#endif

	UVMHIST_LOG(pghist, "freeing pg %p/%lx", pg,
	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->pg_flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.  the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		/* in case an anon takes over */
		if (saved_loan_count)
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
		 * return (when the last loan is dropped, then the page can be
		 * freed by whatever was holding the last loan).
		 */

		if (saved_loan_count)
			return;
	} else if (saved_loan_count && pg->uanon) {
		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
		return;
	}
	KASSERT(saved_loan_count == 0);

	/*
	 * now remove the page from the queues
	 */

	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}
	if (pg->uanon) {
		pg->uanon->an_page = NULL;
#ifdef UBC
		uvm_pgcnt_anon--;
#endif
	}

	/*
	 * and put on free queue
	 */

	atomic_clearbits_int(&pg->pg_flags, PG_ZERO);

	uvm_lock_fpageq();
	TAILQ_INSERT_TAIL(&uvm.page_free[
	    uvm_page_lookup_freelist(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
	atomic_clearbits_int(&pg->pg_flags, PQ_MASK);
	atomic_setbits_int(&pg->pg_flags, PQ_FREE);
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	uvmexp.free++;

	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		uvm.page_idle_zero = vm_page_zero_enable;

	uvm_unlock_fpageq();
}

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are object-owned, object must be locked.
 * => if pages are anon-owned, anons must be unlocked and have 0 refcount.
 */

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int i;
	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(pdhist);

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->pg_flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->pg_flags & PG_RELEASED) {
			UVMHIST_LOG(pdhist, "releasing pg %p", pg,0,0,0);
			uobj = pg->uobject;
			if (uobj != NULL) {
				uobj->pgops->pgo_releasepg(pg, NULL);
			} else {
				atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				uvm_anfree(pg->uanon);
			}
		} else {
			UVMHIST_LOG(pdhist, "unbusying pg %p", pg,0,0,0);
			atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.   it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(struct vm_page *pg, char *tag)
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of an non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => we do at least one iteration per call, if we are below the target.
 * => we loop until we either reach the target or whichqs indicates that
 *	there is a process ready to run.
 */
void
uvm_pageidlezero(void)
{
	struct vm_page *pg;
	struct pgfreelist *pgfl;
	int free_list;
	UVMHIST_FUNC("uvm_pageidlezero"); UVMHIST_CALLED(pghist);

	do {
		uvm_lock_fpageq();

		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			pgfl = &uvm.page_free[free_list];
			if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
			    PGFL_UNKNOWN])) != NULL)
				break;
		}

		if (pg == NULL) {
			/*
			 * No non-zero'd pages; don't bother trying again
			 * until we know we have non-zero'd pages free.
			 */
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
		uvmexp.free--;
		uvm_unlock_fpageq();

#ifdef PMAP_PAGEIDLEZERO
		if (PMAP_PAGEIDLEZERO(pg) == FALSE) {
			/*
			 * The machine-dependent code detected some
			 * reason for us to abort zeroing pages,
			 * probably because there is a process now
			 * ready to run.
			 */
			uvm_lock_fpageq();
			TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
			    pg, pageq);
			uvmexp.free++;
			uvmexp.zeroaborts++;
			uvm_unlock_fpageq();
			return;
		}
#else
		/*
		 * XXX This will toast the cache unless the pmap_zero_page()
		 * XXX implementation does uncached access.
		 */
		pmap_zero_page(pg);
#endif
		atomic_setbits_int(&pg->pg_flags, PG_ZERO);

		uvm_lock_fpageq();
		TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
		uvmexp.free++;
		uvmexp.zeropages++;
		uvm_unlock_fpageq();
	} while (curcpu_is_idle());
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX > 1
/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
int
vm_physseg_find(paddr_t pframe, int *offp)
{

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	/* binary search for it */
	int	start, len, try;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= vm_physmem[try].start) {
			/* was try correct? */
			if (pframe < vm_physmem[try].end) {
				if (offp)
					*offp = pframe - vm_physmem[try].start;
				return(try);		/* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);

#else
	/* linear search for it */
	int	lcv;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		if (pframe >= vm_physmem[lcv].start &&
		    pframe < vm_physmem[lcv].end) {
			if (offp)
				*offp = pframe - vm_physmem[lcv].start;
			return(lcv);		/* got it */
		}
	}
	return(-1);

#endif
}
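
/*
 * Quick check of the "adjust" identity used above, for illustration:
 * with len = 7, round(7/2) - 1 = 4 - 1 = 3 and trunc((7 - 1)/2) = 3;
 * with len = 8, round(8/2) - 1 = 4 - 1 = 3 and trunc((8 - 1)/2) = 3.
 * So halving len in the loop header plus the extra len-- after moving
 * start past "try" keeps the remaining search range exact.
 */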

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).   used in some MD code as well.
 */
struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);

	return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
}
#endif /* VM_PHYSSEG_MAX > 1 */

/*
 * uvm_pagelookup: look up a page
 *
 * => caller should lock object to keep someone from pulling the page
 *	out from under it
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
	struct vm_page *pg;
	struct pglist *buck;

	mtx_enter(&uvm.hashlock);
	buck = &uvm.page_hash[uvm_pagehash(obj,off)];

	TAILQ_FOREACH(pg, buck, hashq) {
		if (pg->uobject == obj && pg->offset == off) {
			break;
		}
	}
	mtx_leave(&uvm.hashlock);
	return(pg);
}

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */
void
uvm_pagewire(struct vm_page *pg)
{
	if (pg->wire_count == 0) {
		if (pg->pg_flags & PQ_ACTIVE) {
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active--;
		}
		if (pg->pg_flags & PQ_INACTIVE) {
			if (pg->pg_flags & PQ_SWAPBACKED)
				TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
			else
				TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
			uvmexp.inactive--;
		}
		uvmexp.wired++;
	}
	pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */
void
uvm_pageunwire(struct vm_page *pg)
{
	pg->wire_count--;
	if (pg->wire_count == 0) {
		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
		uvmexp.active++;
		atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.wired--;
	}
}

/*
 * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 */
void
uvm_pagedeactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if ((pg->pg_flags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
		atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive++;
		pmap_clear_reference(pg);
		/*
		 * update the "clean" bit.  this isn't 100%
		 * accurate, and doesn't have to be.  we'll
		 * re-sync it after we zap all mappings when
		 * scanning the inactive list.
		 */
		if ((pg->pg_flags & PG_CLEAN) != 0 &&
		    pmap_is_modified(pg))
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	}
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */
void
uvm_pageactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}
	if (pg->wire_count == 0) {

		/*
		 * if page is already active, remove it from list so we
		 * can put it at tail.  if it wasn't active, then mark
		 * it active and bump active count
		 */
		if (pg->pg_flags & PQ_ACTIVE)
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		else {
			atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active++;
		}

		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
	}
}

/*
 * uvm_pagezero: zero fill a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagezero(struct vm_page *pg)
{
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	pmap_zero_page(pg);
}

/*
 * uvm_pagecopy: copy a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{
	atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
	pmap_copy_page(src, dst);
}

/*
 * uvm_page_lookup_freelist: look up the free list for the specified page
 */
int
uvm_page_lookup_freelist(struct vm_page *pg)
{
#if VM_PHYSSEG_MAX == 1
	return (vm_physmem[0].free_list);
#else
	int lcv;

	lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
	KASSERT(lcv != -1);
	return (vm_physmem[lcv].free_list);
#endif
}