1 /* $OpenBSD: uvm_page.c,v 1.22 2001/08/11 10:57:22 art Exp $ */ 2 /* $NetBSD: uvm_page.c,v 1.37 2000/06/09 04:43:19 soda Exp $ */ 3 4 /* 5 * Copyright (c) 1997 Charles D. Cranor and Washington University. 6 * Copyright (c) 1991, 1993, The Regents of the University of California. 7 * 8 * All rights reserved. 9 * 10 * This code is derived from software contributed to Berkeley by 11 * The Mach Operating System project at Carnegie-Mellon University. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. All advertising materials mentioning features or use of this software 22 * must display the following acknowledgement: 23 * This product includes software developed by Charles D. Cranor, 24 * Washington University, the University of California, Berkeley and 25 * its contributors. 26 * 4. Neither the name of the University nor the names of its contributors 27 * may be used to endorse or promote products derived from this software 28 * without specific prior written permission. 29 * 30 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 31 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 33 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 34 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 40 * SUCH DAMAGE. 41 * 42 * @(#)vm_page.c 8.3 (Berkeley) 3/21/94 43 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp 44 * 45 * 46 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 47 * All rights reserved. 48 * 49 * Permission to use, copy, modify and distribute this software and 50 * its documentation is hereby granted, provided that both the copyright 51 * notice and this permission notice appear in all copies of the 52 * software, derivative works or modified versions, and any portions 53 * thereof, and that both notices appear in supporting documentation. 54 * 55 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 56 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 57 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 58 * 59 * Carnegie Mellon requests users of this software to return to 60 * 61 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 62 * School of Computer Science 63 * Carnegie Mellon University 64 * Pittsburgh PA 15213-3890 65 * 66 * any improvements or extensions that they make and grant Carnegie the 67 * rights to redistribute these changes. 68 */ 69 70 /* 71 * uvm_page.c: page ops. 72 */ 73 74 #include <sys/param.h> 75 #include <sys/systm.h> 76 #include <sys/malloc.h> 77 #include <sys/sched.h> 78 79 #include <vm/vm.h> 80 #include <vm/vm_page.h> 81 #include <vm/vm_kern.h> 82 83 #define UVM_PAGE /* pull in uvm_page.h functions */ 84 #include <uvm/uvm.h> 85 86 /* 87 * global vars... XXXCDC: move to uvm. structure. 88 */ 89 90 /* 91 * physical memory config is stored in vm_physmem. 92 */ 93 94 struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */ 95 int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */ 96 97 /* 98 * Some supported CPUs in a given architecture don't support all 99 * of the things necessary to do idle page zero'ing efficiently. 100 * We therefore provide a way to disable it from machdep code here. 101 */ 102 103 boolean_t vm_page_zero_enable = TRUE; 104 105 /* 106 * local variables 107 */ 108 109 /* 110 * these variables record the values returned by vm_page_bootstrap, 111 * for debugging purposes. The implementation of uvm_pageboot_alloc 112 * and pmap_startup here also uses them internally. 113 */ 114 115 static vaddr_t virtual_space_start; 116 static vaddr_t virtual_space_end; 117 118 /* 119 * we use a hash table with only one bucket during bootup. we will 120 * later rehash (resize) the hash table once the allocator is ready. 121 * we static allocate the one bootstrap bucket below... 122 */ 123 124 static struct pglist uvm_bootbucket; 125 126 /* 127 * local prototypes 128 */ 129 130 static void uvm_pageinsert __P((struct vm_page *)); 131 132 133 /* 134 * inline functions 135 */ 136 137 /* 138 * uvm_pageinsert: insert a page in the object and the hash table 139 * 140 * => caller must lock object 141 * => caller must lock page queues 142 * => call should have already set pg's object and offset pointers 143 * and bumped the version counter 144 */ 145 146 __inline static void 147 uvm_pageinsert(pg) 148 struct vm_page *pg; 149 { 150 struct pglist *buck; 151 int s; 152 153 #ifdef DIAGNOSTIC 154 if (pg->flags & PG_TABLED) 155 panic("uvm_pageinsert: already inserted"); 156 #endif 157 158 buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)]; 159 s = splimp(); 160 simple_lock(&uvm.hashlock); 161 TAILQ_INSERT_TAIL(buck, pg, hashq); /* put in hash */ 162 simple_unlock(&uvm.hashlock); 163 splx(s); 164 165 TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */ 166 pg->flags |= PG_TABLED; 167 pg->uobject->uo_npages++; 168 169 } 170 171 /* 172 * uvm_page_remove: remove page from object and hash 173 * 174 * => caller must lock object 175 * => caller must lock page queues 176 */ 177 178 void __inline 179 uvm_pageremove(pg) 180 struct vm_page *pg; 181 { 182 struct pglist *buck; 183 int s; 184 185 #ifdef DIAGNOSTIC 186 if ((pg->flags & (PG_FAULTING)) != 0) 187 panic("uvm_pageremove: page is faulting"); 188 #endif 189 190 if ((pg->flags & PG_TABLED) == 0) 191 return; /* XXX: log */ 192 193 buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)]; 194 s = splimp(); 195 simple_lock(&uvm.hashlock); 196 TAILQ_REMOVE(buck, pg, hashq); 197 simple_unlock(&uvm.hashlock); 198 splx(s); 199 200 /* object should be locked */ 201 TAILQ_REMOVE(&pg->uobject->memq, pg, listq); 202 203 pg->flags &= ~PG_TABLED; 204 pg->uobject->uo_npages--; 205 pg->uobject = NULL; 206 pg->version++; 207 208 } 209 210 /* 211 * uvm_page_init: init the page system. called from uvm_init(). 212 * 213 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp 214 */ 215 216 void 217 uvm_page_init(kvm_startp, kvm_endp) 218 vaddr_t *kvm_startp, *kvm_endp; 219 { 220 vsize_t freepages, pagecount, n; 221 vm_page_t pagearray; 222 int lcv, i; 223 paddr_t paddr; 224 225 226 /* 227 * step 1: init the page queues and page queue locks 228 */ 229 for (lcv = 0; lcv < VM_NFREELIST; lcv++) { 230 for (i = 0; i < PGFL_NQUEUES; i++) 231 TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]); 232 } 233 TAILQ_INIT(&uvm.page_active); 234 TAILQ_INIT(&uvm.page_inactive_swp); 235 TAILQ_INIT(&uvm.page_inactive_obj); 236 simple_lock_init(&uvm.pageqlock); 237 simple_lock_init(&uvm.fpageqlock); 238 239 /* 240 * step 2: init the <obj,offset> => <page> hash table. for now 241 * we just have one bucket (the bootstrap bucket). later on we 242 * will allocate new buckets as we dynamically resize the hash table. 243 */ 244 245 uvm.page_nhash = 1; /* 1 bucket */ 246 uvm.page_hashmask = 0; /* mask for hash function */ 247 uvm.page_hash = &uvm_bootbucket; /* install bootstrap bucket */ 248 TAILQ_INIT(uvm.page_hash); /* init hash table */ 249 simple_lock_init(&uvm.hashlock); /* init hash table lock */ 250 251 /* 252 * step 3: allocate vm_page structures. 253 */ 254 255 /* 256 * sanity check: 257 * before calling this function the MD code is expected to register 258 * some free RAM with the uvm_page_physload() function. our job 259 * now is to allocate vm_page structures for this memory. 260 */ 261 262 if (vm_nphysseg == 0) 263 panic("vm_page_bootstrap: no memory pre-allocated"); 264 265 /* 266 * first calculate the number of free pages... 267 * 268 * note that we use start/end rather than avail_start/avail_end. 269 * this allows us to allocate extra vm_page structures in case we 270 * want to return some memory to the pool after booting. 271 */ 272 273 freepages = 0; 274 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 275 freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start); 276 277 /* 278 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can 279 * use. for each page of memory we use we need a vm_page structure. 280 * thus, the total number of pages we can use is the total size of 281 * the memory divided by the PAGE_SIZE plus the size of the vm_page 282 * structure. we add one to freepages as a fudge factor to avoid 283 * truncation errors (since we can only allocate in terms of whole 284 * pages). 285 */ 286 287 pagecount = ((freepages + 1) << PAGE_SHIFT) / 288 (PAGE_SIZE + sizeof(struct vm_page)); 289 pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount * 290 sizeof(struct vm_page)); 291 memset(pagearray, 0, pagecount * sizeof(struct vm_page)); 292 293 /* 294 * step 4: init the vm_page structures and put them in the correct 295 * place... 296 */ 297 298 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) { 299 300 n = vm_physmem[lcv].end - vm_physmem[lcv].start; 301 if (n > pagecount) { 302 printf("uvm_page_init: lost %ld page(s) in init\n", 303 (long)(n - pagecount)); 304 panic("uvm_page_init"); /* XXXCDC: shouldn't happen? */ 305 /* n = pagecount; */ 306 } 307 /* set up page array pointers */ 308 vm_physmem[lcv].pgs = pagearray; 309 pagearray += n; 310 pagecount -= n; 311 vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1); 312 313 /* init and free vm_pages (we've already zeroed them) */ 314 paddr = ptoa(vm_physmem[lcv].start); 315 for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) { 316 vm_physmem[lcv].pgs[i].phys_addr = paddr; 317 if (atop(paddr) >= vm_physmem[lcv].avail_start && 318 atop(paddr) <= vm_physmem[lcv].avail_end) { 319 uvmexp.npages++; 320 /* add page to free pool */ 321 uvm_pagefree(&vm_physmem[lcv].pgs[i]); 322 } 323 } 324 } 325 /* 326 * step 5: pass up the values of virtual_space_start and 327 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper 328 * layers of the VM. 329 */ 330 331 *kvm_startp = round_page(virtual_space_start); 332 *kvm_endp = trunc_page(virtual_space_end); 333 334 /* 335 * step 6: init pagedaemon lock 336 */ 337 338 simple_lock_init(&uvm.pagedaemon_lock); 339 340 /* 341 * step 7: init reserve thresholds 342 * XXXCDC - values may need adjusting 343 */ 344 uvmexp.reserve_pagedaemon = 4; 345 uvmexp.reserve_kernel = 6; 346 347 /* 348 * step 8: determine if we should zero pages in the idle 349 * loop. 350 * 351 * XXXJRT - might consider zero'ing up to the target *now*, 352 * but that could take an awfully long time if you 353 * have a lot of memory. 354 */ 355 uvm.page_idle_zero = vm_page_zero_enable; 356 357 /* 358 * done! 359 */ 360 361 uvm.page_init_done = TRUE; 362 } 363 364 /* 365 * uvm_setpagesize: set the page size 366 * 367 * => sets page_shift and page_mask from uvmexp.pagesize. 368 * => XXXCDC: move global vars. 369 */ 370 371 void 372 uvm_setpagesize() 373 { 374 if (uvmexp.pagesize == 0) 375 uvmexp.pagesize = DEFAULT_PAGE_SIZE; 376 uvmexp.pagemask = uvmexp.pagesize - 1; 377 if ((uvmexp.pagemask & uvmexp.pagesize) != 0) 378 panic("uvm_setpagesize: page size not a power of two"); 379 for (uvmexp.pageshift = 0; ; uvmexp.pageshift++) 380 if ((1 << uvmexp.pageshift) == uvmexp.pagesize) 381 break; 382 } 383 384 /* 385 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping 386 */ 387 388 vaddr_t 389 uvm_pageboot_alloc(size) 390 vsize_t size; 391 { 392 #if defined(PMAP_STEAL_MEMORY) 393 vaddr_t addr; 394 395 /* 396 * defer bootstrap allocation to MD code (it may want to allocate 397 * from a direct-mapped segment). pmap_steal_memory should round 398 * off virtual_space_start/virtual_space_end. 399 */ 400 401 addr = pmap_steal_memory(size, &virtual_space_start, 402 &virtual_space_end); 403 404 return(addr); 405 406 #else /* !PMAP_STEAL_MEMORY */ 407 408 static boolean_t initialized = FALSE; 409 vaddr_t addr, vaddr; 410 paddr_t paddr; 411 412 /* round to page size */ 413 size = round_page(size); 414 415 /* 416 * on first call to this function, initialize ourselves. 417 */ 418 if (initialized == FALSE) { 419 pmap_virtual_space(&virtual_space_start, &virtual_space_end); 420 421 /* round it the way we like it */ 422 virtual_space_start = round_page(virtual_space_start); 423 virtual_space_end = trunc_page(virtual_space_end); 424 425 initialized = TRUE; 426 } 427 428 /* 429 * allocate virtual memory for this request 430 */ 431 if (virtual_space_start == virtual_space_end || 432 (virtual_space_end - virtual_space_start) < size) 433 panic("uvm_pageboot_alloc: out of virtual space"); 434 435 addr = virtual_space_start; 436 437 #ifdef PMAP_GROWKERNEL 438 /* 439 * If the kernel pmap can't map the requested space, 440 * then allocate more resources for it. 441 */ 442 if (uvm_maxkaddr < (addr + size)) { 443 uvm_maxkaddr = pmap_growkernel(addr + size); 444 if (uvm_maxkaddr < (addr + size)) 445 panic("uvm_pageboot_alloc: pmap_growkernel() failed"); 446 } 447 #endif 448 449 virtual_space_start += size; 450 451 /* 452 * allocate and mapin physical pages to back new virtual pages 453 */ 454 455 for (vaddr = round_page(addr) ; vaddr < addr + size ; 456 vaddr += PAGE_SIZE) { 457 458 if (!uvm_page_physget(&paddr)) 459 panic("uvm_pageboot_alloc: out of memory"); 460 461 /* 462 * Note this memory is no longer managed, so using 463 * pmap_kenter is safe. 464 */ 465 pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE); 466 } 467 return(addr); 468 #endif /* PMAP_STEAL_MEMORY */ 469 } 470 471 #if !defined(PMAP_STEAL_MEMORY) 472 /* 473 * uvm_page_physget: "steal" one page from the vm_physmem structure. 474 * 475 * => attempt to allocate it off the end of a segment in which the "avail" 476 * values match the start/end values. if we can't do that, then we 477 * will advance both values (making them equal, and removing some 478 * vm_page structures from the non-avail area). 479 * => return false if out of memory. 480 */ 481 482 /* subroutine: try to allocate from memory chunks on the specified freelist */ 483 static boolean_t uvm_page_physget_freelist __P((paddr_t *, int)); 484 485 static boolean_t 486 uvm_page_physget_freelist(paddrp, freelist) 487 paddr_t *paddrp; 488 int freelist; 489 { 490 int lcv, x; 491 492 /* pass 1: try allocating from a matching end */ 493 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \ 494 (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH) 495 for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--) 496 #else 497 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 498 #endif 499 { 500 501 if (uvm.page_init_done == TRUE) 502 panic("vm_page_physget: called _after_ bootstrap"); 503 504 if (vm_physmem[lcv].free_list != freelist) 505 continue; 506 507 /* try from front */ 508 if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start && 509 vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) { 510 *paddrp = ptoa(vm_physmem[lcv].avail_start); 511 vm_physmem[lcv].avail_start++; 512 vm_physmem[lcv].start++; 513 /* nothing left? nuke it */ 514 if (vm_physmem[lcv].avail_start == 515 vm_physmem[lcv].end) { 516 if (vm_nphysseg == 1) 517 panic("vm_page_physget: out of memory!"); 518 vm_nphysseg--; 519 for (x = lcv ; x < vm_nphysseg ; x++) 520 /* structure copy */ 521 vm_physmem[x] = vm_physmem[x+1]; 522 } 523 return (TRUE); 524 } 525 526 /* try from rear */ 527 if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end && 528 vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) { 529 *paddrp = ptoa(vm_physmem[lcv].avail_end - 1); 530 vm_physmem[lcv].avail_end--; 531 vm_physmem[lcv].end--; 532 /* nothing left? nuke it */ 533 if (vm_physmem[lcv].avail_end == 534 vm_physmem[lcv].start) { 535 if (vm_nphysseg == 1) 536 panic("vm_page_physget: out of memory!"); 537 vm_nphysseg--; 538 for (x = lcv ; x < vm_nphysseg ; x++) 539 /* structure copy */ 540 vm_physmem[x] = vm_physmem[x+1]; 541 } 542 return (TRUE); 543 } 544 } 545 546 /* pass2: forget about matching ends, just allocate something */ 547 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \ 548 (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH) 549 for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--) 550 #else 551 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 552 #endif 553 { 554 555 /* any room in this bank? */ 556 if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end) 557 continue; /* nope */ 558 559 *paddrp = ptoa(vm_physmem[lcv].avail_start); 560 vm_physmem[lcv].avail_start++; 561 /* truncate! */ 562 vm_physmem[lcv].start = vm_physmem[lcv].avail_start; 563 564 /* nothing left? nuke it */ 565 if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) { 566 if (vm_nphysseg == 1) 567 panic("vm_page_physget: out of memory!"); 568 vm_nphysseg--; 569 for (x = lcv ; x < vm_nphysseg ; x++) 570 /* structure copy */ 571 vm_physmem[x] = vm_physmem[x+1]; 572 } 573 return (TRUE); 574 } 575 576 return (FALSE); /* whoops! */ 577 } 578 579 boolean_t 580 uvm_page_physget(paddrp) 581 paddr_t *paddrp; 582 { 583 int i; 584 585 /* try in the order of freelist preference */ 586 for (i = 0; i < VM_NFREELIST; i++) 587 if (uvm_page_physget_freelist(paddrp, i) == TRUE) 588 return (TRUE); 589 return (FALSE); 590 } 591 #endif /* PMAP_STEAL_MEMORY */ 592 593 /* 594 * uvm_page_physload: load physical memory into VM system 595 * 596 * => all args are PFs 597 * => all pages in start/end get vm_page structures 598 * => areas marked by avail_start/avail_end get added to the free page pool 599 * => we are limited to VM_PHYSSEG_MAX physical memory segments 600 */ 601 602 void 603 uvm_page_physload(start, end, avail_start, avail_end, free_list) 604 paddr_t start, end, avail_start, avail_end; 605 int free_list; 606 { 607 int preload, lcv; 608 psize_t npages; 609 struct vm_page *pgs; 610 struct vm_physseg *ps; 611 612 if (uvmexp.pagesize == 0) 613 panic("uvm_page_physload: page size not set!"); 614 615 if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT) 616 panic("uvm_page_physload: bad free list %d\n", free_list); 617 618 if (start >= end) 619 panic("uvm_page_physload: start >= end"); 620 621 /* 622 * do we have room? 623 */ 624 if (vm_nphysseg == VM_PHYSSEG_MAX) { 625 printf("uvm_page_physload: unable to load physical memory " 626 "segment\n"); 627 printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n", 628 VM_PHYSSEG_MAX, (long long)start, (long long)end); 629 return; 630 } 631 632 /* 633 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been 634 * called yet, so malloc is not available). 635 */ 636 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) { 637 if (vm_physmem[lcv].pgs) 638 break; 639 } 640 preload = (lcv == vm_nphysseg); 641 642 /* 643 * if VM is already running, attempt to malloc() vm_page structures 644 */ 645 if (!preload) { 646 #if defined(VM_PHYSSEG_NOADD) 647 panic("uvm_page_physload: tried to add RAM after vm_mem_init"); 648 #else 649 /* XXXCDC: need some sort of lockout for this case */ 650 paddr_t paddr; 651 npages = end - start; /* # of pages */ 652 MALLOC(pgs, struct vm_page *, sizeof(struct vm_page) * npages, 653 M_VMPAGE, M_NOWAIT); 654 if (pgs == NULL) { 655 printf("uvm_page_physload: can not malloc vm_page " 656 "structs for segment\n"); 657 printf("\tignoring 0x%lx -> 0x%lx\n", start, end); 658 return; 659 } 660 /* zero data, init phys_addr and free_list, and free pages */ 661 memset(pgs, 0, sizeof(struct vm_page) * npages); 662 for (lcv = 0, paddr = ptoa(start) ; 663 lcv < npages ; lcv++, paddr += PAGE_SIZE) { 664 pgs[lcv].phys_addr = paddr; 665 pgs[lcv].free_list = free_list; 666 if (atop(paddr) >= avail_start && 667 atop(paddr) <= avail_end) 668 uvm_pagefree(&pgs[lcv]); 669 } 670 /* XXXCDC: incomplete: need to update uvmexp.free, what else? */ 671 /* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */ 672 #endif 673 } else { 674 675 /* gcc complains if these don't get init'd */ 676 pgs = NULL; 677 npages = 0; 678 679 } 680 681 /* 682 * now insert us in the proper place in vm_physmem[] 683 */ 684 685 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM) 686 687 /* random: put it at the end (easy!) */ 688 ps = &vm_physmem[vm_nphysseg]; 689 690 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH) 691 692 { 693 int x; 694 /* sort by address for binary search */ 695 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 696 if (start < vm_physmem[lcv].start) 697 break; 698 ps = &vm_physmem[lcv]; 699 /* move back other entries, if necessary ... */ 700 for (x = vm_nphysseg ; x > lcv ; x--) 701 /* structure copy */ 702 vm_physmem[x] = vm_physmem[x - 1]; 703 } 704 705 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) 706 707 { 708 int x; 709 /* sort by largest segment first */ 710 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 711 if ((end - start) > 712 (vm_physmem[lcv].end - vm_physmem[lcv].start)) 713 break; 714 ps = &vm_physmem[lcv]; 715 /* move back other entries, if necessary ... */ 716 for (x = vm_nphysseg ; x > lcv ; x--) 717 /* structure copy */ 718 vm_physmem[x] = vm_physmem[x - 1]; 719 } 720 721 #else 722 723 panic("uvm_page_physload: unknown physseg strategy selected!"); 724 725 #endif 726 727 ps->start = start; 728 ps->end = end; 729 ps->avail_start = avail_start; 730 ps->avail_end = avail_end; 731 if (preload) { 732 ps->pgs = NULL; 733 } else { 734 ps->pgs = pgs; 735 ps->lastpg = pgs + npages - 1; 736 } 737 ps->free_list = free_list; 738 vm_nphysseg++; 739 740 /* 741 * done! 742 */ 743 744 if (!preload) 745 uvm_page_rehash(); 746 747 return; 748 } 749 750 /* 751 * uvm_page_rehash: reallocate hash table based on number of free pages. 752 */ 753 754 void 755 uvm_page_rehash() 756 { 757 int freepages, lcv, bucketcount, s, oldcount; 758 struct pglist *newbuckets, *oldbuckets; 759 struct vm_page *pg; 760 size_t newsize, oldsize; 761 762 /* 763 * compute number of pages that can go in the free pool 764 */ 765 766 freepages = 0; 767 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 768 freepages += 769 (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start); 770 771 /* 772 * compute number of buckets needed for this number of pages 773 */ 774 775 bucketcount = 1; 776 while (bucketcount < freepages) 777 bucketcount = bucketcount * 2; 778 779 /* 780 * compute the size of the current table and new table. 781 */ 782 783 oldbuckets = uvm.page_hash; 784 oldcount = uvm.page_nhash; 785 oldsize = round_page(sizeof(struct pglist) * oldcount); 786 newsize = round_page(sizeof(struct pglist) * bucketcount); 787 788 /* 789 * allocate the new buckets 790 */ 791 792 newbuckets = (struct pglist *) uvm_km_alloc(kernel_map, newsize); 793 if (newbuckets == NULL) { 794 printf("uvm_page_physrehash: WARNING: could not grow page " 795 "hash table\n"); 796 return; 797 } 798 for (lcv = 0 ; lcv < bucketcount ; lcv++) 799 TAILQ_INIT(&newbuckets[lcv]); 800 801 /* 802 * now replace the old buckets with the new ones and rehash everything 803 */ 804 805 s = splimp(); 806 simple_lock(&uvm.hashlock); 807 uvm.page_hash = newbuckets; 808 uvm.page_nhash = bucketcount; 809 uvm.page_hashmask = bucketcount - 1; /* power of 2 */ 810 811 /* ... and rehash */ 812 for (lcv = 0 ; lcv < oldcount ; lcv++) { 813 while ((pg = oldbuckets[lcv].tqh_first) != NULL) { 814 TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq); 815 TAILQ_INSERT_TAIL( 816 &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)], 817 pg, hashq); 818 } 819 } 820 simple_unlock(&uvm.hashlock); 821 splx(s); 822 823 /* 824 * free old bucket array if is not the boot-time table 825 */ 826 827 if (oldbuckets != &uvm_bootbucket) 828 uvm_km_free(kernel_map, (vaddr_t) oldbuckets, oldsize); 829 830 /* 831 * done 832 */ 833 return; 834 } 835 836 837 #if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */ 838 839 void uvm_page_physdump __P((void)); /* SHUT UP GCC */ 840 841 /* call from DDB */ 842 void 843 uvm_page_physdump() 844 { 845 int lcv; 846 847 printf("rehash: physical memory config [segs=%d of %d]:\n", 848 vm_nphysseg, VM_PHYSSEG_MAX); 849 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 850 printf("0x%llx->0x%llx [0x%llx->0x%llx]\n", 851 (long long)vm_physmem[lcv].start, 852 (long long)vm_physmem[lcv].end, 853 (long long)vm_physmem[lcv].avail_start, 854 (long long)vm_physmem[lcv].avail_end); 855 printf("STRATEGY = "); 856 switch (VM_PHYSSEG_STRAT) { 857 case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break; 858 case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break; 859 case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break; 860 default: printf("<<UNKNOWN>>!!!!\n"); 861 } 862 printf("number of buckets = %d\n", uvm.page_nhash); 863 } 864 #endif 865 866 /* 867 * uvm_pagealloc_strat: allocate vm_page from a particular free list. 868 * 869 * => return null if no pages free 870 * => wake up pagedaemon if number of free pages drops below low water mark 871 * => if obj != NULL, obj must be locked (to put in hash) 872 * => if anon != NULL, anon must be locked (to put in anon) 873 * => only one of obj or anon can be non-null 874 * => caller must activate/deactivate page if it is not wired. 875 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL. 876 * => policy decision: it is more important to pull a page off of the 877 * appropriate priority free list than it is to get a zero'd or 878 * unknown contents page. This is because we live with the 879 * consequences of a bad free list decision for the entire 880 * lifetime of the page, e.g. if the page comes from memory that 881 * is slower to access. 882 */ 883 884 struct vm_page * 885 uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list) 886 struct uvm_object *obj; 887 voff_t off; 888 int flags; 889 struct vm_anon *anon; 890 int strat, free_list; 891 { 892 int lcv, try1, try2, s, zeroit = 0; 893 struct vm_page *pg; 894 struct pglist *freeq; 895 struct pgfreelist *pgfl; 896 boolean_t use_reserve; 897 898 #ifdef DIAGNOSTIC 899 /* sanity check */ 900 if (obj && anon) 901 panic("uvm_pagealloc: obj and anon != NULL"); 902 #endif 903 904 s = uvm_lock_fpageq(); /* lock free page queue */ 905 906 /* 907 * check to see if we need to generate some free pages waking 908 * the pagedaemon. 909 */ 910 911 if (uvmexp.free < uvmexp.freemin || (uvmexp.free < uvmexp.freetarg && 912 uvmexp.inactive < uvmexp.inactarg)) 913 wakeup(&uvm.pagedaemon); 914 915 /* 916 * fail if any of these conditions is true: 917 * [1] there really are no free pages, or 918 * [2] only kernel "reserved" pages remain and 919 * the page isn't being allocated to a kernel object. 920 * [3] only pagedaemon "reserved" pages remain and 921 * the requestor isn't the pagedaemon. 922 */ 923 924 use_reserve = (flags & UVM_PGA_USERESERVE) || 925 (obj && UVM_OBJ_IS_KERN_OBJECT(obj)); 926 if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) || 927 (uvmexp.free <= uvmexp.reserve_pagedaemon && 928 !(use_reserve && (curproc == uvm.pagedaemon_proc || 929 curproc == syncerproc)))) 930 goto fail; 931 932 #if PGFL_NQUEUES != 2 933 #error uvm_pagealloc_strat needs to be updated 934 #endif 935 936 /* 937 * If we want a zero'd page, try the ZEROS queue first, otherwise 938 * we try the UNKNOWN queue first. 939 */ 940 if (flags & UVM_PGA_ZERO) { 941 try1 = PGFL_ZEROS; 942 try2 = PGFL_UNKNOWN; 943 } else { 944 try1 = PGFL_UNKNOWN; 945 try2 = PGFL_ZEROS; 946 } 947 948 again: 949 switch (strat) { 950 case UVM_PGA_STRAT_NORMAL: 951 /* Check all freelists in descending priority order. */ 952 for (lcv = 0; lcv < VM_NFREELIST; lcv++) { 953 pgfl = &uvm.page_free[lcv]; 954 if ((pg = TAILQ_FIRST((freeq = 955 &pgfl->pgfl_queues[try1]))) != NULL || 956 (pg = TAILQ_FIRST((freeq = 957 &pgfl->pgfl_queues[try2]))) != NULL) 958 goto gotit; 959 } 960 961 /* No pages free! */ 962 goto fail; 963 964 case UVM_PGA_STRAT_ONLY: 965 case UVM_PGA_STRAT_FALLBACK: 966 /* Attempt to allocate from the specified free list. */ 967 #ifdef DIAGNOSTIC 968 if (free_list >= VM_NFREELIST || free_list < 0) 969 panic("uvm_pagealloc_strat: bad free list %d", 970 free_list); 971 #endif 972 pgfl = &uvm.page_free[free_list]; 973 if ((pg = TAILQ_FIRST((freeq = 974 &pgfl->pgfl_queues[try1]))) != NULL || 975 (pg = TAILQ_FIRST((freeq = 976 &pgfl->pgfl_queues[try2]))) != NULL) 977 goto gotit; 978 979 /* Fall back, if possible. */ 980 if (strat == UVM_PGA_STRAT_FALLBACK) { 981 strat = UVM_PGA_STRAT_NORMAL; 982 goto again; 983 } 984 985 /* No pages free! */ 986 goto fail; 987 988 default: 989 panic("uvm_pagealloc_strat: bad strat %d", strat); 990 /* NOTREACHED */ 991 } 992 993 gotit: 994 TAILQ_REMOVE(freeq, pg, pageq); 995 uvmexp.free--; 996 997 /* update zero'd page count */ 998 if (pg->flags & PG_ZERO) 999 uvmexp.zeropages--; 1000 1001 /* 1002 * update allocation statistics and remember if we have to 1003 * zero the page 1004 */ 1005 if (flags & UVM_PGA_ZERO) { 1006 if (pg->flags & PG_ZERO) { 1007 uvmexp.pga_zerohit++; 1008 zeroit = 0; 1009 } else { 1010 uvmexp.pga_zeromiss++; 1011 zeroit = 1; 1012 } 1013 } 1014 1015 uvm_unlock_fpageq(s); /* unlock free page queue */ 1016 1017 pg->offset = off; 1018 pg->uobject = obj; 1019 pg->uanon = anon; 1020 pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE; 1021 pg->version++; 1022 pg->wire_count = 0; 1023 pg->loan_count = 0; 1024 if (anon) { 1025 anon->u.an_page = pg; 1026 pg->pqflags = PQ_ANON; 1027 } else { 1028 if (obj) 1029 uvm_pageinsert(pg); 1030 pg->pqflags = 0; 1031 } 1032 #if defined(UVM_PAGE_TRKOWN) 1033 pg->owner_tag = NULL; 1034 #endif 1035 UVM_PAGE_OWN(pg, "new alloc"); 1036 1037 if (flags & UVM_PGA_ZERO) { 1038 /* 1039 * A zero'd page is not clean. If we got a page not already 1040 * zero'd, then we have to zero it ourselves. 1041 */ 1042 pg->flags &= ~PG_CLEAN; 1043 if (zeroit) 1044 pmap_zero_page(VM_PAGE_TO_PHYS(pg)); 1045 } 1046 1047 return(pg); 1048 1049 fail: 1050 uvm_unlock_fpageq(s); 1051 return (NULL); 1052 } 1053 1054 /* 1055 * uvm_pagealloc_contig: allocate contiguous memory. 1056 * 1057 * XXX - fix comment. 1058 */ 1059 1060 vaddr_t 1061 uvm_pagealloc_contig(size, low, high, alignment) 1062 vaddr_t size; 1063 vaddr_t low, high; 1064 vaddr_t alignment; 1065 { 1066 struct pglist pglist; 1067 struct vm_page *pg; 1068 vaddr_t addr, temp_addr; 1069 1070 size = round_page(size); 1071 1072 TAILQ_INIT(&pglist); 1073 if (uvm_pglistalloc(size, low, high, alignment, 0, 1074 &pglist, 1, FALSE)) 1075 return 0; 1076 addr = vm_map_min(kernel_map); 1077 if (uvm_map(kernel_map, &addr, size, NULL, UVM_UNKNOWN_OFFSET, 1078 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 1079 UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) { 1080 uvm_pglistfree(&pglist); 1081 return 0; 1082 } 1083 temp_addr = addr; 1084 for (pg = TAILQ_FIRST(&pglist); pg != NULL; 1085 pg = TAILQ_NEXT(pg, pageq)) { 1086 pg->uobject = uvm.kernel_object; 1087 pg->offset = temp_addr - vm_map_min(kernel_map); 1088 uvm_pageinsert(pg); 1089 uvm_pagewire(pg); 1090 pmap_kenter_pa(temp_addr, VM_PAGE_TO_PHYS(pg), 1091 VM_PROT_READ|VM_PROT_WRITE); 1092 temp_addr += PAGE_SIZE; 1093 } 1094 return addr; 1095 } 1096 1097 /* 1098 * uvm_pagerealloc: reallocate a page from one object to another 1099 * 1100 * => both objects must be locked 1101 */ 1102 1103 void 1104 uvm_pagerealloc(pg, newobj, newoff) 1105 struct vm_page *pg; 1106 struct uvm_object *newobj; 1107 voff_t newoff; 1108 { 1109 /* 1110 * remove it from the old object 1111 */ 1112 1113 if (pg->uobject) { 1114 uvm_pageremove(pg); 1115 } 1116 1117 /* 1118 * put it in the new object 1119 */ 1120 1121 if (newobj) { 1122 pg->uobject = newobj; 1123 pg->offset = newoff; 1124 pg->version++; 1125 uvm_pageinsert(pg); 1126 } 1127 1128 return; 1129 } 1130 1131 1132 /* 1133 * uvm_pagefree: free page 1134 * 1135 * => erase page's identity (i.e. remove from hash/object) 1136 * => put page on free list 1137 * => caller must lock owning object (either anon or uvm_object) 1138 * => caller must lock page queues 1139 * => assumes all valid mappings of pg are gone 1140 */ 1141 1142 void uvm_pagefree(pg) 1143 1144 struct vm_page *pg; 1145 1146 { 1147 int s; 1148 int saved_loan_count = pg->loan_count; 1149 1150 /* 1151 * if the page was an object page (and thus "TABLED"), remove it 1152 * from the object. 1153 */ 1154 1155 if (pg->flags & PG_TABLED) { 1156 1157 /* 1158 * if the object page is on loan we are going to drop ownership. 1159 * it is possible that an anon will take over as owner for this 1160 * page later on. the anon will want a !PG_CLEAN page so that 1161 * it knows it needs to allocate swap if it wants to page the 1162 * page out. 1163 */ 1164 1165 if (saved_loan_count) 1166 pg->flags &= ~PG_CLEAN; /* in case an anon takes over */ 1167 1168 uvm_pageremove(pg); 1169 1170 /* 1171 * if our page was on loan, then we just lost control over it 1172 * (in fact, if it was loaned to an anon, the anon may have 1173 * already taken over ownership of the page by now and thus 1174 * changed the loan_count [e.g. in uvmfault_anonget()]) we just 1175 * return (when the last loan is dropped, then the page can be 1176 * freed by whatever was holding the last loan). 1177 */ 1178 if (saved_loan_count) 1179 return; 1180 1181 } else if (saved_loan_count && (pg->pqflags & PQ_ANON)) { 1182 1183 /* 1184 * if our page is owned by an anon and is loaned out to the 1185 * kernel then we just want to drop ownership and return. 1186 * the kernel must free the page when all its loans clear ... 1187 * note that the kernel can't change the loan status of our 1188 * page as long as we are holding PQ lock. 1189 */ 1190 pg->pqflags &= ~PQ_ANON; 1191 pg->uanon = NULL; 1192 return; 1193 } 1194 1195 #ifdef DIAGNOSTIC 1196 if (saved_loan_count) { 1197 printf("uvm_pagefree: warning: freeing page with a loan " 1198 "count of %d\n", saved_loan_count); 1199 panic("uvm_pagefree: loan count"); 1200 } 1201 #endif 1202 1203 1204 /* 1205 * now remove the page from the queues 1206 */ 1207 1208 if (pg->pqflags & PQ_ACTIVE) { 1209 TAILQ_REMOVE(&uvm.page_active, pg, pageq); 1210 pg->pqflags &= ~PQ_ACTIVE; 1211 uvmexp.active--; 1212 } 1213 if (pg->pqflags & PQ_INACTIVE) { 1214 if (pg->pqflags & PQ_SWAPBACKED) 1215 TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq); 1216 else 1217 TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq); 1218 pg->pqflags &= ~PQ_INACTIVE; 1219 uvmexp.inactive--; 1220 } 1221 1222 /* 1223 * if the page was wired, unwire it now. 1224 */ 1225 if (pg->wire_count) { 1226 pg->wire_count = 0; 1227 uvmexp.wired--; 1228 } 1229 1230 /* 1231 * and put on free queue 1232 */ 1233 1234 pg->flags &= ~PG_ZERO; 1235 1236 s = uvm_lock_fpageq(); 1237 TAILQ_INSERT_TAIL(&uvm.page_free[ 1238 uvm_page_lookup_freelist(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq); 1239 pg->pqflags = PQ_FREE; 1240 #ifdef DEBUG 1241 pg->uobject = (void *)0xdeadbeef; 1242 pg->offset = 0xdeadbeef; 1243 pg->uanon = (void *)0xdeadbeef; 1244 #endif 1245 uvmexp.free++; 1246 1247 if (uvmexp.zeropages < UVM_PAGEZERO_TARGET) 1248 uvm.page_idle_zero = vm_page_zero_enable; 1249 1250 uvm_unlock_fpageq(s); 1251 } 1252 1253 #if defined(UVM_PAGE_TRKOWN) 1254 /* 1255 * uvm_page_own: set or release page ownership 1256 * 1257 * => this is a debugging function that keeps track of who sets PG_BUSY 1258 * and where they do it. it can be used to track down problems 1259 * such a process setting "PG_BUSY" and never releasing it. 1260 * => page's object [if any] must be locked 1261 * => if "tag" is NULL then we are releasing page ownership 1262 */ 1263 void 1264 uvm_page_own(pg, tag) 1265 struct vm_page *pg; 1266 char *tag; 1267 { 1268 /* gain ownership? */ 1269 if (tag) { 1270 if (pg->owner_tag) { 1271 printf("uvm_page_own: page %p already owned " 1272 "by proc %d [%s]\n", pg, 1273 pg->owner, pg->owner_tag); 1274 panic("uvm_page_own"); 1275 } 1276 pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1; 1277 pg->owner_tag = tag; 1278 return; 1279 } 1280 1281 /* drop ownership */ 1282 if (pg->owner_tag == NULL) { 1283 printf("uvm_page_own: dropping ownership of an non-owned " 1284 "page (%p)\n", pg); 1285 panic("uvm_page_own"); 1286 } 1287 pg->owner_tag = NULL; 1288 return; 1289 } 1290 #endif 1291 1292 /* 1293 * uvm_pageidlezero: zero free pages while the system is idle. 1294 * 1295 * => we do at least one iteration per call, if we are below the target. 1296 * => we loop until we either reach the target or whichqs indicates that 1297 * there is a process ready to run. 1298 */ 1299 void 1300 uvm_pageidlezero() 1301 { 1302 struct vm_page *pg; 1303 struct pgfreelist *pgfl; 1304 int free_list, s; 1305 1306 do { 1307 s = uvm_lock_fpageq(); 1308 1309 if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) { 1310 uvm.page_idle_zero = FALSE; 1311 uvm_unlock_fpageq(s); 1312 return; 1313 } 1314 1315 for (free_list = 0; free_list < VM_NFREELIST; free_list++) { 1316 pgfl = &uvm.page_free[free_list]; 1317 if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[ 1318 PGFL_UNKNOWN])) != NULL) 1319 break; 1320 } 1321 1322 if (pg == NULL) { 1323 /* 1324 * No non-zero'd pages; don't bother trying again 1325 * until we know we have non-zero'd pages free. 1326 */ 1327 uvm.page_idle_zero = FALSE; 1328 uvm_unlock_fpageq(s); 1329 return; 1330 } 1331 1332 TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq); 1333 uvmexp.free--; 1334 uvm_unlock_fpageq(s); 1335 1336 #ifdef PMAP_PAGEIDLEZERO 1337 PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg)); 1338 #else 1339 /* 1340 * XXX This will toast the cache unless the pmap_zero_page() 1341 * XXX implementation does uncached access. 1342 */ 1343 pmap_zero_page(VM_PAGE_TO_PHYS(pg)); 1344 #endif 1345 pg->flags |= PG_ZERO; 1346 1347 s = uvm_lock_fpageq(); 1348 TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq); 1349 uvmexp.free++; 1350 uvmexp.zeropages++; 1351 uvm_unlock_fpageq(s); 1352 } while (whichqs == 0); 1353 } 1354