/*	$NetBSD: uvm_page.c,v 1.15 1998/10/18 23:50:00 chs Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *	>>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */
75 */ 76 77 #include "opt_pmap_new.h" 78 79 #include <sys/param.h> 80 #include <sys/systm.h> 81 #include <sys/malloc.h> 82 #include <sys/proc.h> 83 84 #include <vm/vm.h> 85 #include <vm/vm_page.h> 86 #include <vm/vm_kern.h> 87 88 #define UVM_PAGE /* pull in uvm_page.h functions */ 89 #include <uvm/uvm.h> 90 91 /* 92 * global vars... XXXCDC: move to uvm. structure. 93 */ 94 95 /* 96 * physical memory config is stored in vm_physmem. 97 */ 98 99 struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */ 100 int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */ 101 102 /* 103 * local variables 104 */ 105 106 /* 107 * these variables record the values returned by vm_page_bootstrap, 108 * for debugging purposes. The implementation of uvm_pageboot_alloc 109 * and pmap_startup here also uses them internally. 110 */ 111 112 static vaddr_t virtual_space_start; 113 static vaddr_t virtual_space_end; 114 115 /* 116 * we use a hash table with only one bucket during bootup. we will 117 * later rehash (resize) the hash table once malloc() is ready. 118 * we static allocate the bootstrap bucket below... 119 */ 120 121 static struct pglist uvm_bootbucket; 122 123 /* 124 * local prototypes 125 */ 126 127 static void uvm_pageinsert __P((struct vm_page *)); 128 129 130 /* 131 * inline functions 132 */ 133 134 /* 135 * uvm_pageinsert: insert a page in the object and the hash table 136 * 137 * => caller must lock object 138 * => caller must lock page queues 139 * => call should have already set pg's object and offset pointers 140 * and bumped the version counter 141 */ 142 143 __inline static void 144 uvm_pageinsert(pg) 145 struct vm_page *pg; 146 { 147 struct pglist *buck; 148 int s; 149 150 #ifdef DIAGNOSTIC 151 if (pg->flags & PG_TABLED) 152 panic("uvm_pageinsert: already inserted"); 153 #endif 154 155 buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)]; 156 s = splimp(); 157 simple_lock(&uvm.hashlock); 158 TAILQ_INSERT_TAIL(buck, pg, hashq); /* put in hash */ 159 simple_unlock(&uvm.hashlock); 160 splx(s); 161 162 TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */ 163 pg->flags |= PG_TABLED; 164 pg->uobject->uo_npages++; 165 166 } 167 168 /* 169 * uvm_page_remove: remove page from object and hash 170 * 171 * => caller must lock object 172 * => caller must lock page queues 173 */ 174 175 void __inline 176 uvm_pageremove(pg) 177 struct vm_page *pg; 178 { 179 struct pglist *buck; 180 int s; 181 182 #ifdef DIAGNOSTIC 183 if ((pg->flags & (PG_FAULTING)) != 0) 184 panic("uvm_pageremove: page is faulting"); 185 #endif 186 187 if ((pg->flags & PG_TABLED) == 0) 188 return; /* XXX: log */ 189 190 buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)]; 191 s = splimp(); 192 simple_lock(&uvm.hashlock); 193 TAILQ_REMOVE(buck, pg, hashq); 194 simple_unlock(&uvm.hashlock); 195 splx(s); 196 197 /* object should be locked */ 198 TAILQ_REMOVE(&pg->uobject->memq, pg, listq); 199 200 pg->flags &= ~PG_TABLED; 201 pg->uobject->uo_npages--; 202 pg->uobject = NULL; 203 pg->version++; 204 205 } 206 207 /* 208 * uvm_page_init: init the page system. called from uvm_init(). 

	/*
	 * step 4: init the vm_page structures and put them in the correct
	 * place...
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {

		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %d page(s) in init\n",
			    n - pagecount);
			panic("uvm_page_init");  /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}
		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}
	/*
	 * step 5: pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * step 6: init pagedaemon lock
	 */

	simple_lock_init(&uvm.pagedaemon_lock);

	/*
	 * step 7: init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 1;
	uvmexp.reserve_kernel = 5;

	/*
	 * done!
	 */

}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 * => XXXCDC: move global vars.
 */

void
uvm_setpagesize()
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(size)
	vsize_t size;
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function init ourselves.  we detect this
	 * by checking virtual_space_start/end which are in the zero'd BSS area.
	 */

	if (virtual_space_start == virtual_space_end) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);
	}

	/*
	 * allocate virtual memory for this request
	 */

	addr = virtual_space_start;
	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/* XXX: should be wired, but some pmaps don't like that ... */
#if defined(PMAP_NEW)
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
#else
		pmap_enter(pmap_kernel(), vaddr, paddr,
		    VM_PROT_READ|VM_PROT_WRITE, FALSE);
#endif

	}
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.  if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

boolean_t
uvm_page_physget(paddrp)
	paddr_t *paddrp;
{
	int lcv, x;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (vm_physmem[lcv].pgs)
			panic("vm_page_physget: called _after_ bootstrap");

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
					panic("vm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
					panic("vm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("vm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}
#endif /* PMAP_STEAL_MEMORY */

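/*
 * illustrative only (hypothetical addresses): machine-dependent startup
 * code hands its RAM to uvm_page_physload() below before uvm_page_init()
 * runs.  a port with 32MB of RAM whose kernel image ends at
 * first_avail_pa might do something like:
 *
 *	uvm_page_physload(atop(0x00000000), atop(0x02000000),
 *	    atop(first_avail_pa), atop(0x02000000), VM_FREELIST_DEFAULT);
 *
 * all arguments are page frame numbers (hence the atop()s); only the
 * avail_start/avail_end range is released to the free page pool.
 */
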
/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(start, end, avail_start, avail_end, free_list)
	vaddr_t start, end, avail_start, avail_end;
	int free_list;
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("vm_page_physload: page size not set!");

	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d\n", free_list);

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("vm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%lx -> 0x%lx\n",
		    VM_PHYSSEG_MAX, start, end);
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("vm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;
		npages = end - start;  /* # of pages */
		MALLOC(pgs, struct vm_page *, sizeof(struct vm_page) * npages,
		    M_VMPAGE, M_NOWAIT);
		if (pgs == NULL) {
			printf("vm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* zero data, init phys_addr and free_list, and free pages */
		memset(pgs, 0, sizeof(struct vm_page) * npages);
		for (lcv = 0, paddr = ptoa(start) ;
		    lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("vm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	/*
	 * done!
	 */

	if (!preload)
		uvm_page_rehash();

	return;
}

/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */

void
uvm_page_rehash()
{
	int freepages, lcv, bucketcount, s, oldcount;
	struct pglist *newbuckets, *oldbuckets;
	struct vm_page *pg;

	/*
	 * compute number of pages that can go in the free pool
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages +=
		    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

	/*
	 * compute number of buckets needed for this number of pages
	 */

	bucketcount = 1;
	while (bucketcount < freepages)
		bucketcount = bucketcount * 2;

	/*
	 * malloc new buckets
	 */

	MALLOC(newbuckets, struct pglist *, sizeof(struct pglist) * bucketcount,
	    M_VMPBUCKET, M_NOWAIT);
	if (newbuckets == NULL) {
		printf("vm_page_physrehash: WARNING: could not grow page "
		    "hash table\n");
		return;
	}
	for (lcv = 0 ; lcv < bucketcount ; lcv++)
		TAILQ_INIT(&newbuckets[lcv]);

	/*
	 * now replace the old buckets with the new ones and rehash everything
	 */

	s = splimp();
	simple_lock(&uvm.hashlock);
	/* swap old for new ... */
	oldbuckets = uvm.page_hash;
	oldcount = uvm.page_nhash;
	uvm.page_hash = newbuckets;
	uvm.page_nhash = bucketcount;
	uvm.page_hashmask = bucketcount - 1;  /* power of 2 */

	/* ... and rehash */
	for (lcv = 0 ; lcv < oldcount ; lcv++) {
		while ((pg = oldbuckets[lcv].tqh_first) != NULL) {
			TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
			TAILQ_INSERT_TAIL(
			  &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
			  pg, hashq);
		}
	}
	simple_unlock(&uvm.hashlock);
	splx(s);

	/*
	 * free old bucket array if we malloc'd it previously
	 */

	if (oldbuckets != &uvm_bootbucket)
		FREE(oldbuckets, M_VMPBUCKET);

	/*
	 * done
	 */
	return;
}


#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump __P((void)); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump()
{
	int lcv;

	printf("rehash: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		printf("0x%lx->0x%lx [0x%lx->0x%lx]\n", vm_physmem[lcv].start,
		    vm_physmem[lcv].end, vm_physmem[lcv].avail_start,
		    vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
	printf("number of buckets = %d\n", uvm.page_nhash);
}
#endif

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 */

struct vm_page *
uvm_pagealloc_strat(obj, off, anon, strat, free_list)
	struct uvm_object *obj;
	vaddr_t off;
	struct vm_anon *anon;
	int strat, free_list;
{
	int lcv, s;
	struct vm_page *pg;
	struct pglist *freeq;

#ifdef DIAGNOSTIC
	/* sanity check */
	if (obj && anon)
		panic("uvm_pagealloc: obj and anon != NULL");
#endif

	s = splimp();

	uvm_lock_fpageq();		/* lock free page queue */

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

	if (uvmexp.free < uvmexp.freemin || (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg))
		thread_wakeup(&uvm.pagedaemon);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	if ((uvmexp.free <= uvmexp.reserve_kernel &&
	    !(obj && obj->uo_refs == UVM_OBJ_KERN)) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	    !(obj == uvmexp.kmem_object && curproc == uvm.pagedaemon_proc)))
		goto fail;

 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check all freelists in descending priority order. */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			freeq = &uvm.page_free[lcv];
			if ((pg = freeq->tqh_first) != NULL)
				goto gotit;
		}

		/* No pages free! */
		goto fail;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
#ifdef DIAGNOSTIC
		if (free_list >= VM_NFREELIST || free_list < 0)
			panic("uvm_pagealloc_strat: bad free list %d",
			    free_list);
#endif
		freeq = &uvm.page_free[free_list];
		if ((pg = freeq->tqh_first) != NULL)
			goto gotit;

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free! */
		goto fail;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	TAILQ_REMOVE(freeq, pg, pageq);
	uvmexp.free--;

	uvm_unlock_fpageq();		/* unlock free page queue */
	splx(s);

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	pg->version++;
	pg->wire_count = 0;
	pg->loan_count = 0;
	if (anon) {
		anon->u.an_page = pg;
		pg->pqflags = PQ_ANON;
	} else {
		if (obj)
			uvm_pageinsert(pg);
		pg->pqflags = 0;
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	return(pg);

 fail:
	uvm_unlock_fpageq();
	splx(s);
	return (NULL);
}

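/*
 * illustrative sketch (uobj/offset are hypothetical caller names): an
 * object-backed allocation from the default free lists, with uobj locked
 * by the caller as required above, might look like:
 *
 *	pg = uvm_pagealloc_strat(uobj, offset, NULL,
 *	    UVM_PGA_STRAT_NORMAL, VM_FREELIST_DEFAULT);
 *	if (pg == NULL)
 *		... unlock, let the pagedaemon catch up, and retry ...
 *
 * free_list is ignored for UVM_PGA_STRAT_NORMAL; UVM_PGA_STRAT_ONLY and
 * UVM_PGA_STRAT_FALLBACK are for callers that want a page from one
 * particular free list (FALLBACK retries the normal path if that list
 * is empty).
 */
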
/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(pg, newobj, newoff)
	struct vm_page *pg;
	struct uvm_object *newobj;
	vaddr_t newoff;
{
	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->version++;
		uvm_pageinsert(pg);
	}

	return;
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(pg)
	struct vm_page *pg;
{
	int s;
	int saved_loan_count = pg->loan_count;

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.  the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		if (saved_loan_count)
			pg->flags &= ~PG_CLEAN;	/* in case an anon takes over */

		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
		 * return (when the last loan is dropped, then the page can be
		 * freed by whatever was holding the last loan).
		 */
		if (saved_loan_count)
			return;

	} else if (saved_loan_count && (pg->pqflags & PQ_ANON)) {

		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		pg->pqflags &= ~PQ_ANON;
		pg->uanon = NULL;
		return;
	}

#ifdef DIAGNOSTIC
	if (saved_loan_count) {
		printf("uvm_pagefree: warning: freeing page with a loan "
		    "count of %d\n", saved_loan_count);
		panic("uvm_pagefree: loan count");
	}
#endif


	/*
	 * now remove the page from the queues
	 */

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		pg->pqflags &= ~PQ_ACTIVE;
		uvmexp.active--;
	}
	if (pg->pqflags & PQ_INACTIVE) {
		if (pg->pqflags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		pg->pqflags &= ~PQ_INACTIVE;
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */
	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}

	/*
	 * and put on free queue
	 */

	s = splimp();
	uvm_lock_fpageq();
	TAILQ_INSERT_TAIL(&uvm.page_free[uvm_page_lookup_freelist(pg)],
	    pg, pageq);
	pg->pqflags = PQ_FREE;
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	uvmexp.free++;
	uvm_unlock_fpageq();
	splx(s);
}

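/*
 * illustrative sketch of the free path implied by the requirements
 * above (pg is a hypothetical caller variable): the owning object or
 * anon is locked and all mappings of the page have been removed, then
 * the page queues are locked around the free itself:
 *
 *	uvm_lock_pageq();
 *	uvm_pagefree(pg);
 *	uvm_unlock_pageq();
 *
 * uvm_lock_pageq()/uvm_unlock_pageq() are assumed here to be the usual
 * wrappers around uvm.pageqlock.
 */
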
#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.  it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(pg, tag)
	struct vm_page *pg;
	char *tag;
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of an non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif