1 /* 2 * Copyright (c) 2003-2019 The DragonFly Project. All rights reserved. 3 * Copyright (c) 1991 Regents of the University of California. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * The Mach Operating System project at Carnegie-Mellon University. 8 * 9 * This code is derived from software contributed to The DragonFly Project 10 * by Matthew Dillon <dillon@backplane.com> 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91 37 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $ 38 */ 39 40 /* 41 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 42 * All rights reserved. 43 * 44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 45 * 46 * Permission to use, copy, modify and distribute this software and 47 * its documentation is hereby granted, provided that both the copyright 48 * notice and this permission notice appear in all copies of the 49 * software, derivative works or modified versions, and any portions 50 * thereof, and that both notices appear in supporting documentation. 51 * 52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 53 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 55 * 56 * Carnegie Mellon requests users of this software to return to 57 * 58 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 59 * School of Computer Science 60 * Carnegie Mellon University 61 * Pittsburgh PA 15213-3890 62 * 63 * any improvements or extensions that they make and grant Carnegie the 64 * rights to redistribute these changes. 65 */ 66 /* 67 * Resident memory management module. The module manipulates 'VM pages'. 68 * A VM page is the core building block for memory management. 
69 */ 70 71 #include <sys/param.h> 72 #include <sys/systm.h> 73 #include <sys/malloc.h> 74 #include <sys/proc.h> 75 #include <sys/vmmeter.h> 76 #include <sys/vnode.h> 77 #include <sys/kernel.h> 78 #include <sys/alist.h> 79 #include <sys/sysctl.h> 80 #include <sys/cpu_topology.h> 81 82 #include <vm/vm.h> 83 #include <vm/vm_param.h> 84 #include <sys/lock.h> 85 #include <vm/vm_kern.h> 86 #include <vm/pmap.h> 87 #include <vm/vm_map.h> 88 #include <vm/vm_object.h> 89 #include <vm/vm_page.h> 90 #include <vm/vm_pageout.h> 91 #include <vm/vm_pager.h> 92 #include <vm/vm_extern.h> 93 #include <vm/swap_pager.h> 94 95 #include <machine/inttypes.h> 96 #include <machine/md_var.h> 97 #include <machine/specialreg.h> 98 #include <machine/bus_dma.h> 99 100 #include <vm/vm_page2.h> 101 #include <sys/spinlock2.h> 102 103 /* 104 * Cache necessary elements in the hash table itself to avoid indirecting 105 * through random vm_page's when doing a lookup. The hash table is 106 * heuristical and it is ok for races to mess up any or all fields. 107 */ 108 struct vm_page_hash_elm { 109 vm_page_t m; 110 vm_object_t object; /* heuristical */ 111 uint32_t pindex; /* heuristical 32-bit field */ 112 int ticks; 113 }; 114 115 #define VM_PAGE_HASH_SET 4 /* power of 2, set-assoc */ 116 #define VM_PAGE_HASH_MAX (8 * 1024 * 1024) /* power of 2, max size */ 117 118 /* 119 * SET - Minimum required set associative size, must be a power of 2. We 120 * want this to match or exceed the set-associativeness of the cpu, 121 * up to a reasonable limit (we will use 16). 122 */ 123 __read_mostly static int set_assoc_mask = 16 - 1; 124 125 static void vm_page_queue_init(void); 126 static void vm_page_free_wakeup(void); 127 static vm_page_t vm_page_select_cache(u_short pg_color); 128 static vm_page_t _vm_page_list_find_wide(int basequeue, int index, int *lastp); 129 static vm_page_t _vm_page_list_find2_wide(int bq1, int bq2, int index, 130 int *lastp1, int *lastp); 131 static void _vm_page_deactivate_locked(vm_page_t m, int athead); 132 static void vm_numa_add_topology_mem(cpu_node_t *cpup, int physid, long bytes); 133 134 /* 135 * Array of tailq lists 136 */ 137 struct vpgqueues vm_page_queues[PQ_COUNT]; 138 139 static volatile int vm_pages_waiting; 140 static struct alist vm_contig_alist; 141 static struct almeta vm_contig_ameta[ALIST_RECORDS_65536]; 142 static struct spinlock vm_contig_spin = SPINLOCK_INITIALIZER(&vm_contig_spin, "vm_contig_spin"); 143 144 __read_mostly static int vm_page_hash_vnode_only; 145 __read_mostly static int vm_page_hash_size; 146 __read_mostly static struct vm_page_hash_elm *vm_page_hash; 147 148 static u_long vm_dma_reserved = 0; 149 TUNABLE_ULONG("vm.dma_reserved", &vm_dma_reserved); 150 SYSCTL_ULONG(_vm, OID_AUTO, dma_reserved, CTLFLAG_RD, &vm_dma_reserved, 0, 151 "Memory reserved for DMA"); 152 SYSCTL_UINT(_vm, OID_AUTO, dma_free_pages, CTLFLAG_RD, 153 &vm_contig_alist.bl_free, 0, "Memory reserved for DMA"); 154 155 SYSCTL_INT(_vm, OID_AUTO, page_hash_vnode_only, CTLFLAG_RW, 156 &vm_page_hash_vnode_only, 0, "Only hash vnode pages"); 157 #if 0 158 static int vm_page_hash_debug; 159 SYSCTL_INT(_vm, OID_AUTO, page_hash_debug, CTLFLAG_RW, 160 &vm_page_hash_debug, 0, "Only hash vnode pages"); 161 #endif 162 163 static int vm_contig_verbose = 0; 164 TUNABLE_INT("vm.contig_verbose", &vm_contig_verbose); 165 166 RB_GENERATE2(vm_page_rb_tree, vm_page, rb_entry, rb_vm_page_compare, 167 vm_pindex_t, pindex); 168 169 static void 170 vm_page_queue_init(void) 171 { 172 int i; 173 174 for (i = 0; i < PQ_L2_SIZE; i++) 175 
vm_page_queues[PQ_FREE+i].cnt_offset = 176 offsetof(struct vmstats, v_free_count); 177 for (i = 0; i < PQ_L2_SIZE; i++) 178 vm_page_queues[PQ_CACHE+i].cnt_offset = 179 offsetof(struct vmstats, v_cache_count); 180 for (i = 0; i < PQ_L2_SIZE; i++) 181 vm_page_queues[PQ_INACTIVE+i].cnt_offset = 182 offsetof(struct vmstats, v_inactive_count); 183 for (i = 0; i < PQ_L2_SIZE; i++) 184 vm_page_queues[PQ_ACTIVE+i].cnt_offset = 185 offsetof(struct vmstats, v_active_count); 186 for (i = 0; i < PQ_L2_SIZE; i++) 187 vm_page_queues[PQ_HOLD+i].cnt_offset = 188 offsetof(struct vmstats, v_active_count); 189 /* PQ_NONE has no queue */ 190 191 for (i = 0; i < PQ_COUNT; i++) { 192 vm_page_queues[i].lastq = -1; 193 TAILQ_INIT(&vm_page_queues[i].pl); 194 spin_init(&vm_page_queues[i].spin, "vm_page_queue_init"); 195 } 196 } 197 198 /* 199 * note: place in initialized data section? Is this necessary? 200 */ 201 vm_pindex_t first_page = 0; 202 vm_pindex_t vm_page_array_size = 0; 203 vm_page_t vm_page_array = NULL; 204 vm_paddr_t vm_low_phys_reserved; 205 206 /* 207 * (low level boot) 208 * 209 * Sets the page size, perhaps based upon the memory size. 210 * Must be called before any use of page-size dependent functions. 211 */ 212 void 213 vm_set_page_size(void) 214 { 215 if (vmstats.v_page_size == 0) 216 vmstats.v_page_size = PAGE_SIZE; 217 if (((vmstats.v_page_size - 1) & vmstats.v_page_size) != 0) 218 panic("vm_set_page_size: page size not a power of two"); 219 } 220 221 /* 222 * (low level boot) 223 * 224 * Add a new page to the freelist for use by the system. New pages 225 * are added to both the head and tail of the associated free page 226 * queue in a bottom-up fashion, so both zero'd and non-zero'd page 227 * requests pull 'recent' adds (higher physical addresses) first. 228 * 229 * Beware that the page zeroing daemon will also be running soon after 230 * boot, moving pages from the head to the tail of the PQ_FREE queues. 231 * 232 * Must be called in a critical section. 233 */ 234 static void 235 vm_add_new_page(vm_paddr_t pa, int *badcountp) 236 { 237 struct vpgqueues *vpq; 238 vm_page_t m; 239 240 m = PHYS_TO_VM_PAGE(pa); 241 242 /* 243 * Make sure it isn't a duplicate (due to BIOS page range overlaps, 244 * which we consider bugs... but don't crash). Note that m->phys_addr 245 * is pre-initialized, so use m->queue as a check. 246 */ 247 if (m->queue) { 248 if (*badcountp < 10) { 249 kprintf("vm_add_new_page: duplicate pa %016jx\n", 250 (intmax_t)pa); 251 ++*badcountp; 252 } else if (*badcountp == 10) { 253 kprintf("vm_add_new_page: duplicate pa (many more)\n"); 254 ++*badcountp; 255 } 256 return; 257 } 258 259 m->phys_addr = pa; 260 m->flags = 0; 261 m->pat_mode = PAT_WRITE_BACK; 262 m->pc = (pa >> PAGE_SHIFT); 263 264 /* 265 * Twist for cpu localization in addition to page coloring, so 266 * different cpus selecting by m->queue get different page colors. 267 */ 268 m->pc ^= ((pa >> PAGE_SHIFT) / PQ_L2_SIZE); 269 m->pc ^= ((pa >> PAGE_SHIFT) / (PQ_L2_SIZE * PQ_L2_SIZE)); 270 m->pc &= PQ_L2_MASK; 271 272 /* 273 * Reserve a certain number of contiguous low memory pages for 274 * contigmalloc() to use. 275 * 276 * Even though these pages represent real ram and can be 277 * reverse-mapped, we set PG_FICTITIOUS and PG_UNQUEUED 278 * because their use is special-cased. 279 * 280 * WARNING! Once PG_FICTITIOUS is set, vm_page_wire*() 281 * and vm_page_unwire*() calls have no effect. 
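 */

/*
 * The block below is an illustrative sketch only, kept under #if 0 so
 * it is never compiled.  It restates the color 'twist' applied to
 * m->pc above as a stand-alone helper (the helper name is
 * hypothetical; PAGE_SHIFT, PQ_L2_SIZE and PQ_L2_MASK are the real
 * constants).
 */
#if 0
static unsigned int
example_page_color(vm_paddr_t pa)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	unsigned int pc;

	pc = (unsigned int)pfn;				/* base color */
	pc ^= (unsigned int)(pfn / PQ_L2_SIZE);		/* cpu-localization twist */
	pc ^= (unsigned int)(pfn / (PQ_L2_SIZE * PQ_L2_SIZE));

	return (pc & PQ_L2_MASK);
}
#endif
/*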
282 */ 283 if (pa < vm_low_phys_reserved) { 284 atomic_add_long(&vmstats.v_page_count, 1); 285 atomic_add_long(&vmstats.v_dma_pages, 1); 286 m->flags |= PG_FICTITIOUS | PG_UNQUEUED; 287 m->queue = PQ_NONE; 288 m->wire_count = 1; 289 atomic_add_long(&vmstats.v_wire_count, 1); 290 alist_free(&vm_contig_alist, pa >> PAGE_SHIFT, 1); 291 return; 292 } 293 294 /* 295 * General page 296 */ 297 m->queue = m->pc + PQ_FREE; 298 KKASSERT(m->dirty == 0); 299 300 atomic_add_long(&vmstats.v_page_count, 1); 301 atomic_add_long(&vmstats.v_free_count, 1); 302 vpq = &vm_page_queues[m->queue]; 303 TAILQ_INSERT_HEAD(&vpq->pl, m, pageq); 304 ++vpq->lcnt; 305 } 306 307 /* 308 * (low level boot) 309 * 310 * Initializes the resident memory module. 311 * 312 * Preallocates memory for critical VM structures and arrays prior to 313 * kernel_map becoming available. 314 * 315 * Memory is allocated from (virtual2_start, virtual2_end) if available, 316 * otherwise memory is allocated from (virtual_start, virtual_end). 317 * 318 * On x86-64 (virtual_start, virtual_end) is only 2GB and may not be 319 * large enough to hold vm_page_array & other structures for machines with 320 * large amounts of ram, so we want to use virtual2* when available. 321 */ 322 void 323 vm_page_startup(void) 324 { 325 vm_offset_t vaddr = virtual2_start ? virtual2_start : virtual_start; 326 vm_offset_t mapped; 327 vm_pindex_t npages; 328 vm_paddr_t page_range; 329 vm_paddr_t new_end; 330 int i; 331 vm_paddr_t pa; 332 vm_paddr_t last_pa; 333 vm_paddr_t end; 334 vm_paddr_t biggestone, biggestsize; 335 vm_paddr_t total; 336 vm_page_t m; 337 int badcount; 338 339 total = 0; 340 badcount = 0; 341 biggestsize = 0; 342 biggestone = 0; 343 vaddr = round_page(vaddr); 344 345 /* 346 * Make sure ranges are page-aligned. 347 */ 348 for (i = 0; phys_avail[i].phys_end; ++i) { 349 phys_avail[i].phys_beg = round_page64(phys_avail[i].phys_beg); 350 phys_avail[i].phys_end = trunc_page64(phys_avail[i].phys_end); 351 if (phys_avail[i].phys_end < phys_avail[i].phys_beg) 352 phys_avail[i].phys_end = phys_avail[i].phys_beg; 353 } 354 355 /* 356 * Locate largest block 357 */ 358 for (i = 0; phys_avail[i].phys_end; ++i) { 359 vm_paddr_t size = phys_avail[i].phys_end - 360 phys_avail[i].phys_beg; 361 362 if (size > biggestsize) { 363 biggestone = i; 364 biggestsize = size; 365 } 366 total += size; 367 } 368 --i; /* adjust to last entry for use down below */ 369 370 end = phys_avail[biggestone].phys_end; 371 end = trunc_page(end); 372 373 /* 374 * Initialize the queue headers for the free queue, the active queue 375 * and the inactive queue. 376 */ 377 vm_page_queue_init(); 378 379 #if !defined(_KERNEL_VIRTUAL) 380 /* 381 * VKERNELs don't support minidumps and as such don't need 382 * vm_page_dump 383 * 384 * Allocate a bitmap to indicate that a random physical page 385 * needs to be included in a minidump. 386 * 387 * The amd64 port needs this to indicate which direct map pages 388 * need to be dumped, via calls to dump_add_page()/dump_drop_page(). 389 * 390 * However, x86 still needs this workspace internally within the 391 * minidump code. In theory, they are not needed on x86, but are 392 * included should the sf_buf code decide to use them. 
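 */

/*
 * The block below is an illustrative sketch only, kept under #if 0 so
 * it is never compiled.  It spells out the dump-bitmap sizing applied
 * just below: one bit per page, rounded up to whole bytes and then to
 * a whole page.  The helper name is hypothetical; the real code uses
 * roundup2() and round_page() for the same arithmetic.
 */
#if 0
static unsigned long
example_dump_bitmap_bytes(unsigned long page_range)
{
	unsigned long bytes;

	bytes = (page_range + NBBY - 1) / NBBY;		/* bits -> bytes */
	bytes = (bytes + PAGE_SIZE - 1) &		/* bytes -> pages */
		~(unsigned long)(PAGE_SIZE - 1);
	return (bytes);
}
#endif
/*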
393 */ 394 page_range = phys_avail[i].phys_end / PAGE_SIZE; 395 vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY); 396 end -= vm_page_dump_size; 397 vm_page_dump = (void *)pmap_map(&vaddr, end, end + vm_page_dump_size, 398 VM_PROT_READ | VM_PROT_WRITE); 399 bzero((void *)vm_page_dump, vm_page_dump_size); 400 #endif 401 /* 402 * Compute the number of pages of memory that will be available for 403 * use (taking into account the overhead of a page structure per 404 * page). 405 */ 406 first_page = phys_avail[0].phys_beg / PAGE_SIZE; 407 page_range = phys_avail[i].phys_end / PAGE_SIZE - first_page; 408 npages = (total - (page_range * sizeof(struct vm_page))) / PAGE_SIZE; 409 410 #ifndef _KERNEL_VIRTUAL 411 /* 412 * (only applies to real kernels) 413 * 414 * Reserve a large amount of low memory for potential 32-bit DMA 415 * space allocations. Once device initialization is complete we 416 * release most of it, but keep (vm_dma_reserved) memory reserved 417 * for later use. Typically for X / graphics. Through trial and 418 * error we find that GPUs usually requires ~60-100MB or so. 419 * 420 * By default, 128M is left in reserve on machines with 2G+ of ram. 421 */ 422 vm_low_phys_reserved = (vm_paddr_t)65536 << PAGE_SHIFT; 423 if (vm_low_phys_reserved > total / 4) 424 vm_low_phys_reserved = total / 4; 425 if (vm_dma_reserved == 0) { 426 vm_dma_reserved = 128 * 1024 * 1024; /* 128MB */ 427 if (vm_dma_reserved > total / 16) 428 vm_dma_reserved = total / 16; 429 } 430 #endif 431 alist_init(&vm_contig_alist, 65536, vm_contig_ameta, 432 ALIST_RECORDS_65536); 433 434 /* 435 * Initialize the mem entry structures now, and put them in the free 436 * queue. 437 */ 438 if (bootverbose && ctob(physmem) >= 400LL*1024*1024*1024) 439 kprintf("initializing vm_page_array "); 440 new_end = trunc_page(end - page_range * sizeof(struct vm_page)); 441 mapped = pmap_map(&vaddr, new_end, end, VM_PROT_READ | VM_PROT_WRITE); 442 vm_page_array = (vm_page_t)mapped; 443 444 #if defined(__x86_64__) && !defined(_KERNEL_VIRTUAL) 445 /* 446 * since pmap_map on amd64 returns stuff out of a direct-map region, 447 * we have to manually add these pages to the minidump tracking so 448 * that they can be dumped, including the vm_page_array. 449 */ 450 for (pa = new_end; 451 pa < phys_avail[biggestone].phys_end; 452 pa += PAGE_SIZE) { 453 dump_add_page(pa); 454 } 455 #endif 456 457 /* 458 * Clear all of the page structures, run basic initialization so 459 * PHYS_TO_VM_PAGE() operates properly even on pages not in the 460 * map. 461 */ 462 bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page)); 463 vm_page_array_size = page_range; 464 if (bootverbose && ctob(physmem) >= 400LL*1024*1024*1024) 465 kprintf("size = 0x%zx\n", vm_page_array_size); 466 467 m = &vm_page_array[0]; 468 pa = ptoa(first_page); 469 for (i = 0; i < page_range; ++i) { 470 spin_init(&m->spin, "vm_page"); 471 m->phys_addr = pa; 472 pa += PAGE_SIZE; 473 ++m; 474 } 475 476 /* 477 * Construct the free queue(s) in ascending order (by physical 478 * address) so that the first 16MB of physical memory is allocated 479 * last rather than first. On large-memory machines, this avoids 480 * the exhaustion of low physical memory before isa_dma_init has run. 
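 */

/*
 * The block below is an illustrative sketch only, kept under #if 0 so
 * it is never compiled.  It summarizes how the two boot-time reserves
 * set up earlier in this function are clamped; the helper name is
 * hypothetical and the constants mirror the code above.
 */
#if 0
static void
example_dma_reserves(vm_paddr_t total, vm_paddr_t *lowresp, u_long *dmaresp)
{
	/* up to 65536 low pages, but never more than 1/4 of ram */
	*lowresp = (vm_paddr_t)65536 << PAGE_SHIFT;
	if (*lowresp > total / 4)
		*lowresp = total / 4;

	/* keep 128MB after drivers initialize, capped at 1/16 of ram */
	*dmaresp = 128UL * 1024 * 1024;
	if (*dmaresp > total / 16)
		*dmaresp = total / 16;
}
#endif
/*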
481 */ 482 vmstats.v_page_count = 0; 483 vmstats.v_free_count = 0; 484 for (i = 0; phys_avail[i].phys_end && npages > 0; ++i) { 485 pa = phys_avail[i].phys_beg; 486 if (i == biggestone) 487 last_pa = new_end; 488 else 489 last_pa = phys_avail[i].phys_end; 490 while (pa < last_pa && npages-- > 0) { 491 vm_add_new_page(pa, &badcount); 492 pa += PAGE_SIZE; 493 } 494 } 495 if (virtual2_start) 496 virtual2_start = vaddr; 497 else 498 virtual_start = vaddr; 499 mycpu->gd_vmstats = vmstats; 500 } 501 502 /* 503 * (called from early boot only) 504 * 505 * Reorganize VM pages based on numa data. May be called as many times as 506 * necessary. Will reorganize the vm_page_t page color and related queue(s) 507 * to allow vm_page_alloc() to choose pages based on socket affinity. 508 * 509 * NOTE: This function is only called while we are still in UP mode, so 510 * we only need a critical section to protect the queues (which 511 * saves a lot of time, there are likely a ton of pages). 512 */ 513 void 514 vm_numa_organize(vm_paddr_t ran_beg, vm_paddr_t bytes, int physid) 515 { 516 vm_paddr_t scan_beg; 517 vm_paddr_t scan_end; 518 vm_paddr_t ran_end; 519 struct vpgqueues *vpq; 520 vm_page_t m; 521 vm_page_t mend; 522 int socket_mod; 523 int socket_value; 524 int i; 525 526 /* 527 * Check if no physical information, or there was only one socket 528 * (so don't waste time doing nothing!). 529 */ 530 if (cpu_topology_phys_ids <= 1 || 531 cpu_topology_core_ids == 0) { 532 return; 533 } 534 535 /* 536 * Setup for our iteration. Note that ACPI may iterate CPU 537 * sockets starting at 0 or 1 or some other number. The 538 * cpu_topology code mod's it against the socket count. 539 */ 540 ran_end = ran_beg + bytes; 541 542 socket_mod = PQ_L2_SIZE / cpu_topology_phys_ids; 543 socket_value = (physid % cpu_topology_phys_ids) * socket_mod; 544 mend = &vm_page_array[vm_page_array_size]; 545 546 crit_enter(); 547 548 /* 549 * Adjust cpu_topology's phys_mem parameter 550 */ 551 if (root_cpu_node) 552 vm_numa_add_topology_mem(root_cpu_node, physid, (long)bytes); 553 554 /* 555 * Adjust vm_page->pc and requeue all affected pages. The 556 * allocator will then be able to localize memory allocations 557 * to some degree. 558 */ 559 for (i = 0; phys_avail[i].phys_end; ++i) { 560 scan_beg = phys_avail[i].phys_beg; 561 scan_end = phys_avail[i].phys_end; 562 if (scan_end <= ran_beg) 563 continue; 564 if (scan_beg >= ran_end) 565 continue; 566 if (scan_beg < ran_beg) 567 scan_beg = ran_beg; 568 if (scan_end > ran_end) 569 scan_end = ran_end; 570 if (atop(scan_end) > first_page + vm_page_array_size) 571 scan_end = ptoa(first_page + vm_page_array_size); 572 573 m = PHYS_TO_VM_PAGE(scan_beg); 574 while (scan_beg < scan_end) { 575 KKASSERT(m < mend); 576 if (m->queue != PQ_NONE) { 577 vpq = &vm_page_queues[m->queue]; 578 TAILQ_REMOVE(&vpq->pl, m, pageq); 579 --vpq->lcnt; 580 /* queue doesn't change, no need to adj cnt */ 581 m->queue -= m->pc; 582 m->pc %= socket_mod; 583 m->pc += socket_value; 584 m->pc &= PQ_L2_MASK; 585 m->queue += m->pc; 586 vpq = &vm_page_queues[m->queue]; 587 TAILQ_INSERT_HEAD(&vpq->pl, m, pageq); 588 ++vpq->lcnt; 589 /* queue doesn't change, no need to adj cnt */ 590 } else { 591 m->pc %= socket_mod; 592 m->pc += socket_value; 593 m->pc &= PQ_L2_MASK; 594 } 595 scan_beg += PAGE_SIZE; 596 ++m; 597 } 598 } 599 600 crit_exit(); 601 } 602 603 /* 604 * (called from early boot only) 605 * 606 * Don't allow the NUMA organization to leave vm_page_queues[] nodes 607 * completely empty for a logical cpu. 
Doing so would force allocations
 * on that cpu to always borrow from a nearby cpu, create unnecessary
 * contention, and cause vm_page_alloc() to iterate more queues and run more
 * slowly.
 *
 * This situation can occur when memory sticks are not entirely populated,
 * populated at different densities, or in naturally asymmetric systems
 * such as the 2990WX.  There could very well be many vm_page_queues[]
 * entries with *NO* pages assigned to them.
 *
 * Fixing this up ensures that each logical CPU has roughly the same
 * sized memory pool, and more importantly ensures that logical CPUs
 * do not wind up with an empty memory pool.
 *
 * At the moment we just iterate the other queues and borrow pages,
 * moving them into the queues for cpus with severe deficits even though
 * the memory might not be local to those cpus.  I am not doing this in
 * a 'smart' way, it's effectively UMA style (sorta, since it's page-by-page
 * whereas real UMA typically exchanges address bits 8-10 with high address
 * bits).  But it works extremely well and gives us fairly good deterministic
 * results on the cpu cores associated with these secondary nodes.
 */
void
vm_numa_organize_finalize(void)
{
	struct vpgqueues *vpq;
	vm_page_t m;
	long lcnt_lo;
	long lcnt_hi;
	int iter;
	int i;
	int scale_lim;

	crit_enter();

	/*
	 * Machines might not use an exact power of 2 for phys_ids,
	 * core_ids, ht_ids, etc.  This can slightly reduce the actual
	 * range of indices in vm_page_queues[] that are nominally used.
	 */
	if (cpu_topology_ht_ids) {
		scale_lim = PQ_L2_SIZE / cpu_topology_phys_ids;
		scale_lim = scale_lim / cpu_topology_core_ids;
		scale_lim = scale_lim / cpu_topology_ht_ids;
		scale_lim = scale_lim * cpu_topology_ht_ids;
		scale_lim = scale_lim * cpu_topology_core_ids;
		scale_lim = scale_lim * cpu_topology_phys_ids;
	} else {
		scale_lim = PQ_L2_SIZE;
	}

	/*
	 * Calculate an average, set hysteresis for balancing from
	 * 10% below the average to the average.
	 */
	lcnt_hi = 0;
	for (i = 0; i < scale_lim; ++i) {
		lcnt_hi += vm_page_queues[i].lcnt;
	}
	lcnt_hi /= scale_lim;
	lcnt_lo = lcnt_hi - lcnt_hi / 10;

	kprintf("vm_page: avg %ld pages per queue, %d queues\n",
		lcnt_hi, scale_lim);

	iter = 0;
	for (i = 0; i < scale_lim; ++i) {
		vpq = &vm_page_queues[PQ_FREE + i];
		while (vpq->lcnt < lcnt_lo) {
			struct vpgqueues *vptmp;

			iter = (iter + 1) & PQ_L2_MASK;
			vptmp = &vm_page_queues[PQ_FREE + iter];
			if (vptmp->lcnt < lcnt_hi)
				continue;
			m = TAILQ_FIRST(&vptmp->pl);
			KKASSERT(m->queue == PQ_FREE + iter);
			TAILQ_REMOVE(&vptmp->pl, m, pageq);
			--vptmp->lcnt;
			/* queue doesn't change, no need to adj cnt */
			m->queue -= m->pc;
			m->pc = i;
			m->queue += m->pc;
			TAILQ_INSERT_HEAD(&vpq->pl, m, pageq);
			++vpq->lcnt;
		}
	}
	crit_exit();
}

static
void
vm_numa_add_topology_mem(cpu_node_t *cpup, int physid, long bytes)
{
	int cpuid;
	int i;

	switch(cpup->type) {
	case PACKAGE_LEVEL:
		cpup->phys_mem += bytes;
		break;
	case CHIP_LEVEL:
		/*
		 * All members should have the same chipid, so we only need
		 * to pull out one member.
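 */

/*
 * The block below is an illustrative sketch only, kept under #if 0 so
 * it is never compiled.  It isolates the hysteresis band computed by
 * vm_numa_organize_finalize() above: queues below 'lo' borrow pages,
 * and only queues at or above the average ('hi') donate them.  The
 * helper name is hypothetical.
 */
#if 0
static void
example_balance_band(const long *lcnt, int nqueues, long *lop, long *hip)
{
	long avg = 0;
	int i;

	for (i = 0; i < nqueues; ++i)
		avg += lcnt[i];
	avg /= nqueues;

	*hip = avg;		/* donors must hold at least the average */
	*lop = avg - avg / 10;	/* refill until within 10% of the average */
}
#endif
/*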
712 */ 713 if (CPUMASK_TESTNZERO(cpup->members)) { 714 cpuid = BSFCPUMASK(cpup->members); 715 if (physid == 716 get_chip_ID_from_APICID(CPUID_TO_APICID(cpuid))) { 717 cpup->phys_mem += bytes; 718 } 719 } 720 break; 721 case CORE_LEVEL: 722 case THREAD_LEVEL: 723 /* 724 * Just inherit from the parent node 725 */ 726 cpup->phys_mem = cpup->parent_node->phys_mem; 727 break; 728 } 729 for (i = 0; i < MAXCPU && cpup->child_node[i]; ++i) 730 vm_numa_add_topology_mem(cpup->child_node[i], physid, bytes); 731 } 732 733 /* 734 * We tended to reserve a ton of memory for contigmalloc(). Now that most 735 * drivers have initialized we want to return most the remaining free 736 * reserve back to the VM page queues so they can be used for normal 737 * allocations. 738 * 739 * We leave vm_dma_reserved bytes worth of free pages in the reserve pool. 740 */ 741 static void 742 vm_page_startup_finish(void *dummy __unused) 743 { 744 alist_blk_t blk; 745 alist_blk_t rblk; 746 alist_blk_t count; 747 alist_blk_t xcount; 748 alist_blk_t bfree; 749 vm_page_t m; 750 struct vm_page_hash_elm *mp; 751 int mask; 752 753 /* 754 * Set the set_assoc_mask based on the fitted number of CPUs. 755 * This is a mask, so we subject 1. 756 * 757 * w/PQ_L2_SIZE = 1024, Don't let the associativity drop below 8. 758 * So if we have 256 CPUs, two hyper-threads will wind up sharing. 759 * 760 * The maximum is PQ_L2_SIZE. However, we limit the starting 761 * maximum to 16 (mask = 15) in order to improve the cache locality 762 * of related kernel data structures. 763 */ 764 mask = PQ_L2_SIZE / ncpus_fit - 1; 765 if (mask < 7) /* minimum is 8-way w/256 CPU threads */ 766 mask = 7; 767 if (mask < 15) 768 mask = 15; 769 cpu_ccfence(); 770 set_assoc_mask = mask; 771 772 /* 773 * Return part of the initial reserve back to the system 774 */ 775 spin_lock(&vm_contig_spin); 776 for (;;) { 777 bfree = alist_free_info(&vm_contig_alist, &blk, &count); 778 if (bfree <= vm_dma_reserved / PAGE_SIZE) 779 break; 780 if (count == 0) 781 break; 782 783 /* 784 * Figure out how much of the initial reserve we have to 785 * free in order to reach our target. 786 */ 787 bfree -= vm_dma_reserved / PAGE_SIZE; 788 if (count > bfree) { 789 blk += count - bfree; 790 count = bfree; 791 } 792 793 /* 794 * Calculate the nearest power of 2 <= count. 795 */ 796 for (xcount = 1; xcount <= count; xcount <<= 1) 797 ; 798 xcount >>= 1; 799 blk += count - xcount; 800 count = xcount; 801 802 /* 803 * Allocate the pages from the alist, then free them to 804 * the normal VM page queues. 805 * 806 * Pages allocated from the alist are wired. We have to 807 * busy, unwire, and free them. We must also adjust 808 * vm_low_phys_reserved before freeing any pages to prevent 809 * confusion. 810 */ 811 rblk = alist_alloc(&vm_contig_alist, blk, count); 812 if (rblk != blk) { 813 kprintf("vm_page_startup_finish: Unable to return " 814 "dma space @0x%08x/%d -> 0x%08x\n", 815 blk, count, rblk); 816 break; 817 } 818 atomic_add_long(&vmstats.v_dma_pages, -(long)count); 819 spin_unlock(&vm_contig_spin); 820 821 m = PHYS_TO_VM_PAGE((vm_paddr_t)blk << PAGE_SHIFT); 822 vm_low_phys_reserved = VM_PAGE_TO_PHYS(m); 823 while (count) { 824 vm_page_flag_clear(m, PG_FICTITIOUS | PG_UNQUEUED); 825 vm_page_busy_wait(m, FALSE, "cpgfr"); 826 vm_page_unwire(m, 0); 827 vm_page_free(m); 828 --count; 829 ++m; 830 } 831 spin_lock(&vm_contig_spin); 832 } 833 spin_unlock(&vm_contig_spin); 834 835 /* 836 * Print out how much DMA space drivers have already allocated and 837 * how much is left over. 
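 */

/*
 * The block below is an illustrative sketch only, kept under #if 0 so
 * it is never compiled.  It isolates the power-of-2 truncation used in
 * the release loop above so the alist allocation stays naturally
 * aligned.  The helper name is hypothetical.
 */
#if 0
static alist_blk_t
example_pow2_trunc(alist_blk_t count)
{
	alist_blk_t x;

	for (x = 1; x <= count; x <<= 1)
		;
	return (x >> 1);	/* e.g. 1000 -> 512, 4096 -> 4096 */
}
#endif
/*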
838 */ 839 kprintf("DMA space used: %jdk, remaining available: %jdk\n", 840 (intmax_t)(vmstats.v_dma_pages - vm_contig_alist.bl_free) * 841 (PAGE_SIZE / 1024), 842 (intmax_t)vm_contig_alist.bl_free * (PAGE_SIZE / 1024)); 843 844 /* 845 * Power of 2 846 */ 847 vm_page_hash_size = 4096; 848 while (vm_page_hash_size < (vm_page_array_size / 16)) 849 vm_page_hash_size <<= 1; 850 if (vm_page_hash_size > VM_PAGE_HASH_MAX) 851 vm_page_hash_size = VM_PAGE_HASH_MAX; 852 853 /* 854 * hash table for vm_page_lookup_quick() 855 */ 856 mp = (void *)kmem_alloc3(&kernel_map, 857 vm_page_hash_size * sizeof(*vm_page_hash), 858 VM_SUBSYS_VMPGHASH, KM_CPU(0)); 859 bzero(mp, vm_page_hash_size * sizeof(*mp)); 860 cpu_sfence(); 861 vm_page_hash = mp; 862 } 863 SYSINIT(vm_pgend, SI_SUB_PROC0_POST, SI_ORDER_ANY, 864 vm_page_startup_finish, NULL); 865 866 867 /* 868 * Scan comparison function for Red-Black tree scans. An inclusive 869 * (start,end) is expected. Other fields are not used. 870 */ 871 int 872 rb_vm_page_scancmp(struct vm_page *p, void *data) 873 { 874 struct rb_vm_page_scan_info *info = data; 875 876 if (p->pindex < info->start_pindex) 877 return(-1); 878 if (p->pindex > info->end_pindex) 879 return(1); 880 return(0); 881 } 882 883 int 884 rb_vm_page_compare(struct vm_page *p1, struct vm_page *p2) 885 { 886 if (p1->pindex < p2->pindex) 887 return(-1); 888 if (p1->pindex > p2->pindex) 889 return(1); 890 return(0); 891 } 892 893 void 894 vm_page_init(vm_page_t m) 895 { 896 /* do nothing for now. Called from pmap_page_init() */ 897 } 898 899 /* 900 * Each page queue has its own spin lock, which is fairly optimal for 901 * allocating and freeing pages at least. 902 * 903 * The caller must hold the vm_page_spin_lock() before locking a vm_page's 904 * queue spinlock via this function. Also note that m->queue cannot change 905 * unless both the page and queue are locked. 906 */ 907 static __inline 908 void 909 _vm_page_queue_spin_lock(vm_page_t m) 910 { 911 u_short queue; 912 913 queue = m->queue; 914 if (queue != PQ_NONE) { 915 spin_lock(&vm_page_queues[queue].spin); 916 KKASSERT(queue == m->queue); 917 } 918 } 919 920 static __inline 921 void 922 _vm_page_queue_spin_unlock(vm_page_t m) 923 { 924 u_short queue; 925 926 queue = m->queue; 927 cpu_ccfence(); 928 if (queue != PQ_NONE) 929 spin_unlock(&vm_page_queues[queue].spin); 930 } 931 932 static __inline 933 void 934 _vm_page_queues_spin_lock(u_short queue) 935 { 936 cpu_ccfence(); 937 if (queue != PQ_NONE) 938 spin_lock(&vm_page_queues[queue].spin); 939 } 940 941 942 static __inline 943 void 944 _vm_page_queues_spin_unlock(u_short queue) 945 { 946 cpu_ccfence(); 947 if (queue != PQ_NONE) 948 spin_unlock(&vm_page_queues[queue].spin); 949 } 950 951 void 952 vm_page_queue_spin_lock(vm_page_t m) 953 { 954 _vm_page_queue_spin_lock(m); 955 } 956 957 void 958 vm_page_queues_spin_lock(u_short queue) 959 { 960 _vm_page_queues_spin_lock(queue); 961 } 962 963 void 964 vm_page_queue_spin_unlock(vm_page_t m) 965 { 966 _vm_page_queue_spin_unlock(m); 967 } 968 969 void 970 vm_page_queues_spin_unlock(u_short queue) 971 { 972 _vm_page_queues_spin_unlock(queue); 973 } 974 975 /* 976 * This locks the specified vm_page and its queue in the proper order 977 * (page first, then queue). The queue may change so the caller must 978 * recheck on return. 
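 */

/*
 * The block below is an illustrative sketch only, kept under #if 0 so
 * it is never compiled.  It shows the page-then-queue lock order
 * described above, including the point that m->queue is only stable
 * once the page spinlock is held.  The helper name is hypothetical.
 */
#if 0
static void
example_lock_page_then_queue(vm_page_t m)
{
	u_short queue;

	vm_page_spin_lock(m);			/* page first */
	queue = m->queue;			/* now stable */
	if (queue != PQ_NONE)
		spin_lock(&vm_page_queues[queue].spin);

	/* ... work with the page and its queue ... */

	if (queue != PQ_NONE)
		spin_unlock(&vm_page_queues[queue].spin);
	vm_page_spin_unlock(m);
}
#endif
/*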
979 */ 980 static __inline 981 void 982 _vm_page_and_queue_spin_lock(vm_page_t m) 983 { 984 vm_page_spin_lock(m); 985 _vm_page_queue_spin_lock(m); 986 } 987 988 static __inline 989 void 990 _vm_page_and_queue_spin_unlock(vm_page_t m) 991 { 992 _vm_page_queues_spin_unlock(m->queue); 993 vm_page_spin_unlock(m); 994 } 995 996 void 997 vm_page_and_queue_spin_unlock(vm_page_t m) 998 { 999 _vm_page_and_queue_spin_unlock(m); 1000 } 1001 1002 void 1003 vm_page_and_queue_spin_lock(vm_page_t m) 1004 { 1005 _vm_page_and_queue_spin_lock(m); 1006 } 1007 1008 /* 1009 * Helper function removes vm_page from its current queue. 1010 * Returns the base queue the page used to be on. 1011 * 1012 * The vm_page and the queue must be spinlocked. 1013 * This function will unlock the queue but leave the page spinlocked. 1014 */ 1015 static __inline u_short 1016 _vm_page_rem_queue_spinlocked(vm_page_t m) 1017 { 1018 struct vpgqueues *pq; 1019 u_short queue; 1020 u_short oqueue; 1021 long *cnt_adj; 1022 long *cnt_gd; 1023 1024 queue = m->queue; 1025 if (queue != PQ_NONE) { 1026 pq = &vm_page_queues[queue]; 1027 TAILQ_REMOVE(&pq->pl, m, pageq); 1028 1029 /* 1030 * Primarily adjust our pcpu stats for rollup, which is 1031 * (mycpu->gd_vmstats_adj + offset). This is normally 1032 * synchronized on every hardclock(). 1033 * 1034 * However, in order for the nominal low-memory algorithms 1035 * to work properly if the unsynchronized adjustment gets 1036 * too negative and might trigger the pageout daemon, we 1037 * immediately synchronize with the global structure. 1038 * 1039 * The idea here is to reduce unnecessary SMP cache mastership 1040 * changes in the global vmstats, which can be particularly 1041 * bad in multi-socket systems. 1042 * 1043 * WARNING! In systems with low amounts of memory the 1044 * vm_paging_needed(-1024 * ncpus) test could 1045 * wind up testing a value above the paging target, 1046 * meaning it would almost always return TRUE. In 1047 * that situation we synchronize every time the 1048 * cumulative adjustment falls below -1024. 1049 */ 1050 cnt_adj = (long *)((char *)&mycpu->gd_vmstats_adj + 1051 pq->cnt_offset); 1052 cnt_gd = (long *)((char *)&mycpu->gd_vmstats + 1053 pq->cnt_offset); 1054 atomic_add_long(cnt_adj, -1); 1055 atomic_add_long(cnt_gd, -1); 1056 1057 if (*cnt_adj < -1024 && vm_paging_needed(-1024 * ncpus)) { 1058 u_long copy = atomic_swap_long(cnt_adj, 0); 1059 cnt_adj = (long *)((char *)&vmstats + pq->cnt_offset); 1060 atomic_add_long(cnt_adj, copy); 1061 } 1062 pq->lcnt--; 1063 m->queue = PQ_NONE; 1064 oqueue = queue; 1065 queue -= m->pc; 1066 vm_page_queues_spin_unlock(oqueue); /* intended */ 1067 } 1068 return queue; 1069 } 1070 1071 /* 1072 * Helper function places the vm_page on the specified queue. Generally 1073 * speaking only PQ_FREE pages are placed at the head, to allow them to 1074 * be allocated sooner rather than later on the assumption that they 1075 * are cache-hot. 1076 * 1077 * The vm_page must be spinlocked. 1078 * The vm_page must NOT be FICTITIOUS (that would be a disaster) 1079 * This function will return with both the page and the queue locked. 
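 */

/*
 * The block below is an illustrative sketch only, kept under #if 0 so
 * it is never compiled.  It models, with C11 atomics rather than the
 * kernel's primitives, the per-cpu counter batching used by the queue
 * add/remove helpers: most updates hit only the local delta, and the
 * shared counter is touched only when the delta drifts past a
 * threshold.  All names in it are hypothetical.
 */
#if 0
#include <stdatomic.h>

static _Atomic long example_global_count;

static void
example_pcpu_dec(_Atomic long *pcpu_adj, long threshold)
{
	long adj;

	atomic_fetch_add(pcpu_adj, -1);
	adj = atomic_load(pcpu_adj);
	if (adj < -threshold) {
		/* fold the accumulated delta into the shared counter */
		adj = atomic_exchange(pcpu_adj, 0);
		atomic_fetch_add(&example_global_count, adj);
	}
}
#endif
/*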
 */
static __inline void
_vm_page_add_queue_spinlocked(vm_page_t m, u_short queue, int athead)
{
	struct vpgqueues *pq;
	long *cnt_adj;
	long *cnt_gd;

	KKASSERT(m->queue == PQ_NONE &&
		 (m->flags & (PG_FICTITIOUS | PG_UNQUEUED)) == 0);

	if (queue != PQ_NONE) {
		vm_page_queues_spin_lock(queue);
		pq = &vm_page_queues[queue];
		++pq->lcnt;

		/*
		 * Adjust our pcpu stats.  If a system entity really needs
		 * to incorporate the count it will call vmstats_rollup()
		 * to roll it all up into the global vmstats structure.
		 */
		cnt_adj = (long *)((char *)&mycpu->gd_vmstats_adj +
				   pq->cnt_offset);
		cnt_gd = (long *)((char *)&mycpu->gd_vmstats +
				  pq->cnt_offset);
		atomic_add_long(cnt_adj, 1);
		atomic_add_long(cnt_gd, 1);

		/*
		 * PQ_FREE is always handled LIFO style to try to provide
		 * cache-hot pages to programs.
		 */
		m->queue = queue;
		if (queue - m->pc == PQ_FREE) {
			TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
		} else if (athead) {
			TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
		} else {
			TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
		}
		/* leave the queue spinlocked */
	}
}

/*
 * Wait until page is no longer BUSY.  If also_m_busy is TRUE we wait
 * until the page is no longer BUSY or SBUSY (busy_count field is 0).
 *
 * Returns TRUE if it had to sleep, FALSE if we did not.  Only one sleep
 * call will be made before returning.
 *
 * This function does NOT busy the page and on return the page is not
 * guaranteed to be available.
 */
void
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
	u_int32_t busy_count;

	for (;;) {
		busy_count = m->busy_count;
		cpu_ccfence();

		if ((busy_count & PBUSY_LOCKED) == 0 &&
		    (also_m_busy == 0 || (busy_count & PBUSY_MASK) == 0)) {
			break;
		}
		tsleep_interlock(m, 0);
		if (atomic_cmpset_int(&m->busy_count, busy_count,
				      busy_count | PBUSY_WANTED)) {
			atomic_set_int(&m->flags, PG_REFERENCED);
			tsleep(m, PINTERLOCKED, msg, 0);
			break;
		}
	}
}

/*
 * This calculates and returns a page color given an optional VM object and
 * either a pindex or an iterator.  We attempt to return a cpu-localized
 * pg_color that is still roughly 16-way set-associative.  The CPU topology
 * is used if it was probed.
 *
 * The caller may use the returned value to index into e.g. PQ_FREE when
 * allocating a page in order to nominally obtain pages that are hopefully
 * already localized to the requesting cpu.  This function is not able to
 * provide any sort of guarantee of this, but does its best to improve
 * hardware cache management performance.
 *
 * WARNING! The caller must mask the returned value with PQ_L2_MASK.
 */
u_short
vm_get_pg_color(int cpuid, vm_object_t object, vm_pindex_t pindex)
{
	u_short pg_color;
	int object_pg_color;

	/*
	 * WARNING! cpu_topology_core_ids might not be a power of two.
	 *	    We also shouldn't make assumptions about
	 *	    cpu_topology_phys_ids either.
	 *
	 * WARNING! ncpus might not be known at this time (during early
	 *	    boot), and might be set to 1.
	 *
	 * General format: [phys_id][core_id][cpuid][set-associativity]
	 * (but uses modulo, so not necessarily precise bit masks)
	 */
	object_pg_color = object ?
object->pg_color : 0; 1189 1190 if (cpu_topology_ht_ids) { 1191 int phys_id; 1192 int core_id; 1193 int ht_id; 1194 int physcale; 1195 int grpscale; 1196 int cpuscale; 1197 1198 /* 1199 * Translate cpuid to socket, core, and hyperthread id. 1200 */ 1201 phys_id = get_cpu_phys_id(cpuid); 1202 core_id = get_cpu_core_id(cpuid); 1203 ht_id = get_cpu_ht_id(cpuid); 1204 1205 /* 1206 * Calculate pg_color for our array index. 1207 * 1208 * physcale - socket multiplier. 1209 * grpscale - core multiplier (cores per socket) 1210 * cpu* - cpus per core 1211 * 1212 * WARNING! In early boot, ncpus has not yet been 1213 * initialized and may be set to (1). 1214 * 1215 * WARNING! physcale must match the organization that 1216 * vm_numa_organize() creates to ensure that 1217 * we properly localize allocations to the 1218 * requested cpuid. 1219 */ 1220 physcale = PQ_L2_SIZE / cpu_topology_phys_ids; 1221 grpscale = physcale / cpu_topology_core_ids; 1222 cpuscale = grpscale / cpu_topology_ht_ids; 1223 1224 pg_color = phys_id * physcale; 1225 pg_color += core_id * grpscale; 1226 pg_color += ht_id * cpuscale; 1227 pg_color += (pindex + object_pg_color) % cpuscale; 1228 1229 #if 0 1230 if (grpsize >= 8) { 1231 pg_color += (pindex + object_pg_color) % grpsize; 1232 } else { 1233 if (grpsize <= 2) { 1234 grpsize = 8; 1235 } else { 1236 /* 3->9, 4->8, 5->10, 6->12, 7->14 */ 1237 grpsize += grpsize; 1238 if (grpsize < 8) 1239 grpsize += grpsize; 1240 } 1241 pg_color += (pindex + object_pg_color) % grpsize; 1242 } 1243 #endif 1244 } else { 1245 /* 1246 * Unknown topology, distribute things evenly. 1247 * 1248 * WARNING! In early boot, ncpus has not yet been 1249 * initialized and may be set to (1). 1250 */ 1251 int cpuscale; 1252 1253 cpuscale = PQ_L2_SIZE / ncpus; 1254 1255 pg_color = cpuid * cpuscale; 1256 pg_color += (pindex + object_pg_color) % cpuscale; 1257 } 1258 return (pg_color & PQ_L2_MASK); 1259 } 1260 1261 /* 1262 * Wait until BUSY can be set, then set it. If also_m_busy is TRUE we 1263 * also wait for m->busy_count to become 0 before setting PBUSY_LOCKED. 1264 */ 1265 void 1266 VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m, 1267 int also_m_busy, const char *msg 1268 VM_PAGE_DEBUG_ARGS) 1269 { 1270 u_int32_t busy_count; 1271 1272 for (;;) { 1273 busy_count = m->busy_count; 1274 cpu_ccfence(); 1275 if (busy_count & PBUSY_LOCKED) { 1276 tsleep_interlock(m, 0); 1277 if (atomic_cmpset_int(&m->busy_count, busy_count, 1278 busy_count | PBUSY_WANTED)) { 1279 atomic_set_int(&m->flags, PG_REFERENCED); 1280 tsleep(m, PINTERLOCKED, msg, 0); 1281 } 1282 } else if (also_m_busy && busy_count) { 1283 tsleep_interlock(m, 0); 1284 if (atomic_cmpset_int(&m->busy_count, busy_count, 1285 busy_count | PBUSY_WANTED)) { 1286 atomic_set_int(&m->flags, PG_REFERENCED); 1287 tsleep(m, PINTERLOCKED, msg, 0); 1288 } 1289 } else { 1290 if (atomic_cmpset_int(&m->busy_count, busy_count, 1291 busy_count | PBUSY_LOCKED)) { 1292 #ifdef VM_PAGE_DEBUG 1293 m->busy_func = func; 1294 m->busy_line = lineno; 1295 #endif 1296 break; 1297 } 1298 } 1299 } 1300 } 1301 1302 /* 1303 * Attempt to set BUSY. If also_m_busy is TRUE we only succeed if 1304 * m->busy_count is also 0. 1305 * 1306 * Returns non-zero on failure. 
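 */

/*
 * The block below is an illustrative sketch only, kept under #if 0 so
 * it is never compiled.  It works the vm_get_pg_color() layout above
 * through a hypothetical 2-socket x 8-core x 2-thread topology with
 * PQ_L2_SIZE = 1024; the helper name and the fixed topology are
 * assumptions, not the probed values the real code uses.
 */
#if 0
static u_short
example_pg_color(int phys_id, int core_id, int ht_id,
		 vm_pindex_t pindex, int object_pg_color)
{
	int physcale = PQ_L2_SIZE / 2;	/* 512 colors per socket    */
	int grpscale = physcale / 8;	/*  64 colors per core      */
	int cpuscale = grpscale / 2;	/*  32 colors per hw thread */
	u_short pg_color;

	pg_color = phys_id * physcale;
	pg_color += core_id * grpscale;
	pg_color += ht_id * cpuscale;
	pg_color += (pindex + object_pg_color) % cpuscale;

	/* e.g. socket 1, core 3, thread 0, index 100 -> color 708 */
	return (pg_color & PQ_L2_MASK);
}
#endif
/*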
1307 */ 1308 int 1309 VM_PAGE_DEBUG_EXT(vm_page_busy_try)(vm_page_t m, int also_m_busy 1310 VM_PAGE_DEBUG_ARGS) 1311 { 1312 u_int32_t busy_count; 1313 1314 for (;;) { 1315 busy_count = m->busy_count; 1316 cpu_ccfence(); 1317 if (busy_count & PBUSY_LOCKED) 1318 return TRUE; 1319 if (also_m_busy && (busy_count & PBUSY_MASK) != 0) 1320 return TRUE; 1321 if (atomic_cmpset_int(&m->busy_count, busy_count, 1322 busy_count | PBUSY_LOCKED)) { 1323 #ifdef VM_PAGE_DEBUG 1324 m->busy_func = func; 1325 m->busy_line = lineno; 1326 #endif 1327 return FALSE; 1328 } 1329 } 1330 } 1331 1332 /* 1333 * Clear the BUSY flag and return non-zero to indicate to the caller 1334 * that a wakeup() should be performed. 1335 * 1336 * (inline version) 1337 */ 1338 static __inline 1339 int 1340 _vm_page_wakeup(vm_page_t m) 1341 { 1342 u_int32_t busy_count; 1343 1344 busy_count = m->busy_count; 1345 cpu_ccfence(); 1346 for (;;) { 1347 if (atomic_fcmpset_int(&m->busy_count, &busy_count, 1348 busy_count & 1349 ~(PBUSY_LOCKED | PBUSY_WANTED))) { 1350 return((int)(busy_count & PBUSY_WANTED)); 1351 } 1352 } 1353 /* not reached */ 1354 } 1355 1356 /* 1357 * Clear the BUSY flag and wakeup anyone waiting for the page. This 1358 * is typically the last call you make on a page before moving onto 1359 * other things. 1360 */ 1361 void 1362 vm_page_wakeup(vm_page_t m) 1363 { 1364 KASSERT(m->busy_count & PBUSY_LOCKED, 1365 ("vm_page_wakeup: page not busy!!!")); 1366 if (_vm_page_wakeup(m)) 1367 wakeup(m); 1368 } 1369 1370 /* 1371 * Hold a page, preventing reuse. This is typically only called on pages 1372 * in a known state (either held busy, special, or interlocked in some 1373 * manner). Holding a page does not ensure that it remains valid, it only 1374 * prevents reuse. The page must not already be on the FREE queue or in 1375 * any danger of being moved to the FREE queue concurrent with this call. 1376 * 1377 * Other parts of the system can still disassociate the page from its object 1378 * and attempt to free it, or perform read or write I/O on it and/or otherwise 1379 * manipulate the page, but if the page is held the VM system will leave the 1380 * page and its data intact and not cycle it through the FREE queue until 1381 * the last hold has been released. 1382 * 1383 * (see vm_page_wire() if you want to prevent the page from being 1384 * disassociated from its object too). 1385 */ 1386 void 1387 vm_page_hold(vm_page_t m) 1388 { 1389 atomic_add_int(&m->hold_count, 1); 1390 KKASSERT(m->queue - m->pc != PQ_FREE); 1391 } 1392 1393 /* 1394 * The opposite of vm_page_hold(). If the page is on the HOLD queue 1395 * it was freed while held and must be moved back to the FREE queue. 1396 * 1397 * To avoid racing against vm_page_free*() we must re-test conditions 1398 * after obtaining the spin-lock. The initial test can also race a 1399 * vm_page_free*() that is in the middle of moving a page to PQ_HOLD, 1400 * leaving the page on PQ_HOLD with hold_count == 0. Rather than 1401 * throw a spin-lock in the critical path, we rely on the pageout 1402 * daemon to clean-up these loose ends. 1403 * 1404 * More critically, the 'easy movement' between queues without busying 1405 * a vm_page is only allowed for PQ_FREE<->PQ_HOLD. 
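 */

/*
 * The block below is an illustrative sketch only, kept under #if 0 so
 * it is never compiled.  It models the lock-free busy_count protocol
 * used by vm_page_busy_try() and _vm_page_wakeup() above with C11
 * atomics: acquire by CAS'ing PBUSY_LOCKED in, retrying while the
 * observed value keeps changing.  The helper name is hypothetical and
 * the SBUSY/WANTED handling is intentionally omitted.
 */
#if 0
#include <stdatomic.h>

static int
example_busy_try(_Atomic unsigned int *busy_count)
{
	unsigned int bc = atomic_load(busy_count);

	for (;;) {
		if (bc & PBUSY_LOCKED)
			return (1);	/* failed, someone else owns it */
		if (atomic_compare_exchange_weak(busy_count, &bc,
						 bc | PBUSY_LOCKED))
			return (0);	/* we now hold the hard busy */
		/* CAS failure reloaded bc; retry */
	}
}
#endif
/*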
1406 */ 1407 void 1408 vm_page_unhold(vm_page_t m) 1409 { 1410 KASSERT(m->hold_count > 0 && m->queue - m->pc != PQ_FREE, 1411 ("vm_page_unhold: pg %p illegal hold_count (%d) or " 1412 "on FREE queue (%d)", 1413 m, m->hold_count, m->queue - m->pc)); 1414 1415 if (atomic_fetchadd_int(&m->hold_count, -1) == 1 && 1416 m->queue - m->pc == PQ_HOLD) { 1417 vm_page_spin_lock(m); 1418 if (m->hold_count == 0 && m->queue - m->pc == PQ_HOLD) { 1419 _vm_page_queue_spin_lock(m); 1420 _vm_page_rem_queue_spinlocked(m); 1421 _vm_page_add_queue_spinlocked(m, PQ_FREE + m->pc, 1); 1422 _vm_page_queue_spin_unlock(m); 1423 } 1424 vm_page_spin_unlock(m); 1425 } 1426 } 1427 1428 /* 1429 * Create a fictitious page with the specified physical address and 1430 * memory attribute. The memory attribute is the only the machine- 1431 * dependent aspect of a fictitious page that must be initialized. 1432 */ 1433 void 1434 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr) 1435 { 1436 /* 1437 * The page's memattr might have changed since the 1438 * previous initialization. Update the pmap to the 1439 * new memattr. 1440 */ 1441 if ((m->flags & PG_FICTITIOUS) != 0) 1442 goto memattr; 1443 m->phys_addr = paddr; 1444 m->queue = PQ_NONE; 1445 /* Fictitious pages don't use "segind". */ 1446 /* Fictitious pages don't use "order" or "pool". */ 1447 m->flags = PG_FICTITIOUS | PG_UNQUEUED; 1448 m->busy_count = PBUSY_LOCKED; 1449 m->wire_count = 1; 1450 spin_init(&m->spin, "fake_page"); 1451 pmap_page_init(m); 1452 memattr: 1453 pmap_page_set_memattr(m, memattr); 1454 } 1455 1456 /* 1457 * Inserts the given vm_page into the object and object list. 1458 * 1459 * The pagetables are not updated but will presumably fault the page 1460 * in if necessary, or if a kernel page the caller will at some point 1461 * enter the page into the kernel's pmap. We are not allowed to block 1462 * here so we *can't* do this anyway. 1463 * 1464 * This routine may not block. 1465 * This routine must be called with the vm_object held. 1466 * This routine must be called with a critical section held. 1467 * 1468 * This routine returns TRUE if the page was inserted into the object 1469 * successfully, and FALSE if the page already exists in the object. 1470 */ 1471 int 1472 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex) 1473 { 1474 ASSERT_LWKT_TOKEN_HELD_EXCL(vm_object_token(object)); 1475 if (m->object != NULL) 1476 panic("vm_page_insert: already inserted"); 1477 1478 atomic_add_int(&object->generation, 1); 1479 1480 /* 1481 * Associate the VM page with an (object, offset). 1482 * 1483 * The vm_page spin lock is required for interactions with the pmap. 1484 * XXX vm_page_spin_lock() might not be needed for this any more. 1485 */ 1486 vm_page_spin_lock(m); 1487 m->object = object; 1488 m->pindex = pindex; 1489 if (vm_page_rb_tree_RB_INSERT(&object->rb_memq, m)) { 1490 m->object = NULL; 1491 m->pindex = 0; 1492 vm_page_spin_unlock(m); 1493 return FALSE; 1494 } 1495 ++object->resident_page_count; 1496 ++mycpu->gd_vmtotal.t_rm; 1497 vm_page_spin_unlock(m); 1498 1499 /* 1500 * Since we are inserting a new and possibly dirty page, 1501 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags. 1502 */ 1503 if ((m->valid & m->dirty) || 1504 (m->flags & (PG_WRITEABLE | PG_NEED_COMMIT))) 1505 vm_object_set_writeable_dirty(object); 1506 1507 /* 1508 * Checks for a swap assignment and sets PG_SWAPPED if appropriate. 
1509 */ 1510 swap_pager_page_inserted(m); 1511 return TRUE; 1512 } 1513 1514 /* 1515 * Removes the given vm_page_t from the (object,index) table 1516 * 1517 * The page must be BUSY and will remain BUSY on return. 1518 * No other requirements. 1519 * 1520 * NOTE: FreeBSD side effect was to unbusy the page on return. We leave 1521 * it busy. 1522 * 1523 * NOTE: Caller is responsible for any pmap disposition prior to the 1524 * rename (as the pmap code will not be able to find the entries 1525 * once the object has been disassociated). The caller may choose 1526 * to leave the pmap association intact if this routine is being 1527 * called as part of a rename between shadowed objects. 1528 * 1529 * This routine may not block. 1530 */ 1531 void 1532 vm_page_remove(vm_page_t m) 1533 { 1534 vm_object_t object; 1535 1536 if (m->object == NULL) { 1537 return; 1538 } 1539 1540 if ((m->busy_count & PBUSY_LOCKED) == 0) 1541 panic("vm_page_remove: page not busy"); 1542 1543 object = m->object; 1544 1545 vm_object_hold(object); 1546 1547 /* 1548 * Remove the page from the object and update the object. 1549 * 1550 * The vm_page spin lock is required for interactions with the pmap. 1551 * XXX vm_page_spin_lock() might not be needed for this any more. 1552 */ 1553 vm_page_spin_lock(m); 1554 vm_page_rb_tree_RB_REMOVE(&object->rb_memq, m); 1555 --object->resident_page_count; 1556 --mycpu->gd_vmtotal.t_rm; 1557 m->object = NULL; 1558 atomic_add_int(&object->generation, 1); 1559 vm_page_spin_unlock(m); 1560 1561 vm_object_drop(object); 1562 } 1563 1564 /* 1565 * Calculate the hash position for the vm_page hash heuristic. Generally 1566 * speaking we want to localize sequential lookups to reduce memory stalls. 1567 * 1568 * Mask by ~3 to offer 4-way set-assoc 1569 */ 1570 static __inline 1571 struct vm_page_hash_elm * 1572 vm_page_hash_hash(vm_object_t object, vm_pindex_t pindex) 1573 { 1574 size_t hi; 1575 1576 hi = iscsi_crc32(&object, sizeof(object)) << 2; 1577 hi ^= hi >> (23 - 2); 1578 hi += pindex * VM_PAGE_HASH_SET; 1579 #if 0 1580 /* mix it up */ 1581 hi = (intptr_t)object ^ object->pg_color ^ pindex; 1582 hi += object->pg_color * pindex; 1583 hi = hi ^ (hi >> 20); 1584 #endif 1585 hi &= vm_page_hash_size - 1; /* bounds */ 1586 hi &= ~(VM_PAGE_HASH_SET - 1); /* set-assoc */ 1587 1588 return (&vm_page_hash[hi]); 1589 } 1590 1591 /* 1592 * Heuristical page lookup that does not require any locks. Returns 1593 * a soft-busied page on success, NULL on failure. 1594 * 1595 * Caller must lookup the page the slow way if NULL is returned. 1596 */ 1597 vm_page_t 1598 vm_page_hash_get(vm_object_t object, vm_pindex_t pindex) 1599 { 1600 struct vm_page_hash_elm *mp; 1601 vm_page_t m; 1602 int i; 1603 1604 if (vm_page_hash == NULL) 1605 return NULL; 1606 mp = vm_page_hash_hash(object, pindex); 1607 for (i = 0; i < VM_PAGE_HASH_SET; ++i) { 1608 if (mp[i].object != object || mp[i].pindex != (uint32_t)pindex) 1609 continue; 1610 m = mp[i].m; 1611 cpu_ccfence(); 1612 if (m == NULL) 1613 continue; 1614 if (m->object != object || m->pindex != pindex) 1615 continue; 1616 if (vm_page_sbusy_try(m)) 1617 continue; 1618 if (m->object == object && m->pindex == pindex) { 1619 /* 1620 * On-match optimization - do not update ticks 1621 * unless we have to (reduce cache coherency traffic) 1622 */ 1623 if (mp[i].ticks != ticks) 1624 mp[i].ticks = ticks; 1625 return m; 1626 } 1627 vm_page_sbusy_drop(m); 1628 } 1629 return NULL; 1630 } 1631 1632 /* 1633 * Enter page onto vm_page_hash[]. 
This is a heuristic, SMP collisions 1634 * are allowed. 1635 */ 1636 static __inline 1637 void 1638 vm_page_hash_enter(vm_page_t m) 1639 { 1640 struct vm_page_hash_elm *mp; 1641 struct vm_page_hash_elm *best; 1642 int i; 1643 1644 /* 1645 * Only enter type-stable vm_pages with well-shared objects. 1646 */ 1647 if ((m->flags & PG_MAPPEDMULTI) == 0) 1648 return; 1649 if (__predict_false(vm_page_hash == NULL || 1650 m < &vm_page_array[0] || 1651 m >= &vm_page_array[vm_page_array_size])) { 1652 return; 1653 } 1654 if (__predict_false(m->object == NULL)) 1655 return; 1656 #if 0 1657 /* 1658 * Disabled at the moment, there are some degenerate conditions 1659 * with often-exec'd programs that get ignored. In particular, 1660 * the kernel's elf loader does a vn_rdwr() on the first page of 1661 * a binary. 1662 */ 1663 if (m->object->ref_count <= 2 || (m->object->flags & OBJ_ONEMAPPING)) 1664 return; 1665 #endif 1666 if (vm_page_hash_vnode_only && m->object->type != OBJT_VNODE) 1667 return; 1668 1669 /* 1670 * Find best entry 1671 */ 1672 mp = vm_page_hash_hash(m->object, m->pindex); 1673 best = mp; 1674 for (i = 0; i < VM_PAGE_HASH_SET; ++i) { 1675 if (mp[i].m == m && 1676 mp[i].object == m->object && 1677 mp[i].pindex == (uint32_t)m->pindex) { 1678 /* 1679 * On-match optimization - do not update ticks 1680 * unless we have to (reduce cache coherency traffic) 1681 */ 1682 if (mp[i].ticks != ticks) 1683 mp[i].ticks = ticks; 1684 return; 1685 } 1686 1687 /* 1688 * The best choice is the oldest entry. 1689 * 1690 * Also check for a field overflow, using -1 instead of 0 1691 * to deal with SMP races on accessing the 'ticks' global. 1692 */ 1693 if ((ticks - best->ticks) < (ticks - mp[i].ticks) || 1694 (int)(ticks - mp[i].ticks) < -1) { 1695 best = &mp[i]; 1696 } 1697 } 1698 1699 /* 1700 * Load the entry. Copy a few elements to the hash entry itself 1701 * to reduce memory stalls due to memory indirects on lookups. 1702 */ 1703 best->m = m; 1704 best->object = m->object; 1705 best->pindex = (uint32_t)m->pindex; 1706 best->ticks = ticks; 1707 } 1708 1709 /* 1710 * Locate and return the page at (object, pindex), or NULL if the 1711 * page could not be found. 1712 * 1713 * The caller must hold the vm_object token. 
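 */

/*
 * The block below is an illustrative sketch only, kept under #if 0 so
 * it is never compiled.  It isolates the victim selection used by
 * vm_page_hash_enter() above: within one VM_PAGE_HASH_SET-wide set the
 * oldest entry (largest tick delta, with a wrap check) is replaced.
 * The helper name and the 'now' parameter are hypothetical stand-ins.
 */
#if 0
static struct vm_page_hash_elm *
example_pick_victim(struct vm_page_hash_elm *set, int now)
{
	struct vm_page_hash_elm *best = set;
	int i;

	for (i = 1; i < VM_PAGE_HASH_SET; ++i) {
		if ((now - best->ticks) < (now - set[i].ticks) ||
		    (int)(now - set[i].ticks) < -1) {
			best = &set[i];
		}
	}
	return (best);
}
#endif
/*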
1714 */ 1715 vm_page_t 1716 vm_page_lookup(vm_object_t object, vm_pindex_t pindex) 1717 { 1718 vm_page_t m; 1719 1720 /* 1721 * Search the hash table for this object/offset pair 1722 */ 1723 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 1724 m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex); 1725 if (m) { 1726 KKASSERT(m->object == object && m->pindex == pindex); 1727 vm_page_hash_enter(m); 1728 } 1729 return(m); 1730 } 1731 1732 vm_page_t 1733 VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_wait)(struct vm_object *object, 1734 vm_pindex_t pindex, 1735 int also_m_busy, const char *msg 1736 VM_PAGE_DEBUG_ARGS) 1737 { 1738 u_int32_t busy_count; 1739 vm_page_t m; 1740 1741 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 1742 m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex); 1743 while (m) { 1744 KKASSERT(m->object == object && m->pindex == pindex); 1745 busy_count = m->busy_count; 1746 cpu_ccfence(); 1747 if (busy_count & PBUSY_LOCKED) { 1748 tsleep_interlock(m, 0); 1749 if (atomic_cmpset_int(&m->busy_count, busy_count, 1750 busy_count | PBUSY_WANTED)) { 1751 atomic_set_int(&m->flags, PG_REFERENCED); 1752 tsleep(m, PINTERLOCKED, msg, 0); 1753 m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, 1754 pindex); 1755 } 1756 } else if (also_m_busy && busy_count) { 1757 tsleep_interlock(m, 0); 1758 if (atomic_cmpset_int(&m->busy_count, busy_count, 1759 busy_count | PBUSY_WANTED)) { 1760 atomic_set_int(&m->flags, PG_REFERENCED); 1761 tsleep(m, PINTERLOCKED, msg, 0); 1762 m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, 1763 pindex); 1764 } 1765 } else if (atomic_cmpset_int(&m->busy_count, busy_count, 1766 busy_count | PBUSY_LOCKED)) { 1767 #ifdef VM_PAGE_DEBUG 1768 m->busy_func = func; 1769 m->busy_line = lineno; 1770 #endif 1771 vm_page_hash_enter(m); 1772 break; 1773 } 1774 } 1775 return m; 1776 } 1777 1778 /* 1779 * Attempt to lookup and busy a page. 1780 * 1781 * Returns NULL if the page could not be found 1782 * 1783 * Returns a vm_page and error == TRUE if the page exists but could not 1784 * be busied. 1785 * 1786 * Returns a vm_page and error == FALSE on success. 1787 */ 1788 vm_page_t 1789 VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_try)(struct vm_object *object, 1790 vm_pindex_t pindex, 1791 int also_m_busy, int *errorp 1792 VM_PAGE_DEBUG_ARGS) 1793 { 1794 u_int32_t busy_count; 1795 vm_page_t m; 1796 1797 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 1798 m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex); 1799 *errorp = FALSE; 1800 while (m) { 1801 KKASSERT(m->object == object && m->pindex == pindex); 1802 busy_count = m->busy_count; 1803 cpu_ccfence(); 1804 if (busy_count & PBUSY_LOCKED) { 1805 *errorp = TRUE; 1806 break; 1807 } 1808 if (also_m_busy && busy_count) { 1809 *errorp = TRUE; 1810 break; 1811 } 1812 if (atomic_cmpset_int(&m->busy_count, busy_count, 1813 busy_count | PBUSY_LOCKED)) { 1814 #ifdef VM_PAGE_DEBUG 1815 m->busy_func = func; 1816 m->busy_line = lineno; 1817 #endif 1818 vm_page_hash_enter(m); 1819 break; 1820 } 1821 } 1822 return m; 1823 } 1824 1825 /* 1826 * Returns a page that is only soft-busied for use by the caller in 1827 * a read-only fashion. Returns NULL if the page could not be found, 1828 * the soft busy could not be obtained, or the page data is invalid. 1829 * 1830 * XXX Doesn't handle PG_FICTITIOUS pages at the moment, but there is 1831 * no reason why we couldn't. 
1832 */ 1833 vm_page_t 1834 vm_page_lookup_sbusy_try(struct vm_object *object, vm_pindex_t pindex, 1835 int pgoff, int pgbytes) 1836 { 1837 vm_page_t m; 1838 1839 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 1840 m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex); 1841 if (m) { 1842 if ((m->valid != VM_PAGE_BITS_ALL && 1843 !vm_page_is_valid(m, pgoff, pgbytes)) || 1844 (m->flags & PG_FICTITIOUS)) { 1845 m = NULL; 1846 } else if (vm_page_sbusy_try(m)) { 1847 m = NULL; 1848 } else if ((m->valid != VM_PAGE_BITS_ALL && 1849 !vm_page_is_valid(m, pgoff, pgbytes)) || 1850 (m->flags & PG_FICTITIOUS)) { 1851 vm_page_sbusy_drop(m); 1852 m = NULL; 1853 } else { 1854 vm_page_hash_enter(m); 1855 } 1856 } 1857 return m; 1858 } 1859 1860 /* 1861 * Caller must hold the related vm_object 1862 */ 1863 vm_page_t 1864 vm_page_next(vm_page_t m) 1865 { 1866 vm_page_t next; 1867 1868 next = vm_page_rb_tree_RB_NEXT(m); 1869 if (next && next->pindex != m->pindex + 1) 1870 next = NULL; 1871 return (next); 1872 } 1873 1874 /* 1875 * vm_page_rename() 1876 * 1877 * Move the given vm_page from its current object to the specified 1878 * target object/offset. The page must be busy and will remain so 1879 * on return. 1880 * 1881 * new_object must be held. 1882 * This routine might block. XXX ? 1883 * 1884 * NOTE: Swap associated with the page must be invalidated by the move. We 1885 * have to do this for several reasons: (1) we aren't freeing the 1886 * page, (2) we are dirtying the page, (3) the VM system is probably 1887 * moving the page from object A to B, and will then later move 1888 * the backing store from A to B and we can't have a conflict. 1889 * 1890 * NOTE: We *always* dirty the page. It is necessary both for the 1891 * fact that we moved it, and because we may be invalidating 1892 * swap. If the page is on the cache, we have to deactivate it 1893 * or vm_page_dirty() will panic. Dirty pages are not allowed 1894 * on the cache. 1895 * 1896 * NOTE: Caller is responsible for any pmap disposition prior to the 1897 * rename (as the pmap code will not be able to find the entries 1898 * once the object has been disassociated or changed). Nominally 1899 * the caller is moving a page between shadowed objects and so the 1900 * pmap association is retained without having to remove the page 1901 * from it. 1902 */ 1903 void 1904 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex) 1905 { 1906 KKASSERT(m->busy_count & PBUSY_LOCKED); 1907 ASSERT_LWKT_TOKEN_HELD_EXCL(vm_object_token(new_object)); 1908 if (m->object) { 1909 ASSERT_LWKT_TOKEN_HELD_EXCL(vm_object_token(m->object)); 1910 vm_page_remove(m); 1911 } 1912 if (vm_page_insert(m, new_object, new_pindex) == FALSE) { 1913 panic("vm_page_rename: target exists (%p,%"PRIu64")", 1914 new_object, new_pindex); 1915 } 1916 if (m->queue - m->pc == PQ_CACHE) 1917 vm_page_deactivate(m); 1918 vm_page_dirty(m); 1919 } 1920 1921 /* 1922 * vm_page_unqueue() without any wakeup. This routine is used when a page 1923 * is to remain BUSYied by the caller. 1924 * 1925 * This routine may not block. 1926 */ 1927 void 1928 vm_page_unqueue_nowakeup(vm_page_t m) 1929 { 1930 vm_page_and_queue_spin_lock(m); 1931 (void)_vm_page_rem_queue_spinlocked(m); 1932 vm_page_spin_unlock(m); 1933 } 1934 1935 /* 1936 * vm_page_unqueue() - Remove a page from its queue, wakeup the pagedemon 1937 * if necessary. 1938 * 1939 * This routine may not block. 
1940 */ 1941 void 1942 vm_page_unqueue(vm_page_t m) 1943 { 1944 u_short queue; 1945 1946 vm_page_and_queue_spin_lock(m); 1947 queue = _vm_page_rem_queue_spinlocked(m); 1948 if (queue == PQ_FREE || queue == PQ_CACHE) { 1949 vm_page_spin_unlock(m); 1950 pagedaemon_wakeup(); 1951 } else { 1952 vm_page_spin_unlock(m); 1953 } 1954 } 1955 1956 /* 1957 * vm_page_list_find() 1958 * 1959 * Find a page on the specified queue with color optimization. 1960 * 1961 * The page coloring optimization attempts to locate a page that does 1962 * not overload other nearby pages in the object in the cpu's L1 or L2 1963 * caches. We need this optimization because cpu caches tend to be 1964 * physical caches, while object spaces tend to be virtual. 1965 * 1966 * The page coloring optimization also, very importantly, tries to localize 1967 * memory to cpus and physical sockets. 1968 * 1969 * Each PQ_FREE and PQ_CACHE color queue has its own spinlock and the 1970 * algorithm is adjusted to localize allocations on a per-core basis. 1971 * This is done by 'twisting' the colors. 1972 * 1973 * The page is returned spinlocked and removed from its queue (it will 1974 * be on PQ_NONE), or NULL. The page is not BUSY'd. The caller 1975 * is responsible for dealing with the busy-page case (usually by 1976 * deactivating the page and looping). 1977 * 1978 * NOTE: This routine is carefully inlined. A non-inlined version 1979 * is available for outside callers but the only critical path is 1980 * from within this source file. 1981 * 1982 * NOTE: This routine assumes that the vm_pages found in PQ_CACHE and PQ_FREE 1983 * represent stable storage, allowing us to order our locks vm_page 1984 * first, then queue. 1985 */ 1986 static __inline 1987 vm_page_t 1988 _vm_page_list_find(int basequeue, int index) 1989 { 1990 struct vpgqueues *pq; 1991 vm_page_t m; 1992 1993 index &= PQ_L2_MASK; 1994 pq = &vm_page_queues[basequeue + index]; 1995 1996 /* 1997 * Try this cpu's colored queue first. Test for a page unlocked, 1998 * then lock the queue and locate a page. Note that the lock order 1999 * is reversed, but we do not want to dwadle on the page spinlock 2000 * anyway as it is held significantly longer than the queue spinlock. 2001 */ 2002 if (TAILQ_FIRST(&pq->pl)) { 2003 spin_lock(&pq->spin); 2004 TAILQ_FOREACH(m, &pq->pl, pageq) { 2005 if (spin_trylock(&m->spin) == 0) 2006 continue; 2007 KKASSERT(m->queue == basequeue + index); 2008 pq->lastq = -1; 2009 return(m); 2010 } 2011 spin_unlock(&pq->spin); 2012 } 2013 2014 m = _vm_page_list_find_wide(basequeue, index, &pq->lastq); 2015 2016 return(m); 2017 } 2018 2019 /* 2020 * If we could not find the page in the desired queue try to find it in 2021 * a nearby (NUMA-aware) queue, spreading out as we go. 2022 */ 2023 static vm_page_t 2024 _vm_page_list_find_wide(int basequeue, int index, int *lastp) 2025 { 2026 struct vpgqueues *pq; 2027 vm_page_t m = NULL; 2028 int pqmask = set_assoc_mask >> 1; 2029 int pqi; 2030 int range; 2031 int skip_start; 2032 int skip_next; 2033 int count; 2034 2035 /* 2036 * Avoid re-searching empty queues over and over again skip to 2037 * pq->last if appropriate. 2038 */ 2039 if (*lastp >= 0) 2040 index = *lastp; 2041 2042 index &= PQ_L2_MASK; 2043 pq = &vm_page_queues[basequeue]; 2044 count = 0; 2045 skip_start = -1; 2046 skip_next = -1; 2047 2048 /* 2049 * Run local sets of 16, 32, 64, 128, up to the entire queue if all 2050 * else fails (PQ_L2_MASK). 2051 * 2052 * pqmask is a mask, 15, 31, 63, etc. 
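 *
 * For example, with the default set_assoc_mask of 15 the passes scan
 * 16, then 32, 64, 128, ... colors around 'index', and the final pass
 * (pqmask == PQ_L2_MASK) covers every color queue.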
2053 * 2054 * Test each queue unlocked first, then lock the queue and locate 2055 * a page. Note that the lock order is reversed, but we do not want 2056 * to dwadle on the page spinlock anyway as it is held significantly 2057 * longer than the queue spinlock. 2058 */ 2059 do { 2060 pqmask = (pqmask << 1) | 1; 2061 2062 pqi = index; 2063 range = pqmask + 1; 2064 2065 while (range > 0) { 2066 if (pqi >= skip_start && pqi < skip_next) { 2067 range -= skip_next - pqi; 2068 pqi = (pqi & ~pqmask) | (skip_next & pqmask); 2069 } 2070 if (range > 0 && TAILQ_FIRST(&pq[pqi].pl)) { 2071 spin_lock(&pq[pqi].spin); 2072 TAILQ_FOREACH(m, &pq[pqi].pl, pageq) { 2073 if (spin_trylock(&m->spin) == 0) 2074 continue; 2075 KKASSERT(m->queue == basequeue + pqi); 2076 2077 /* 2078 * If we had to wander too far, set 2079 * *lastp to skip past empty queues. 2080 */ 2081 if (count >= 8) 2082 *lastp = pqi & PQ_L2_MASK; 2083 return(m); 2084 } 2085 spin_unlock(&pq[pqi].spin); 2086 } 2087 --range; 2088 ++count; 2089 pqi = (pqi & ~pqmask) | ((pqi + 1) & pqmask); 2090 } 2091 skip_start = pqi & ~pqmask; 2092 skip_next = (pqi | pqmask) + 1; 2093 } while (pqmask != PQ_L2_MASK); 2094 2095 return(m); 2096 } 2097 2098 static __inline 2099 vm_page_t 2100 _vm_page_list_find2(int bq1, int bq2, int index) 2101 { 2102 struct vpgqueues *pq1; 2103 struct vpgqueues *pq2; 2104 vm_page_t m; 2105 2106 index &= PQ_L2_MASK; 2107 pq1 = &vm_page_queues[bq1 + index]; 2108 pq2 = &vm_page_queues[bq2 + index]; 2109 2110 /* 2111 * Try this cpu's colored queue first. Test for a page unlocked, 2112 * then lock the queue and locate a page. Note that the lock order 2113 * is reversed, but we do not want to dwadle on the page spinlock 2114 * anyway as it is held significantly longer than the queue spinlock. 2115 */ 2116 if (TAILQ_FIRST(&pq1->pl)) { 2117 spin_lock(&pq1->spin); 2118 TAILQ_FOREACH(m, &pq1->pl, pageq) { 2119 if (spin_trylock(&m->spin) == 0) 2120 continue; 2121 KKASSERT(m->queue == bq1 + index); 2122 pq1->lastq = -1; 2123 pq2->lastq = -1; 2124 return(m); 2125 } 2126 spin_unlock(&pq1->spin); 2127 } 2128 2129 m = _vm_page_list_find2_wide(bq1, bq2, index, &pq1->lastq, &pq2->lastq); 2130 2131 return(m); 2132 } 2133 2134 2135 /* 2136 * This version checks two queues at the same time, widening its search 2137 * as we progress. prefering basequeue1 2138 * and starting on basequeue2 after exhausting the first set. The idea 2139 * is to try to stay localized to the cpu. 2140 */ 2141 static vm_page_t 2142 _vm_page_list_find2_wide(int basequeue1, int basequeue2, int index, 2143 int *lastp1, int *lastp2) 2144 { 2145 struct vpgqueues *pq1; 2146 struct vpgqueues *pq2; 2147 vm_page_t m = NULL; 2148 int pqmask1, pqmask2; 2149 int pqi; 2150 int range; 2151 int skip_start1, skip_start2; 2152 int skip_next1, skip_next2; 2153 int count1, count2; 2154 2155 /* 2156 * Avoid re-searching empty queues over and over again skip to 2157 * pq->last if appropriate. 2158 */ 2159 if (*lastp1 >= 0) 2160 index = *lastp1; 2161 2162 index &= PQ_L2_MASK; 2163 2164 pqmask1 = set_assoc_mask >> 1; 2165 pq1 = &vm_page_queues[basequeue1]; 2166 count1 = 0; 2167 skip_start1 = -1; 2168 skip_next1 = -1; 2169 2170 pqmask2 = set_assoc_mask >> 1; 2171 pq2 = &vm_page_queues[basequeue2]; 2172 count2 = 0; 2173 skip_start2 = -1; 2174 skip_next2 = -1; 2175 2176 /* 2177 * Run local sets of 16, 32, 64, 128, up to the entire queue if all 2178 * else fails (PQ_L2_MASK). 2179 * 2180 * pqmask is a mask, 15, 31, 63, etc. 
2181 * 2182 * Test each queue unlocked first, then lock the queue and locate 2183 * a page. Note that the lock order is reversed, but we do not want 2184 * to dwadle on the page spinlock anyway as it is held significantly 2185 * longer than the queue spinlock. 2186 */ 2187 do { 2188 if (pqmask1 == PQ_L2_MASK) 2189 goto skip2; 2190 2191 pqmask1 = (pqmask1 << 1) | 1; 2192 pqi = index; 2193 range = pqmask1 + 1; 2194 2195 while (range > 0) { 2196 if (pqi >= skip_start1 && pqi < skip_next1) { 2197 range -= skip_next1 - pqi; 2198 pqi = (pqi & ~pqmask1) | (skip_next1 & pqmask1); 2199 } 2200 if (range > 0 && TAILQ_FIRST(&pq1[pqi].pl)) { 2201 spin_lock(&pq1[pqi].spin); 2202 TAILQ_FOREACH(m, &pq1[pqi].pl, pageq) { 2203 if (spin_trylock(&m->spin) == 0) 2204 continue; 2205 KKASSERT(m->queue == basequeue1 + pqi); 2206 2207 /* 2208 * If we had to wander too far, set 2209 * *lastp to skip past empty queues. 2210 */ 2211 if (count1 >= 8) 2212 *lastp1 = pqi & PQ_L2_MASK; 2213 return(m); 2214 } 2215 spin_unlock(&pq1[pqi].spin); 2216 } 2217 --range; 2218 ++count1; 2219 pqi = (pqi & ~pqmask1) | ((pqi + 1) & pqmask1); 2220 } 2221 skip_start1 = pqi & ~pqmask1; 2222 skip_next1 = (pqi | pqmask1) + 1; 2223 skip2: 2224 if (pqmask1 < ((set_assoc_mask << 1) | 1)) 2225 continue; 2226 2227 pqmask2 = (pqmask2 << 1) | 1; 2228 pqi = index; 2229 range = pqmask2 + 1; 2230 2231 while (range > 0) { 2232 if (pqi >= skip_start2 && pqi < skip_next2) { 2233 range -= skip_next2 - pqi; 2234 pqi = (pqi & ~pqmask2) | (skip_next2 & pqmask2); 2235 } 2236 if (range > 0 && TAILQ_FIRST(&pq2[pqi].pl)) { 2237 spin_lock(&pq2[pqi].spin); 2238 TAILQ_FOREACH(m, &pq2[pqi].pl, pageq) { 2239 if (spin_trylock(&m->spin) == 0) 2240 continue; 2241 KKASSERT(m->queue == basequeue2 + pqi); 2242 2243 /* 2244 * If we had to wander too far, set 2245 * *lastp to skip past empty queues. 2246 */ 2247 if (count2 >= 8) 2248 *lastp2 = pqi & PQ_L2_MASK; 2249 return(m); 2250 } 2251 spin_unlock(&pq2[pqi].spin); 2252 } 2253 --range; 2254 ++count2; 2255 pqi = (pqi & ~pqmask2) | ((pqi + 1) & pqmask2); 2256 } 2257 skip_start2 = pqi & ~pqmask2; 2258 skip_next2 = (pqi | pqmask2) + 1; 2259 } while (pqmask1 != PQ_L2_MASK && pqmask2 != PQ_L2_MASK); 2260 2261 return(m); 2262 } 2263 2264 /* 2265 * Returns a vm_page candidate for allocation. The page is not busied so 2266 * it can move around. The caller must busy the page (and typically 2267 * deactivate it if it cannot be busied!) 2268 * 2269 * Returns a spinlocked vm_page that has been removed from its queue. 2270 * (note that _vm_page_list_find() does not remove the page from its 2271 * queue). 2272 */ 2273 vm_page_t 2274 vm_page_list_find(int basequeue, int index) 2275 { 2276 vm_page_t m; 2277 2278 m = _vm_page_list_find(basequeue, index); 2279 if (m) 2280 _vm_page_rem_queue_spinlocked(m); 2281 return m; 2282 } 2283 2284 /* 2285 * Find a page on the cache queue with color optimization, remove it 2286 * from the queue, and busy it. The returned page will not be spinlocked. 2287 * 2288 * A candidate failure will be deactivated. Candidates can fail due to 2289 * being busied by someone else, in which case they will be deactivated. 2290 * 2291 * This routine may not block. 
2292 * 2293 */ 2294 static vm_page_t 2295 vm_page_select_cache(u_short pg_color) 2296 { 2297 vm_page_t m; 2298 2299 for (;;) { 2300 m = _vm_page_list_find(PQ_CACHE, pg_color); 2301 if (m == NULL) 2302 break; 2303 /* 2304 * (m) has been spinlocked 2305 */ 2306 _vm_page_rem_queue_spinlocked(m); 2307 if (vm_page_busy_try(m, TRUE)) { 2308 _vm_page_deactivate_locked(m, 0); 2309 vm_page_spin_unlock(m); 2310 } else { 2311 /* 2312 * We successfully busied the page 2313 */ 2314 if ((m->flags & PG_NEED_COMMIT) == 0 && 2315 m->hold_count == 0 && 2316 m->wire_count == 0 && 2317 (m->dirty & m->valid) == 0) { 2318 vm_page_spin_unlock(m); 2319 KKASSERT((m->flags & PG_UNQUEUED) == 0); 2320 pagedaemon_wakeup(); 2321 return(m); 2322 } 2323 2324 /* 2325 * The page cannot be recycled, deactivate it. 2326 */ 2327 _vm_page_deactivate_locked(m, 0); 2328 if (_vm_page_wakeup(m)) { 2329 vm_page_spin_unlock(m); 2330 wakeup(m); 2331 } else { 2332 vm_page_spin_unlock(m); 2333 } 2334 } 2335 } 2336 return (m); 2337 } 2338 2339 /* 2340 * Find a free page. We attempt to inline the nominal case and fall back 2341 * to _vm_page_select_free() otherwise. A busied page is removed from 2342 * the queue and returned. 2343 * 2344 * This routine may not block. 2345 */ 2346 static __inline vm_page_t 2347 vm_page_select_free(u_short pg_color) 2348 { 2349 vm_page_t m; 2350 2351 for (;;) { 2352 m = _vm_page_list_find(PQ_FREE, pg_color); 2353 if (m == NULL) 2354 break; 2355 _vm_page_rem_queue_spinlocked(m); 2356 if (vm_page_busy_try(m, TRUE)) { 2357 /* 2358 * Various mechanisms such as a pmap_collect can 2359 * result in a busy page on the free queue. We 2360 * have to move the page out of the way so we can 2361 * retry the allocation. If the other thread is not 2362 * allocating the page then m->valid will remain 0 and 2363 * the pageout daemon will free the page later on. 2364 * 2365 * Since we could not busy the page, however, we 2366 * cannot make assumptions as to whether the page 2367 * will be allocated by the other thread or not, 2368 * so all we can do is deactivate it to move it out 2369 * of the way. In particular, if the other thread 2370 * wires the page it may wind up on the inactive 2371 * queue and the pageout daemon will have to deal 2372 * with that case too. 2373 */ 2374 _vm_page_deactivate_locked(m, 0); 2375 vm_page_spin_unlock(m); 2376 } else { 2377 /* 2378 * Theoretically if we are able to busy the page 2379 * atomic with the queue removal (using the vm_page 2380 * lock) nobody else should have been able to mess 2381 * with the page before us. 2382 * 2383 * Assert the page state. Note that even though 2384 * wiring doesn't adjust queues, a page on the free 2385 * queue should never be wired at this point. 
2386 */ 2387 KKASSERT((m->flags & (PG_UNQUEUED | 2388 PG_NEED_COMMIT)) == 0); 2389 KASSERT(m->hold_count == 0, 2390 ("m->hold_count is not zero " 2391 "pg %p q=%d flags=%08x hold=%d wire=%d", 2392 m, m->queue, m->flags, 2393 m->hold_count, m->wire_count)); 2394 KKASSERT(m->wire_count == 0); 2395 vm_page_spin_unlock(m); 2396 pagedaemon_wakeup(); 2397 2398 /* return busied and removed page */ 2399 return(m); 2400 } 2401 } 2402 return(m); 2403 } 2404 2405 static __inline vm_page_t 2406 vm_page_select_free_or_cache(u_short pg_color, int *fromcachep) 2407 { 2408 vm_page_t m; 2409 2410 *fromcachep = 0; 2411 for (;;) { 2412 m = _vm_page_list_find2(PQ_FREE, PQ_CACHE, pg_color); 2413 if (m == NULL) 2414 break; 2415 if (vm_page_busy_try(m, TRUE)) { 2416 _vm_page_rem_queue_spinlocked(m); 2417 _vm_page_deactivate_locked(m, 0); 2418 vm_page_spin_unlock(m); 2419 } else if (m->queue - m->pc == PQ_FREE) { 2420 /* 2421 * We successfully busied the page, PQ_FREE case 2422 */ 2423 _vm_page_rem_queue_spinlocked(m); 2424 KKASSERT((m->flags & (PG_UNQUEUED | 2425 PG_NEED_COMMIT)) == 0); 2426 KASSERT(m->hold_count == 0, 2427 ("m->hold_count is not zero " 2428 "pg %p q=%d flags=%08x hold=%d wire=%d", 2429 m, m->queue, m->flags, 2430 m->hold_count, m->wire_count)); 2431 KKASSERT(m->wire_count == 0); 2432 vm_page_spin_unlock(m); 2433 pagedaemon_wakeup(); 2434 2435 /* return busied and removed page */ 2436 return(m); 2437 } else { 2438 /* 2439 * We successfully busied the page, PQ_CACHE case 2440 */ 2441 _vm_page_rem_queue_spinlocked(m); 2442 if ((m->flags & PG_NEED_COMMIT) == 0 && 2443 m->hold_count == 0 && 2444 m->wire_count == 0 && 2445 (m->dirty & m->valid) == 0) { 2446 vm_page_spin_unlock(m); 2447 KKASSERT((m->flags & PG_UNQUEUED) == 0); 2448 pagedaemon_wakeup(); 2449 *fromcachep = 1; 2450 return(m); 2451 } 2452 2453 /* 2454 * The page cannot be recycled, deactivate it. 2455 */ 2456 _vm_page_deactivate_locked(m, 0); 2457 if (_vm_page_wakeup(m)) { 2458 vm_page_spin_unlock(m); 2459 wakeup(m); 2460 } else { 2461 vm_page_spin_unlock(m); 2462 } 2463 } 2464 } 2465 return(m); 2466 } 2467 2468 /* 2469 * vm_page_alloc() 2470 * 2471 * Allocate and return a memory cell associated with this VM object/offset 2472 * pair. If object is NULL an unassociated page will be allocated. 2473 * 2474 * The returned page will be busied and removed from its queues. This 2475 * routine can block and may return NULL if a race occurs and the page 2476 * is found to already exist at the specified (object, pindex). 2477 * 2478 * VM_ALLOC_NORMAL allow use of cache pages, nominal free drain 2479 * VM_ALLOC_QUICK like normal but cannot use cache 2480 * VM_ALLOC_SYSTEM greater free drain 2481 * VM_ALLOC_INTERRUPT allow free list to be completely drained 2482 * VM_ALLOC_ZERO advisory request for pre-zero'd page only 2483 * VM_ALLOC_FORCE_ZERO advisory request for pre-zero'd page only 2484 * VM_ALLOC_NULL_OK ok to return NULL on insertion collision 2485 * (see vm_page_grab()) 2486 * VM_ALLOC_USE_GD ok to use per-gd cache 2487 * 2488 * VM_ALLOC_CPU(n) allocate using specified cpu localization 2489 * 2490 * The object must be held if not NULL 2491 * This routine may not block 2492 * 2493 * Additional special handling is required when called from an interrupt 2494 * (VM_ALLOC_INTERRUPT). We are not allowed to mess with the page cache 2495 * in this case. 
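 *
 * A minimal allocation sketch (hypothetical caller holding the object
 * token and tolerating a NULL return):
 *
 *	m = vm_page_alloc(object, pindex,
 *			  VM_ALLOC_NORMAL | VM_ALLOC_NULL_OK);
 *	if (m) {
 *		... fill and validate the page ...
 *		vm_page_wakeup(m);
 *	}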
2496 */ 2497 vm_page_t 2498 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req) 2499 { 2500 globaldata_t gd; 2501 vm_object_t obj; 2502 vm_page_t m; 2503 u_short pg_color; 2504 int cpuid_local; 2505 int fromcache; 2506 2507 #if 0 2508 /* 2509 * Special per-cpu free VM page cache. The pages are pre-busied 2510 * and pre-zerod for us. 2511 */ 2512 if (gd->gd_vmpg_count && (page_req & VM_ALLOC_USE_GD)) { 2513 crit_enter_gd(gd); 2514 if (gd->gd_vmpg_count) { 2515 m = gd->gd_vmpg_array[--gd->gd_vmpg_count]; 2516 crit_exit_gd(gd); 2517 goto done; 2518 } 2519 crit_exit_gd(gd); 2520 } 2521 #endif 2522 m = NULL; 2523 2524 /* 2525 * CPU LOCALIZATION 2526 * 2527 * CPU localization algorithm. Break the page queues up by physical 2528 * id and core id (note that two cpu threads will have the same core 2529 * id, and core_id != gd_cpuid). 2530 * 2531 * This is nowhere near perfect, for example the last pindex in a 2532 * subgroup will overflow into the next cpu or package. But this 2533 * should get us good page reuse locality in heavy mixed loads. 2534 * 2535 * (may be executed before the APs are started, so other GDs might 2536 * not exist!) 2537 */ 2538 if (page_req & VM_ALLOC_CPU_SPEC) 2539 cpuid_local = VM_ALLOC_GETCPU(page_req); 2540 else 2541 cpuid_local = mycpu->gd_cpuid; 2542 2543 pg_color = vm_get_pg_color(cpuid_local, object, pindex); 2544 2545 KKASSERT(page_req & 2546 (VM_ALLOC_NORMAL|VM_ALLOC_QUICK| 2547 VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM)); 2548 2549 /* 2550 * Certain system threads (pageout daemon, buf_daemon's) are 2551 * allowed to eat deeper into the free page list. 2552 */ 2553 if (curthread->td_flags & TDF_SYSTHREAD) 2554 page_req |= VM_ALLOC_SYSTEM; 2555 2556 /* 2557 * Impose various limitations. Note that the v_free_reserved test 2558 * must match the opposite of vm_page_count_target() to avoid 2559 * livelocks, be careful. 2560 */ 2561 loop: 2562 gd = mycpu; 2563 if (gd->gd_vmstats.v_free_count >= gd->gd_vmstats.v_free_reserved || 2564 ((page_req & VM_ALLOC_INTERRUPT) && 2565 gd->gd_vmstats.v_free_count > 0) || 2566 ((page_req & VM_ALLOC_SYSTEM) && 2567 gd->gd_vmstats.v_cache_count == 0 && 2568 gd->gd_vmstats.v_free_count > 2569 gd->gd_vmstats.v_interrupt_free_min) 2570 ) { 2571 /* 2572 * The free queue has sufficient free pages to take one out. 2573 * 2574 * However, if the free queue is strained the scan may widen 2575 * to the entire queue and cause a great deal of SMP 2576 * contention, so we use a double-queue-scan if we can 2577 * to avoid this. 2578 */ 2579 if (page_req & VM_ALLOC_NORMAL) { 2580 m = vm_page_select_free_or_cache(pg_color, &fromcache); 2581 if (m && fromcache) 2582 goto found_cache; 2583 } else { 2584 m = vm_page_select_free(pg_color); 2585 } 2586 } else if (page_req & VM_ALLOC_NORMAL) { 2587 /* 2588 * Allocatable from the cache (non-interrupt only). On 2589 * success, we must free the page and try again, thus 2590 * ensuring that vmstats.v_*_free_min counters are replenished. 2591 */ 2592 #ifdef INVARIANTS 2593 if (curthread->td_preempted) { 2594 kprintf("vm_page_alloc(): warning, attempt to allocate" 2595 " cache page from preempting interrupt\n"); 2596 m = NULL; 2597 } else { 2598 m = vm_page_select_cache(pg_color); 2599 } 2600 #else 2601 m = vm_page_select_cache(pg_color); 2602 #endif 2603 /* 2604 * On success move the page into the free queue and loop. 
2605 * 2606 * Only do this if we can safely acquire the vm_object lock, 2607 * because this is effectively a random page and the caller 2608 * might be holding the lock shared, we don't want to 2609 * deadlock. 2610 */ 2611 if (m != NULL) { 2612 found_cache: 2613 KASSERT(m->dirty == 0, 2614 ("Found dirty cache page %p", m)); 2615 if ((obj = m->object) != NULL) { 2616 if (vm_object_hold_try(obj)) { 2617 vm_page_protect(m, VM_PROT_NONE); 2618 vm_page_free(m); 2619 /* m->object NULL here */ 2620 vm_object_drop(obj); 2621 } else { 2622 vm_page_deactivate(m); 2623 vm_page_wakeup(m); 2624 } 2625 } else { 2626 vm_page_protect(m, VM_PROT_NONE); 2627 vm_page_free(m); 2628 } 2629 goto loop; 2630 } 2631 2632 /* 2633 * On failure return NULL 2634 */ 2635 atomic_add_int(&vm_pageout_deficit, 1); 2636 pagedaemon_wakeup(); 2637 return (NULL); 2638 } else { 2639 /* 2640 * No pages available, wakeup the pageout daemon and give up. 2641 */ 2642 atomic_add_int(&vm_pageout_deficit, 1); 2643 pagedaemon_wakeup(); 2644 return (NULL); 2645 } 2646 2647 /* 2648 * v_free_count can race so loop if we don't find the expected 2649 * page. 2650 */ 2651 if (m == NULL) { 2652 vmstats_rollup(); 2653 goto loop; 2654 } 2655 2656 /* 2657 * Good page found. The page has already been busied for us and 2658 * removed from its queues. 2659 */ 2660 KASSERT(m->dirty == 0, 2661 ("vm_page_alloc: free/cache page %p was dirty", m)); 2662 KKASSERT(m->queue == PQ_NONE); 2663 2664 #if 0 2665 done: 2666 #endif 2667 /* 2668 * Initialize the structure, inheriting some flags but clearing 2669 * all the rest. The page has already been busied for us. 2670 */ 2671 vm_page_flag_clear(m, ~PG_KEEP_NEWPAGE_MASK); 2672 2673 KKASSERT(m->wire_count == 0); 2674 KKASSERT((m->busy_count & PBUSY_MASK) == 0); 2675 m->act_count = 0; 2676 m->valid = 0; 2677 2678 /* 2679 * Caller must be holding the object lock (asserted by 2680 * vm_page_insert()). 2681 * 2682 * NOTE: Inserting a page here does not insert it into any pmaps 2683 * (which could cause us to block allocating memory). 2684 * 2685 * NOTE: If no object an unassociated page is allocated, m->pindex 2686 * can be used by the caller for any purpose. 2687 */ 2688 if (object) { 2689 if (vm_page_insert(m, object, pindex) == FALSE) { 2690 vm_page_free(m); 2691 if ((page_req & VM_ALLOC_NULL_OK) == 0) 2692 panic("PAGE RACE %p[%ld]/%p", 2693 object, (long)pindex, m); 2694 m = NULL; 2695 } 2696 } else { 2697 m->pindex = pindex; 2698 } 2699 2700 /* 2701 * Don't wakeup too often - wakeup the pageout daemon when 2702 * we would be nearly out of memory. 2703 */ 2704 pagedaemon_wakeup(); 2705 2706 /* 2707 * A BUSY page is returned. 2708 */ 2709 return (m); 2710 } 2711 2712 /* 2713 * Returns number of pages available in our DMA memory reserve 2714 * (adjusted with vm.dma_reserved=<value>m in /boot/loader.conf) 2715 */ 2716 vm_size_t 2717 vm_contig_avail_pages(void) 2718 { 2719 alist_blk_t blk; 2720 alist_blk_t count; 2721 alist_blk_t bfree; 2722 spin_lock(&vm_contig_spin); 2723 bfree = alist_free_info(&vm_contig_alist, &blk, &count); 2724 spin_unlock(&vm_contig_spin); 2725 2726 return bfree; 2727 } 2728 2729 /* 2730 * Attempt to allocate contiguous physical memory with the specified 2731 * requirements. 
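 *
 * A minimal sketch (hypothetical 64KB DMA buffer anywhere in the low
 * 4GB, default page alignment and no boundary restriction):
 *
 *	m = vm_page_alloc_contig(0, 0xFFFFFFFFUL, PAGE_SIZE, 0,
 *				 64 * 1024, VM_MEMATTR_DEFAULT);
 *	if (m == NULL)
 *		... the DMA reserve could not satisfy the request ...
 *
 * Per vm_page_free_contig()'s contract the pages come back wired and
 * not busy; release them with vm_page_free_contig(m, 64 * 1024).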
2732 */ 2733 vm_page_t 2734 vm_page_alloc_contig(vm_paddr_t low, vm_paddr_t high, 2735 unsigned long alignment, unsigned long boundary, 2736 unsigned long size, vm_memattr_t memattr) 2737 { 2738 alist_blk_t blk; 2739 vm_page_t m; 2740 vm_pindex_t i; 2741 #if 0 2742 static vm_pindex_t contig_rover; 2743 #endif 2744 2745 alignment >>= PAGE_SHIFT; 2746 if (alignment == 0) 2747 alignment = 1; 2748 boundary >>= PAGE_SHIFT; 2749 if (boundary == 0) 2750 boundary = 1; 2751 size = (size + PAGE_MASK) >> PAGE_SHIFT; 2752 2753 #if 0 2754 /* 2755 * Disabled temporarily until we find a solution for DRM (a flag 2756 * to always use the free space reserve, for performance). 2757 */ 2758 if (high == BUS_SPACE_MAXADDR && alignment <= PAGE_SIZE && 2759 boundary <= PAGE_SIZE && size == 1 && 2760 memattr == VM_MEMATTR_DEFAULT) { 2761 /* 2762 * Any page will work, use vm_page_alloc() 2763 * (e.g. when used from kmem_alloc_attr()) 2764 */ 2765 m = vm_page_alloc(NULL, (contig_rover++) & 0x7FFFFFFF, 2766 VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM | 2767 VM_ALLOC_INTERRUPT); 2768 m->valid = VM_PAGE_BITS_ALL; 2769 vm_page_wire(m); 2770 vm_page_wakeup(m); 2771 } else 2772 #endif 2773 { 2774 /* 2775 * Use the low-memory dma reserve 2776 */ 2777 spin_lock(&vm_contig_spin); 2778 blk = alist_alloc(&vm_contig_alist, 0, size); 2779 if (blk == ALIST_BLOCK_NONE) { 2780 spin_unlock(&vm_contig_spin); 2781 if (bootverbose) { 2782 kprintf("vm_page_alloc_contig: %ldk nospace\n", 2783 (size << PAGE_SHIFT) / 1024); 2784 print_backtrace(5); 2785 } 2786 return(NULL); 2787 } 2788 if (high && ((vm_paddr_t)(blk + size) << PAGE_SHIFT) > high) { 2789 alist_free(&vm_contig_alist, blk, size); 2790 spin_unlock(&vm_contig_spin); 2791 if (bootverbose) { 2792 kprintf("vm_page_alloc_contig: %ldk high " 2793 "%016jx failed\n", 2794 (size << PAGE_SHIFT) / 1024, 2795 (intmax_t)high); 2796 } 2797 return(NULL); 2798 } 2799 spin_unlock(&vm_contig_spin); 2800 2801 /* 2802 * Base vm_page_t of range 2803 */ 2804 m = PHYS_TO_VM_PAGE((vm_paddr_t)blk << PAGE_SHIFT); 2805 } 2806 if (vm_contig_verbose) { 2807 kprintf("vm_page_alloc_contig: %016jx/%ldk " 2808 "(%016jx-%016jx al=%lu bo=%lu pgs=%lu attr=%d\n", 2809 (intmax_t)m->phys_addr, 2810 (size << PAGE_SHIFT) / 1024, 2811 low, high, alignment, boundary, size, memattr); 2812 } 2813 if (memattr != VM_MEMATTR_DEFAULT) { 2814 for (i = 0; i < size; ++i) { 2815 KKASSERT(m[i].flags & PG_FICTITIOUS); 2816 pmap_page_set_memattr(&m[i], memattr); 2817 } 2818 } 2819 return m; 2820 } 2821 2822 /* 2823 * Free contiguously allocated pages. The pages will be wired but not busy. 2824 * When freeing to the alist we leave them wired and not busy. 2825 */ 2826 void 2827 vm_page_free_contig(vm_page_t m, unsigned long size) 2828 { 2829 vm_paddr_t pa = VM_PAGE_TO_PHYS(m); 2830 vm_pindex_t start = pa >> PAGE_SHIFT; 2831 vm_pindex_t pages = (size + PAGE_MASK) >> PAGE_SHIFT; 2832 2833 if (vm_contig_verbose) { 2834 kprintf("vm_page_free_contig: %016jx/%ldk\n", 2835 (intmax_t)pa, size / 1024); 2836 } 2837 if (pa < vm_low_phys_reserved) { 2838 /* 2839 * Just assert check the first page for convenience. 
2840 */ 2841 KKASSERT(m->wire_count == 1); 2842 KKASSERT(m->flags & PG_FICTITIOUS); 2843 KKASSERT(pa + size <= vm_low_phys_reserved); 2844 spin_lock(&vm_contig_spin); 2845 alist_free(&vm_contig_alist, start, pages); 2846 spin_unlock(&vm_contig_spin); 2847 } else { 2848 while (pages) { 2849 /* XXX FUTURE, maybe (pair with vm_pg_contig_alloc()) */ 2850 /*vm_page_flag_clear(m, PG_FICTITIOUS | PG_UNQUEUED);*/ 2851 vm_page_busy_wait(m, FALSE, "cpgfr"); 2852 vm_page_unwire(m, 0); 2853 vm_page_free(m); 2854 --pages; 2855 ++m; 2856 } 2857 2858 } 2859 } 2860 2861 2862 /* 2863 * Wait for sufficient free memory for nominal heavy memory use kernel 2864 * operations. 2865 * 2866 * WARNING! Be sure never to call this in any vm_pageout code path, which 2867 * will trivially deadlock the system. 2868 */ 2869 void 2870 vm_wait_nominal(void) 2871 { 2872 while (vm_page_count_min(0)) 2873 vm_wait(0); 2874 } 2875 2876 /* 2877 * Test if vm_wait_nominal() would block. 2878 */ 2879 int 2880 vm_test_nominal(void) 2881 { 2882 if (vm_page_count_min(0)) 2883 return(1); 2884 return(0); 2885 } 2886 2887 /* 2888 * Block until free pages are available for allocation, called in various 2889 * places before memory allocations. 2890 * 2891 * The caller may loop if vm_page_count_min() == FALSE so we cannot be 2892 * more generous then that. 2893 */ 2894 void 2895 vm_wait(int timo) 2896 { 2897 /* 2898 * never wait forever 2899 */ 2900 if (timo == 0) 2901 timo = hz; 2902 lwkt_gettoken(&vm_token); 2903 2904 if (curthread == pagethread || 2905 curthread == emergpager) { 2906 /* 2907 * The pageout daemon itself needs pages, this is bad. 2908 */ 2909 if (vm_page_count_min(0)) { 2910 vm_pageout_pages_needed = 1; 2911 tsleep(&vm_pageout_pages_needed, 0, "VMWait", timo); 2912 } 2913 } else { 2914 /* 2915 * Wakeup the pageout daemon if necessary and wait. 2916 * 2917 * Do not wait indefinitely for the target to be reached, 2918 * as load might prevent it from being reached any time soon. 2919 * But wait a little to try to slow down page allocations 2920 * and to give more important threads (the pagedaemon) 2921 * allocation priority. 2922 */ 2923 if (vm_page_count_target()) { 2924 if (vm_pages_needed <= 1) { 2925 ++vm_pages_needed; 2926 wakeup(&vm_pages_needed); 2927 } 2928 ++vm_pages_waiting; /* SMP race ok */ 2929 tsleep(&vmstats.v_free_count, 0, "vmwait", timo); 2930 } 2931 } 2932 lwkt_reltoken(&vm_token); 2933 } 2934 2935 /* 2936 * Block until free pages are available for allocation 2937 * 2938 * Called only from vm_fault so that processes page faulting can be 2939 * easily tracked. 2940 */ 2941 void 2942 vm_wait_pfault(void) 2943 { 2944 /* 2945 * Wakeup the pageout daemon if necessary and wait. 2946 * 2947 * Do not wait indefinitely for the target to be reached, 2948 * as load might prevent it from being reached any time soon. 2949 * But wait a little to try to slow down page allocations 2950 * and to give more important threads (the pagedaemon) 2951 * allocation priority. 2952 */ 2953 if (vm_page_count_min(0)) { 2954 lwkt_gettoken(&vm_token); 2955 while (vm_page_count_severe()) { 2956 if (vm_page_count_target()) { 2957 thread_t td; 2958 2959 if (vm_pages_needed <= 1) { 2960 ++vm_pages_needed; 2961 wakeup(&vm_pages_needed); 2962 } 2963 ++vm_pages_waiting; /* SMP race ok */ 2964 tsleep(&vmstats.v_free_count, 0, "pfault", hz); 2965 2966 /* 2967 * Do not stay stuck in the loop if the system is trying 2968 * to kill the process. 
2969 */ 2970 td = curthread; 2971 if (td->td_proc && (td->td_proc->p_flags & P_LOWMEMKILL)) 2972 break; 2973 } 2974 } 2975 lwkt_reltoken(&vm_token); 2976 } 2977 } 2978 2979 /* 2980 * Put the specified page on the active list (if appropriate). Ensure 2981 * that act_count is at least ACT_INIT but do not otherwise mess with it. 2982 * 2983 * The caller should be holding the page busied ? XXX 2984 * This routine may not block. 2985 * 2986 * It is ok if the page is wired (so buffer cache operations don't have 2987 * to mess with the page queues). 2988 */ 2989 void 2990 vm_page_activate(vm_page_t m) 2991 { 2992 u_short oqueue; 2993 2994 /* 2995 * If already active or inappropriate, just set act_count and 2996 * return. We don't have to spin-lock the page. 2997 */ 2998 if (m->queue - m->pc == PQ_ACTIVE || 2999 (m->flags & (PG_FICTITIOUS | PG_UNQUEUED))) { 3000 if (m->act_count < ACT_INIT) 3001 m->act_count = ACT_INIT; 3002 return; 3003 } 3004 3005 vm_page_spin_lock(m); 3006 if (m->queue - m->pc != PQ_ACTIVE && 3007 (m->flags & (PG_FICTITIOUS | PG_UNQUEUED)) == 0) { 3008 _vm_page_queue_spin_lock(m); 3009 oqueue = _vm_page_rem_queue_spinlocked(m); 3010 /* page is left spinlocked, queue is unlocked */ 3011 3012 if (oqueue == PQ_CACHE) 3013 mycpu->gd_cnt.v_reactivated++; 3014 if (m->act_count < ACT_INIT) 3015 m->act_count = ACT_INIT; 3016 _vm_page_add_queue_spinlocked(m, PQ_ACTIVE + m->pc, 0); 3017 _vm_page_and_queue_spin_unlock(m); 3018 if (oqueue == PQ_CACHE || oqueue == PQ_FREE) 3019 pagedaemon_wakeup(); 3020 } else { 3021 if (m->act_count < ACT_INIT) 3022 m->act_count = ACT_INIT; 3023 vm_page_spin_unlock(m); 3024 } 3025 } 3026 3027 void 3028 vm_page_soft_activate(vm_page_t m) 3029 { 3030 if (m->queue - m->pc == PQ_ACTIVE || 3031 (m->flags & (PG_FICTITIOUS | PG_UNQUEUED))) { 3032 if (m->act_count < ACT_INIT) 3033 m->act_count = ACT_INIT; 3034 } else { 3035 vm_page_activate(m); 3036 } 3037 } 3038 3039 /* 3040 * Helper routine for vm_page_free_toq() and vm_page_cache(). This 3041 * routine is called when a page has been added to the cache or free 3042 * queues. 3043 * 3044 * This routine may not block. 3045 */ 3046 static __inline void 3047 vm_page_free_wakeup(void) 3048 { 3049 globaldata_t gd = mycpu; 3050 3051 /* 3052 * If the pageout daemon itself needs pages, then tell it that 3053 * there are some free. 3054 */ 3055 if (vm_pageout_pages_needed && 3056 gd->gd_vmstats.v_cache_count + gd->gd_vmstats.v_free_count >= 3057 gd->gd_vmstats.v_pageout_free_min 3058 ) { 3059 vm_pageout_pages_needed = 0; 3060 wakeup(&vm_pageout_pages_needed); 3061 } 3062 3063 /* 3064 * Wakeup processes that are waiting on memory. 3065 * 3066 * Generally speaking we want to wakeup stuck processes as soon as 3067 * possible. !vm_page_count_min(0) is the absolute minimum point 3068 * where we can do this. Wait a bit longer to reduce degenerate 3069 * re-blocking (vm_page_free_hysteresis). The target check is just 3070 * to make sure the min-check w/hysteresis does not exceed the 3071 * normal target. 3072 */ 3073 if (vm_pages_waiting) { 3074 if (!vm_page_count_min(vm_page_free_hysteresis) || 3075 !vm_page_count_target()) { 3076 vm_pages_waiting = 0; 3077 wakeup(&vmstats.v_free_count); 3078 ++mycpu->gd_cnt.v_ppwakeups; 3079 } 3080 #if 0 3081 if (!vm_page_count_target()) { 3082 /* 3083 * Plenty of pages are free, wakeup everyone. 3084 */ 3085 vm_pages_waiting = 0; 3086 wakeup(&vmstats.v_free_count); 3087 ++mycpu->gd_cnt.v_ppwakeups; 3088 } else if (!vm_page_count_min(0)) { 3089 /* 3090 * Some pages are free, wakeup someone. 
3091 */ 3092 int wcount = vm_pages_waiting; 3093 if (wcount > 0) 3094 --wcount; 3095 vm_pages_waiting = wcount; 3096 wakeup_one(&vmstats.v_free_count); 3097 ++mycpu->gd_cnt.v_ppwakeups; 3098 } 3099 #endif 3100 } 3101 } 3102 3103 /* 3104 * Returns the given page to the PQ_FREE or PQ_HOLD list and disassociates 3105 * it from its VM object. 3106 * 3107 * The vm_page must be BUSY on entry. BUSY will be released on 3108 * return (the page will have been freed). 3109 */ 3110 void 3111 vm_page_free_toq(vm_page_t m) 3112 { 3113 /* 3114 * The page must not be mapped when freed, but we may have to call 3115 * pmap_mapped_sync() to validate this. 3116 */ 3117 mycpu->gd_cnt.v_tfree++; 3118 if (m->flags & (PG_MAPPED | PG_WRITEABLE)) 3119 pmap_mapped_sync(m); 3120 KKASSERT((m->flags & PG_MAPPED) == 0); 3121 KKASSERT(m->busy_count & PBUSY_LOCKED); 3122 3123 if ((m->busy_count & PBUSY_MASK) || ((m->queue - m->pc) == PQ_FREE)) { 3124 kprintf("vm_page_free: pindex(%lu), busy %08x, " 3125 "hold(%d)\n", 3126 (u_long)m->pindex, m->busy_count, m->hold_count); 3127 if ((m->queue - m->pc) == PQ_FREE) 3128 panic("vm_page_free: freeing free page"); 3129 else 3130 panic("vm_page_free: freeing busy page"); 3131 } 3132 3133 /* 3134 * Remove from object, spinlock the page and its queues and 3135 * remove from any queue. No queue spinlock will be held 3136 * after this section (because the page was removed from any 3137 * queue). 3138 */ 3139 vm_page_remove(m); 3140 3141 /* 3142 * No further management of fictitious pages occurs beyond object 3143 * and queue removal. 3144 */ 3145 if ((m->flags & PG_FICTITIOUS) != 0) { 3146 KKASSERT(m->queue == PQ_NONE); 3147 vm_page_wakeup(m); 3148 return; 3149 } 3150 vm_page_and_queue_spin_lock(m); 3151 _vm_page_rem_queue_spinlocked(m); 3152 3153 m->valid = 0; 3154 vm_page_undirty(m); 3155 3156 if (m->wire_count != 0) { 3157 if (m->wire_count > 1) { 3158 panic( 3159 "vm_page_free: invalid wire count (%d), pindex: 0x%lx", 3160 m->wire_count, (long)m->pindex); 3161 } 3162 panic("vm_page_free: freeing wired page"); 3163 } 3164 3165 if (!MD_PAGE_FREEABLE(m)) 3166 panic("vm_page_free: page %p is still mapped!", m); 3167 3168 /* 3169 * Clear the PG_NEED_COMMIT and the PG_UNQUEUED flags. The 3170 * page returns to normal operation and will be placed in 3171 * the PQ_HOLD or PQ_FREE queue. 3172 */ 3173 vm_page_flag_clear(m, PG_NEED_COMMIT | PG_UNQUEUED); 3174 3175 if (m->hold_count != 0) { 3176 _vm_page_add_queue_spinlocked(m, PQ_HOLD + m->pc, 0); 3177 } else { 3178 _vm_page_add_queue_spinlocked(m, PQ_FREE + m->pc, 1); 3179 } 3180 3181 /* 3182 * This sequence allows us to clear BUSY while still holding 3183 * its spin lock, which reduces contention vs allocators. We 3184 * must not leave the queue locked or _vm_page_wakeup() may 3185 * deadlock. 3186 */ 3187 _vm_page_queue_spin_unlock(m); 3188 if (_vm_page_wakeup(m)) { 3189 vm_page_spin_unlock(m); 3190 wakeup(m); 3191 } else { 3192 vm_page_spin_unlock(m); 3193 } 3194 vm_page_free_wakeup(); 3195 } 3196 3197 /* 3198 * Mark this page as wired down by yet another map. We do not adjust the 3199 * queue the page is on, it will be checked for wiring as-needed. 3200 * 3201 * This function has no effect on fictitious pages. 3202 * 3203 * Caller must be holding the page busy. 
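 *
 * A minimal wire/unwire sketch (hypothetical caller; both calls are
 * made with the page held busy, per the requirements here and in
 * vm_page_unwire()):
 *
 *	vm_page_wire(m);
 *	vm_page_wakeup(m);
 *	... the page cannot be freed while wired ...
 *	vm_page_busy_wait(m, FALSE, "exwire");
 *	vm_page_unwire(m, 0);
 *	vm_page_wakeup(m);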
3204 */ 3205 void 3206 vm_page_wire(vm_page_t m) 3207 { 3208 KKASSERT(m->busy_count & PBUSY_LOCKED); 3209 if ((m->flags & PG_FICTITIOUS) == 0) { 3210 if (atomic_fetchadd_int(&m->wire_count, 1) == 0) { 3211 atomic_add_long(&mycpu->gd_vmstats_adj.v_wire_count, 1); 3212 } 3213 KASSERT(m->wire_count != 0, 3214 ("vm_page_wire: wire_count overflow m=%p", m)); 3215 } 3216 } 3217 3218 /* 3219 * Release one wiring of this page, potentially enabling it to be paged again. 3220 * 3221 * Note that wired pages are no longer unconditionally removed from the 3222 * paging queues, so the page may already be on a queue. Move the page 3223 * to the desired queue if necessary. 3224 * 3225 * Many pages placed on the inactive queue should actually go 3226 * into the cache, but it is difficult to figure out which. What 3227 * we do instead, if the inactive target is well met, is to put 3228 * clean pages at the head of the inactive queue instead of the tail. 3229 * This will cause them to be moved to the cache more quickly and 3230 * if not actively re-referenced, freed more quickly. If we just 3231 * stick these pages at the end of the inactive queue, heavy filesystem 3232 * meta-data accesses can cause an unnecessary paging load on memory bound 3233 * processes. This optimization causes one-time-use metadata to be 3234 * reused more quickly. 3235 * 3236 * Pages marked PG_NEED_COMMIT are always activated and never placed on 3237 * the inactive queue. This helps the pageout daemon determine memory 3238 * pressure and act on out-of-memory situations more quickly. 3239 * 3240 * BUT, if we are in a low-memory situation we have no choice but to 3241 * put clean pages on the cache queue. 3242 * 3243 * A number of routines use vm_page_unwire() to guarantee that the page 3244 * will go into either the inactive or active queues, and will NEVER 3245 * be placed in the cache - for example, just after dirtying a page. 3246 * dirty pages in the cache are not allowed. 3247 * 3248 * PG_FICTITIOUS or PG_UNQUEUED pages are never moved to any queue, and 3249 * the wire_count will not be adjusted in any way for a PG_FICTITIOUS 3250 * page. 3251 * 3252 * This routine may not block. 3253 */ 3254 void 3255 vm_page_unwire(vm_page_t m, int activate) 3256 { 3257 KKASSERT(m->busy_count & PBUSY_LOCKED); 3258 if (m->flags & PG_FICTITIOUS) { 3259 /* do nothing */ 3260 } else if ((int)m->wire_count <= 0) { 3261 panic("vm_page_unwire: invalid wire count: %d", m->wire_count); 3262 } else { 3263 if (atomic_fetchadd_int(&m->wire_count, -1) == 1) { 3264 atomic_add_long(&mycpu->gd_vmstats_adj.v_wire_count,-1); 3265 if (m->flags & PG_UNQUEUED) { 3266 ; 3267 } else if (activate || (m->flags & PG_NEED_COMMIT)) { 3268 vm_page_activate(m); 3269 } else { 3270 vm_page_deactivate(m); 3271 } 3272 } 3273 } 3274 } 3275 3276 /* 3277 * Move the specified page to the inactive queue. 3278 * 3279 * Normally athead is 0 resulting in LRU operation. athead is set 3280 * to 1 if we want this page to be 'as if it were placed in the cache', 3281 * except without unmapping it from the process address space. 3282 * 3283 * vm_page's spinlock must be held on entry and will remain held on return. 3284 * This routine may not block. The caller does not have to hold the page 3285 * busied but should have some sort of interlock on its validity. 3286 * 3287 * It is ok if the page is wired (so buffer cache operations don't have 3288 * to mess with the page queues). 
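 *
 * The usual calling pattern (mirrored by vm_page_deactivate() below):
 *
 *	vm_page_spin_lock(m);
 *	_vm_page_deactivate_locked(m, 0);
 *	vm_page_spin_unlock(m);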
3289 */ 3290 static void 3291 _vm_page_deactivate_locked(vm_page_t m, int athead) 3292 { 3293 u_short oqueue; 3294 3295 /* 3296 * Ignore if already inactive. 3297 */ 3298 if (m->queue - m->pc == PQ_INACTIVE || 3299 (m->flags & (PG_FICTITIOUS | PG_UNQUEUED))) { 3300 return; 3301 } 3302 3303 _vm_page_queue_spin_lock(m); 3304 oqueue = _vm_page_rem_queue_spinlocked(m); 3305 3306 if ((m->flags & (PG_FICTITIOUS | PG_UNQUEUED)) == 0) { 3307 if (oqueue == PQ_CACHE) 3308 mycpu->gd_cnt.v_reactivated++; 3309 vm_page_flag_clear(m, PG_WINATCFLS); 3310 _vm_page_add_queue_spinlocked(m, PQ_INACTIVE + m->pc, athead); 3311 if (athead == 0) { 3312 atomic_add_long( 3313 &vm_page_queues[PQ_INACTIVE + m->pc].adds, 1); 3314 } 3315 } 3316 /* NOTE: PQ_NONE if condition not taken */ 3317 _vm_page_queue_spin_unlock(m); 3318 /* leaves vm_page spinlocked */ 3319 } 3320 3321 /* 3322 * Attempt to deactivate a page. 3323 * 3324 * No requirements. We can pre-filter before getting the spinlock. 3325 * 3326 * It is ok if the page is wired (so buffer cache operations don't have 3327 * to mess with the page queues). 3328 */ 3329 void 3330 vm_page_deactivate(vm_page_t m) 3331 { 3332 if (m->queue - m->pc != PQ_INACTIVE && 3333 (m->flags & (PG_FICTITIOUS | PG_UNQUEUED)) == 0) { 3334 vm_page_spin_lock(m); 3335 _vm_page_deactivate_locked(m, 0); 3336 vm_page_spin_unlock(m); 3337 } 3338 } 3339 3340 void 3341 vm_page_deactivate_locked(vm_page_t m) 3342 { 3343 _vm_page_deactivate_locked(m, 0); 3344 } 3345 3346 /* 3347 * Attempt to move a busied page to PQ_CACHE, then unconditionally unbusy it. 3348 * 3349 * This function returns non-zero if it successfully moved the page to 3350 * PQ_CACHE. 3351 * 3352 * This function unconditionally unbusies the page on return. 3353 */ 3354 int 3355 vm_page_try_to_cache(vm_page_t m) 3356 { 3357 /* 3358 * Shortcut if we obviously cannot move the page, or if the 3359 * page is already on the cache queue, or it is ficitious. 3360 * 3361 * Never allow a wired page into the cache. 3362 */ 3363 if (m->dirty || m->hold_count || m->wire_count || 3364 m->queue - m->pc == PQ_CACHE || 3365 (m->flags & (PG_UNQUEUED | PG_NEED_COMMIT | PG_FICTITIOUS))) { 3366 vm_page_wakeup(m); 3367 return(0); 3368 } 3369 3370 /* 3371 * Page busied by us and no longer spinlocked. Dirty pages cannot 3372 * be moved to the cache, but can be deactivated. However, users 3373 * of this function want to move pages closer to the cache so we 3374 * only deactivate it if it is in PQ_ACTIVE. We do not re-deactivate. 3375 */ 3376 vm_page_test_dirty(m); 3377 if (m->dirty || (m->flags & PG_NEED_COMMIT)) { 3378 if (m->queue - m->pc == PQ_ACTIVE) 3379 vm_page_deactivate(m); 3380 vm_page_wakeup(m); 3381 return(0); 3382 } 3383 vm_page_cache(m); 3384 return(1); 3385 } 3386 3387 /* 3388 * Attempt to free the page. If we cannot free it, we do nothing. 3389 * 1 is returned on success, 0 on failure. 3390 * 3391 * The page can be in any state, including already being on the free 3392 * queue. Check to see if it really can be freed. Note that we disallow 3393 * this ad-hoc operation if the page is flagged PG_UNQUEUED. 3394 * 3395 * Caller provides an unlocked/non-busied page. 3396 * No requirements. 
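 *
 * A minimal sketch (hypothetical reclaim loop, 'freed' is illustrative):
 *
 *	if (vm_page_try_to_free(m))
 *		++freed;	/. page was busied, cleaned up and freed ./
 *	... otherwise the page was left alone (busy, dirty, wired, etc) ...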
3397 */ 3398 int 3399 vm_page_try_to_free(vm_page_t m) 3400 { 3401 if (vm_page_busy_try(m, TRUE)) 3402 return(0); 3403 3404 if (m->dirty || /* can't free if it is dirty */ 3405 m->hold_count || /* or held (XXX may be wrong) */ 3406 m->wire_count || /* or wired */ 3407 (m->flags & (PG_UNQUEUED | /* or unqueued */ 3408 PG_NEED_COMMIT | /* or needs a commit */ 3409 PG_FICTITIOUS)) || /* or is fictitious */ 3410 m->queue - m->pc == PQ_FREE || /* already on PQ_FREE */ 3411 m->queue - m->pc == PQ_HOLD) { /* already on PQ_HOLD */ 3412 vm_page_wakeup(m); 3413 return(0); 3414 } 3415 3416 /* 3417 * We can probably free the page. 3418 * 3419 * Page busied by us and no longer spinlocked. Dirty pages will 3420 * not be freed by this function. We have to re-test the 3421 * dirty bit after cleaning out the pmaps. 3422 */ 3423 vm_page_test_dirty(m); 3424 if (m->dirty || (m->flags & PG_NEED_COMMIT)) { 3425 vm_page_wakeup(m); 3426 return(0); 3427 } 3428 vm_page_protect(m, VM_PROT_NONE); 3429 if (m->dirty || (m->flags & PG_NEED_COMMIT)) { 3430 vm_page_wakeup(m); 3431 return(0); 3432 } 3433 vm_page_free(m); 3434 return(1); 3435 } 3436 3437 /* 3438 * vm_page_cache 3439 * 3440 * Put the specified page onto the page cache queue (if appropriate). 3441 * 3442 * The page must be busy, and this routine will release the busy and 3443 * possibly even free the page. 3444 */ 3445 void 3446 vm_page_cache(vm_page_t m) 3447 { 3448 /* 3449 * Not suitable for the cache 3450 */ 3451 if ((m->flags & (PG_UNQUEUED | PG_NEED_COMMIT | PG_FICTITIOUS)) || 3452 (m->busy_count & PBUSY_MASK) || 3453 m->wire_count || m->hold_count) { 3454 vm_page_wakeup(m); 3455 return; 3456 } 3457 3458 /* 3459 * Already in the cache (and thus not mapped) 3460 */ 3461 if ((m->queue - m->pc) == PQ_CACHE) { 3462 KKASSERT((m->flags & PG_MAPPED) == 0); 3463 vm_page_wakeup(m); 3464 return; 3465 } 3466 3467 #if 0 3468 /* 3469 * REMOVED - it is possible for dirty to get set at any time as 3470 * long as the page is still mapped and writeable. 3471 * 3472 * Caller is required to test m->dirty, but note that the act of 3473 * removing the page from its maps can cause it to become dirty 3474 * on an SMP system due to another cpu running in usermode. 3475 */ 3476 if (m->dirty) { 3477 panic("vm_page_cache: caching a dirty page, pindex: %ld", 3478 (long)m->pindex); 3479 } 3480 #endif 3481 3482 /* 3483 * Remove all pmaps and indicate that the page is not 3484 * writeable or mapped. Our vm_page_protect() call may 3485 * have blocked (especially w/ VM_PROT_NONE), so recheck 3486 * everything. 3487 */ 3488 if (m->flags & (PG_MAPPED | PG_WRITEABLE)) { 3489 vm_page_protect(m, VM_PROT_NONE); 3490 pmap_mapped_sync(m); 3491 } 3492 if ((m->flags & (PG_UNQUEUED | PG_MAPPED)) || 3493 (m->busy_count & PBUSY_MASK) || 3494 m->wire_count || m->hold_count) { 3495 vm_page_wakeup(m); 3496 } else if (m->dirty || (m->flags & PG_NEED_COMMIT)) { 3497 vm_page_deactivate(m); 3498 vm_page_wakeup(m); 3499 } else { 3500 _vm_page_and_queue_spin_lock(m); 3501 _vm_page_rem_queue_spinlocked(m); 3502 _vm_page_add_queue_spinlocked(m, PQ_CACHE + m->pc, 0); 3503 _vm_page_and_queue_spin_unlock(m); 3504 vm_page_wakeup(m); 3505 vm_page_free_wakeup(); 3506 } 3507 } 3508 3509 /* 3510 * vm_page_dontneed() 3511 * 3512 * Cache, deactivate, or do nothing as appropriate. This routine 3513 * is typically used by madvise() MADV_DONTNEED. 3514 * 3515 * Generally speaking we want to move the page into the cache so 3516 * it gets reused quickly. 
However, this can result in a silly syndrome 3517 * due to the page recycling too quickly. Small objects will not be 3518 * fully cached. On the other hand, if we move the page to the inactive 3519 * queue we wind up with a problem whereby very large objects 3520 * unnecessarily blow away our inactive and cache queues. 3521 * 3522 * The solution is to move the pages based on a fixed weighting. We 3523 * either leave them alone, deactivate them, or move them to the cache, 3524 * where moving them to the cache has the highest weighting. 3525 * By forcing some pages into other queues we eventually force the 3526 * system to balance the queues, potentially recovering other unrelated 3527 * space from active. The idea is to not force this to happen too 3528 * often. 3529 * 3530 * The page must be busied. 3531 */ 3532 void 3533 vm_page_dontneed(vm_page_t m) 3534 { 3535 static int dnweight; 3536 int dnw; 3537 int head; 3538 3539 dnw = ++dnweight; 3540 3541 /* 3542 * Occasionally leave the page alone 3543 */ 3544 if ((dnw & 0x01F0) == 0 || 3545 m->queue - m->pc == PQ_INACTIVE || 3546 m->queue - m->pc == PQ_CACHE 3547 ) { 3548 if (m->act_count >= ACT_INIT) 3549 --m->act_count; 3550 return; 3551 } 3552 3553 /* 3554 * If vm_page_dontneed() is inactivating a page, it must clear 3555 * the referenced flag; otherwise the pagedaemon will see references 3556 * on the page in the inactive queue and reactivate it. Until the 3557 * page can move to the cache queue, madvise's job is not done. 3558 */ 3559 vm_page_flag_clear(m, PG_REFERENCED); 3560 pmap_clear_reference(m); 3561 3562 if (m->dirty == 0) 3563 vm_page_test_dirty(m); 3564 3565 if (m->dirty || (dnw & 0x0070) == 0) { 3566 /* 3567 * Deactivate the page 3 times out of 32. 3568 */ 3569 head = 0; 3570 } else { 3571 /* 3572 * Cache the page 28 times out of every 32. Note that 3573 * the page is deactivated instead of cached, but placed 3574 * at the head of the queue instead of the tail. 3575 */ 3576 head = 1; 3577 } 3578 vm_page_spin_lock(m); 3579 _vm_page_deactivate_locked(m, head); 3580 vm_page_spin_unlock(m); 3581 } 3582 3583 /* 3584 * These routines manipulate the 'soft busy' count for a page. A soft busy 3585 * is almost like a hard BUSY except that it allows certain compatible 3586 * operations to occur on the page while it is busy. For example, a page 3587 * undergoing a write can still be mapped read-only. 3588 * 3589 * We also use soft-busy to quickly pmap_enter shared read-only pages 3590 * without having to hold the page locked. 3591 * 3592 * The soft-busy count can be > 1 in situations where multiple threads 3593 * are pmap_enter()ing the same page simultaneously, or when two buffer 3594 * cache buffers overlap the same page. 3595 * 3596 * The caller must hold the page BUSY when making these two calls. 3597 */ 3598 void 3599 vm_page_io_start(vm_page_t m) 3600 { 3601 uint32_t ocount; 3602 3603 ocount = atomic_fetchadd_int(&m->busy_count, 1); 3604 KKASSERT(ocount & PBUSY_LOCKED); 3605 } 3606 3607 void 3608 vm_page_io_finish(vm_page_t m) 3609 { 3610 uint32_t ocount; 3611 3612 ocount = atomic_fetchadd_int(&m->busy_count, -1); 3613 KKASSERT(ocount & PBUSY_MASK); 3614 #if 0 3615 if (((ocount - 1) & (PBUSY_LOCKED | PBUSY_MASK)) == 0) 3616 wakeup(m); 3617 #endif 3618 } 3619 3620 /* 3621 * Attempt to soft-busy a page. The page must not be PBUSY_LOCKED. 3622 * 3623 * We can't use fetchadd here because we might race a hard-busy and the 3624 * page freeing code asserts on a non-zero soft-busy count (even if only 3625 * temporary).
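 *
 * A minimal sketch (hypothetical caller that already has a pointer to
 * the page and tolerates failure):
 *
 *	if (vm_page_sbusy_try(m) == 0) {
 *		... read-only access ...
 *		vm_page_sbusy_drop(m);
 *	}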
3626 * 3627 * Returns 0 on success, non-zero on failure. 3628 */ 3629 int 3630 vm_page_sbusy_try(vm_page_t m) 3631 { 3632 uint32_t ocount; 3633 3634 for (;;) { 3635 ocount = m->busy_count; 3636 cpu_ccfence(); 3637 if (ocount & PBUSY_LOCKED) 3638 return 1; 3639 if (atomic_cmpset_int(&m->busy_count, ocount, ocount + 1)) 3640 break; 3641 } 3642 return 0; 3643 #if 0 3644 if (m->busy_count & PBUSY_LOCKED) 3645 return 1; 3646 ocount = atomic_fetchadd_int(&m->busy_count, 1); 3647 if (ocount & PBUSY_LOCKED) { 3648 vm_page_sbusy_drop(m); 3649 return 1; 3650 } 3651 return 0; 3652 #endif 3653 } 3654 3655 /* 3656 * Indicate that a clean VM page requires a filesystem commit and cannot 3657 * be reused. Used by tmpfs. 3658 */ 3659 void 3660 vm_page_need_commit(vm_page_t m) 3661 { 3662 vm_page_flag_set(m, PG_NEED_COMMIT); 3663 vm_object_set_writeable_dirty(m->object); 3664 } 3665 3666 void 3667 vm_page_clear_commit(vm_page_t m) 3668 { 3669 vm_page_flag_clear(m, PG_NEED_COMMIT); 3670 } 3671 3672 /* 3673 * Grab a page, blocking if it is busy and allocating a page if necessary. 3674 * A busy page is returned or NULL. The page may or may not be valid and 3675 * might not be on a queue (the caller is responsible for the disposition of 3676 * the page). 3677 * 3678 * If VM_ALLOC_ZERO is specified and the grab must allocate a new page, the 3679 * page will be zero'd and marked valid. 3680 * 3681 * If VM_ALLOC_FORCE_ZERO is specified the page will be zero'd and marked 3682 * valid even if it already exists. 3683 * 3684 * If VM_ALLOC_RETRY is specified this routine will never return NULL. Also 3685 * note that VM_ALLOC_NORMAL must be specified if VM_ALLOC_RETRY is specified. 3686 * VM_ALLOC_NULL_OK is implied when VM_ALLOC_RETRY is specified. 3687 * 3688 * This routine may block, but if VM_ALLOC_RETRY is not set then NULL is 3689 * always returned if we had blocked. 3690 * 3691 * This routine may not be called from an interrupt. 3692 * 3693 * No other requirements. 3694 */ 3695 vm_page_t 3696 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags) 3697 { 3698 vm_page_t m; 3699 int error; 3700 int shared = 1; 3701 3702 KKASSERT(allocflags & 3703 (VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM)); 3704 vm_object_hold_shared(object); 3705 for (;;) { 3706 m = vm_page_lookup_busy_try(object, pindex, TRUE, &error); 3707 if (error) { 3708 vm_page_sleep_busy(m, TRUE, "pgrbwt"); 3709 if ((allocflags & VM_ALLOC_RETRY) == 0) { 3710 m = NULL; 3711 break; 3712 } 3713 /* retry */ 3714 } else if (m == NULL) { 3715 if (shared) { 3716 vm_object_upgrade(object); 3717 shared = 0; 3718 } 3719 if (allocflags & VM_ALLOC_RETRY) 3720 allocflags |= VM_ALLOC_NULL_OK; 3721 m = vm_page_alloc(object, pindex, 3722 allocflags & ~VM_ALLOC_RETRY); 3723 if (m) 3724 break; 3725 vm_wait(0); 3726 if ((allocflags & VM_ALLOC_RETRY) == 0) 3727 goto failed; 3728 } else { 3729 /* m found */ 3730 break; 3731 } 3732 } 3733 3734 /* 3735 * If VM_ALLOC_ZERO an invalid page will be zero'd and set valid. 3736 * 3737 * If VM_ALLOC_FORCE_ZERO the page is unconditionally zero'd and set 3738 * valid even if already valid. 3739 * 3740 * NOTE! We have removed all of the PG_ZERO optimizations and also 3741 * removed the idle zeroing code. These optimizations actually 3742 * slow things down on modern cpus because the zerod area is 3743 * likely uncached, placing a memory-access burden on the 3744 * accesors taking the fault. 
3745 * 3746 * By always zeroing the page in-line with the fault, no 3747 * dynamic ram reads are needed and the caches are hot, ready 3748 * for userland to access the memory. 3749 */ 3750 if (m->valid == 0) { 3751 if (allocflags & (VM_ALLOC_ZERO | VM_ALLOC_FORCE_ZERO)) { 3752 pmap_zero_page(VM_PAGE_TO_PHYS(m)); 3753 m->valid = VM_PAGE_BITS_ALL; 3754 } 3755 } else if (allocflags & VM_ALLOC_FORCE_ZERO) { 3756 pmap_zero_page(VM_PAGE_TO_PHYS(m)); 3757 m->valid = VM_PAGE_BITS_ALL; 3758 } 3759 failed: 3760 vm_object_drop(object); 3761 return(m); 3762 } 3763 3764 /* 3765 * Mapping function for valid bits or for dirty bits in 3766 * a page. May not block. 3767 * 3768 * Inputs are required to range within a page. 3769 * 3770 * No requirements. 3771 * Non blocking. 3772 */ 3773 int 3774 vm_page_bits(int base, int size) 3775 { 3776 int first_bit; 3777 int last_bit; 3778 3779 KASSERT( 3780 base + size <= PAGE_SIZE, 3781 ("vm_page_bits: illegal base/size %d/%d", base, size) 3782 ); 3783 3784 if (size == 0) /* handle degenerate case */ 3785 return(0); 3786 3787 first_bit = base >> DEV_BSHIFT; 3788 last_bit = (base + size - 1) >> DEV_BSHIFT; 3789 3790 return ((2 << last_bit) - (1 << first_bit)); 3791 } 3792 3793 /* 3794 * Sets portions of a page valid and clean. The arguments are expected 3795 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 3796 * of any partial chunks touched by the range. The invalid portion of 3797 * such chunks will be zero'd. 3798 * 3799 * NOTE: When truncating a buffer vnode_pager_setsize() will automatically 3800 * align base to DEV_BSIZE so as not to mark clean a partially 3801 * truncated device block. Otherwise the dirty page status might be 3802 * lost. 3803 * 3804 * This routine may not block. 3805 * 3806 * (base + size) must be less then or equal to PAGE_SIZE. 3807 */ 3808 static void 3809 _vm_page_zero_valid(vm_page_t m, int base, int size) 3810 { 3811 int frag; 3812 int endoff; 3813 3814 if (size == 0) /* handle degenerate case */ 3815 return; 3816 3817 /* 3818 * If the base is not DEV_BSIZE aligned and the valid 3819 * bit is clear, we have to zero out a portion of the 3820 * first block. 3821 */ 3822 3823 if ((frag = rounddown2(base, DEV_BSIZE)) != base && 3824 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0 3825 ) { 3826 pmap_zero_page_area( 3827 VM_PAGE_TO_PHYS(m), 3828 frag, 3829 base - frag 3830 ); 3831 } 3832 3833 /* 3834 * If the ending offset is not DEV_BSIZE aligned and the 3835 * valid bit is clear, we have to zero out a portion of 3836 * the last block. 3837 */ 3838 3839 endoff = base + size; 3840 3841 if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff && 3842 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0 3843 ) { 3844 pmap_zero_page_area( 3845 VM_PAGE_TO_PHYS(m), 3846 endoff, 3847 DEV_BSIZE - (endoff & (DEV_BSIZE - 1)) 3848 ); 3849 } 3850 } 3851 3852 /* 3853 * Set valid, clear dirty bits. If validating the entire 3854 * page we can safely clear the pmap modify bit. We also 3855 * use this opportunity to clear the PG_NOSYNC flag. If a process 3856 * takes a write fault on a MAP_NOSYNC memory area the flag will 3857 * be set again. 3858 * 3859 * We set valid bits inclusive of any overlap, but we can only 3860 * clear dirty bits for DEV_BSIZE chunks that are fully within 3861 * the range. 3862 * 3863 * Page must be busied? 3864 * No other requirements. 
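 *
 * For example, after filling in only the first 512 bytes of an
 * otherwise-invalid page (hypothetical caller, page busied, assuming
 * the usual DEV_BSIZE of 512):
 *
 *	vm_page_set_valid(m, 0, 512);
 *
 * marks just the first DEV_BSIZE chunk valid and leaves the remainder
 * of the page invalid.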
3865 */ 3866 void 3867 vm_page_set_valid(vm_page_t m, int base, int size) 3868 { 3869 _vm_page_zero_valid(m, base, size); 3870 m->valid |= vm_page_bits(base, size); 3871 } 3872 3873 3874 /* 3875 * Set valid bits and clear dirty bits. 3876 * 3877 * Page must be busied by caller. 3878 * 3879 * NOTE: This function does not clear the pmap modified bit. 3880 * Also note that e.g. NFS may use a byte-granular base 3881 * and size. 3882 * 3883 * No other requirements. 3884 */ 3885 void 3886 vm_page_set_validclean(vm_page_t m, int base, int size) 3887 { 3888 int pagebits; 3889 3890 _vm_page_zero_valid(m, base, size); 3891 pagebits = vm_page_bits(base, size); 3892 m->valid |= pagebits; 3893 m->dirty &= ~pagebits; 3894 if (base == 0 && size == PAGE_SIZE) { 3895 /*pmap_clear_modify(m);*/ 3896 vm_page_flag_clear(m, PG_NOSYNC); 3897 } 3898 } 3899 3900 /* 3901 * Set valid & dirty. Used by buwrite() 3902 * 3903 * Page must be busied by caller. 3904 */ 3905 void 3906 vm_page_set_validdirty(vm_page_t m, int base, int size) 3907 { 3908 int pagebits; 3909 3910 pagebits = vm_page_bits(base, size); 3911 m->valid |= pagebits; 3912 m->dirty |= pagebits; 3913 if (m->object) 3914 vm_object_set_writeable_dirty(m->object); 3915 } 3916 3917 /* 3918 * Clear dirty bits. 3919 * 3920 * NOTE: This function does not clear the pmap modified bit. 3921 * Also note that e.g. NFS may use a byte-granular base 3922 * and size. 3923 * 3924 * Page must be busied? 3925 * No other requirements. 3926 */ 3927 void 3928 vm_page_clear_dirty(vm_page_t m, int base, int size) 3929 { 3930 m->dirty &= ~vm_page_bits(base, size); 3931 if (base == 0 && size == PAGE_SIZE) { 3932 /*pmap_clear_modify(m);*/ 3933 vm_page_flag_clear(m, PG_NOSYNC); 3934 } 3935 } 3936 3937 /* 3938 * Make the page all-dirty. 3939 * 3940 * Also make sure the related object and vnode reflect the fact that the 3941 * object may now contain a dirty page. 3942 * 3943 * Page must be busied? 3944 * No other requirements. 3945 */ 3946 void 3947 vm_page_dirty(vm_page_t m) 3948 { 3949 #ifdef INVARIANTS 3950 int pqtype = m->queue - m->pc; 3951 #endif 3952 KASSERT(pqtype != PQ_CACHE && pqtype != PQ_FREE, 3953 ("vm_page_dirty: page in free/cache queue!")); 3954 if (m->dirty != VM_PAGE_BITS_ALL) { 3955 m->dirty = VM_PAGE_BITS_ALL; 3956 if (m->object) 3957 vm_object_set_writeable_dirty(m->object); 3958 } 3959 } 3960 3961 /* 3962 * Invalidates DEV_BSIZE'd chunks within a page. Both the 3963 * valid and dirty bits for the effected areas are cleared. 3964 * 3965 * Page must be busied? 3966 * Does not block. 3967 * No other requirements. 3968 */ 3969 void 3970 vm_page_set_invalid(vm_page_t m, int base, int size) 3971 { 3972 int bits; 3973 3974 bits = vm_page_bits(base, size); 3975 m->valid &= ~bits; 3976 m->dirty &= ~bits; 3977 atomic_add_int(&m->object->generation, 1); 3978 } 3979 3980 /* 3981 * The kernel assumes that the invalid portions of a page contain 3982 * garbage, but such pages can be mapped into memory by user code. 3983 * When this occurs, we must zero out the non-valid portions of the 3984 * page so user code sees what it expects. 3985 * 3986 * Pages are most often semi-valid when the end of a file is mapped 3987 * into memory and the file's size is not page aligned. 3988 * 3989 * Page must be busied? 3990 * No other requirements. 3991 */ 3992 void 3993 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) 3994 { 3995 int b; 3996 int i; 3997 3998 /* 3999 * Scan the valid bits looking for invalid sections that 4000 * must be zerod. 
Invalid sub-DEV_BSIZE'd areas ( where the 4001 * valid bit may be set ) have already been zerod by 4002 * vm_page_set_validclean(). 4003 */ 4004 for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) { 4005 if (i == (PAGE_SIZE / DEV_BSIZE) || 4006 (m->valid & (1 << i)) 4007 ) { 4008 if (i > b) { 4009 pmap_zero_page_area( 4010 VM_PAGE_TO_PHYS(m), 4011 b << DEV_BSHIFT, 4012 (i - b) << DEV_BSHIFT 4013 ); 4014 } 4015 b = i + 1; 4016 } 4017 } 4018 4019 /* 4020 * setvalid is TRUE when we can safely set the zero'd areas 4021 * as being valid. We can do this if there are no cache consistency 4022 * issues. e.g. it is ok to do with UFS, but not ok to do with NFS. 4023 */ 4024 if (setvalid) 4025 m->valid = VM_PAGE_BITS_ALL; 4026 } 4027 4028 /* 4029 * Is a (partial) page valid? Note that the case where size == 0 4030 * will return FALSE in the degenerate case where the page is entirely 4031 * invalid, and TRUE otherwise. 4032 * 4033 * Does not block. 4034 * No other requirements. 4035 */ 4036 int 4037 vm_page_is_valid(vm_page_t m, int base, int size) 4038 { 4039 int bits = vm_page_bits(base, size); 4040 4041 if (m->valid && ((m->valid & bits) == bits)) 4042 return 1; 4043 else 4044 return 0; 4045 } 4046 4047 /* 4048 * Update dirty bits from pmap/mmu. May not block. 4049 * 4050 * Caller must hold the page busy 4051 * 4052 * WARNING! Unless the page has been unmapped, this function only 4053 * provides a likely dirty status. 4054 */ 4055 void 4056 vm_page_test_dirty(vm_page_t m) 4057 { 4058 if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m)) { 4059 vm_page_dirty(m); 4060 } 4061 } 4062 4063 #include "opt_ddb.h" 4064 #ifdef DDB 4065 #include <ddb/ddb.h> 4066 4067 DB_SHOW_COMMAND(page, vm_page_print_page_info) 4068 { 4069 db_printf("vmstats.v_free_count: %ld\n", vmstats.v_free_count); 4070 db_printf("vmstats.v_cache_count: %ld\n", vmstats.v_cache_count); 4071 db_printf("vmstats.v_inactive_count: %ld\n", vmstats.v_inactive_count); 4072 db_printf("vmstats.v_active_count: %ld\n", vmstats.v_active_count); 4073 db_printf("vmstats.v_wire_count: %ld\n", vmstats.v_wire_count); 4074 db_printf("vmstats.v_free_reserved: %ld\n", vmstats.v_free_reserved); 4075 db_printf("vmstats.v_free_min: %ld\n", vmstats.v_free_min); 4076 db_printf("vmstats.v_free_target: %ld\n", vmstats.v_free_target); 4077 db_printf("vmstats.v_cache_min: %ld\n", vmstats.v_cache_min); 4078 db_printf("vmstats.v_inactive_target: %ld\n", 4079 vmstats.v_inactive_target); 4080 } 4081 4082 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info) 4083 { 4084 int i; 4085 db_printf("PQ_FREE:"); 4086 for (i = 0; i < PQ_L2_SIZE; i++) { 4087 db_printf(" %ld", vm_page_queues[PQ_FREE + i].lcnt); 4088 } 4089 db_printf("\n"); 4090 4091 db_printf("PQ_CACHE:"); 4092 for(i = 0; i < PQ_L2_SIZE; i++) { 4093 db_printf(" %ld", vm_page_queues[PQ_CACHE + i].lcnt); 4094 } 4095 db_printf("\n"); 4096 4097 db_printf("PQ_ACTIVE:"); 4098 for(i = 0; i < PQ_L2_SIZE; i++) { 4099 db_printf(" %ld", vm_page_queues[PQ_ACTIVE + i].lcnt); 4100 } 4101 db_printf("\n"); 4102 4103 db_printf("PQ_INACTIVE:"); 4104 for(i = 0; i < PQ_L2_SIZE; i++) { 4105 db_printf(" %ld", vm_page_queues[PQ_INACTIVE + i].lcnt); 4106 } 4107 db_printf("\n"); 4108 } 4109 #endif /* DDB */ 4110
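/*
 * Informational note: the DB_SHOW_COMMAND entries above are reached from
 * the kernel debugger as "show page" and "show pageq", dumping the global
 * vmstats counters and the per-color PQ_FREE/PQ_CACHE/PQ_ACTIVE/PQ_INACTIVE
 * queue lengths respectively.
 */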