/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_page.h,v 1.75.2.8 2002/03/06 01:07:09 dillon Exp $
 * $DragonFly: src/sys/vm/vm_page.h,v 1.18 2005/03/04 00:44:49 dillon Exp $
 */

/*
 * Resident memory system definitions.
 */
#ifndef _VM_PAGE_H_
#define _VM_PAGE_H_

#if !defined(KLD_MODULE) && defined(_KERNEL)
#include "opt_vmpage.h"
#endif

#include <vm/pmap.h>
#include <machine/atomic.h>

/*
 * Management of resident (logical) pages.
 *
 * A small structure is kept for each resident
 * page, indexed by page number.  Each structure
 * is an element of several lists:
 *
 *	A hash table bucket used to quickly
 *	perform object/offset lookups.
 *
 *	A list of all pages for a given object,
 *	so they can be quickly deactivated at
 *	time of deallocation.
 *
 *	An ordered list of pages due for pageout.
 *
 * In addition, the structure contains the object
 * and offset to which this page belongs (for pageout),
 * and sundry status bits.
 *
 * Fields in this structure are locked either by the lock on the
 * object that the page belongs to (O) or by the lock on the page
 * queues (P).
 *
 * The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 * bits set without having associated valid bits set.  This is used by
 * NFS to implement piecemeal writes.
 */

TAILQ_HEAD(pglist, vm_page);

struct msf_buf;
struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list (P)	*/
	struct vm_page	*hnext;		/* hash table link (O,P)	*/
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O)	*/

	vm_object_t object;		/* which object am I in (O,P)	*/
	vm_pindex_t pindex;		/* offset into object (O,P)	*/
	vm_paddr_t phys_addr;		/* physical address of page	*/
	struct md_page md;		/* machine dependent stuff	*/
	u_short	queue;			/* page queue index		*/
	u_short	flags;			/* see below			*/
	u_short	pc;			/* page color			*/
	u_short wire_count;		/* wired down maps refs (P)	*/
	short hold_count;		/* page hold count		*/
	u_char	act_count;		/* page usage count		*/
	u_char	busy;			/* page busy count		*/

	/*
	 * NOTE: these fields must support one bit per DEV_BSIZE chunk in
	 * a page, so on normal x86 kernels they must be at least 8 bits
	 * wide.
	 */
#if PAGE_SIZE == 4096
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */
	u_char	unused1;
	u_char	unused2;
#elif PAGE_SIZE == 8192
	u_short	valid;			/* map of valid DEV_BSIZE chunks */
	u_short	dirty;			/* map of dirty DEV_BSIZE chunks */
#endif
	struct msf_buf *msf_hint;	/* first page of an msfbuf map */
};

/*
 * note: currently use SWAPBLK_NONE as an absolute value rather than
 * a flag bit.
 */
#define SWAPBLK_MASK	((daddr_t)((u_daddr_t)-1 >> 1))		/* mask */
#define SWAPBLK_NONE	((daddr_t)((u_daddr_t)SWAPBLK_MASK + 1))/* flag */
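
/*
 * Illustrative sketch (hypothetical helper, compiled out): how the
 * per-DEV_BSIZE valid/dirty bitmaps above are interpreted.  With
 * PAGE_SIZE == 4096 and DEV_BSIZE == 512 a page has 8 chunks, so bit N
 * covers bytes [N * DEV_BSIZE, (N + 1) * DEV_BSIZE).  The kernel's real
 * equivalent is vm_page_bits(), declared further down in this file.
 */
#if 0
static __inline u_int
example_chunk_mask(int base, int size)
{
	int first = base / DEV_BSIZE;			/* first chunk touched */
	int last = (base + size - 1) / DEV_BSIZE;	/* last chunk touched */

	/* one bit per DEV_BSIZE chunk overlapped by [base, base + size) */
	return (((1 << (last + 1)) - 1) & ~((1 << first) - 1));
}
/* e.g. marking bytes 512..1023 dirty sets only bit 1 of m->dirty. */
#endif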
/*
 * Page coloring parameters.  We default to a middle of the road optimization.
 * Larger selections would not really hurt us but if a machine does not have
 * a lot of memory it could cause vm_page_alloc() to eat more cpu cycles
 * looking for free pages.
 *
 * Page coloring cannot be disabled.  Modules do not have access to most PQ
 * constants because they can change between builds.
 */
#if defined(_KERNEL) && !defined(KLD_MODULE)

#if !defined(PQ_CACHESIZE)
#define PQ_CACHESIZE 256	/* max is 1024 (MB) */
#endif

#if PQ_CACHESIZE >= 1024
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 256	/* A number of colors opt for 1M cache */

#elif PQ_CACHESIZE >= 512
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 128	/* A number of colors opt for 512K cache */

#elif PQ_CACHESIZE >= 256
#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 64	/* A number of colors opt for 256K cache */

#elif PQ_CACHESIZE >= 128
#define PQ_PRIME1 9	/* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 32	/* A number of colors opt for 128k cache */

#else
#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors (opt for 64K cache) */

#endif

#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#endif /* KERNEL && !KLD_MODULE */

/*
 * The queue array is always based on PQ_MAXL2_SIZE regardless of the actual
 * cache size chosen in order to present a uniform interface for modules.
 */
#define PQ_MAXL2_SIZE	256	/* fixed maximum (in pages) / module compat */

#if PQ_L2_SIZE > PQ_MAXL2_SIZE
#error "Illegal PQ_L2_SIZE"
#endif

#define PQ_NONE		0
#define PQ_FREE		1
#define PQ_INACTIVE	(1 + 1*PQ_MAXL2_SIZE)
#define PQ_ACTIVE	(2 + 1*PQ_MAXL2_SIZE)
#define PQ_CACHE	(3 + 1*PQ_MAXL2_SIZE)
#define PQ_HOLD		(3 + 2*PQ_MAXL2_SIZE)
#define PQ_COUNT	(4 + 2*PQ_MAXL2_SIZE)

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
	int	lcnt;
	int	flipflop;	/* probably not the best place */
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];
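
/*
 * Illustrative sketch (hypothetical helpers, compiled out): PQ_FREE and
 * PQ_CACHE are effectively arrays of PQ_L2_SIZE queues, one per page
 * color, so the vm_page_queues[] slot for a free or cached page is the
 * base queue index plus the page's color, while PQ_INACTIVE, PQ_ACTIVE
 * and PQ_HOLD are single queues.  PQ_L2_MASK is only visible to
 * non-module kernel builds, hence the fixed PQ_MAXL2_SIZE layout above.
 */
#if 0
static __inline int
example_free_queue_index(u_short color)
{
	return (PQ_FREE + (color & PQ_L2_MASK));  /* one free queue per color */
}

static __inline struct vpgqueues *
example_queue_of(vm_page_t m)
{
	return (&vm_page_queues[m->queue]);	/* m->queue already includes the color */
}
#endif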
/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 *	 not under PV management but otherwise should be treated as a
 *	 normal page.  Pages not under PV management cannot be paged out
 *	 via the object/vm_page_t because there is no knowledge of their
 *	 pte mappings, nor can they be removed from their objects via
 *	 the object, and such pages are also not on any PQ queue.
 */
#define	PG_BUSY		0x0001		/* page is in transit (O) */
#define	PG_WANTED	0x0002		/* someone is waiting for page (O) */
#define PG_WINATCFLS	0x0004		/* flush dirty page on inactive q */
#define	PG_FICTITIOUS	0x0008		/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x0010		/* page is mapped writeable */
#define PG_MAPPED	0x0020		/* page is mapped */
#define	PG_ZERO		0x0040		/* page is zeroed */
#define PG_REFERENCED	0x0080		/* page has been referenced */
#define PG_CLEANCHK	0x0100		/* page will be checked for cleaning */
#define PG_SWAPINPROG	0x0200		/* swap I/O in progress on page */
#define PG_NOSYNC	0x0400		/* do not collect for syncer */
#define PG_UNMANAGED	0x0800		/* No PV management for page */
#define PG_MARKER	0x1000		/* special queue marker page */

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of four lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 *
 */

extern int vm_page_zero_count;
extern vm_page_t vm_page_array;		/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	\
	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa) \
	(&vm_page_array[atop(pa) - first_page])

/*
 * Functions implemented as macros
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_short(&(m)->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_short(&(m)->flags, bits);
}

static __inline void
vm_page_busy(vm_page_t m)
{
	KASSERT((m->flags & PG_BUSY) == 0,
		("vm_page_busy: page already busy!!!"));
	vm_page_flag_set(m, PG_BUSY);
}

/*
 * vm_page_flash:
 *
 *	wakeup anyone waiting for the page.
 */
static __inline void
vm_page_flash(vm_page_t m)
{
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}

/*
 * Clear the PG_BUSY flag and wakeup anyone waiting for the page.  This
 * is typically the last call you make on a page before moving onto
 * other things.
 */
static __inline void
vm_page_wakeup(vm_page_t m)
{
	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
	vm_page_flag_clear(m, PG_BUSY);
	vm_page_flash(m);
}
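
/*
 * Illustrative sketch (hypothetical caller, compiled out): the typical
 * hard-busy protocol built from the inlines above.  Any existing busy
 * condition is waited out with vm_page_sleep_busy() (defined later in
 * this file), PG_BUSY is then held for the duration of the operation,
 * and vm_page_wakeup() clears it and wakes waiters.  Synchronization
 * (splvm() bracketing) and error handling are omitted for brevity.
 */
#if 0
static void
example_busy_protocol(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	while ((m = vm_page_lookup(object, pindex)) != NULL) {
		if (vm_page_sleep_busy(m, TRUE, "pgbusy"))
			continue;	/* slept; the page may have changed, look it up again */
		vm_page_busy(m);	/* we own the page now */
		/* ... operate on the page ... */
		vm_page_wakeup(m);	/* clear PG_BUSY and wake any waiters */
		break;
	}
}
#endif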
/*
 * These routines manipulate the 'soft busy' count for a page.  A soft busy
 * is almost like PG_BUSY except that it allows certain compatible operations
 * to occur on the page while it is busy.  For example, a page undergoing a
 * write can still be mapped read-only.
 */
static __inline void
vm_page_io_start(vm_page_t m)
{
	atomic_add_char(&(m)->busy, 1);
}

static __inline void
vm_page_io_finish(vm_page_t m)
{
	atomic_subtract_char(&m->busy, 1);
	if (m->busy == 0)
		vm_page_flash(m);
}

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

#if PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif

/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if VM_ALLOC_NORMAL
 * is also specified.
 */
#define VM_ALLOC_NORMAL		0x01	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x02	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x04	/* ok to exhaust entire free list */
#define	VM_ALLOC_ZERO		0x08	/* req pre-zero'd memory if avail */
#define	VM_ALLOC_RETRY		0x80	/* indefinite block (vm_page_grab()) */

void vm_page_unhold(vm_page_t mem);
void vm_page_activate (vm_page_t);
vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
void vm_page_cache (vm_page_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
vm_offset_t vm_page_startup (vm_offset_t, vm_offset_t, vm_offset_t);
vm_page_t vm_add_new_page (vm_paddr_t pa);
void vm_page_unmanage (vm_page_t);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_unqueue (vm_page_t);
void vm_page_unqueue_nowakeup (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_set_dirty (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
vm_page_t vm_page_list_find(int basequeue, int index, boolean_t prefer_zero);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
vm_offset_t vm_contig_pg_kmap(int, u_long, vm_map_t, int);
void vm_contig_pg_free(int, u_long);
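
/*
 * Illustrative sketch (hypothetical caller, compiled out): a common
 * allocation pattern using the VM_ALLOC_* flags above.  VM_ALLOC_RETRY
 * is legal only together with VM_ALLOC_NORMAL and asks vm_page_grab()
 * to block rather than fail.  VM_ALLOC_ZERO only expresses a preference
 * for a pre-zeroed page, so the caller still checks PG_ZERO and falls
 * back to vm_page_zero_fill() (defined later in this file).
 */
#if 0
static vm_page_t
example_grab_zeroed(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	m = vm_page_grab(object, pindex,
			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
	if ((m->flags & PG_ZERO) == 0)
		vm_page_zero_fill(m);	/* page was not pre-zeroed, zero it now */
	return (m);
}
#endif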
/*
 * Holding a page keeps it from being reused.  Other parts of the system
 * can still disassociate the page from its current object and free it, or
 * perform read or write I/O on it and/or otherwise manipulate the page,
 * but if the page is held the VM system will leave the page and its data
 * intact and not reuse the page for other purposes until the last hold
 * reference is released.  (see vm_page_wire() if you want to prevent the
 * page from being disassociated from its object too).
 *
 * This routine must be called while at splvm() or better.
 *
 * The caller must still validate the contents of the page and, if necessary,
 * wait for any pending I/O (e.g. vm_page_sleep_busy() loop) to complete
 * before manipulating the page.
 */
static __inline void
vm_page_hold(vm_page_t mem)
{
	mem->hold_count++;
}

/*
 * Reduce the protection of a page.  This routine never raises the
 * protection and therefore can be safely called if the page is already
 * at VM_PROT_NONE (it is effectively a NOP).
 *
 * VM_PROT_NONE will remove all user mappings of a page.  This is often
 * necessary when a page changes state (for example, turns into a copy-on-write
 * page or needs to be frozen for write I/O) in order to force a fault, or
 * to force a page's dirty bits to be synchronized and avoid hardware
 * (modified/accessed) bit update races with pmap changes.
 *
 * Since 'prot' is usually a constant, this inline usually winds up optimizing
 * out the primary conditional.
 */
static __inline void
vm_page_protect(vm_page_t mem, int prot)
{
	if (prot == VM_PROT_NONE) {
		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(mem, VM_PROT_NONE);
			vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
		}
	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
		pmap_page_protect(mem, VM_PROT_READ);
		vm_page_flag_clear(mem, PG_WRITEABLE);
	}
}

/*
 * Zero-fill the specified page.  The entire contents of the page will be
 * zero'd out.
 */
static __inline boolean_t
vm_page_zero_fill(vm_page_t m)
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 * Copy the contents of src_m to dest_m.  The pages must be stable but spl
 * and other protections depend on context.
 */
static __inline void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}

/*
 * Free a page.  The page must be marked BUSY.
 *
 * The clearing of PG_ZERO is a temporary safety until the code can be
 * reviewed to determine that PG_ZERO is being properly cleared on
 * write faults or maps.  PG_ZERO was previously cleared in
 * vm_page_alloc().
 */
static __inline void
vm_page_free(vm_page_t m)
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Free a page to the zeroed-pages queue
 */
static __inline void
vm_page_free_zero(vm_page_t m)
{
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Wait until page is no longer PG_BUSY or (if also_m_busy is TRUE)
 * m->busy is zero.  Returns TRUE if it had to sleep (including if
 * it almost had to sleep and made temporary spl*() mods), FALSE
 * otherwise.
 *
 * This routine assumes that interrupts can only remove the busy
 * status from a page, not set the busy status or change it from
 * PG_BUSY to m->busy or vice versa (which would create a timing
 * window).
 *
 * Note: as an inline, 'also_m_busy' is usually a constant and well
 * optimized.
 */
static __inline int
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
		int s = splvm();
		if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
			/*
			 * Page is busy.  Wait and retry.
			 */
			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
			tsleep(m, 0, msg, 0);
		}
		splx(s);
		return(TRUE);
		/* not reached */
	}
	return(FALSE);
}
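
/*
 * Illustrative sketch (hypothetical helper, compiled out): preparing a
 * page for write I/O.  The page is downgraded to read-only first so no
 * new pmap modified bits can appear behind the caller's back, then any
 * outstanding modified bits are collected into m->dirty via
 * vm_page_test_dirty() (declared earlier in this file) before deciding
 * whether a write is needed.  Busy handling is omitted for brevity.
 */
#if 0
static void
example_prepare_for_pageout(vm_page_t m)
{
	vm_page_protect(m, VM_PROT_READ);	/* freeze writers */
	vm_page_test_dirty(m);			/* pull pmap modify bits into m->dirty */
	if (m->dirty != 0) {
		/* ... start write I/O, bracketed by vm_page_io_start/finish ... */
	}
}
#endif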
/*
 * Make page all dirty
 */
static __inline void
vm_page_dirty(vm_page_t m)
{
	KASSERT(m->queue - m->pc != PQ_CACHE,
		("vm_page_dirty: page in cache!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 * Set page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#endif /* _KERNEL */
#endif /* !_VM_PAGE_H_ */