/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_page.h,v 1.75.2.8 2002/03/06 01:07:09 dillon Exp $
 * $DragonFly: src/sys/vm/vm_page.h,v 1.28 2008/05/09 07:24:48 dillon Exp $
 */

/*
 * Resident memory system definitions.
 */

#ifndef _VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#if !defined(KLD_MODULE) && defined(_KERNEL)
#include "opt_vmpage.h"
#endif

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_PMAP_H_
#include <vm/pmap.h>
#endif
#ifndef _MACHINE_ATOMIC_H_
#include <machine/atomic.h>
#endif

#ifdef _KERNEL

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif

#ifdef __x86_64__
#include <machine/vmparam.h>
#endif

#endif

typedef enum vm_page_event { VMEVENT_NONE, VMEVENT_COW } vm_page_event_t;

struct vm_page_action {
	LIST_ENTRY(vm_page_action) entry;
	vm_page_event_t	event;
	void		(*func)(struct vm_page *,
				struct vm_page_action *);
	void		*data;
};

typedef struct vm_page_action *vm_page_action_t;

/*
 * Management of resident (logical) pages.
 *
 * A small structure is kept for each resident page, indexed by page
 * number.  Each structure is an element of several lists:
 *
 *	A hash table bucket used to quickly
 *	perform object/offset lookups.
 *
 *	A list of all pages for a given object, so they can be quickly
 *	deactivated at time of deallocation.
 *
 *	An ordered list of pages due for pageout.
 *
 * In addition, the structure contains the object and offset to which
 * this page belongs (for pageout), and sundry status bits.
 *
 * Fields in this structure are locked either by the lock on the object
 * that the page belongs to (O) or by the lock on the page queues (P).
 *
 * The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 * bits set without having associated valid bits set.  This is used by
 * NFS to implement piecemeal writes.
 */

TAILQ_HEAD(pglist, vm_page);

struct vm_object;

int rb_vm_page_compare(struct vm_page *, struct vm_page *);

struct vm_page_rb_tree;
RB_PROTOTYPE2(vm_page_rb_tree, vm_page, rb_entry, rb_vm_page_compare, vm_pindex_t);
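
/*
 * Illustrative sketch only (the real comparator lives in vm_page.c, not
 * in this header): consistent with the RB_PROTOTYPE2 declaration above,
 * the comparator orders pages within an object's red-black tree by
 * pindex, roughly:
 *
 *	int
 *	rb_vm_page_compare(struct vm_page *p1, struct vm_page *p2)
 *	{
 *		if (p1->pindex < p2->pindex)
 *			return(-1);
 *		if (p1->pindex > p2->pindex)
 *			return(1);
 *		return(0);
 *	}
 */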

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list (P)	*/
	RB_ENTRY(vm_page) rb_entry;	/* Red-Black tree based at object */

	struct vm_object *object;	/* which object am I in (O,P)	*/
	vm_pindex_t pindex;		/* offset into object (O,P)	*/
	vm_paddr_t phys_addr;		/* physical address of page	*/
	struct md_page md;		/* machine dependent stuff	*/
	u_short	queue;			/* page queue index		*/
	u_short	flags;			/* see below			*/
	u_short	pc;			/* page color			*/
	u_char	act_count;		/* page usage count		*/
	u_char	busy;			/* page busy count		*/
	u_int	wire_count;		/* wired down maps refs (P)	*/
	int	hold_count;		/* page hold count		*/

	/*
	 * NOTE that these must support one bit per DEV_BSIZE in a page,
	 * so on normal x86 kernels they must be at least 8 bits wide.
	 */
#if PAGE_SIZE == 4096
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 8192
	u_short	valid;			/* map of valid DEV_BSIZE chunks */
	u_short	dirty;			/* map of dirty DEV_BSIZE chunks */
#endif
	LIST_HEAD(,vm_page_action) action_list;
};

#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif

/*
 * Page coloring parameters.  We default to a middle of the road optimization.
 * Larger selections would not really hurt us but if a machine does not have
 * a lot of memory it could cause vm_page_alloc() to eat more cpu cycles
 * looking for free pages.
 *
 * Page coloring cannot be disabled.  Modules do not have access to most PQ
 * constants because they can change between builds.
 */
#if defined(_KERNEL) && !defined(KLD_MODULE)

#if !defined(PQ_CACHESIZE)
#define PQ_CACHESIZE	256	/* max is 1024 (MB) */
#endif

#if PQ_CACHESIZE >= 1024
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 256	/* Number of colors, optimized for a 1M cache */

#elif PQ_CACHESIZE >= 512
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 128	/* Number of colors, optimized for a 512K cache */

#elif PQ_CACHESIZE >= 256
#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 64	/* Number of colors, optimized for a 256K cache */

#elif PQ_CACHESIZE >= 128
#define PQ_PRIME1 9	/* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 32	/* Number of colors, optimized for a 128K cache */

#else
#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors (opt for 64K cache) */

#endif

#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#endif /* KERNEL && !KLD_MODULE */
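
/*
 * Illustrative sketch only (an assumption about the allocator, not code
 * from this header): a page's color is typically derived from its offset
 * within its object so that consecutive pages land in different cache
 * slices, along the lines of:
 *
 *	m->pc = (pindex + object->pg_color) & PQ_L2_MASK;
 *
 * where object->pg_color is assumed to be the object's base color.
 */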

/*
 * The queue array is always based on PQ_MAXL2_SIZE regardless of the actual
 * cache size chosen in order to present a uniform interface for modules.
 */
#define PQ_MAXL2_SIZE	256	/* fixed maximum (in pages) / module compat */

#if PQ_L2_SIZE > PQ_MAXL2_SIZE
#error "Illegal PQ_L2_SIZE"
#endif

#define PQ_NONE		0
#define PQ_FREE		1
#define PQ_INACTIVE	(1 + 1*PQ_MAXL2_SIZE)
#define PQ_ACTIVE	(2 + 1*PQ_MAXL2_SIZE)
#define PQ_CACHE	(3 + 1*PQ_MAXL2_SIZE)
#define PQ_HOLD		(3 + 2*PQ_MAXL2_SIZE)
#define PQ_COUNT	(4 + 2*PQ_MAXL2_SIZE)

/*
 * Scan support
 */
struct vm_map;

struct rb_vm_page_scan_info {
	vm_pindex_t	start_pindex;
	vm_pindex_t	end_pindex;
	int		limit;
	int		desired;
	int		error;
	int		pagerflags;
	vm_offset_t	addr;
	vm_pindex_t	backing_offset_index;
	struct vm_object *object;
	struct vm_object *backing_object;
	struct vm_page	*mpte;
	struct pmap	*pmap;
	struct vm_map	*map;
};

int rb_vm_page_scancmp(struct vm_page *, void *);

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
	int	lcnt;
	int	flipflop;	/* probably not the best place */
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];
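
/*
 * Usage sketch (illustrative, not code from this header): the free and
 * cache queues are replicated per color, so the free queue for a given
 * color is indexed off PQ_FREE, while the inactive, active, and hold
 * queues are singletons:
 *
 *	struct vpgqueues *vpq;
 *	vm_page_t m;
 *
 *	vpq = &vm_page_queues[PQ_FREE + (color & PQ_L2_MASK)];
 *	TAILQ_FOREACH(m, &vpq->pl, pageq) {
 *		... scan free pages of this color ...
 *	}
 */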

/*
 * These are the flags defined for vm_page.
 *
 * PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 * not under PV management but otherwise should be treated as a
 * normal page.  Pages not under PV management cannot be paged out
 * via the object/vm_page_t because there is no knowledge of their
 * pte mappings, nor can they be removed from their objects via
 * the object, and such pages are also not on any PQ queue.  The
 * PG_MAPPED and PG_WRITEABLE flags are not applicable.
 *
 * PG_MAPPED only applies to managed pages, indicating whether the page
 * is mapped onto one or more pmaps.  A page might still be mapped to
 * special pmaps in an unmanaged fashion, for example when mapped into a
 * buffer cache buffer, without setting PG_MAPPED.
 *
 * PG_WRITEABLE indicates that there may be a writeable managed pmap entry
 * somewhere, and that the page can be dirtied by hardware at any time
 * and may have to be tested for that.  The modified bit in unmanaged
 * mappings or in the special clean map is not tested.
 *
 * PG_SWAPPED indicates that the page is backed by a swap block.  Any
 * VM object type other than OBJT_DEFAULT can have swap-backed pages now.
 */
#define	PG_BUSY		0x0001		/* page is in transit (O) */
#define	PG_WANTED	0x0002		/* someone is waiting for page (O) */
#define PG_WINATCFLS	0x0004		/* flush dirty page on inactive q */
#define	PG_FICTITIOUS	0x0008		/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x0010		/* page is writeable */
#define PG_MAPPED	0x0020		/* page is mapped (managed) */
#define	PG_ZERO		0x0040		/* page is zeroed */
#define PG_REFERENCED	0x0080		/* page has been referenced */
#define PG_CLEANCHK	0x0100		/* page will be checked for cleaning */
#define PG_SWAPINPROG	0x0200		/* swap I/O in progress on page */
#define PG_NOSYNC	0x0400		/* do not collect for syncer */
#define PG_UNMANAGED	0x0800		/* No PV management for page */
#define PG_MARKER	0x1000		/* special queue marker page */
#define PG_RAM		0x2000		/* read ahead mark */
#define PG_SWAPPED	0x4000		/* backed by swap */
#define PG_NOTMETA	0x8000		/* do not back with swap */
					/* u_short, only 16 flag bits */

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 *
 */

extern int vm_page_zero_count;
extern struct vm_page *vm_page_array;	/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	\
	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa)	\
	(&vm_page_array[atop(pa) - first_page])

/*
 * Functions implemented as macros
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_short(&(m)->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_short(&(m)->flags, bits);
}

static __inline void
vm_page_busy(vm_page_t m)
{
	ASSERT_LWKT_TOKEN_HELD(&vm_token);
	KASSERT((m->flags & PG_BUSY) == 0,
		("vm_page_busy: page already busy!!!"));
	vm_page_flag_set(m, PG_BUSY);
}

/*
 * vm_page_flash:
 *
 *	Wakeup anyone waiting for the page.
 */
static __inline void
vm_page_flash(vm_page_t m)
{
	lwkt_gettoken(&vm_token);
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
	lwkt_reltoken(&vm_token);
}

/*
 * Clear the PG_BUSY flag and wakeup anyone waiting for the page.  This
 * is typically the last call you make on a page before moving on to
 * other things.
 */
static __inline void
vm_page_wakeup(vm_page_t m)
{
	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
	vm_page_flag_clear(m, PG_BUSY);
	vm_page_flash(m);
}

/*
 * These routines manipulate the 'soft busy' count for a page.  A soft busy
 * is almost like PG_BUSY except that it allows certain compatible operations
 * to occur on the page while it is busy.  For example, a page undergoing a
 * write can still be mapped read-only.
 */
static __inline void
vm_page_io_start(vm_page_t m)
{
	atomic_add_char(&(m)->busy, 1);
}

static __inline void
vm_page_io_finish(vm_page_t m)
{
	atomic_subtract_char(&m->busy, 1);
	if (m->busy == 0)
		vm_page_flash(m);
}

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

#if PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif
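
/*
 * Usage sketch for the soft-busy pair above (an illustrative assumption
 * about a typical caller, not code from this header): hold the soft-busy
 * count across an I/O so the page is not torn down underneath it, while
 * compatible access such as read-only mappings remains allowed:
 *
 *	vm_page_io_start(m);
 *	... issue the write and wait for it to complete ...
 *	vm_page_io_finish(m);
 *
 * vm_page_io_finish() flashes the page, waking any PG_WANTED waiters,
 * once the count returns to zero.
 */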

/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if VM_ALLOC_NORMAL
 * is also specified.
 */
#define VM_ALLOC_NORMAL		0x01	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x02	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x04	/* ok to exhaust entire free list */
#define	VM_ALLOC_ZERO		0x08	/* req pre-zero'd memory if avail */
#define	VM_ALLOC_QUICK		0x10	/* like NORMAL but do not use cache */
#define	VM_ALLOC_RETRY		0x80	/* indefinite block (vm_page_grab()) */

void vm_page_hold(vm_page_t);
void vm_page_unhold(vm_page_t);
void vm_page_activate (vm_page_t);
vm_page_t vm_page_alloc (struct vm_object *, vm_pindex_t, int);
vm_page_t vm_page_grab (struct vm_object *, vm_pindex_t, int);
void vm_page_cache (vm_page_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_insert (vm_page_t, struct vm_object *, vm_pindex_t);
vm_page_t vm_page_lookup (struct vm_object *, vm_pindex_t);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, struct vm_object *, vm_pindex_t);
vm_offset_t vm_page_startup (vm_offset_t);
vm_page_t vm_add_new_page (vm_paddr_t pa);
void vm_page_unmanage (vm_page_t);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_unqueue (vm_page_t);
void vm_page_unqueue_nowakeup (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_set_validdirty (vm_page_t, int, int);
void vm_page_set_valid (vm_page_t, int, int);
void vm_page_set_dirty (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
vm_page_t vm_page_list_find(int basequeue, int index, boolean_t prefer_zero);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
vm_page_t vm_page_free_fromq_fast(void);
vm_offset_t vm_contig_pg_kmap(int, u_long, vm_map_t, int);
void vm_contig_pg_free(int, u_long);
void vm_page_event_internal(vm_page_t, vm_page_event_t);
void vm_page_dirty(vm_page_t m);
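
/*
 * Usage sketch (illustrative, not code from this header): with
 * VM_ALLOC_RETRY, vm_page_grab() is assumed to block until it can hand
 * the page back PG_BUSY to the caller, so a typical sequence is:
 *
 *	vm_page_t m;
 *
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 *	... operate on the busied page ...
 *	vm_page_wakeup(m);
 *
 * vm_page_wakeup() clears PG_BUSY and wakes any waiters; see the inlines
 * earlier in this file.
 */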

/*
 * Reduce the protection of a page.  This routine never raises the
 * protection and therefore can be safely called if the page is already
 * at VM_PROT_NONE (it is effectively a NOP).
 *
 * VM_PROT_NONE will remove all user mappings of a page.  This is often
 * necessary when a page changes state (for example, turns into a copy-on-write
 * page or needs to be frozen for write I/O) in order to force a fault, or
 * to force a page's dirty bits to be synchronized and avoid hardware
 * (modified/accessed) bit update races with pmap changes.
 *
 * Since 'prot' is usually a constant, this inline usually winds up optimizing
 * out the primary conditional.
 *
 * WARNING: VM_PROT_NONE can block, but will loop until all mappings have
 * been cleared.  Callers should be aware that other page related elements
 * might have changed, however.
 */
static __inline void
vm_page_protect(vm_page_t mem, int prot)
{
	if (prot == VM_PROT_NONE) {
		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(mem, VM_PROT_NONE);
			/* PG_WRITEABLE & PG_MAPPED cleared by call */
		}
	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
		pmap_page_protect(mem, VM_PROT_READ);
		/* PG_WRITEABLE cleared by call */
	}
}

/*
 * Zero-fill the specified page.  The entire contents of the page will be
 * zero'd out.
 */
static __inline boolean_t
vm_page_zero_fill(vm_page_t m)
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 * Copy the contents of src_m to dest_m.  The pages must be stable but spl
 * and other protections depend on context.
 */
static __inline void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
	dest_m->dirty = VM_PAGE_BITS_ALL;
}

/*
 * Free a page.  The page must be marked BUSY.
 *
 * The clearing of PG_ZERO is a temporary safety until the code can be
 * reviewed to determine that PG_ZERO is being properly cleared on
 * write faults or maps.  PG_ZERO was previously cleared in
 * vm_page_alloc().
 */
static __inline void
vm_page_free(vm_page_t m)
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Free a page to the zeroed-pages queue.
 */
static __inline void
vm_page_free_zero(vm_page_t m)
{
#ifdef __x86_64__
	/* JG DEBUG64 We check if the page is really zeroed. */
	char *p = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	int i;

	for (i = 0; i < PAGE_SIZE; i++) {
		if (p[i] != 0) {
			panic("non-zero page in vm_page_free_zero()");
		}
	}
#endif
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Wait until page is no longer PG_BUSY or (if also_m_busy is TRUE)
 * m->busy is zero.  Returns TRUE if it had to sleep (including if
 * it almost had to sleep and made temporary spl*() mods), FALSE
 * otherwise.
 *
 * This routine assumes that interrupts can only remove the busy
 * status from a page, not set the busy status or change it from
 * PG_BUSY to m->busy or vice versa (which would create a timing
 * window).
 *
 * Note: as an inline, 'also_m_busy' is usually a constant and well
 * optimized.
 */
static __inline int
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
		lwkt_gettoken(&vm_token);
		if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
			/*
			 * Page is busy.  Wait and retry.
			 */
			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
			tsleep(m, 0, msg, 0);
		}
		lwkt_reltoken(&vm_token);
		return(TRUE);
		/* not reached */
	}
	return(FALSE);
}

/*
 * Set page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#endif				/* _KERNEL */
#endif				/* !_VM_VM_PAGE_H_ */