/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - page locking primitives
 */
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/vtrace.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/vnode.h>
#include <sys/bitmap.h>
#include <sys/lockstat.h>
#include <sys/condvar_impl.h>
#include <vm/page.h>
#include <vm/seg_enum.h>
#include <vm/vm_dep.h>

/*
 * This global mutex is for logical page locking.
 * The following fields in the page structure are protected
 * by this lock:
 *
 *	p_lckcnt
 *	p_cowcnt
 */
kmutex_t page_llock;

/*
 * This is a global lock for the logical page free list.  The
 * logical free list, in this implementation, is maintained as two
 * separate physical lists - the cache list and the free list.
 */
kmutex_t page_freelock;
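
/*
 * The distinction matters to the allocator: cache list pages still have
 * a vnode/offset identity and can be reclaimed by a later lookup, while
 * free list pages have no identity at all.  Both lists are logically
 * "free" memory, which is why the single page_freelock above covers
 * both.
 */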

/*
 * The hash table, page_hash[], the p_selock fields, and the
 * list of pages associated with vnodes are protected by arrays of mutexes.
 *
 * Unless the hashes are changed radically, the table sizes must be
 * a power of two.  Also, we typically need more mutexes for the
 * vnodes since these locks are occasionally held for long periods.
 * And since there seem to be two special vnodes (kvp and swapvp),
 * we make room for private mutexes for them.
 *
 * The pse_mutex[] array holds the mutexes to protect the p_selock
 * fields of all page_t structures.
 *
 * PAGE_SE_MUTEX(pp) returns the address of the appropriate mutex
 * when given a pointer to a page_t.
 *
 * PSE_TABLE_SIZE must be a power of two.  One could argue that we
 * should go to the trouble of setting it up at run time and base it
 * on memory size rather than the number of compile time CPUs.
 *
 * XX64	We should be using physmem size to calculate PSE_TABLE_SIZE,
 *	PSE_SHIFT, PIO_SHIFT.
 *
 *	These might break in 64 bit world.
 */
#define	PSE_SHIFT	7		/* log2(PSE_TABLE_SIZE) */

#define	PSE_TABLE_SIZE	128		/* number of mutexes to have */

#define	PIO_SHIFT	PSE_SHIFT	/* next power of 2 bigger than page_t */
#define	PIO_TABLE_SIZE	PSE_TABLE_SIZE	/* number of io mutexes to have */

pad_mutex_t	ph_mutex[PH_TABLE_SIZE];
pad_mutex_t	pse_mutex[PSE_TABLE_SIZE];
kmutex_t	pio_mutex[PIO_TABLE_SIZE];

#define	PAGE_SE_MUTEX(pp) \
	&pse_mutex[((((uintptr_t)(pp) >> PSE_SHIFT) ^ \
	    ((uintptr_t)(pp) >> (PSE_SHIFT << 1))) & \
	    (PSE_TABLE_SIZE - 1))].pad_mutex

#define	PAGE_IO_MUTEX(pp) \
	&pio_mutex[(((uintptr_t)pp) >> PIO_SHIFT) & (PIO_TABLE_SIZE - 1)]

#define	PSZC_MTX_TABLE_SIZE	128
#define	PSZC_MTX_TABLE_SHIFT	7

static pad_mutex_t	pszc_mutex[PSZC_MTX_TABLE_SIZE];

#define	PAGE_SZC_MUTEX(_pp) \
	&pszc_mutex[((((uintptr_t)(_pp) >> PSZC_MTX_TABLE_SHIFT) ^ \
	    ((uintptr_t)(_pp) >> (PSZC_MTX_TABLE_SHIFT << 1)) ^ \
	    ((uintptr_t)(_pp) >> (3 * PSZC_MTX_TABLE_SHIFT))) & \
	    (PSZC_MTX_TABLE_SIZE - 1))].pad_mutex
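
/*
 * PAGE_SE_MUTEX and PAGE_SZC_MUTEX fold the page_t address by xoring it
 * at several different right shifts before masking with (table size - 1);
 * PAGE_IO_MUTEX uses a single shift.  Because the table sizes are powers
 * of two the mask is cheap, and the folding spreads runs of consecutive
 * page_t addresses across different pad_mutex entries instead of piling
 * them onto one lock.
 */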

/*
 * The vph_mutex[] array holds the mutexes to protect the vnode chains,
 * (i.e., the list of pages anchored by v_pages and connected via p_vpprev
 * and p_vpnext).
 *
 * The page_vnode_mutex(vp) function returns the address of the appropriate
 * mutex from this array given a pointer to a vnode.  It is complicated
 * by the fact that the kernel's vnode and the swapfs vnode are referenced
 * frequently enough to warrant their own mutexes.
 *
 * The VP_HASH_FUNC returns the index into the vph_mutex array given
 * an address of a vnode.
 */

/*
 * XX64	VPH_TABLE_SIZE and VP_HASH_FUNC might break in 64 bit world.
 *	Need to review again.
 */
#define	VPH_TABLE_SIZE	(2 << VP_SHIFT)

#define	VP_HASH_FUNC(vp) \
	((((uintptr_t)(vp) >> 6) + \
	    ((uintptr_t)(vp) >> 8) + \
	    ((uintptr_t)(vp) >> 10) + \
	    ((uintptr_t)(vp) >> 12)) \
	    & (VPH_TABLE_SIZE - 1))

extern	struct vnode kvp;

/*
 * Two slots after VPH_TABLE_SIZE are reserved in vph_mutex for kernel
 * vnodes.  The lock for kvp is VPH_TABLE_SIZE + 0, and the lock for zvp
 * is VPH_TABLE_SIZE + 1.
 */
kmutex_t	vph_mutex[VPH_TABLE_SIZE + 2];

/*
 * Initialize the locks used by the Virtual Memory Management system.
 */
void
page_lock_init()
{
}

/*
 * At present we only use page ownership to aid debugging, so it's
 * OK if the owner field isn't exact.  In the 32-bit world two thread ids
 * can map to the same owner because we just 'or' in 0x80000000 and
 * then clear the second highest bit, so that (for example) 0x2faced00
 * and 0xafaced00 both map to 0xafaced00.
 * In the 64-bit world, p_selock may not be large enough to hold a full
 * thread pointer.  If we ever need precise ownership (e.g. if we implement
 * priority inheritance for page locks) then p_selock should become a
 * uintptr_t and SE_WRITER should be -((uintptr_t)curthread >> 2).
 */
#define	SE_WRITER	(((selock_t)(ulong_t)curthread | INT_MIN) & ~SE_EWANTED)
#define	SE_READER	1
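
/*
 * To summarize the p_selock encoding used throughout this file: zero
 * means unlocked; a positive value (ignoring the SE_EWANTED bit) is a
 * count of shared holders in units of SE_READER; a negative value (the
 * INT_MIN bit set) is an exclusive holder, with the remaining bits
 * loosely identifying the owning thread for debugging.  SE_EWANTED is
 * ored into the word separately to record that a writer is waiting.
 */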

/*
 * A page that is deleted must be marked as such using the
 * page_lock_delete() function.  The page must be exclusively locked.
 * The SE_DELETED marker is put in p_selock when this function is called.
 * SE_DELETED must be distinct from any SE_WRITER value.
 */
#define	SE_DELETED	(1 | INT_MIN)

#ifdef VM_STATS
uint_t	vph_kvp_count;
uint_t	vph_swapfsvp_count;
uint_t	vph_other;
#endif /* VM_STATS */

#ifdef VM_STATS
uint_t	page_lock_count;
uint_t	page_lock_miss;
uint_t	page_lock_miss_lock;
uint_t	page_lock_reclaim;
uint_t	page_lock_bad_reclaim;
uint_t	page_lock_same_page;
uint_t	page_lock_upgrade;
uint_t	page_lock_retired;
uint_t	page_lock_upgrade_failed;
uint_t	page_lock_deleted;

uint_t	page_trylock_locked;
uint_t	page_trylock_failed;
uint_t	page_trylock_missed;

uint_t	page_try_reclaim_upgrade;
#endif /* VM_STATS */

/*
 * Acquire the "shared/exclusive" lock on a page.
 *
 * Returns 1 on success and locks the page appropriately.
 * 0 on failure and does not lock the page.
 *
 * If `lock' is non-NULL, it will be dropped and reacquired in the
 * failure case.  This routine can block, and if it does
 * it will always return a failure since the page identity [vp, off]
 * or state may have changed.
 */
int
page_lock(page_t *pp, se_t se, kmutex_t *lock, reclaim_t reclaim)
{
	return (page_lock_es(pp, se, lock, reclaim, 0));
}
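
/*
 * A sketch of typical use (hypothetical caller; error handling elided).
 * The `lock' argument is typically the hash mutex (here called `phm')
 * the caller was holding when it found the page, so that page_lock()
 * can drop it before blocking:
 *
 *	if (!page_lock(pp, SE_SHARED, phm, P_RECLAIM)) {
 *		...we may have blocked, so the identity of pp may
 *		have changed; start the lookup over...
 *	}
 */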

/*
 * With the addition of reader-writer lock semantics to page_lock_es,
 * callers wanting an exclusive (writer) lock may prevent shared-lock
 * (reader) starvation by setting the es parameter to SE_EXCL_WANTED.
 * In this case, when an exclusive lock cannot be acquired, p_selock's
 * SE_EWANTED bit is set.  Shared-lock (reader) requests are also denied
 * if the page is slated for retirement.
 *
 * The se and es parameters determine if the lock should be granted
 * based on the following decision table:
 *
 * Lock wanted   es flags       p_selock/SE_EWANTED  Action
 * -----------   -------------  -------------------  ---------
 * SE_EXCL       any [1][2]     unlocked/any         grant lock, clear SE_EWANTED
 * SE_EXCL       SE_EWANTED     any lock/any         deny, set SE_EWANTED
 * SE_EXCL       none           any lock/any         deny
 * SE_SHARED     n/a [2]        shared/0             grant
 * SE_SHARED     n/a [2]        unlocked/0           grant
 * SE_SHARED     n/a            shared/1             deny
 * SE_SHARED     n/a            unlocked/1           deny
 * SE_SHARED     n/a            excl/any             deny
 *
 * Notes:
 * [1] The code grants an exclusive lock to the caller and clears the bit
 *     SE_EWANTED whenever p_selock is unlocked, regardless of the SE_EWANTED
 *     bit's value.  This was deemed acceptable as we are not concerned about
 *     exclusive-lock starvation.  If this ever becomes an issue, a priority
 *     or fifo mechanism should also be implemented.  Meantime, the thread
 *     that set SE_EWANTED should be prepared to catch this condition and
 *     reset it.
 *
 * [2] Retired pages may not be locked at any time, regardless of the
 *     disposition of se, unless the es parameter has the SE_RETIRED flag
 *     set.
 *
 * Notes on values of "es":
 *
 * es & 1: page_lookup_create will attempt page relocation
 * es & SE_EXCL_WANTED: caller wants SE_EWANTED set (eg. delete
 * memory thread); this prevents reader-starvation of waiting
 * writer thread(s) by giving priority to writers over readers.
 * es & SE_RETIRED: caller wants to lock pages even if they are
 * retired.  Default is to deny the lock if the page is retired.
 *
 * And yes, we know, the semantics of this function are too complicated.
 * It's on the list to be cleaned up.
 */
int
page_lock_es(page_t *pp, se_t se, kmutex_t *lock, reclaim_t reclaim, int es)
{
	int		retval;
	kmutex_t	*pse = PAGE_SE_MUTEX(pp);
	int		upgraded;
	int		reclaim_it;

	ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1);

	VM_STAT_ADD(page_lock_count);

	upgraded = 0;
	reclaim_it = 0;

	mutex_enter(pse);

	ASSERT(((es & SE_EXCL_WANTED) == 0) ||
	    ((es & SE_EXCL_WANTED) && (se == SE_EXCL)));

	if (PP_RETIRED(pp) && !(es & SE_RETIRED)) {
		mutex_exit(pse);
		VM_STAT_ADD(page_lock_retired);
		return (0);
	}

	if (se == SE_SHARED && es == 1 && pp->p_selock == 0) {
		se = SE_EXCL;
	}

	if ((reclaim == P_RECLAIM) && (PP_ISFREE(pp))) {

		reclaim_it = 1;
		if (se == SE_SHARED) {
			/*
			 * This is an interesting situation.
			 *
			 * Remember that p_free can only change if
			 * p_selock < 0.
			 * p_free does not depend on our holding `pse'.
			 * And, since we hold `pse', p_selock can not change.
			 * So, if p_free changes on us, the page is already
			 * exclusively held, and we would fail to get p_selock
			 * regardless.
			 *
			 * We want to avoid getting the share
			 * lock on a free page that needs to be reclaimed.
			 * It is possible that some other thread has the share
			 * lock and has left the free page on the cache list.
			 * pvn_vplist_dirty() does this for brief periods.
			 * If the se_share is currently SE_EXCL, we will fail
			 * to acquire p_selock anyway.  Blocking is the
			 * right thing to do.
			 * If we need to reclaim this page, we must get
			 * exclusive access to it, force the upgrade now.
			 * Again, we will fail to acquire p_selock if the
			 * page is not free and block.
			 */
			upgraded = 1;
			se = SE_EXCL;
			VM_STAT_ADD(page_lock_upgrade);
		}
	}
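
	/*
	 * At this point `se' reflects what we will actually try to
	 * acquire: SE_EXCL if the caller asked for it, if the es hint
	 * promoted an uncontended shared request, or if reclaiming a
	 * free page forced an upgrade; SE_SHARED otherwise.
	 */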
	if (se == SE_EXCL) {
		if (!(es & SE_EXCL_WANTED) && (pp->p_selock & SE_EWANTED)) {
			/*
			 * if the caller wants a writer lock (but did not
			 * specify exclusive access), and there is a pending
			 * writer that wants exclusive access, return failure
			 */
			retval = 0;
		} else if ((pp->p_selock & ~SE_EWANTED) == 0) {
			/* no reader/writer lock held */
			THREAD_KPRI_REQUEST();
			/* this clears our setting of the SE_EWANTED bit */
			pp->p_selock = SE_WRITER;
			retval = 1;
		} else {
			/* page is locked */
			if (es & SE_EXCL_WANTED) {
				/* set the SE_EWANTED bit */
				pp->p_selock |= SE_EWANTED;
			}
			retval = 0;
		}
	} else {
		retval = 0;
		if (pp->p_selock >= 0) {
			if ((pp->p_selock & SE_EWANTED) == 0) {
				pp->p_selock += SE_READER;
				retval = 1;
			}
		}
	}

	if (retval == 0) {
		if ((pp->p_selock & ~SE_EWANTED) == SE_DELETED) {
			VM_STAT_ADD(page_lock_deleted);
			mutex_exit(pse);
			return (retval);
		}

#ifdef VM_STATS
		VM_STAT_ADD(page_lock_miss);
		if (upgraded) {
			VM_STAT_ADD(page_lock_upgrade_failed);
		}
#endif
		if (lock) {
			VM_STAT_ADD(page_lock_miss_lock);
			mutex_exit(lock);
		}

		/*
		 * Now, wait for the page to be unlocked and
		 * release the lock protecting p_cv and p_selock.
		 */
		cv_wait(&pp->p_cv, pse);
		mutex_exit(pse);

		/*
		 * The page identity may have changed while we were
		 * blocked.  If we are willing to depend on "pp"
		 * still pointing to a valid page structure (i.e.,
		 * assuming page structures are not dynamically allocated
		 * or freed), we could try to lock the page if its
		 * identity hasn't changed.
		 *
		 * This needs to be measured; since we come back from
		 * cv_wait holding pse (the expensive part of this
		 * operation), we might as well try the cheap part.
		 * Though we would also have to confirm that dropping
		 * `lock' did not cause any grief to the callers.
		 */
		if (lock) {
			mutex_enter(lock);
		}
	} else {
		/*
		 * We have the page lock.
		 * If we needed to reclaim the page, and the page
		 * needed reclaiming (ie, it was free), then we
		 * have the page exclusively locked.  We may need
		 * to downgrade the page.
		 */
		ASSERT((upgraded) ?
		    ((PP_ISFREE(pp)) && PAGE_EXCL(pp)) : 1);
		mutex_exit(pse);

		/*
		 * We now hold this page's lock, either shared or
		 * exclusive.  This will prevent its identity from changing.
		 * The page, however, may or may not be free.  If the caller
		 * requested, and it is free, go reclaim it from the
		 * free list.  If the page can't be reclaimed, return failure
		 * so that the caller can start all over again.
		 *
		 * NOTE: page_reclaim() releases the page lock (p_selock)
		 * if it can't be reclaimed.
		 */
		if (reclaim_it) {
			if (!page_reclaim(pp, lock)) {
				VM_STAT_ADD(page_lock_bad_reclaim);
				retval = 0;
			} else {
				VM_STAT_ADD(page_lock_reclaim);
				if (upgraded) {
					page_downgrade(pp);
				}
			}
		}
	}
	return (retval);
}

/*
 * Clear the SE_EWANTED bit from p_selock.  This function allows
 * callers of page_lock_es and page_try_reclaim_lock to clear
 * their setting of this bit if they decide they no longer wish
 * to gain exclusive access to the page.  Currently only
 * delete_memory_thread uses this when the delete memory
 * operation is cancelled.
 */
void
page_lock_clr_exclwanted(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);

	mutex_enter(pse);
	pp->p_selock &= ~SE_EWANTED;
	if (CV_HAS_WAITERS(&pp->p_cv))
		cv_broadcast(&pp->p_cv);
	mutex_exit(pse);
}
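
/*
 * A sketch of the SE_EXCL_WANTED protocol described below (hypothetical
 * caller and `cancelled' flag; the real user is the memory-delete
 * thread).  A thread that stops retrying must clear the bit it may have
 * left set:
 *
 *	while (!page_try_reclaim_lock(pp, SE_EXCL, SE_EXCL_WANTED)) {
 *		if (cancelled) {
 *			page_lock_clr_exclwanted(pp);
 *			break;
 *		}
 *		delay(1);
 *	}
 */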

/*
 * Read the comments inside of page_lock_es() carefully.
 *
 * SE_EXCL callers specifying es == SE_EXCL_WANTED will cause the
 * SE_EWANTED bit of p_selock to be set when the lock cannot be obtained.
 * This is used by threads subject to reader-starvation (eg. memory delete).
 *
 * When a thread using SE_EXCL_WANTED does not obtain the SE_EXCL lock,
 * it is expected that it will retry at a later time.  Threads that will
 * not retry the lock *must* call page_lock_clr_exclwanted to clear the
 * SE_EWANTED bit.  (When a thread using SE_EXCL_WANTED obtains the lock,
 * the bit is cleared.)
 */
int
page_try_reclaim_lock(page_t *pp, se_t se, int es)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);
	selock_t old;

	mutex_enter(pse);

	old = pp->p_selock;

	ASSERT(((es & SE_EXCL_WANTED) == 0) ||
	    ((es & SE_EXCL_WANTED) && (se == SE_EXCL)));

	if (PP_RETIRED(pp) && !(es & SE_RETIRED)) {
		mutex_exit(pse);
		VM_STAT_ADD(page_trylock_failed);
		return (0);
	}

	if (se == SE_SHARED && es == 1 && old == 0) {
		se = SE_EXCL;
	}

	if (se == SE_SHARED) {
		if (!PP_ISFREE(pp)) {
			if (old >= 0) {
				/*
				 * Readers are not allowed when excl wanted
				 */
				if ((old & SE_EWANTED) == 0) {
					pp->p_selock = old + SE_READER;
					mutex_exit(pse);
					return (1);
				}
			}
			mutex_exit(pse);
			return (0);
		}
		/*
		 * The page is free, so we really want SE_EXCL (below)
		 */
		VM_STAT_ADD(page_try_reclaim_upgrade);
	}

	/*
	 * The caller wants a writer lock.  We try for it only if
	 * SE_EWANTED is not set, or if the caller specified
	 * SE_EXCL_WANTED.
	 */
	if (!(old & SE_EWANTED) || (es & SE_EXCL_WANTED)) {
		if ((old & ~SE_EWANTED) == 0) {
			/* no reader/writer lock held */
			THREAD_KPRI_REQUEST();
			/* this clears out our setting of the SE_EWANTED bit */
			pp->p_selock = SE_WRITER;
			mutex_exit(pse);
			return (1);
		}
	}
	if (es & SE_EXCL_WANTED) {
		/* page is locked, set the SE_EWANTED bit */
		pp->p_selock |= SE_EWANTED;
	}
	mutex_exit(pse);
	return (0);
}
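
/*
 * Note the division of labor: page_try_reclaim_lock() above implements
 * the full es-flag semantics (retirement override, free-page upgrade),
 * while page_trylock() below is the bare non-blocking primitive with no
 * reclaim or upgrade behavior.
 */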

/*
 * Acquire a page's "shared/exclusive" lock, but never block.
 * Returns 1 on success, 0 on failure.
 */
int
page_trylock(page_t *pp, se_t se)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);

	mutex_enter(pse);
	if (pp->p_selock & SE_EWANTED || PP_RETIRED(pp) ||
	    (se == SE_SHARED && PP_PR_NOSHARE(pp))) {
		/*
		 * Fail if another thread has declared its intent to take
		 * the lock exclusively (SE_EWANTED), if the page is
		 * retired, or if a shared lock is requested on a page
		 * that is slated for retirement.
		 */
		mutex_exit(pse);
		VM_STAT_ADD(page_trylock_failed);
		return (0);
	}

	if (se == SE_EXCL) {
		if (pp->p_selock == 0) {
			THREAD_KPRI_REQUEST();
			pp->p_selock = SE_WRITER;
			mutex_exit(pse);
			return (1);
		}
	} else {
		if (pp->p_selock >= 0) {
			pp->p_selock += SE_READER;
			mutex_exit(pse);
			return (1);
		}
	}
	mutex_exit(pse);
	return (0);
}

/*
 * Variant of page_unlock() specifically for the page freelist
 * code.  The mere existence of this code is a vile hack that
 * has resulted due to the backwards locking order of the page
 * freelist manager; please don't call it.
 */
void
page_unlock_nocapture(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);
	selock_t old;

	mutex_enter(pse);

	old = pp->p_selock;
	if ((old & ~SE_EWANTED) == SE_READER) {
		pp->p_selock = old & ~SE_READER;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
		panic("page_unlock_nocapture: page %p is deleted", pp);
	} else if (old < 0) {
		THREAD_KPRI_RELEASE();
		pp->p_selock &= SE_EWANTED;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) > SE_READER) {
		pp->p_selock = old - SE_READER;
	} else {
		panic("page_unlock_nocapture: page %p is not locked", pp);
	}

	mutex_exit(pse);
}
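
/*
 * The unlock logic above (and again in page_unlock() below) decodes
 * p_selock as follows: exactly one reader drops the lock to unlocked
 * and wakes any waiters; a writer (negative value) clears everything
 * except SE_EWANTED and wakes any waiters; more than one reader simply
 * decrements the count; anything else is a fatal bookkeeping error.
 */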

/*
 * Release the page's "shared/exclusive" lock and wake up anyone
 * who might be waiting for it.
 */
void
page_unlock(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);
	selock_t old;

	mutex_enter(pse);

	old = pp->p_selock;
	if ((old & ~SE_EWANTED) == SE_READER) {
		pp->p_selock = old & ~SE_READER;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
		panic("page_unlock: page %p is deleted", pp);
	} else if (old < 0) {
		THREAD_KPRI_RELEASE();
		pp->p_selock &= SE_EWANTED;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) > SE_READER) {
		pp->p_selock = old - SE_READER;
	} else {
		panic("page_unlock: page %p is not locked", pp);
	}

	if (pp->p_selock == 0) {
		/*
		 * If the T_CAPTURING bit is set, that means that we should
		 * not try and capture the page again as we could recurse
		 * which could lead to a stack overflow panic or spending a
		 * relatively long time in the kernel making no progress.
		 */
		if ((pp->p_toxic & PR_CAPTURE) &&
		    !(curthread->t_flag & T_CAPTURING) &&
		    !PP_RETIRED(pp)) {
			THREAD_KPRI_REQUEST();
			pp->p_selock = SE_WRITER;
			mutex_exit(pse);
			page_unlock_capture(pp);
		} else {
			mutex_exit(pse);
		}
	} else {
		mutex_exit(pse);
	}
}
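
/*
 * Note the hand-off above: when a capture is pending (PR_CAPTURE) and
 * the lock has just gone idle, page_unlock() re-takes p_selock as
 * SE_WRITER itself and passes the exclusively-held page to
 * page_unlock_capture(), which is then responsible for releasing it.
 */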

/*
 * Try to upgrade the lock on the page from a "shared" to an
 * "exclusive" lock.  Since this upgrade operation is done while
 * holding the mutex protecting this page, no one else can acquire this
 * page's lock and change the page.  Thus, it is safe to drop the "shared"
 * lock and attempt to acquire the "exclusive" lock.
 *
 * Returns 1 on success, 0 on failure.
 */
int
page_tryupgrade(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);

	mutex_enter(pse);
	if (!(pp->p_selock & SE_EWANTED)) {
		/* no threads want exclusive access, try upgrade */
		if (pp->p_selock == SE_READER) {
			THREAD_KPRI_REQUEST();
			/* convert to exclusive lock */
			pp->p_selock = SE_WRITER;
			mutex_exit(pse);
			return (1);
		}
	}
	mutex_exit(pse);
	return (0);
}

/*
 * Downgrade the "exclusive" lock on the page to a "shared" lock
 * while holding the mutex protecting this page's p_selock field.
 */
void
page_downgrade(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);
	int excl_waiting;

	ASSERT((pp->p_selock & ~SE_EWANTED) != SE_DELETED);
	ASSERT(PAGE_EXCL(pp));

	mutex_enter(pse);
	excl_waiting = pp->p_selock & SE_EWANTED;
	THREAD_KPRI_RELEASE();
	pp->p_selock = SE_READER | excl_waiting;
	if (CV_HAS_WAITERS(&pp->p_cv))
		cv_broadcast(&pp->p_cv);
	mutex_exit(pse);
}

void
page_lock_delete(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);

	ASSERT(PAGE_EXCL(pp));
	ASSERT(pp->p_vnode == NULL);
	ASSERT(pp->p_offset == (u_offset_t)-1);
	ASSERT(!PP_ISFREE(pp));

	mutex_enter(pse);
	THREAD_KPRI_RELEASE();
	pp->p_selock = SE_DELETED;
	if (CV_HAS_WAITERS(&pp->p_cv))
		cv_broadcast(&pp->p_cv);
	mutex_exit(pse);
}

int
page_deleted(page_t *pp)
{
	return (pp->p_selock == SE_DELETED);
}

/*
 * Implement the io lock for pages
 */
void
page_iolock_init(page_t *pp)
{
	pp->p_iolock_state = 0;
	cv_init(&pp->p_io_cv, NULL, CV_DEFAULT, NULL);
}
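
/*
 * The i/o lock that follows is a simple binary sleep lock built from a
 * hashed mutex (PAGE_IO_MUTEX), the page's p_io_cv condition variable,
 * and the PAGE_IO_INUSE bit of p_iolock_state.  It serializes i/o to a
 * page (e.g. pagein/pageout) independently of the p_selock semantics
 * above.
 */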

/*
 * Acquire the i/o lock on a page.
 */
void
page_io_lock(page_t *pp)
{
	kmutex_t *pio;

	pio = PAGE_IO_MUTEX(pp);
	mutex_enter(pio);
	while (pp->p_iolock_state & PAGE_IO_INUSE) {
		cv_wait(&(pp->p_io_cv), pio);
	}
	pp->p_iolock_state |= PAGE_IO_INUSE;
	mutex_exit(pio);
}

/*
 * Release the i/o lock on a page.
 */
void
page_io_unlock(page_t *pp)
{
	kmutex_t *pio;

	pio = PAGE_IO_MUTEX(pp);
	mutex_enter(pio);
	cv_broadcast(&pp->p_io_cv);
	pp->p_iolock_state &= ~PAGE_IO_INUSE;
	mutex_exit(pio);
}

/*
 * Try to acquire the i/o lock on a page without blocking.
 * Returns 1 on success, 0 on failure.
 */
int
page_io_trylock(page_t *pp)
{
	kmutex_t *pio;

	if (pp->p_iolock_state & PAGE_IO_INUSE)
		return (0);

	pio = PAGE_IO_MUTEX(pp);
	mutex_enter(pio);

	if (pp->p_iolock_state & PAGE_IO_INUSE) {
		mutex_exit(pio);
		return (0);
	}
	pp->p_iolock_state |= PAGE_IO_INUSE;
	mutex_exit(pio);

	return (1);
}

/*
 * Wait until the i/o lock is not held.
 */
void
page_io_wait(page_t *pp)
{
	kmutex_t *pio;

	pio = PAGE_IO_MUTEX(pp);
	mutex_enter(pio);
	while (pp->p_iolock_state & PAGE_IO_INUSE) {
		cv_wait(&(pp->p_io_cv), pio);
	}
	mutex_exit(pio);
}

/*
 * Returns 1 if the page's i/o lock is held, 0 if it is not.
 */
int
page_io_locked(page_t *pp)
{
	return (pp->p_iolock_state & PAGE_IO_INUSE);
}

/*
 * Assert that the i/o lock on a page is held.
 * Returns 1 on success, 0 on failure.
 */
int
page_iolock_assert(page_t *pp)
{
	return (page_io_locked(pp));
}
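
/*
 * Note that page_io_trylock() above first tests PAGE_IO_INUSE without
 * the mutex as a cheap fast-path rejection and then re-tests under the
 * mutex before claiming the bit; the unlocked read can race, but a
 * stale answer costs at most a mutex round-trip or a harmless early
 * failure.
 */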

/*
 * Wrapper exported to kernel routines that are built
 * platform-independent (the macro is platform-dependent;
 * the size of vph_mutex[] is based on NCPU).
 *
 * Note that you can do stress testing on this by setting the
 * variable page_vnode_mutex_stress to something other than
 * zero in a DEBUG kernel in a debugger after loading the kernel.
 * Setting it after the kernel is running may not work correctly.
 */
#ifdef	DEBUG
static int page_vnode_mutex_stress = 0;
#endif

kmutex_t *
page_vnode_mutex(vnode_t *vp)
{
	if (vp == &kvp)
		return (&vph_mutex[VPH_TABLE_SIZE + 0]);

	if (vp == &zvp)
		return (&vph_mutex[VPH_TABLE_SIZE + 1]);
#ifdef	DEBUG
	if (page_vnode_mutex_stress != 0)
		return (&vph_mutex[0]);
#endif

	return (&vph_mutex[VP_HASH_FUNC(vp)]);
}

kmutex_t *
page_se_mutex(page_t *pp)
{
	return (PAGE_SE_MUTEX(pp));
}

#ifdef VM_STATS
uint_t	pszclck_stat[4];
#endif
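
/*
 * Background for page_szc_lock() below: p_szc is the page size code of
 * the large page a constituent page_t belongs to, and PP_GROUPLEADER()
 * locates the first (root) constituent page for a given szc.
 * hat_page_demote() splits a large page into smaller ones, decreasing
 * p_szc and moving the root; it updates the root's p_szc last, which is
 * what the retry logic below depends on.
 */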

/*
 * Find, take and return a mutex held by hat_page_demote().
 * Called by page_demote_vp_pages() before hat_page_demote() call and by
 * routines that want to block hat_page_demote() but can't do it
 * via locking all constituent pages.
 *
 * Return NULL if p_szc is 0.
 *
 * It should only be used for pages that can be demoted by hat_page_demote()
 * i.e. non swapfs file system pages.  The logic here is lifted from
 * sfmmu_mlspl_enter() except there's no need to worry about p_szc increase
 * since the page is locked and not free.
 *
 * Hash of the root page is used to find the lock.
 * To find the root in the presence of hat_page_demote() changing the
 * location of the root, this routine relies on the fact that
 * hat_page_demote() changes root last.
 *
 * If NULL is returned pp's p_szc is guaranteed to be 0.  If non-NULL is
 * returned pp's p_szc may be any value.
 */
kmutex_t *
page_szc_lock(page_t *pp)
{
	kmutex_t	*mtx;
	page_t		*rootpp;
	uint_t		szc;
	uint_t		rszc;
	uint_t		pszc = pp->p_szc;

	ASSERT(pp != NULL);
	ASSERT(PAGE_LOCKED(pp));
	ASSERT(!PP_ISFREE(pp));
	ASSERT(pp->p_vnode != NULL);
	ASSERT(!IS_SWAPFSVP(pp->p_vnode));
	ASSERT(!PP_ISKAS(pp));

again:
	if (pszc == 0) {
		VM_STAT_ADD(pszclck_stat[0]);
		return (NULL);
	}

	/* The lock lives in the root page */

	rootpp = PP_GROUPLEADER(pp, pszc);
	mtx = PAGE_SZC_MUTEX(rootpp);
	mutex_enter(mtx);

	/*
	 * Since p_szc can only decrease, if pp == rootpp then rootpp will
	 * always be the right root regardless of rootpp->p_szc.
	 * Otherwise, if the location of pp's root didn't change after we
	 * took the lock, we have the right root; return the mutex hashed
	 * off of it.
	 */
	if (pp == rootpp || (rszc = rootpp->p_szc) == pszc) {
		VM_STAT_ADD(pszclck_stat[1]);
		return (mtx);
	}

	/*
	 * Root location changed because the page got demoted.
	 * Locate the new root.
	 */
	if (rszc < pszc) {
		szc = pp->p_szc;
		ASSERT(szc < pszc);
		mutex_exit(mtx);
		pszc = szc;
		VM_STAT_ADD(pszclck_stat[2]);
		goto again;
	}

	VM_STAT_ADD(pszclck_stat[3]);
	/*
	 * The current hat_page_demote() is not done yet.
	 * Wait for it to finish.
	 */
	mutex_exit(mtx);
	rootpp = PP_GROUPLEADER(rootpp, rszc);
	mtx = PAGE_SZC_MUTEX(rootpp);
	mutex_enter(mtx);
	mutex_exit(mtx);
	ASSERT(rootpp->p_szc < rszc);
	goto again;
}

int
page_szc_lock_assert(page_t *pp)
{
	page_t *rootpp = PP_PAGEROOT(pp);
	kmutex_t *mtx = PAGE_SZC_MUTEX(rootpp);

	return (MUTEX_HELD(mtx));
}