xref: /onnv-gate/usr/src/uts/common/vm/vm_page.c (revision 1338:c2e71173ece3)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
50Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
60Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
70Sstevel@tonic-gate  * with the License.
80Sstevel@tonic-gate  *
90Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
100Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
110Sstevel@tonic-gate  * See the License for the specific language governing permissions
120Sstevel@tonic-gate  * and limitations under the License.
130Sstevel@tonic-gate  *
140Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
150Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
160Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
170Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
180Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
190Sstevel@tonic-gate  *
200Sstevel@tonic-gate  * CDDL HEADER END
210Sstevel@tonic-gate  */
220Sstevel@tonic-gate /*
23*1338Selowe  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
240Sstevel@tonic-gate  * Use is subject to license terms.
250Sstevel@tonic-gate  */
260Sstevel@tonic-gate 
270Sstevel@tonic-gate /*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989  AT&T	*/
280Sstevel@tonic-gate /*	  All Rights Reserved  	*/
290Sstevel@tonic-gate 
300Sstevel@tonic-gate /*
310Sstevel@tonic-gate  * University Copyright- Copyright (c) 1982, 1986, 1988
320Sstevel@tonic-gate  * The Regents of the University of California
330Sstevel@tonic-gate  * All Rights Reserved
340Sstevel@tonic-gate  *
350Sstevel@tonic-gate  * University Acknowledgment- Portions of this document are derived from
360Sstevel@tonic-gate  * software developed by the University of California, Berkeley, and its
370Sstevel@tonic-gate  * contributors.
380Sstevel@tonic-gate  */
390Sstevel@tonic-gate 
400Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
410Sstevel@tonic-gate 
420Sstevel@tonic-gate /*
430Sstevel@tonic-gate  * VM - physical page management.
440Sstevel@tonic-gate  */
450Sstevel@tonic-gate 
460Sstevel@tonic-gate #include <sys/types.h>
470Sstevel@tonic-gate #include <sys/t_lock.h>
480Sstevel@tonic-gate #include <sys/param.h>
490Sstevel@tonic-gate #include <sys/systm.h>
500Sstevel@tonic-gate #include <sys/errno.h>
510Sstevel@tonic-gate #include <sys/time.h>
520Sstevel@tonic-gate #include <sys/vnode.h>
530Sstevel@tonic-gate #include <sys/vm.h>
540Sstevel@tonic-gate #include <sys/vtrace.h>
550Sstevel@tonic-gate #include <sys/swap.h>
560Sstevel@tonic-gate #include <sys/cmn_err.h>
570Sstevel@tonic-gate #include <sys/tuneable.h>
580Sstevel@tonic-gate #include <sys/sysmacros.h>
590Sstevel@tonic-gate #include <sys/cpuvar.h>
600Sstevel@tonic-gate #include <sys/callb.h>
610Sstevel@tonic-gate #include <sys/debug.h>
620Sstevel@tonic-gate #include <sys/tnf_probe.h>
630Sstevel@tonic-gate #include <sys/condvar_impl.h>
640Sstevel@tonic-gate #include <sys/mem_config.h>
650Sstevel@tonic-gate #include <sys/mem_cage.h>
660Sstevel@tonic-gate #include <sys/kmem.h>
670Sstevel@tonic-gate #include <sys/atomic.h>
680Sstevel@tonic-gate #include <sys/strlog.h>
690Sstevel@tonic-gate #include <sys/mman.h>
700Sstevel@tonic-gate #include <sys/ontrap.h>
710Sstevel@tonic-gate #include <sys/lgrp.h>
720Sstevel@tonic-gate #include <sys/vfs.h>
730Sstevel@tonic-gate 
740Sstevel@tonic-gate #include <vm/hat.h>
750Sstevel@tonic-gate #include <vm/anon.h>
760Sstevel@tonic-gate #include <vm/page.h>
770Sstevel@tonic-gate #include <vm/seg.h>
780Sstevel@tonic-gate #include <vm/pvn.h>
790Sstevel@tonic-gate #include <vm/seg_kmem.h>
800Sstevel@tonic-gate #include <vm/vm_dep.h>
810Sstevel@tonic-gate 
820Sstevel@tonic-gate #include <fs/fs_subr.h>
830Sstevel@tonic-gate 
840Sstevel@tonic-gate static int nopageage = 0;
850Sstevel@tonic-gate 
860Sstevel@tonic-gate static pgcnt_t max_page_get;	/* max page_get request size in pages */
870Sstevel@tonic-gate pgcnt_t total_pages = 0;	/* total number of pages (used by /proc) */
880Sstevel@tonic-gate 
890Sstevel@tonic-gate /*
900Sstevel@tonic-gate  * freemem_lock protects all freemem variables:
910Sstevel@tonic-gate  * availrmem. Also this lock protects the globals which track the
920Sstevel@tonic-gate  * availrmem changes for accurate kernel footprint calculation.
930Sstevel@tonic-gate  * See below for an explanation of these
940Sstevel@tonic-gate  * globals.
950Sstevel@tonic-gate  */
960Sstevel@tonic-gate kmutex_t freemem_lock;
970Sstevel@tonic-gate pgcnt_t availrmem;
980Sstevel@tonic-gate pgcnt_t availrmem_initial;
990Sstevel@tonic-gate 
1000Sstevel@tonic-gate /*
1010Sstevel@tonic-gate  * These globals track availrmem changes to get a more accurate
1020Sstevel@tonic-gate  * estimate of tke kernel size. Historically pp_kernel is used for
1030Sstevel@tonic-gate  * kernel size and is based on availrmem. But availrmem is adjusted for
1040Sstevel@tonic-gate  * locked pages in the system not just for kernel locked pages.
1050Sstevel@tonic-gate  * These new counters will track the pages locked through segvn and
1060Sstevel@tonic-gate  * by explicit user locking.
1070Sstevel@tonic-gate  *
1080Sstevel@tonic-gate  * segvn_pages_locked : This keeps track on a global basis how many pages
1090Sstevel@tonic-gate  * are currently locked because of I/O.
1100Sstevel@tonic-gate  *
1110Sstevel@tonic-gate  * pages_locked : How many pages are locked becuase of user specified
1120Sstevel@tonic-gate  * locking through mlock or plock.
1130Sstevel@tonic-gate  *
1140Sstevel@tonic-gate  * pages_useclaim,pages_claimed : These two variables track the
1150Sstevel@tonic-gate  * cliam adjustments because of the protection changes on a segvn segment.
1160Sstevel@tonic-gate  *
1170Sstevel@tonic-gate  * All these globals are protected by the same lock which protects availrmem.
1180Sstevel@tonic-gate  */
1190Sstevel@tonic-gate pgcnt_t segvn_pages_locked;
1200Sstevel@tonic-gate pgcnt_t pages_locked;
1210Sstevel@tonic-gate pgcnt_t pages_useclaim;
1220Sstevel@tonic-gate pgcnt_t pages_claimed;
1230Sstevel@tonic-gate 
1240Sstevel@tonic-gate 
1250Sstevel@tonic-gate /*
1260Sstevel@tonic-gate  * new_freemem_lock protects freemem, freemem_wait & freemem_cv.
1270Sstevel@tonic-gate  */
1280Sstevel@tonic-gate static kmutex_t	new_freemem_lock;
1290Sstevel@tonic-gate static uint_t	freemem_wait;	/* someone waiting for freemem */
1300Sstevel@tonic-gate static kcondvar_t freemem_cv;
1310Sstevel@tonic-gate 
1320Sstevel@tonic-gate /*
1330Sstevel@tonic-gate  * The logical page free list is maintained as two lists, the 'free'
1340Sstevel@tonic-gate  * and the 'cache' lists.
1350Sstevel@tonic-gate  * The free list contains those pages that should be reused first.
1360Sstevel@tonic-gate  *
1370Sstevel@tonic-gate  * The implementation of the lists is machine dependent.
1380Sstevel@tonic-gate  * page_get_freelist(), page_get_cachelist(),
1390Sstevel@tonic-gate  * page_list_sub(), and page_list_add()
1400Sstevel@tonic-gate  * form the interface to the machine dependent implementation.
1410Sstevel@tonic-gate  *
1420Sstevel@tonic-gate  * Pages with p_free set are on the cache list.
1430Sstevel@tonic-gate  * Pages with p_free and p_age set are on the free list,
1440Sstevel@tonic-gate  *
1450Sstevel@tonic-gate  * A page may be locked while on either list.
1460Sstevel@tonic-gate  */
1470Sstevel@tonic-gate 
1480Sstevel@tonic-gate /*
1490Sstevel@tonic-gate  * free list accounting stuff.
1500Sstevel@tonic-gate  *
1510Sstevel@tonic-gate  *
1520Sstevel@tonic-gate  * Spread out the value for the number of pages on the
1530Sstevel@tonic-gate  * page free and page cache lists.  If there is just one
1540Sstevel@tonic-gate  * value, then it must be under just one lock.
1550Sstevel@tonic-gate  * The lock contention and cache traffic are a real bother.
1560Sstevel@tonic-gate  *
1570Sstevel@tonic-gate  * When we acquire and then drop a single pcf lock
1580Sstevel@tonic-gate  * we can start in the middle of the array of pcf structures.
1590Sstevel@tonic-gate  * If we acquire more than one pcf lock at a time, we need to
1600Sstevel@tonic-gate  * start at the front to avoid deadlocking.
1610Sstevel@tonic-gate  *
1620Sstevel@tonic-gate  * pcf_count holds the number of pages in each pool.
1630Sstevel@tonic-gate  *
1640Sstevel@tonic-gate  * pcf_block is set when page_create_get_something() has asked the
1650Sstevel@tonic-gate  * PSM page freelist and page cachelist routines without specifying
1660Sstevel@tonic-gate  * a color and nothing came back.  This is used to block anything
1670Sstevel@tonic-gate  * else from moving pages from one list to the other while the
1680Sstevel@tonic-gate  * lists are searched again.  If a page is freeed while pcf_block is
1690Sstevel@tonic-gate  * set, then pcf_reserve is incremented.  pcgs_unblock() takes care
1700Sstevel@tonic-gate  * of clearning pcf_block, doing the wakeups, etc.
1710Sstevel@tonic-gate  */
1720Sstevel@tonic-gate 
1730Sstevel@tonic-gate #if NCPU <= 4
1740Sstevel@tonic-gate #define	PAD	1
1750Sstevel@tonic-gate #define	PCF_FANOUT	4
1760Sstevel@tonic-gate static	uint_t	pcf_mask = PCF_FANOUT - 1;
1770Sstevel@tonic-gate #else
1780Sstevel@tonic-gate #define	PAD	9
1790Sstevel@tonic-gate #ifdef sun4v
1800Sstevel@tonic-gate #define	PCF_FANOUT	32
1810Sstevel@tonic-gate #else
1820Sstevel@tonic-gate #define	PCF_FANOUT	128
1830Sstevel@tonic-gate #endif
1840Sstevel@tonic-gate static	uint_t	pcf_mask = PCF_FANOUT - 1;
1850Sstevel@tonic-gate #endif
1860Sstevel@tonic-gate 
1870Sstevel@tonic-gate struct pcf {
1880Sstevel@tonic-gate 	uint_t		pcf_touch;	/* just to help the cache */
1890Sstevel@tonic-gate 	uint_t		pcf_count;	/* page count */
1900Sstevel@tonic-gate 	kmutex_t	pcf_lock;	/* protects the structure */
1910Sstevel@tonic-gate 	uint_t		pcf_wait;	/* number of waiters */
1920Sstevel@tonic-gate 	uint_t		pcf_block; 	/* pcgs flag to page_free() */
1930Sstevel@tonic-gate 	uint_t		pcf_reserve; 	/* pages freed after pcf_block set */
1940Sstevel@tonic-gate 	uint_t		pcf_fill[PAD];	/* to line up on the caches */
1950Sstevel@tonic-gate };
1960Sstevel@tonic-gate 
1970Sstevel@tonic-gate static struct	pcf	pcf[PCF_FANOUT];
1980Sstevel@tonic-gate #define	PCF_INDEX()	((CPU->cpu_id) & (pcf_mask))
1990Sstevel@tonic-gate 
2000Sstevel@tonic-gate kmutex_t	pcgs_lock;		/* serializes page_create_get_ */
2010Sstevel@tonic-gate kmutex_t	pcgs_cagelock;		/* serializes NOSLEEP cage allocs */
2020Sstevel@tonic-gate kmutex_t	pcgs_wait_lock;		/* used for delay in pcgs */
2030Sstevel@tonic-gate static kcondvar_t	pcgs_cv;	/* cv for delay in pcgs */
2040Sstevel@tonic-gate 
2050Sstevel@tonic-gate #define	PAGE_LOCK_MAXIMUM \
2060Sstevel@tonic-gate 	((1 << (sizeof (((page_t *)0)->p_lckcnt) * NBBY)) - 1)
2070Sstevel@tonic-gate 
2080Sstevel@tonic-gate #ifdef VM_STATS
2090Sstevel@tonic-gate 
2100Sstevel@tonic-gate /*
2110Sstevel@tonic-gate  * No locks, but so what, they are only statistics.
2120Sstevel@tonic-gate  */
2130Sstevel@tonic-gate 
2140Sstevel@tonic-gate static struct page_tcnt {
2150Sstevel@tonic-gate 	int	pc_free_cache;		/* free's into cache list */
2160Sstevel@tonic-gate 	int	pc_free_dontneed;	/* free's with dontneed */
2170Sstevel@tonic-gate 	int	pc_free_pageout;	/* free's from pageout */
2180Sstevel@tonic-gate 	int	pc_free_free;		/* free's into free list */
2190Sstevel@tonic-gate 	int	pc_free_pages;		/* free's into large page free list */
2200Sstevel@tonic-gate 	int	pc_destroy_pages;	/* large page destroy's */
2210Sstevel@tonic-gate 	int	pc_get_cache;		/* get's from cache list */
2220Sstevel@tonic-gate 	int	pc_get_free;		/* get's from free list */
2230Sstevel@tonic-gate 	int	pc_reclaim;		/* reclaim's */
2240Sstevel@tonic-gate 	int	pc_abortfree;		/* abort's of free pages */
2250Sstevel@tonic-gate 	int	pc_find_hit;		/* find's that find page */
2260Sstevel@tonic-gate 	int	pc_find_miss;		/* find's that don't find page */
2270Sstevel@tonic-gate 	int	pc_destroy_free;	/* # of free pages destroyed */
2280Sstevel@tonic-gate #define	PC_HASH_CNT	(4*PAGE_HASHAVELEN)
2290Sstevel@tonic-gate 	int	pc_find_hashlen[PC_HASH_CNT+1];
2300Sstevel@tonic-gate 	int	pc_addclaim_pages;
2310Sstevel@tonic-gate 	int	pc_subclaim_pages;
2320Sstevel@tonic-gate 	int	pc_free_replacement_page[2];
2330Sstevel@tonic-gate 	int	pc_try_demote_pages[6];
2340Sstevel@tonic-gate 	int	pc_demote_pages[2];
2350Sstevel@tonic-gate } pagecnt;
2360Sstevel@tonic-gate 
2370Sstevel@tonic-gate uint_t	hashin_count;
2380Sstevel@tonic-gate uint_t	hashin_not_held;
2390Sstevel@tonic-gate uint_t	hashin_already;
2400Sstevel@tonic-gate 
2410Sstevel@tonic-gate uint_t	hashout_count;
2420Sstevel@tonic-gate uint_t	hashout_not_held;
2430Sstevel@tonic-gate 
2440Sstevel@tonic-gate uint_t	page_create_count;
2450Sstevel@tonic-gate uint_t	page_create_not_enough;
2460Sstevel@tonic-gate uint_t	page_create_not_enough_again;
2470Sstevel@tonic-gate uint_t	page_create_zero;
2480Sstevel@tonic-gate uint_t	page_create_hashout;
2490Sstevel@tonic-gate uint_t	page_create_page_lock_failed;
2500Sstevel@tonic-gate uint_t	page_create_trylock_failed;
2510Sstevel@tonic-gate uint_t	page_create_found_one;
2520Sstevel@tonic-gate uint_t	page_create_hashin_failed;
2530Sstevel@tonic-gate uint_t	page_create_dropped_phm;
2540Sstevel@tonic-gate 
2550Sstevel@tonic-gate uint_t	page_create_new;
2560Sstevel@tonic-gate uint_t	page_create_exists;
2570Sstevel@tonic-gate uint_t	page_create_putbacks;
2580Sstevel@tonic-gate uint_t	page_create_overshoot;
2590Sstevel@tonic-gate 
2600Sstevel@tonic-gate uint_t	page_reclaim_zero;
2610Sstevel@tonic-gate uint_t	page_reclaim_zero_locked;
2620Sstevel@tonic-gate 
2630Sstevel@tonic-gate uint_t	page_rename_exists;
2640Sstevel@tonic-gate uint_t	page_rename_count;
2650Sstevel@tonic-gate 
2660Sstevel@tonic-gate uint_t	page_lookup_cnt[20];
2670Sstevel@tonic-gate uint_t	page_lookup_nowait_cnt[10];
2680Sstevel@tonic-gate uint_t	page_find_cnt;
2690Sstevel@tonic-gate uint_t	page_exists_cnt;
2700Sstevel@tonic-gate uint_t	page_exists_forreal_cnt;
2710Sstevel@tonic-gate uint_t	page_lookup_dev_cnt;
2720Sstevel@tonic-gate uint_t	get_cachelist_cnt;
2730Sstevel@tonic-gate uint_t	page_create_cnt[10];
2740Sstevel@tonic-gate uint_t	alloc_pages[8];
2750Sstevel@tonic-gate uint_t	page_exphcontg[19];
2760Sstevel@tonic-gate uint_t  page_create_large_cnt[10];
2770Sstevel@tonic-gate 
2780Sstevel@tonic-gate /*
2790Sstevel@tonic-gate  * Collects statistics.
2800Sstevel@tonic-gate  */
2810Sstevel@tonic-gate #define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
2820Sstevel@tonic-gate 	uint_t	mylen = 0; \
2830Sstevel@tonic-gate 			\
2840Sstevel@tonic-gate 	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash, mylen++) { \
2850Sstevel@tonic-gate 		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
2860Sstevel@tonic-gate 			break; \
2870Sstevel@tonic-gate 	} \
2880Sstevel@tonic-gate 	if ((pp) != NULL) \
2890Sstevel@tonic-gate 		pagecnt.pc_find_hit++; \
2900Sstevel@tonic-gate 	else \
2910Sstevel@tonic-gate 		pagecnt.pc_find_miss++; \
2920Sstevel@tonic-gate 	if (mylen > PC_HASH_CNT) \
2930Sstevel@tonic-gate 		mylen = PC_HASH_CNT; \
2940Sstevel@tonic-gate 	pagecnt.pc_find_hashlen[mylen]++; \
2950Sstevel@tonic-gate }
2960Sstevel@tonic-gate 
2970Sstevel@tonic-gate #else	/* VM_STATS */
2980Sstevel@tonic-gate 
2990Sstevel@tonic-gate /*
3000Sstevel@tonic-gate  * Don't collect statistics
3010Sstevel@tonic-gate  */
3020Sstevel@tonic-gate #define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
3030Sstevel@tonic-gate 	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
3040Sstevel@tonic-gate 		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
3050Sstevel@tonic-gate 			break; \
3060Sstevel@tonic-gate 	} \
3070Sstevel@tonic-gate }
3080Sstevel@tonic-gate 
3090Sstevel@tonic-gate #endif	/* VM_STATS */
3100Sstevel@tonic-gate 
3110Sstevel@tonic-gate 
3120Sstevel@tonic-gate 
3130Sstevel@tonic-gate #ifdef DEBUG
3140Sstevel@tonic-gate #define	MEMSEG_SEARCH_STATS
3150Sstevel@tonic-gate #endif
3160Sstevel@tonic-gate 
3170Sstevel@tonic-gate #ifdef MEMSEG_SEARCH_STATS
3180Sstevel@tonic-gate struct memseg_stats {
3190Sstevel@tonic-gate     uint_t nsearch;
3200Sstevel@tonic-gate     uint_t nlastwon;
3210Sstevel@tonic-gate     uint_t nhashwon;
3220Sstevel@tonic-gate     uint_t nnotfound;
3230Sstevel@tonic-gate } memseg_stats;
3240Sstevel@tonic-gate 
3250Sstevel@tonic-gate #define	MEMSEG_STAT_INCR(v) \
3260Sstevel@tonic-gate 	atomic_add_32(&memseg_stats.v, 1)
3270Sstevel@tonic-gate #else
3280Sstevel@tonic-gate #define	MEMSEG_STAT_INCR(x)
3290Sstevel@tonic-gate #endif
3300Sstevel@tonic-gate 
3310Sstevel@tonic-gate struct memseg *memsegs;		/* list of memory segments */
3320Sstevel@tonic-gate 
3330Sstevel@tonic-gate 
3340Sstevel@tonic-gate static void page_init_mem_config(void);
3350Sstevel@tonic-gate static int page_do_hashin(page_t *, vnode_t *, u_offset_t);
3360Sstevel@tonic-gate static void page_do_hashout(page_t *);
3370Sstevel@tonic-gate 
3380Sstevel@tonic-gate static void page_demote_vp_pages(page_t *);
3390Sstevel@tonic-gate 
3400Sstevel@tonic-gate /*
3410Sstevel@tonic-gate  * vm subsystem related initialization
3420Sstevel@tonic-gate  */
3430Sstevel@tonic-gate void
3440Sstevel@tonic-gate vm_init(void)
3450Sstevel@tonic-gate {
3460Sstevel@tonic-gate 	boolean_t callb_vm_cpr(void *, int);
3470Sstevel@tonic-gate 
3480Sstevel@tonic-gate 	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
3490Sstevel@tonic-gate 	page_init_mem_config();
350917Selowe 	page_retire_init();
3510Sstevel@tonic-gate }
3520Sstevel@tonic-gate 
3530Sstevel@tonic-gate /*
3540Sstevel@tonic-gate  * This function is called at startup and when memory is added or deleted.
3550Sstevel@tonic-gate  */
3560Sstevel@tonic-gate void
3570Sstevel@tonic-gate init_pages_pp_maximum()
3580Sstevel@tonic-gate {
3590Sstevel@tonic-gate 	static pgcnt_t p_min;
3600Sstevel@tonic-gate 	static pgcnt_t pages_pp_maximum_startup;
3610Sstevel@tonic-gate 	static pgcnt_t avrmem_delta;
3620Sstevel@tonic-gate 	static int init_done;
3630Sstevel@tonic-gate 	static int user_set;	/* true if set in /etc/system */
3640Sstevel@tonic-gate 
3650Sstevel@tonic-gate 	if (init_done == 0) {
3660Sstevel@tonic-gate 
3670Sstevel@tonic-gate 		/* If the user specified a value, save it */
3680Sstevel@tonic-gate 		if (pages_pp_maximum != 0) {
3690Sstevel@tonic-gate 			user_set = 1;
3700Sstevel@tonic-gate 			pages_pp_maximum_startup = pages_pp_maximum;
3710Sstevel@tonic-gate 		}
3720Sstevel@tonic-gate 
3730Sstevel@tonic-gate 		/*
3740Sstevel@tonic-gate 		 * Setting of pages_pp_maximum is based first time
3750Sstevel@tonic-gate 		 * on the value of availrmem just after the start-up
3760Sstevel@tonic-gate 		 * allocations. To preserve this relationship at run
3770Sstevel@tonic-gate 		 * time, use a delta from availrmem_initial.
3780Sstevel@tonic-gate 		 */
3790Sstevel@tonic-gate 		ASSERT(availrmem_initial >= availrmem);
3800Sstevel@tonic-gate 		avrmem_delta = availrmem_initial - availrmem;
3810Sstevel@tonic-gate 
3820Sstevel@tonic-gate 		/* The allowable floor of pages_pp_maximum */
3830Sstevel@tonic-gate 		p_min = tune.t_minarmem + 100;
3840Sstevel@tonic-gate 
3850Sstevel@tonic-gate 		/* Make sure we don't come through here again. */
3860Sstevel@tonic-gate 		init_done = 1;
3870Sstevel@tonic-gate 	}
3880Sstevel@tonic-gate 	/*
3890Sstevel@tonic-gate 	 * Determine pages_pp_maximum, the number of currently available
3900Sstevel@tonic-gate 	 * pages (availrmem) that can't be `locked'. If not set by
3910Sstevel@tonic-gate 	 * the user, we set it to 4% of the currently available memory
3920Sstevel@tonic-gate 	 * plus 4MB.
3930Sstevel@tonic-gate 	 * But we also insist that it be greater than tune.t_minarmem;
3940Sstevel@tonic-gate 	 * otherwise a process could lock down a lot of memory, get swapped
3950Sstevel@tonic-gate 	 * out, and never have enough to get swapped back in.
3960Sstevel@tonic-gate 	 */
3970Sstevel@tonic-gate 	if (user_set)
3980Sstevel@tonic-gate 		pages_pp_maximum = pages_pp_maximum_startup;
3990Sstevel@tonic-gate 	else
4000Sstevel@tonic-gate 		pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25)
4010Sstevel@tonic-gate 		    + btop(4 * 1024 * 1024);
4020Sstevel@tonic-gate 
4030Sstevel@tonic-gate 	if (pages_pp_maximum <= p_min) {
4040Sstevel@tonic-gate 		pages_pp_maximum = p_min;
4050Sstevel@tonic-gate 	}
4060Sstevel@tonic-gate }
4070Sstevel@tonic-gate 
4080Sstevel@tonic-gate void
4090Sstevel@tonic-gate set_max_page_get(pgcnt_t target_total_pages)
4100Sstevel@tonic-gate {
4110Sstevel@tonic-gate 	max_page_get = target_total_pages / 2;
4120Sstevel@tonic-gate }
4130Sstevel@tonic-gate 
4140Sstevel@tonic-gate static pgcnt_t pending_delete;
4150Sstevel@tonic-gate 
4160Sstevel@tonic-gate /*ARGSUSED*/
4170Sstevel@tonic-gate static void
4180Sstevel@tonic-gate page_mem_config_post_add(
4190Sstevel@tonic-gate 	void *arg,
4200Sstevel@tonic-gate 	pgcnt_t delta_pages)
4210Sstevel@tonic-gate {
4220Sstevel@tonic-gate 	set_max_page_get(total_pages - pending_delete);
4230Sstevel@tonic-gate 	init_pages_pp_maximum();
4240Sstevel@tonic-gate }
4250Sstevel@tonic-gate 
4260Sstevel@tonic-gate /*ARGSUSED*/
4270Sstevel@tonic-gate static int
4280Sstevel@tonic-gate page_mem_config_pre_del(
4290Sstevel@tonic-gate 	void *arg,
4300Sstevel@tonic-gate 	pgcnt_t delta_pages)
4310Sstevel@tonic-gate {
4320Sstevel@tonic-gate 	pgcnt_t nv;
4330Sstevel@tonic-gate 
4340Sstevel@tonic-gate 	nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages);
4350Sstevel@tonic-gate 	set_max_page_get(total_pages - nv);
4360Sstevel@tonic-gate 	return (0);
4370Sstevel@tonic-gate }
4380Sstevel@tonic-gate 
4390Sstevel@tonic-gate /*ARGSUSED*/
4400Sstevel@tonic-gate static void
4410Sstevel@tonic-gate page_mem_config_post_del(
4420Sstevel@tonic-gate 	void *arg,
4430Sstevel@tonic-gate 	pgcnt_t delta_pages,
4440Sstevel@tonic-gate 	int cancelled)
4450Sstevel@tonic-gate {
4460Sstevel@tonic-gate 	pgcnt_t nv;
4470Sstevel@tonic-gate 
4480Sstevel@tonic-gate 	nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages);
4490Sstevel@tonic-gate 	set_max_page_get(total_pages - nv);
4500Sstevel@tonic-gate 	if (!cancelled)
4510Sstevel@tonic-gate 		init_pages_pp_maximum();
4520Sstevel@tonic-gate }
4530Sstevel@tonic-gate 
4540Sstevel@tonic-gate static kphysm_setup_vector_t page_mem_config_vec = {
4550Sstevel@tonic-gate 	KPHYSM_SETUP_VECTOR_VERSION,
4560Sstevel@tonic-gate 	page_mem_config_post_add,
4570Sstevel@tonic-gate 	page_mem_config_pre_del,
4580Sstevel@tonic-gate 	page_mem_config_post_del,
4590Sstevel@tonic-gate };
4600Sstevel@tonic-gate 
4610Sstevel@tonic-gate static void
4620Sstevel@tonic-gate page_init_mem_config(void)
4630Sstevel@tonic-gate {
4640Sstevel@tonic-gate 	int ret;
4650Sstevel@tonic-gate 
4660Sstevel@tonic-gate 	ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL);
4670Sstevel@tonic-gate 	ASSERT(ret == 0);
4680Sstevel@tonic-gate }
4690Sstevel@tonic-gate 
4700Sstevel@tonic-gate /*
4710Sstevel@tonic-gate  * Evenly spread out the PCF counters for large free pages
4720Sstevel@tonic-gate  */
4730Sstevel@tonic-gate static void
4740Sstevel@tonic-gate page_free_large_ctr(pgcnt_t npages)
4750Sstevel@tonic-gate {
4760Sstevel@tonic-gate 	static struct pcf	*p = pcf;
4770Sstevel@tonic-gate 	pgcnt_t			lump;
4780Sstevel@tonic-gate 
4790Sstevel@tonic-gate 	freemem += npages;
4800Sstevel@tonic-gate 
4810Sstevel@tonic-gate 	lump = roundup(npages, PCF_FANOUT) / PCF_FANOUT;
4820Sstevel@tonic-gate 
4830Sstevel@tonic-gate 	while (npages > 0) {
4840Sstevel@tonic-gate 
4850Sstevel@tonic-gate 		ASSERT(!p->pcf_block);
4860Sstevel@tonic-gate 
4870Sstevel@tonic-gate 		if (lump < npages) {
4880Sstevel@tonic-gate 			p->pcf_count += (uint_t)lump;
4890Sstevel@tonic-gate 			npages -= lump;
4900Sstevel@tonic-gate 		} else {
4910Sstevel@tonic-gate 			p->pcf_count += (uint_t)npages;
4920Sstevel@tonic-gate 			npages = 0;
4930Sstevel@tonic-gate 		}
4940Sstevel@tonic-gate 
4950Sstevel@tonic-gate 		ASSERT(!p->pcf_wait);
4960Sstevel@tonic-gate 
4970Sstevel@tonic-gate 		if (++p > &pcf[PCF_FANOUT - 1])
4980Sstevel@tonic-gate 			p = pcf;
4990Sstevel@tonic-gate 	}
5000Sstevel@tonic-gate 
5010Sstevel@tonic-gate 	ASSERT(npages == 0);
5020Sstevel@tonic-gate }
5030Sstevel@tonic-gate 
5040Sstevel@tonic-gate /*
5050Sstevel@tonic-gate  * Add a physical chunk of memory to the system freee lists during startup.
5060Sstevel@tonic-gate  * Platform specific startup() allocates the memory for the page structs.
5070Sstevel@tonic-gate  *
5080Sstevel@tonic-gate  * num	- number of page structures
5090Sstevel@tonic-gate  * base - page number (pfn) to be associated with the first page.
5100Sstevel@tonic-gate  *
5110Sstevel@tonic-gate  * Since we are doing this during startup (ie. single threaded), we will
5120Sstevel@tonic-gate  * use shortcut routines to avoid any locking overhead while putting all
5130Sstevel@tonic-gate  * these pages on the freelists.
5140Sstevel@tonic-gate  *
5150Sstevel@tonic-gate  * NOTE: Any changes performed to page_free(), must also be performed to
5160Sstevel@tonic-gate  *	 add_physmem() since this is how we initialize all page_t's at
5170Sstevel@tonic-gate  *	 boot time.
5180Sstevel@tonic-gate  */
5190Sstevel@tonic-gate void
5200Sstevel@tonic-gate add_physmem(
5210Sstevel@tonic-gate 	page_t	*pp,
5220Sstevel@tonic-gate 	pgcnt_t	num,
5230Sstevel@tonic-gate 	pfn_t	pnum)
5240Sstevel@tonic-gate {
5250Sstevel@tonic-gate 	page_t	*root = NULL;
5260Sstevel@tonic-gate 	uint_t	szc = page_num_pagesizes() - 1;
5270Sstevel@tonic-gate 	pgcnt_t	large = page_get_pagecnt(szc);
5280Sstevel@tonic-gate 	pgcnt_t	cnt = 0;
5290Sstevel@tonic-gate 
5300Sstevel@tonic-gate 	TRACE_2(TR_FAC_VM, TR_PAGE_INIT,
5310Sstevel@tonic-gate 		"add_physmem:pp %p num %lu", pp, num);
5320Sstevel@tonic-gate 
5330Sstevel@tonic-gate 	/*
5340Sstevel@tonic-gate 	 * Arbitrarily limit the max page_get request
5350Sstevel@tonic-gate 	 * to 1/2 of the page structs we have.
5360Sstevel@tonic-gate 	 */
5370Sstevel@tonic-gate 	total_pages += num;
5380Sstevel@tonic-gate 	set_max_page_get(total_pages);
5390Sstevel@tonic-gate 
5400Sstevel@tonic-gate 	/*
5410Sstevel@tonic-gate 	 * The physical space for the pages array
5420Sstevel@tonic-gate 	 * representing ram pages has already been
5430Sstevel@tonic-gate 	 * allocated.  Here we initialize each lock
5440Sstevel@tonic-gate 	 * in the page structure, and put each on
5450Sstevel@tonic-gate 	 * the free list
5460Sstevel@tonic-gate 	 */
547414Skchow 	for (; num; pp++, pnum++, num--) {
5480Sstevel@tonic-gate 
5490Sstevel@tonic-gate 		/*
5500Sstevel@tonic-gate 		 * this needs to fill in the page number
5510Sstevel@tonic-gate 		 * and do any other arch specific initialization
5520Sstevel@tonic-gate 		 */
5530Sstevel@tonic-gate 		add_physmem_cb(pp, pnum);
5540Sstevel@tonic-gate 
5550Sstevel@tonic-gate 		/*
5560Sstevel@tonic-gate 		 * Initialize the page lock as unlocked, since nobody
5570Sstevel@tonic-gate 		 * can see or access this page yet.
5580Sstevel@tonic-gate 		 */
5590Sstevel@tonic-gate 		pp->p_selock = 0;
5600Sstevel@tonic-gate 
5610Sstevel@tonic-gate 		/*
5620Sstevel@tonic-gate 		 * Initialize IO lock
5630Sstevel@tonic-gate 		 */
5640Sstevel@tonic-gate 		page_iolock_init(pp);
5650Sstevel@tonic-gate 
5660Sstevel@tonic-gate 		/*
5670Sstevel@tonic-gate 		 * initialize other fields in the page_t
5680Sstevel@tonic-gate 		 */
5690Sstevel@tonic-gate 		PP_SETFREE(pp);
5700Sstevel@tonic-gate 		page_clr_all_props(pp);
5710Sstevel@tonic-gate 		PP_SETAGED(pp);
5720Sstevel@tonic-gate 		pp->p_offset = (u_offset_t)-1;
5730Sstevel@tonic-gate 		pp->p_next = pp;
5740Sstevel@tonic-gate 		pp->p_prev = pp;
5750Sstevel@tonic-gate 
5760Sstevel@tonic-gate 		/*
5770Sstevel@tonic-gate 		 * Simple case: System doesn't support large pages.
5780Sstevel@tonic-gate 		 */
5790Sstevel@tonic-gate 		if (szc == 0) {
5800Sstevel@tonic-gate 			pp->p_szc = 0;
5810Sstevel@tonic-gate 			page_free_at_startup(pp);
5820Sstevel@tonic-gate 			continue;
5830Sstevel@tonic-gate 		}
5840Sstevel@tonic-gate 
5850Sstevel@tonic-gate 		/*
5860Sstevel@tonic-gate 		 * Handle unaligned pages, we collect them up onto
5870Sstevel@tonic-gate 		 * the root page until we have a full large page.
5880Sstevel@tonic-gate 		 */
5890Sstevel@tonic-gate 		if (!IS_P2ALIGNED(pnum, large)) {
5900Sstevel@tonic-gate 
5910Sstevel@tonic-gate 			/*
5920Sstevel@tonic-gate 			 * If not in a large page,
5930Sstevel@tonic-gate 			 * just free as small page.
5940Sstevel@tonic-gate 			 */
5950Sstevel@tonic-gate 			if (root == NULL) {
5960Sstevel@tonic-gate 				pp->p_szc = 0;
5970Sstevel@tonic-gate 				page_free_at_startup(pp);
5980Sstevel@tonic-gate 				continue;
5990Sstevel@tonic-gate 			}
6000Sstevel@tonic-gate 
6010Sstevel@tonic-gate 			/*
6020Sstevel@tonic-gate 			 * Link a constituent page into the large page.
6030Sstevel@tonic-gate 			 */
6040Sstevel@tonic-gate 			pp->p_szc = szc;
6050Sstevel@tonic-gate 			page_list_concat(&root, &pp);
6060Sstevel@tonic-gate 
6070Sstevel@tonic-gate 			/*
6080Sstevel@tonic-gate 			 * When large page is fully formed, free it.
6090Sstevel@tonic-gate 			 */
6100Sstevel@tonic-gate 			if (++cnt == large) {
6110Sstevel@tonic-gate 				page_free_large_ctr(cnt);
6120Sstevel@tonic-gate 				page_list_add_pages(root, PG_LIST_ISINIT);
6130Sstevel@tonic-gate 				root = NULL;
6140Sstevel@tonic-gate 				cnt = 0;
6150Sstevel@tonic-gate 			}
6160Sstevel@tonic-gate 			continue;
6170Sstevel@tonic-gate 		}
6180Sstevel@tonic-gate 
6190Sstevel@tonic-gate 		/*
6200Sstevel@tonic-gate 		 * At this point we have a page number which
6210Sstevel@tonic-gate 		 * is aligned. We assert that we aren't already
6220Sstevel@tonic-gate 		 * in a different large page.
6230Sstevel@tonic-gate 		 */
6240Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(pnum, large));
6250Sstevel@tonic-gate 		ASSERT(root == NULL && cnt == 0);
6260Sstevel@tonic-gate 
6270Sstevel@tonic-gate 		/*
6280Sstevel@tonic-gate 		 * If insufficient number of pages left to form
6290Sstevel@tonic-gate 		 * a large page, just free the small page.
6300Sstevel@tonic-gate 		 */
6310Sstevel@tonic-gate 		if (num < large) {
6320Sstevel@tonic-gate 			pp->p_szc = 0;
6330Sstevel@tonic-gate 			page_free_at_startup(pp);
6340Sstevel@tonic-gate 			continue;
6350Sstevel@tonic-gate 		}
6360Sstevel@tonic-gate 
6370Sstevel@tonic-gate 		/*
6380Sstevel@tonic-gate 		 * Otherwise start a new large page.
6390Sstevel@tonic-gate 		 */
6400Sstevel@tonic-gate 		pp->p_szc = szc;
6410Sstevel@tonic-gate 		cnt++;
6420Sstevel@tonic-gate 		root = pp;
6430Sstevel@tonic-gate 	}
6440Sstevel@tonic-gate 	ASSERT(root == NULL && cnt == 0);
6450Sstevel@tonic-gate }
6460Sstevel@tonic-gate 
6470Sstevel@tonic-gate /*
6480Sstevel@tonic-gate  * Find a page representing the specified [vp, offset].
6490Sstevel@tonic-gate  * If we find the page but it is intransit coming in,
6500Sstevel@tonic-gate  * it will have an "exclusive" lock and we wait for
6510Sstevel@tonic-gate  * the i/o to complete.  A page found on the free list
6520Sstevel@tonic-gate  * is always reclaimed and then locked.  On success, the page
6530Sstevel@tonic-gate  * is locked, its data is valid and it isn't on the free
6540Sstevel@tonic-gate  * list, while a NULL is returned if the page doesn't exist.
6550Sstevel@tonic-gate  */
6560Sstevel@tonic-gate page_t *
6570Sstevel@tonic-gate page_lookup(vnode_t *vp, u_offset_t off, se_t se)
6580Sstevel@tonic-gate {
6590Sstevel@tonic-gate 	return (page_lookup_create(vp, off, se, NULL, NULL, 0));
6600Sstevel@tonic-gate }
6610Sstevel@tonic-gate 
6620Sstevel@tonic-gate /*
6630Sstevel@tonic-gate  * Find a page representing the specified [vp, offset].
6640Sstevel@tonic-gate  * We either return the one we found or, if passed in,
6650Sstevel@tonic-gate  * create one with identity of [vp, offset] of the
6660Sstevel@tonic-gate  * pre-allocated page. If we find exsisting page but it is
6670Sstevel@tonic-gate  * intransit coming in, it will have an "exclusive" lock
6680Sstevel@tonic-gate  * and we wait for the i/o to complete.  A page found on
6690Sstevel@tonic-gate  * the free list is always reclaimed and then locked.
6700Sstevel@tonic-gate  * On success, the page is locked, its data is valid and
6710Sstevel@tonic-gate  * it isn't on the free list, while a NULL is returned
6720Sstevel@tonic-gate  * if the page doesn't exist and newpp is NULL;
6730Sstevel@tonic-gate  */
6740Sstevel@tonic-gate page_t *
6750Sstevel@tonic-gate page_lookup_create(
6760Sstevel@tonic-gate 	vnode_t *vp,
6770Sstevel@tonic-gate 	u_offset_t off,
6780Sstevel@tonic-gate 	se_t se,
6790Sstevel@tonic-gate 	page_t *newpp,
6800Sstevel@tonic-gate 	spgcnt_t *nrelocp,
6810Sstevel@tonic-gate 	int flags)
6820Sstevel@tonic-gate {
6830Sstevel@tonic-gate 	page_t		*pp;
6840Sstevel@tonic-gate 	kmutex_t	*phm;
6850Sstevel@tonic-gate 	ulong_t		index;
6860Sstevel@tonic-gate 	uint_t		hash_locked;
6870Sstevel@tonic-gate 	uint_t		es;
6880Sstevel@tonic-gate 
6890Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
6900Sstevel@tonic-gate 	VM_STAT_ADD(page_lookup_cnt[0]);
6910Sstevel@tonic-gate 	ASSERT(newpp ? PAGE_EXCL(newpp) : 1);
6920Sstevel@tonic-gate 
6930Sstevel@tonic-gate 	/*
6940Sstevel@tonic-gate 	 * Acquire the appropriate page hash lock since
6950Sstevel@tonic-gate 	 * we have to search the hash list.  Pages that
6960Sstevel@tonic-gate 	 * hash to this list can't change identity while
6970Sstevel@tonic-gate 	 * this lock is held.
6980Sstevel@tonic-gate 	 */
6990Sstevel@tonic-gate 	hash_locked = 0;
7000Sstevel@tonic-gate 	index = PAGE_HASH_FUNC(vp, off);
7010Sstevel@tonic-gate 	phm = NULL;
7020Sstevel@tonic-gate top:
7030Sstevel@tonic-gate 	PAGE_HASH_SEARCH(index, pp, vp, off);
7040Sstevel@tonic-gate 	if (pp != NULL) {
7050Sstevel@tonic-gate 		VM_STAT_ADD(page_lookup_cnt[1]);
7060Sstevel@tonic-gate 		es = (newpp != NULL) ? 1 : 0;
7070Sstevel@tonic-gate 		es |= flags;
7080Sstevel@tonic-gate 		if (!hash_locked) {
7090Sstevel@tonic-gate 			VM_STAT_ADD(page_lookup_cnt[2]);
7100Sstevel@tonic-gate 			if (!page_try_reclaim_lock(pp, se, es)) {
7110Sstevel@tonic-gate 				/*
7120Sstevel@tonic-gate 				 * On a miss, acquire the phm.  Then
7130Sstevel@tonic-gate 				 * next time, page_lock() will be called,
7140Sstevel@tonic-gate 				 * causing a wait if the page is busy.
7150Sstevel@tonic-gate 				 * just looping with page_trylock() would
7160Sstevel@tonic-gate 				 * get pretty boring.
7170Sstevel@tonic-gate 				 */
7180Sstevel@tonic-gate 				VM_STAT_ADD(page_lookup_cnt[3]);
7190Sstevel@tonic-gate 				phm = PAGE_HASH_MUTEX(index);
7200Sstevel@tonic-gate 				mutex_enter(phm);
7210Sstevel@tonic-gate 				hash_locked = 1;
7220Sstevel@tonic-gate 				goto top;
7230Sstevel@tonic-gate 			}
7240Sstevel@tonic-gate 		} else {
7250Sstevel@tonic-gate 			VM_STAT_ADD(page_lookup_cnt[4]);
7260Sstevel@tonic-gate 			if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) {
7270Sstevel@tonic-gate 				VM_STAT_ADD(page_lookup_cnt[5]);
7280Sstevel@tonic-gate 				goto top;
7290Sstevel@tonic-gate 			}
7300Sstevel@tonic-gate 		}
7310Sstevel@tonic-gate 
7320Sstevel@tonic-gate 		/*
7330Sstevel@tonic-gate 		 * Since `pp' is locked it can not change identity now.
7340Sstevel@tonic-gate 		 * Reconfirm we locked the correct page.
7350Sstevel@tonic-gate 		 *
7360Sstevel@tonic-gate 		 * Both the p_vnode and p_offset *must* be cast volatile
7370Sstevel@tonic-gate 		 * to force a reload of their values: The PAGE_HASH_SEARCH
7380Sstevel@tonic-gate 		 * macro will have stuffed p_vnode and p_offset into
7390Sstevel@tonic-gate 		 * registers before calling page_trylock(); another thread,
7400Sstevel@tonic-gate 		 * actually holding the hash lock, could have changed the
7410Sstevel@tonic-gate 		 * page's identity in memory, but our registers would not
7420Sstevel@tonic-gate 		 * be changed, fooling the reconfirmation.  If the hash
7430Sstevel@tonic-gate 		 * lock was held during the search, the casting would
7440Sstevel@tonic-gate 		 * not be needed.
7450Sstevel@tonic-gate 		 */
7460Sstevel@tonic-gate 		VM_STAT_ADD(page_lookup_cnt[6]);
7470Sstevel@tonic-gate 		if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
7480Sstevel@tonic-gate 		    ((volatile u_offset_t)(pp->p_offset) != off)) {
7490Sstevel@tonic-gate 			VM_STAT_ADD(page_lookup_cnt[7]);
7500Sstevel@tonic-gate 			if (hash_locked) {
7510Sstevel@tonic-gate 				panic("page_lookup_create: lost page %p",
7520Sstevel@tonic-gate 				    (void *)pp);
7530Sstevel@tonic-gate 				/*NOTREACHED*/
7540Sstevel@tonic-gate 			}
7550Sstevel@tonic-gate 			page_unlock(pp);
7560Sstevel@tonic-gate 			phm = PAGE_HASH_MUTEX(index);
7570Sstevel@tonic-gate 			mutex_enter(phm);
7580Sstevel@tonic-gate 			hash_locked = 1;
7590Sstevel@tonic-gate 			goto top;
7600Sstevel@tonic-gate 		}
7610Sstevel@tonic-gate 
7620Sstevel@tonic-gate 		/*
7630Sstevel@tonic-gate 		 * If page_trylock() was called, then pp may still be on
7640Sstevel@tonic-gate 		 * the cachelist (can't be on the free list, it would not
7650Sstevel@tonic-gate 		 * have been found in the search).  If it is on the
7660Sstevel@tonic-gate 		 * cachelist it must be pulled now. To pull the page from
7670Sstevel@tonic-gate 		 * the cachelist, it must be exclusively locked.
7680Sstevel@tonic-gate 		 *
7690Sstevel@tonic-gate 		 * The other big difference between page_trylock() and
7700Sstevel@tonic-gate 		 * page_lock(), is that page_lock() will pull the
7710Sstevel@tonic-gate 		 * page from whatever free list (the cache list in this
7720Sstevel@tonic-gate 		 * case) the page is on.  If page_trylock() was used
7730Sstevel@tonic-gate 		 * above, then we have to do the reclaim ourselves.
7740Sstevel@tonic-gate 		 */
7750Sstevel@tonic-gate 		if ((!hash_locked) && (PP_ISFREE(pp))) {
7760Sstevel@tonic-gate 			ASSERT(PP_ISAGED(pp) == 0);
7770Sstevel@tonic-gate 			VM_STAT_ADD(page_lookup_cnt[8]);
7780Sstevel@tonic-gate 
7790Sstevel@tonic-gate 			/*
7800Sstevel@tonic-gate 			 * page_relcaim will insure that we
7810Sstevel@tonic-gate 			 * have this page exclusively
7820Sstevel@tonic-gate 			 */
7830Sstevel@tonic-gate 
7840Sstevel@tonic-gate 			if (!page_reclaim(pp, NULL)) {
7850Sstevel@tonic-gate 				/*
7860Sstevel@tonic-gate 				 * Page_reclaim dropped whatever lock
7870Sstevel@tonic-gate 				 * we held.
7880Sstevel@tonic-gate 				 */
7890Sstevel@tonic-gate 				VM_STAT_ADD(page_lookup_cnt[9]);
7900Sstevel@tonic-gate 				phm = PAGE_HASH_MUTEX(index);
7910Sstevel@tonic-gate 				mutex_enter(phm);
7920Sstevel@tonic-gate 				hash_locked = 1;
7930Sstevel@tonic-gate 				goto top;
7940Sstevel@tonic-gate 			} else if (se == SE_SHARED && newpp == NULL) {
7950Sstevel@tonic-gate 				VM_STAT_ADD(page_lookup_cnt[10]);
7960Sstevel@tonic-gate 				page_downgrade(pp);
7970Sstevel@tonic-gate 			}
7980Sstevel@tonic-gate 		}
7990Sstevel@tonic-gate 
8000Sstevel@tonic-gate 		if (hash_locked) {
8010Sstevel@tonic-gate 			mutex_exit(phm);
8020Sstevel@tonic-gate 		}
8030Sstevel@tonic-gate 
8040Sstevel@tonic-gate 		if (newpp != NULL && pp->p_szc < newpp->p_szc &&
8050Sstevel@tonic-gate 		    PAGE_EXCL(pp) && nrelocp != NULL) {
8060Sstevel@tonic-gate 			ASSERT(nrelocp != NULL);
8070Sstevel@tonic-gate 			(void) page_relocate(&pp, &newpp, 1, 1, nrelocp,
8080Sstevel@tonic-gate 			    NULL);
8090Sstevel@tonic-gate 			if (*nrelocp > 0) {
8100Sstevel@tonic-gate 				VM_STAT_COND_ADD(*nrelocp == 1,
8110Sstevel@tonic-gate 				    page_lookup_cnt[11]);
8120Sstevel@tonic-gate 				VM_STAT_COND_ADD(*nrelocp > 1,
8130Sstevel@tonic-gate 				    page_lookup_cnt[12]);
8140Sstevel@tonic-gate 				pp = newpp;
8150Sstevel@tonic-gate 				se = SE_EXCL;
8160Sstevel@tonic-gate 			} else {
8170Sstevel@tonic-gate 				if (se == SE_SHARED) {
8180Sstevel@tonic-gate 					page_downgrade(pp);
8190Sstevel@tonic-gate 				}
8200Sstevel@tonic-gate 				VM_STAT_ADD(page_lookup_cnt[13]);
8210Sstevel@tonic-gate 			}
8220Sstevel@tonic-gate 		} else if (newpp != NULL && nrelocp != NULL) {
8230Sstevel@tonic-gate 			if (PAGE_EXCL(pp) && se == SE_SHARED) {
8240Sstevel@tonic-gate 				page_downgrade(pp);
8250Sstevel@tonic-gate 			}
8260Sstevel@tonic-gate 			VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc,
8270Sstevel@tonic-gate 			    page_lookup_cnt[14]);
8280Sstevel@tonic-gate 			VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc,
8290Sstevel@tonic-gate 			    page_lookup_cnt[15]);
8300Sstevel@tonic-gate 			VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc,
8310Sstevel@tonic-gate 			    page_lookup_cnt[16]);
8320Sstevel@tonic-gate 		} else if (newpp != NULL && PAGE_EXCL(pp)) {
8330Sstevel@tonic-gate 			se = SE_EXCL;
8340Sstevel@tonic-gate 		}
8350Sstevel@tonic-gate 	} else if (!hash_locked) {
8360Sstevel@tonic-gate 		VM_STAT_ADD(page_lookup_cnt[17]);
8370Sstevel@tonic-gate 		phm = PAGE_HASH_MUTEX(index);
8380Sstevel@tonic-gate 		mutex_enter(phm);
8390Sstevel@tonic-gate 		hash_locked = 1;
8400Sstevel@tonic-gate 		goto top;
8410Sstevel@tonic-gate 	} else if (newpp != NULL) {
8420Sstevel@tonic-gate 		/*
8430Sstevel@tonic-gate 		 * If we have a preallocated page then
8440Sstevel@tonic-gate 		 * insert it now and basically behave like
8450Sstevel@tonic-gate 		 * page_create.
8460Sstevel@tonic-gate 		 */
8470Sstevel@tonic-gate 		VM_STAT_ADD(page_lookup_cnt[18]);
8480Sstevel@tonic-gate 		/*
8490Sstevel@tonic-gate 		 * Since we hold the page hash mutex and
8500Sstevel@tonic-gate 		 * just searched for this page, page_hashin
8510Sstevel@tonic-gate 		 * had better not fail.  If it does, that
8520Sstevel@tonic-gate 		 * means some thread did not follow the
8530Sstevel@tonic-gate 		 * page hash mutex rules.  Panic now and
8540Sstevel@tonic-gate 		 * get it over with.  As usual, go down
8550Sstevel@tonic-gate 		 * holding all the locks.
8560Sstevel@tonic-gate 		 */
8570Sstevel@tonic-gate 		ASSERT(MUTEX_HELD(phm));
8580Sstevel@tonic-gate 		if (!page_hashin(newpp, vp, off, phm)) {
8590Sstevel@tonic-gate 			ASSERT(MUTEX_HELD(phm));
8600Sstevel@tonic-gate 			panic("page_lookup_create: hashin failed %p %p %llx %p",
8610Sstevel@tonic-gate 			    (void *)newpp, (void *)vp, off, (void *)phm);
8620Sstevel@tonic-gate 			/*NOTREACHED*/
8630Sstevel@tonic-gate 		}
8640Sstevel@tonic-gate 		ASSERT(MUTEX_HELD(phm));
8650Sstevel@tonic-gate 		mutex_exit(phm);
8660Sstevel@tonic-gate 		phm = NULL;
8670Sstevel@tonic-gate 		page_set_props(newpp, P_REF);
8680Sstevel@tonic-gate 		page_io_lock(newpp);
8690Sstevel@tonic-gate 		pp = newpp;
8700Sstevel@tonic-gate 		se = SE_EXCL;
8710Sstevel@tonic-gate 	} else {
8720Sstevel@tonic-gate 		VM_STAT_ADD(page_lookup_cnt[19]);
8730Sstevel@tonic-gate 		mutex_exit(phm);
8740Sstevel@tonic-gate 	}
8750Sstevel@tonic-gate 
8760Sstevel@tonic-gate 	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);
8770Sstevel@tonic-gate 
8780Sstevel@tonic-gate 	ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1);
8790Sstevel@tonic-gate 
8800Sstevel@tonic-gate 	return (pp);
8810Sstevel@tonic-gate }
8820Sstevel@tonic-gate 
8830Sstevel@tonic-gate /*
8840Sstevel@tonic-gate  * Search the hash list for the page representing the
8850Sstevel@tonic-gate  * specified [vp, offset] and return it locked.  Skip
8860Sstevel@tonic-gate  * free pages and pages that cannot be locked as requested.
8870Sstevel@tonic-gate  * Used while attempting to kluster pages.
8880Sstevel@tonic-gate  */
8890Sstevel@tonic-gate page_t *
8900Sstevel@tonic-gate page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se)
8910Sstevel@tonic-gate {
8920Sstevel@tonic-gate 	page_t		*pp;
8930Sstevel@tonic-gate 	kmutex_t	*phm;
8940Sstevel@tonic-gate 	ulong_t		index;
8950Sstevel@tonic-gate 	uint_t		locked;
8960Sstevel@tonic-gate 
8970Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
8980Sstevel@tonic-gate 	VM_STAT_ADD(page_lookup_nowait_cnt[0]);
8990Sstevel@tonic-gate 
9000Sstevel@tonic-gate 	index = PAGE_HASH_FUNC(vp, off);
9010Sstevel@tonic-gate 	PAGE_HASH_SEARCH(index, pp, vp, off);
9020Sstevel@tonic-gate 	locked = 0;
9030Sstevel@tonic-gate 	if (pp == NULL) {
9040Sstevel@tonic-gate top:
9050Sstevel@tonic-gate 		VM_STAT_ADD(page_lookup_nowait_cnt[1]);
9060Sstevel@tonic-gate 		locked = 1;
9070Sstevel@tonic-gate 		phm = PAGE_HASH_MUTEX(index);
9080Sstevel@tonic-gate 		mutex_enter(phm);
9090Sstevel@tonic-gate 		PAGE_HASH_SEARCH(index, pp, vp, off);
9100Sstevel@tonic-gate 	}
9110Sstevel@tonic-gate 
9120Sstevel@tonic-gate 	if (pp == NULL || PP_ISFREE(pp)) {
9130Sstevel@tonic-gate 		VM_STAT_ADD(page_lookup_nowait_cnt[2]);
9140Sstevel@tonic-gate 		pp = NULL;
9150Sstevel@tonic-gate 	} else {
9160Sstevel@tonic-gate 		if (!page_trylock(pp, se)) {
9170Sstevel@tonic-gate 			VM_STAT_ADD(page_lookup_nowait_cnt[3]);
9180Sstevel@tonic-gate 			pp = NULL;
9190Sstevel@tonic-gate 		} else {
9200Sstevel@tonic-gate 			VM_STAT_ADD(page_lookup_nowait_cnt[4]);
9210Sstevel@tonic-gate 			/*
9220Sstevel@tonic-gate 			 * See the comment in page_lookup()
9230Sstevel@tonic-gate 			 */
9240Sstevel@tonic-gate 			if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
9250Sstevel@tonic-gate 			    ((u_offset_t)(pp->p_offset) != off)) {
9260Sstevel@tonic-gate 				VM_STAT_ADD(page_lookup_nowait_cnt[5]);
9270Sstevel@tonic-gate 				if (locked) {
9280Sstevel@tonic-gate 					panic("page_lookup_nowait %p",
9290Sstevel@tonic-gate 					    (void *)pp);
9300Sstevel@tonic-gate 					/*NOTREACHED*/
9310Sstevel@tonic-gate 				}
9320Sstevel@tonic-gate 				page_unlock(pp);
9330Sstevel@tonic-gate 				goto top;
9340Sstevel@tonic-gate 			}
9350Sstevel@tonic-gate 			if (PP_ISFREE(pp)) {
9360Sstevel@tonic-gate 				VM_STAT_ADD(page_lookup_nowait_cnt[6]);
9370Sstevel@tonic-gate 				page_unlock(pp);
9380Sstevel@tonic-gate 				pp = NULL;
9390Sstevel@tonic-gate 			}
9400Sstevel@tonic-gate 		}
9410Sstevel@tonic-gate 	}
9420Sstevel@tonic-gate 	if (locked) {
9430Sstevel@tonic-gate 		VM_STAT_ADD(page_lookup_nowait_cnt[7]);
9440Sstevel@tonic-gate 		mutex_exit(phm);
9450Sstevel@tonic-gate 	}
9460Sstevel@tonic-gate 
9470Sstevel@tonic-gate 	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);
9480Sstevel@tonic-gate 
9490Sstevel@tonic-gate 	return (pp);
9500Sstevel@tonic-gate }
9510Sstevel@tonic-gate 
9520Sstevel@tonic-gate /*
9530Sstevel@tonic-gate  * Search the hash list for a page with the specified [vp, off]
9540Sstevel@tonic-gate  * that is known to exist and is already locked.  This routine
9550Sstevel@tonic-gate  * is typically used by segment SOFTUNLOCK routines.
9560Sstevel@tonic-gate  */
9570Sstevel@tonic-gate page_t *
9580Sstevel@tonic-gate page_find(vnode_t *vp, u_offset_t off)
9590Sstevel@tonic-gate {
9600Sstevel@tonic-gate 	page_t		*pp;
9610Sstevel@tonic-gate 	kmutex_t	*phm;
9620Sstevel@tonic-gate 	ulong_t		index;
9630Sstevel@tonic-gate 
9640Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
9650Sstevel@tonic-gate 	VM_STAT_ADD(page_find_cnt);
9660Sstevel@tonic-gate 
9670Sstevel@tonic-gate 	index = PAGE_HASH_FUNC(vp, off);
9680Sstevel@tonic-gate 	phm = PAGE_HASH_MUTEX(index);
9690Sstevel@tonic-gate 
9700Sstevel@tonic-gate 	mutex_enter(phm);
9710Sstevel@tonic-gate 	PAGE_HASH_SEARCH(index, pp, vp, off);
9720Sstevel@tonic-gate 	mutex_exit(phm);
9730Sstevel@tonic-gate 
974*1338Selowe 	ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
9750Sstevel@tonic-gate 	return (pp);
9760Sstevel@tonic-gate }
9770Sstevel@tonic-gate 
9780Sstevel@tonic-gate /*
9790Sstevel@tonic-gate  * Determine whether a page with the specified [vp, off]
9800Sstevel@tonic-gate  * currently exists in the system.  Obviously this should
9810Sstevel@tonic-gate  * only be considered as a hint since nothing prevents the
9820Sstevel@tonic-gate  * page from disappearing or appearing immediately after
9830Sstevel@tonic-gate  * the return from this routine. Subsequently, we don't
9840Sstevel@tonic-gate  * even bother to lock the list.
9850Sstevel@tonic-gate  */
9860Sstevel@tonic-gate page_t *
9870Sstevel@tonic-gate page_exists(vnode_t *vp, u_offset_t off)
9880Sstevel@tonic-gate {
9890Sstevel@tonic-gate 	page_t	*pp;
9900Sstevel@tonic-gate 	ulong_t		index;
9910Sstevel@tonic-gate 
9920Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
9930Sstevel@tonic-gate 	VM_STAT_ADD(page_exists_cnt);
9940Sstevel@tonic-gate 
9950Sstevel@tonic-gate 	index = PAGE_HASH_FUNC(vp, off);
9960Sstevel@tonic-gate 	PAGE_HASH_SEARCH(index, pp, vp, off);
9970Sstevel@tonic-gate 
9980Sstevel@tonic-gate 	return (pp);
9990Sstevel@tonic-gate }
10000Sstevel@tonic-gate 
10010Sstevel@tonic-gate /*
10020Sstevel@tonic-gate  * Determine if physically contiguous pages exist for [vp, off] - [vp, off +
10030Sstevel@tonic-gate  * page_size(szc)) range.  if they exist and ppa is not NULL fill ppa array
10040Sstevel@tonic-gate  * with these pages locked SHARED. If necessary reclaim pages from
10050Sstevel@tonic-gate  * freelist. Return 1 if contiguous pages exist and 0 otherwise.
10060Sstevel@tonic-gate  *
10070Sstevel@tonic-gate  * If we fail to lock pages still return 1 if pages exist and contiguous.
10080Sstevel@tonic-gate  * But in this case return value is just a hint. ppa array won't be filled.
10090Sstevel@tonic-gate  * Caller should initialize ppa[0] as NULL to distinguish return value.
10100Sstevel@tonic-gate  *
10110Sstevel@tonic-gate  * Returns 0 if pages don't exist or not physically contiguous.
10120Sstevel@tonic-gate  *
10130Sstevel@tonic-gate  * This routine doesn't work for anonymous(swapfs) pages.
10140Sstevel@tonic-gate  */
10150Sstevel@tonic-gate int
10160Sstevel@tonic-gate page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[])
10170Sstevel@tonic-gate {
10180Sstevel@tonic-gate 	pgcnt_t pages;
10190Sstevel@tonic-gate 	pfn_t pfn;
10200Sstevel@tonic-gate 	page_t *rootpp;
10210Sstevel@tonic-gate 	pgcnt_t i;
10220Sstevel@tonic-gate 	pgcnt_t j;
10230Sstevel@tonic-gate 	u_offset_t save_off = off;
10240Sstevel@tonic-gate 	ulong_t index;
10250Sstevel@tonic-gate 	kmutex_t *phm;
10260Sstevel@tonic-gate 	page_t *pp;
10270Sstevel@tonic-gate 	uint_t pszc;
10280Sstevel@tonic-gate 	int loopcnt = 0;
10290Sstevel@tonic-gate 
10300Sstevel@tonic-gate 	ASSERT(szc != 0);
10310Sstevel@tonic-gate 	ASSERT(vp != NULL);
10320Sstevel@tonic-gate 	ASSERT(!IS_SWAPFSVP(vp));
10330Sstevel@tonic-gate 	ASSERT(vp != &kvp);
10340Sstevel@tonic-gate 
10350Sstevel@tonic-gate again:
10360Sstevel@tonic-gate 	if (++loopcnt > 3) {
10370Sstevel@tonic-gate 		VM_STAT_ADD(page_exphcontg[0]);
10380Sstevel@tonic-gate 		return (0);
10390Sstevel@tonic-gate 	}
10400Sstevel@tonic-gate 
10410Sstevel@tonic-gate 	index = PAGE_HASH_FUNC(vp, off);
10420Sstevel@tonic-gate 	phm = PAGE_HASH_MUTEX(index);
10430Sstevel@tonic-gate 
10440Sstevel@tonic-gate 	mutex_enter(phm);
10450Sstevel@tonic-gate 	PAGE_HASH_SEARCH(index, pp, vp, off);
10460Sstevel@tonic-gate 	mutex_exit(phm);
10470Sstevel@tonic-gate 
10480Sstevel@tonic-gate 	VM_STAT_ADD(page_exphcontg[1]);
10490Sstevel@tonic-gate 
10500Sstevel@tonic-gate 	if (pp == NULL) {
10510Sstevel@tonic-gate 		VM_STAT_ADD(page_exphcontg[2]);
10520Sstevel@tonic-gate 		return (0);
10530Sstevel@tonic-gate 	}
10540Sstevel@tonic-gate 
10550Sstevel@tonic-gate 	pages = page_get_pagecnt(szc);
10560Sstevel@tonic-gate 	rootpp = pp;
10570Sstevel@tonic-gate 	pfn = rootpp->p_pagenum;
10580Sstevel@tonic-gate 
10590Sstevel@tonic-gate 	if ((pszc = pp->p_szc) >= szc && ppa != NULL) {
10600Sstevel@tonic-gate 		VM_STAT_ADD(page_exphcontg[3]);
10610Sstevel@tonic-gate 		if (!page_trylock(pp, SE_SHARED)) {
10620Sstevel@tonic-gate 			VM_STAT_ADD(page_exphcontg[4]);
10630Sstevel@tonic-gate 			return (1);
10640Sstevel@tonic-gate 		}
10650Sstevel@tonic-gate 		if (pp->p_szc != pszc || pp->p_vnode != vp ||
10660Sstevel@tonic-gate 		    pp->p_offset != off) {
10670Sstevel@tonic-gate 			VM_STAT_ADD(page_exphcontg[5]);
10680Sstevel@tonic-gate 			page_unlock(pp);
10690Sstevel@tonic-gate 			off = save_off;
10700Sstevel@tonic-gate 			goto again;
10710Sstevel@tonic-gate 		}
10720Sstevel@tonic-gate 		/*
10730Sstevel@tonic-gate 		 * szc was non zero and vnode and offset matched after we
10740Sstevel@tonic-gate 		 * locked the page it means it can't become free on us.
10750Sstevel@tonic-gate 		 */
10760Sstevel@tonic-gate 		ASSERT(!PP_ISFREE(pp));
10770Sstevel@tonic-gate 		if (!IS_P2ALIGNED(pfn, pages)) {
10780Sstevel@tonic-gate 			page_unlock(pp);
10790Sstevel@tonic-gate 			return (0);
10800Sstevel@tonic-gate 		}
10810Sstevel@tonic-gate 		ppa[0] = pp;
10820Sstevel@tonic-gate 		pp++;
10830Sstevel@tonic-gate 		off += PAGESIZE;
10840Sstevel@tonic-gate 		pfn++;
10850Sstevel@tonic-gate 		for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
10860Sstevel@tonic-gate 			if (!page_trylock(pp, SE_SHARED)) {
10870Sstevel@tonic-gate 				VM_STAT_ADD(page_exphcontg[6]);
10880Sstevel@tonic-gate 				pp--;
10890Sstevel@tonic-gate 				while (i-- > 0) {
10900Sstevel@tonic-gate 					page_unlock(pp);
10910Sstevel@tonic-gate 					pp--;
10920Sstevel@tonic-gate 				}
10930Sstevel@tonic-gate 				ppa[0] = NULL;
10940Sstevel@tonic-gate 				return (1);
10950Sstevel@tonic-gate 			}
10960Sstevel@tonic-gate 			if (pp->p_szc != pszc) {
10970Sstevel@tonic-gate 				VM_STAT_ADD(page_exphcontg[7]);
10980Sstevel@tonic-gate 				page_unlock(pp);
10990Sstevel@tonic-gate 				pp--;
11000Sstevel@tonic-gate 				while (i-- > 0) {
11010Sstevel@tonic-gate 					page_unlock(pp);
11020Sstevel@tonic-gate 					pp--;
11030Sstevel@tonic-gate 				}
11040Sstevel@tonic-gate 				ppa[0] = NULL;
11050Sstevel@tonic-gate 				off = save_off;
11060Sstevel@tonic-gate 				goto again;
11070Sstevel@tonic-gate 			}
11080Sstevel@tonic-gate 			/*
11090Sstevel@tonic-gate 			 * szc the same as for previous already locked pages
11100Sstevel@tonic-gate 			 * with right identity. Since this page had correct
11110Sstevel@tonic-gate 			 * szc after we locked it can't get freed or destroyed
11120Sstevel@tonic-gate 			 * and therefore must have the expected identity.
11130Sstevel@tonic-gate 			 */
11140Sstevel@tonic-gate 			ASSERT(!PP_ISFREE(pp));
11150Sstevel@tonic-gate 			if (pp->p_vnode != vp ||
11160Sstevel@tonic-gate 			    pp->p_offset != off) {
11170Sstevel@tonic-gate 				panic("page_exists_physcontig: "
11180Sstevel@tonic-gate 				    "large page identity doesn't match");
11190Sstevel@tonic-gate 			}
11200Sstevel@tonic-gate 			ppa[i] = pp;
11210Sstevel@tonic-gate 			ASSERT(pp->p_pagenum == pfn);
11220Sstevel@tonic-gate 		}
11230Sstevel@tonic-gate 		VM_STAT_ADD(page_exphcontg[8]);
11240Sstevel@tonic-gate 		ppa[pages] = NULL;
11250Sstevel@tonic-gate 		return (1);
11260Sstevel@tonic-gate 	} else if (pszc >= szc) {
11270Sstevel@tonic-gate 		VM_STAT_ADD(page_exphcontg[9]);
11280Sstevel@tonic-gate 		if (!IS_P2ALIGNED(pfn, pages)) {
11290Sstevel@tonic-gate 			return (0);
11300Sstevel@tonic-gate 		}
11310Sstevel@tonic-gate 		return (1);
11320Sstevel@tonic-gate 	}
11330Sstevel@tonic-gate 
11340Sstevel@tonic-gate 	if (!IS_P2ALIGNED(pfn, pages)) {
11350Sstevel@tonic-gate 		VM_STAT_ADD(page_exphcontg[10]);
11360Sstevel@tonic-gate 		return (0);
11370Sstevel@tonic-gate 	}
11380Sstevel@tonic-gate 
11390Sstevel@tonic-gate 	if (page_numtomemseg_nolock(pfn) !=
11400Sstevel@tonic-gate 	    page_numtomemseg_nolock(pfn + pages - 1)) {
11410Sstevel@tonic-gate 		VM_STAT_ADD(page_exphcontg[11]);
11420Sstevel@tonic-gate 		return (0);
11430Sstevel@tonic-gate 	}
11440Sstevel@tonic-gate 
11450Sstevel@tonic-gate 	/*
11460Sstevel@tonic-gate 	 * We loop up 4 times across pages to promote page size.
11470Sstevel@tonic-gate 	 * We're extra cautious to promote page size atomically with respect
11480Sstevel@tonic-gate 	 * to everybody else.  But we can probably optimize into 1 loop if
11490Sstevel@tonic-gate 	 * this becomes an issue.
11500Sstevel@tonic-gate 	 */
11510Sstevel@tonic-gate 
11520Sstevel@tonic-gate 	for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
11530Sstevel@tonic-gate 		ASSERT(pp->p_pagenum == pfn);
11540Sstevel@tonic-gate 		if (!page_trylock(pp, SE_EXCL)) {
11550Sstevel@tonic-gate 			VM_STAT_ADD(page_exphcontg[12]);
11560Sstevel@tonic-gate 			break;
11570Sstevel@tonic-gate 		}
11580Sstevel@tonic-gate 		if (pp->p_vnode != vp ||
11590Sstevel@tonic-gate 		    pp->p_offset != off) {
11600Sstevel@tonic-gate 			VM_STAT_ADD(page_exphcontg[13]);
11610Sstevel@tonic-gate 			page_unlock(pp);
11620Sstevel@tonic-gate 			break;
11630Sstevel@tonic-gate 		}
11640Sstevel@tonic-gate 		if (pp->p_szc >= szc) {
11650Sstevel@tonic-gate 			ASSERT(i == 0);
11660Sstevel@tonic-gate 			page_unlock(pp);
11670Sstevel@tonic-gate 			off = save_off;
11680Sstevel@tonic-gate 			goto again;
11690Sstevel@tonic-gate 		}
11700Sstevel@tonic-gate 	}
11710Sstevel@tonic-gate 
11720Sstevel@tonic-gate 	if (i != pages) {
11730Sstevel@tonic-gate 		VM_STAT_ADD(page_exphcontg[14]);
11740Sstevel@tonic-gate 		--pp;
11750Sstevel@tonic-gate 		while (i-- > 0) {
11760Sstevel@tonic-gate 			page_unlock(pp);
11770Sstevel@tonic-gate 			--pp;
11780Sstevel@tonic-gate 		}
11790Sstevel@tonic-gate 		return (0);
11800Sstevel@tonic-gate 	}
11810Sstevel@tonic-gate 
11820Sstevel@tonic-gate 	pp = rootpp;
11830Sstevel@tonic-gate 	for (i = 0; i < pages; i++, pp++) {
11840Sstevel@tonic-gate 		if (PP_ISFREE(pp)) {
11850Sstevel@tonic-gate 			VM_STAT_ADD(page_exphcontg[15]);
11860Sstevel@tonic-gate 			ASSERT(!PP_ISAGED(pp));
11870Sstevel@tonic-gate 			ASSERT(pp->p_szc == 0);
11880Sstevel@tonic-gate 			if (!page_reclaim(pp, NULL)) {
11890Sstevel@tonic-gate 				break;
11900Sstevel@tonic-gate 			}
11910Sstevel@tonic-gate 		} else {
11920Sstevel@tonic-gate 			ASSERT(pp->p_szc < szc);
11930Sstevel@tonic-gate 			VM_STAT_ADD(page_exphcontg[16]);
11940Sstevel@tonic-gate 			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
11950Sstevel@tonic-gate 		}
11960Sstevel@tonic-gate 	}
11970Sstevel@tonic-gate 	if (i < pages) {
11980Sstevel@tonic-gate 		VM_STAT_ADD(page_exphcontg[17]);
11990Sstevel@tonic-gate 		/*
12000Sstevel@tonic-gate 		 * page_reclaim failed because we were out of memory.
12010Sstevel@tonic-gate 		 * drop the rest of the locks and return because this page
12020Sstevel@tonic-gate 		 * must be already reallocated anyway.
12030Sstevel@tonic-gate 		 */
12040Sstevel@tonic-gate 		pp = rootpp;
12050Sstevel@tonic-gate 		for (j = 0; j < pages; j++, pp++) {
12060Sstevel@tonic-gate 			if (j != i) {
12070Sstevel@tonic-gate 				page_unlock(pp);
12080Sstevel@tonic-gate 			}
12090Sstevel@tonic-gate 		}
12100Sstevel@tonic-gate 		return (0);
12110Sstevel@tonic-gate 	}
12120Sstevel@tonic-gate 
12130Sstevel@tonic-gate 	off = save_off;
12140Sstevel@tonic-gate 	pp = rootpp;
12150Sstevel@tonic-gate 	for (i = 0; i < pages; i++, pp++, off += PAGESIZE) {
12160Sstevel@tonic-gate 		ASSERT(PAGE_EXCL(pp));
12170Sstevel@tonic-gate 		ASSERT(!PP_ISFREE(pp));
12180Sstevel@tonic-gate 		ASSERT(!hat_page_is_mapped(pp));
12190Sstevel@tonic-gate 		ASSERT(pp->p_vnode == vp);
12200Sstevel@tonic-gate 		ASSERT(pp->p_offset == off);
12210Sstevel@tonic-gate 		pp->p_szc = szc;
12220Sstevel@tonic-gate 	}
12230Sstevel@tonic-gate 	pp = rootpp;
12240Sstevel@tonic-gate 	for (i = 0; i < pages; i++, pp++) {
12250Sstevel@tonic-gate 		if (ppa == NULL) {
12260Sstevel@tonic-gate 			page_unlock(pp);
12270Sstevel@tonic-gate 		} else {
12280Sstevel@tonic-gate 			ppa[i] = pp;
12290Sstevel@tonic-gate 			page_downgrade(ppa[i]);
12300Sstevel@tonic-gate 		}
12310Sstevel@tonic-gate 	}
12320Sstevel@tonic-gate 	if (ppa != NULL) {
12330Sstevel@tonic-gate 		ppa[pages] = NULL;
12340Sstevel@tonic-gate 	}
12350Sstevel@tonic-gate 	VM_STAT_ADD(page_exphcontg[18]);
12360Sstevel@tonic-gate 	ASSERT(vp->v_pages != NULL);
12370Sstevel@tonic-gate 	return (1);
12380Sstevel@tonic-gate }
12390Sstevel@tonic-gate 
12400Sstevel@tonic-gate /*
12410Sstevel@tonic-gate  * Determine whether a page with the specified [vp, off]
12420Sstevel@tonic-gate  * currently exists in the system and if so return its
12430Sstevel@tonic-gate  * size code. Obviously this should only be considered as
12440Sstevel@tonic-gate  * a hint since nothing prevents the page from disappearing
12450Sstevel@tonic-gate  * or appearing immediately after the return from this routine.
12460Sstevel@tonic-gate  */
12470Sstevel@tonic-gate int
12480Sstevel@tonic-gate page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc)
12490Sstevel@tonic-gate {
12500Sstevel@tonic-gate 	page_t		*pp;
12510Sstevel@tonic-gate 	kmutex_t	*phm;
12520Sstevel@tonic-gate 	ulong_t		index;
12530Sstevel@tonic-gate 	int		rc = 0;
12540Sstevel@tonic-gate 
12550Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
12560Sstevel@tonic-gate 	ASSERT(szc != NULL);
12570Sstevel@tonic-gate 	VM_STAT_ADD(page_exists_forreal_cnt);
12580Sstevel@tonic-gate 
12590Sstevel@tonic-gate 	index = PAGE_HASH_FUNC(vp, off);
12600Sstevel@tonic-gate 	phm = PAGE_HASH_MUTEX(index);
12610Sstevel@tonic-gate 
12620Sstevel@tonic-gate 	mutex_enter(phm);
12630Sstevel@tonic-gate 	PAGE_HASH_SEARCH(index, pp, vp, off);
12640Sstevel@tonic-gate 	if (pp != NULL) {
12650Sstevel@tonic-gate 		*szc = pp->p_szc;
12660Sstevel@tonic-gate 		rc = 1;
12670Sstevel@tonic-gate 	}
12680Sstevel@tonic-gate 	mutex_exit(phm);
12690Sstevel@tonic-gate 	return (rc);
12700Sstevel@tonic-gate }
12710Sstevel@tonic-gate 
12720Sstevel@tonic-gate /* wakeup threads waiting for pages in page_create_get_something() */
12730Sstevel@tonic-gate void
12740Sstevel@tonic-gate wakeup_pcgs(void)
12750Sstevel@tonic-gate {
12760Sstevel@tonic-gate 	if (!CV_HAS_WAITERS(&pcgs_cv))
12770Sstevel@tonic-gate 		return;
12780Sstevel@tonic-gate 	cv_broadcast(&pcgs_cv);
12790Sstevel@tonic-gate }
12800Sstevel@tonic-gate 
12810Sstevel@tonic-gate /*
12820Sstevel@tonic-gate  * 'freemem' is used all over the kernel as an indication of how many
12830Sstevel@tonic-gate  * pages are free (either on the cache list or on the free page list)
12840Sstevel@tonic-gate  * in the system.  In very few places is a really accurate 'freemem'
12850Sstevel@tonic-gate  * needed.  To avoid contention of the lock protecting a the
12860Sstevel@tonic-gate  * single freemem, it was spread out into NCPU buckets.  Set_freemem
12870Sstevel@tonic-gate  * sets freemem to the total of all NCPU buckets.  It is called from
12880Sstevel@tonic-gate  * clock() on each TICK.
12890Sstevel@tonic-gate  */
12900Sstevel@tonic-gate void
12910Sstevel@tonic-gate set_freemem()
12920Sstevel@tonic-gate {
12930Sstevel@tonic-gate 	struct pcf	*p;
12940Sstevel@tonic-gate 	ulong_t		t;
12950Sstevel@tonic-gate 	uint_t		i;
12960Sstevel@tonic-gate 
12970Sstevel@tonic-gate 	t = 0;
12980Sstevel@tonic-gate 	p = pcf;
12990Sstevel@tonic-gate 	for (i = 0;  i < PCF_FANOUT; i++) {
13000Sstevel@tonic-gate 		t += p->pcf_count;
13010Sstevel@tonic-gate 		p++;
13020Sstevel@tonic-gate 	}
13030Sstevel@tonic-gate 	freemem = t;
13040Sstevel@tonic-gate 
13050Sstevel@tonic-gate 	/*
13060Sstevel@tonic-gate 	 * Don't worry about grabbing mutex.  It's not that
13070Sstevel@tonic-gate 	 * critical if we miss a tick or two.  This is
13080Sstevel@tonic-gate 	 * where we wakeup possible delayers in
13090Sstevel@tonic-gate 	 * page_create_get_something().
13100Sstevel@tonic-gate 	 */
13110Sstevel@tonic-gate 	wakeup_pcgs();
13120Sstevel@tonic-gate }
13130Sstevel@tonic-gate 
13140Sstevel@tonic-gate ulong_t
13150Sstevel@tonic-gate get_freemem()
13160Sstevel@tonic-gate {
13170Sstevel@tonic-gate 	struct pcf	*p;
13180Sstevel@tonic-gate 	ulong_t		t;
13190Sstevel@tonic-gate 	uint_t		i;
13200Sstevel@tonic-gate 
13210Sstevel@tonic-gate 	t = 0;
13220Sstevel@tonic-gate 	p = pcf;
13230Sstevel@tonic-gate 	for (i = 0; i < PCF_FANOUT; i++) {
13240Sstevel@tonic-gate 		t += p->pcf_count;
13250Sstevel@tonic-gate 		p++;
13260Sstevel@tonic-gate 	}
13270Sstevel@tonic-gate 	/*
13280Sstevel@tonic-gate 	 * We just calculated it, might as well set it.
13290Sstevel@tonic-gate 	 */
13300Sstevel@tonic-gate 	freemem = t;
13310Sstevel@tonic-gate 	return (t);
13320Sstevel@tonic-gate }
13330Sstevel@tonic-gate 
13340Sstevel@tonic-gate /*
13350Sstevel@tonic-gate  * Acquire all of the page cache & free (pcf) locks.
13360Sstevel@tonic-gate  */
13370Sstevel@tonic-gate void
13380Sstevel@tonic-gate pcf_acquire_all()
13390Sstevel@tonic-gate {
13400Sstevel@tonic-gate 	struct pcf	*p;
13410Sstevel@tonic-gate 	uint_t		i;
13420Sstevel@tonic-gate 
13430Sstevel@tonic-gate 	p = pcf;
13440Sstevel@tonic-gate 	for (i = 0; i < PCF_FANOUT; i++) {
13450Sstevel@tonic-gate 		p->pcf_touch = 1;
13460Sstevel@tonic-gate 		mutex_enter(&p->pcf_lock);
13470Sstevel@tonic-gate 		p++;
13480Sstevel@tonic-gate 	}
13490Sstevel@tonic-gate }
13500Sstevel@tonic-gate 
13510Sstevel@tonic-gate /*
13520Sstevel@tonic-gate  * Release all the pcf_locks.
13530Sstevel@tonic-gate  */
13540Sstevel@tonic-gate void
13550Sstevel@tonic-gate pcf_release_all()
13560Sstevel@tonic-gate {
13570Sstevel@tonic-gate 	struct pcf	*p;
13580Sstevel@tonic-gate 	uint_t		i;
13590Sstevel@tonic-gate 
13600Sstevel@tonic-gate 	p = pcf;
13610Sstevel@tonic-gate 	for (i = 0; i < PCF_FANOUT; i++) {
13620Sstevel@tonic-gate 		mutex_exit(&p->pcf_lock);
13630Sstevel@tonic-gate 		p++;
13640Sstevel@tonic-gate 	}
13650Sstevel@tonic-gate }
13660Sstevel@tonic-gate 
13670Sstevel@tonic-gate /*
13680Sstevel@tonic-gate  * Inform the VM system that we need some pages freed up.
13690Sstevel@tonic-gate  * Calls must be symmetric, e.g.:
13700Sstevel@tonic-gate  *
13710Sstevel@tonic-gate  *	page_needfree(100);
13720Sstevel@tonic-gate  *	wait a bit;
13730Sstevel@tonic-gate  *	page_needfree(-100);
13740Sstevel@tonic-gate  */
13750Sstevel@tonic-gate void
13760Sstevel@tonic-gate page_needfree(spgcnt_t npages)
13770Sstevel@tonic-gate {
13780Sstevel@tonic-gate 	mutex_enter(&new_freemem_lock);
13790Sstevel@tonic-gate 	needfree += npages;
13800Sstevel@tonic-gate 	mutex_exit(&new_freemem_lock);
13810Sstevel@tonic-gate }
13820Sstevel@tonic-gate 
13830Sstevel@tonic-gate /*
13840Sstevel@tonic-gate  * Throttle for page_create(): try to prevent freemem from dropping
13850Sstevel@tonic-gate  * below throttlefree.  We can't provide a 100% guarantee because
13860Sstevel@tonic-gate  * KM_NOSLEEP allocations, page_reclaim(), and various other things
13870Sstevel@tonic-gate  * nibble away at the freelist.  However, we can block all PG_WAIT
13880Sstevel@tonic-gate  * allocations until memory becomes available.  The motivation is
13890Sstevel@tonic-gate  * that several things can fall apart when there's no free memory:
13900Sstevel@tonic-gate  *
13910Sstevel@tonic-gate  * (1) If pageout() needs memory to push a page, the system deadlocks.
13920Sstevel@tonic-gate  *
13930Sstevel@tonic-gate  * (2) By (broken) specification, timeout(9F) can neither fail nor
13940Sstevel@tonic-gate  *     block, so it has no choice but to panic the system if it
13950Sstevel@tonic-gate  *     cannot allocate a callout structure.
13960Sstevel@tonic-gate  *
13970Sstevel@tonic-gate  * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block;
13980Sstevel@tonic-gate  *     it panics if it cannot allocate a callback structure.
13990Sstevel@tonic-gate  *
14000Sstevel@tonic-gate  * (4) Untold numbers of third-party drivers have not yet been hardened
14010Sstevel@tonic-gate  *     against KM_NOSLEEP and/or allocb() failures; they simply assume
14020Sstevel@tonic-gate  *     success and panic the system with a data fault on failure.
14030Sstevel@tonic-gate  *     (The long-term solution to this particular problem is to ship
14040Sstevel@tonic-gate  *     hostile fault-injecting DEBUG kernels with the DDK.)
14050Sstevel@tonic-gate  *
14060Sstevel@tonic-gate  * It is theoretically impossible to guarantee success of non-blocking
14070Sstevel@tonic-gate  * allocations, but in practice, this throttle is very hard to break.
14080Sstevel@tonic-gate  */
14090Sstevel@tonic-gate static int
14100Sstevel@tonic-gate page_create_throttle(pgcnt_t npages, int flags)
14110Sstevel@tonic-gate {
14120Sstevel@tonic-gate 	ulong_t	fm;
14130Sstevel@tonic-gate 	uint_t	i;
14140Sstevel@tonic-gate 	pgcnt_t tf;	/* effective value of throttlefree */
14150Sstevel@tonic-gate 
14160Sstevel@tonic-gate 	/*
14170Sstevel@tonic-gate 	 * Never deny pages when:
14180Sstevel@tonic-gate 	 * - it's a thread that cannot block [NOMEMWAIT()]
14190Sstevel@tonic-gate 	 * - the allocation cannot block and must not fail
14200Sstevel@tonic-gate 	 * - the allocation cannot block and is pageout dispensated
14210Sstevel@tonic-gate 	 */
14220Sstevel@tonic-gate 	if (NOMEMWAIT() ||
14230Sstevel@tonic-gate 	    ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) ||
14240Sstevel@tonic-gate 	    ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE))
14250Sstevel@tonic-gate 		return (1);
14260Sstevel@tonic-gate 
14270Sstevel@tonic-gate 	/*
14280Sstevel@tonic-gate 	 * If the allocation can't block, we look favorably upon it
14290Sstevel@tonic-gate 	 * unless we're below pageout_reserve.  In that case we fail
14300Sstevel@tonic-gate 	 * the allocation because we want to make sure there are a few
14310Sstevel@tonic-gate 	 * pages available for pageout.
14320Sstevel@tonic-gate 	 */
14330Sstevel@tonic-gate 	if ((flags & PG_WAIT) == 0)
14340Sstevel@tonic-gate 		return (freemem >= npages + pageout_reserve);
14350Sstevel@tonic-gate 
14360Sstevel@tonic-gate 	/* Calculate the effective throttlefree value */
14370Sstevel@tonic-gate 	tf = throttlefree -
14380Sstevel@tonic-gate 	    ((flags & PG_PUSHPAGE) ? pageout_reserve : 0);
14390Sstevel@tonic-gate 
14400Sstevel@tonic-gate 	cv_signal(&proc_pageout->p_cv);
14410Sstevel@tonic-gate 
14420Sstevel@tonic-gate 	while (freemem < npages + tf) {
14430Sstevel@tonic-gate 		pcf_acquire_all();
14440Sstevel@tonic-gate 		mutex_enter(&new_freemem_lock);
14450Sstevel@tonic-gate 		fm = 0;
14460Sstevel@tonic-gate 		for (i = 0; i < PCF_FANOUT; i++) {
14470Sstevel@tonic-gate 			fm += pcf[i].pcf_count;
14480Sstevel@tonic-gate 			pcf[i].pcf_wait++;
14490Sstevel@tonic-gate 			mutex_exit(&pcf[i].pcf_lock);
14500Sstevel@tonic-gate 		}
14510Sstevel@tonic-gate 		freemem = fm;
14520Sstevel@tonic-gate 		needfree += npages;
14530Sstevel@tonic-gate 		freemem_wait++;
14540Sstevel@tonic-gate 		cv_wait(&freemem_cv, &new_freemem_lock);
14550Sstevel@tonic-gate 		freemem_wait--;
14560Sstevel@tonic-gate 		needfree -= npages;
14570Sstevel@tonic-gate 		mutex_exit(&new_freemem_lock);
14580Sstevel@tonic-gate 	}
14590Sstevel@tonic-gate 	return (1);
14600Sstevel@tonic-gate }
14610Sstevel@tonic-gate 
14620Sstevel@tonic-gate /*
14630Sstevel@tonic-gate  * page_create_wait() is called to either coalecse pages from the
14640Sstevel@tonic-gate  * different pcf buckets or to wait because there simply are not
14650Sstevel@tonic-gate  * enough pages to satisfy the caller's request.
14660Sstevel@tonic-gate  *
14670Sstevel@tonic-gate  * Sadly, this is called from platform/vm/vm_machdep.c
14680Sstevel@tonic-gate  */
14690Sstevel@tonic-gate int
14700Sstevel@tonic-gate page_create_wait(size_t npages, uint_t flags)
14710Sstevel@tonic-gate {
14720Sstevel@tonic-gate 	pgcnt_t		total;
14730Sstevel@tonic-gate 	uint_t		i;
14740Sstevel@tonic-gate 	struct pcf	*p;
14750Sstevel@tonic-gate 
14760Sstevel@tonic-gate 	/*
14770Sstevel@tonic-gate 	 * Wait until there are enough free pages to satisfy our
14780Sstevel@tonic-gate 	 * entire request.
14790Sstevel@tonic-gate 	 * We set needfree += npages before prodding pageout, to make sure
14800Sstevel@tonic-gate 	 * it does real work when npages > lotsfree > freemem.
14810Sstevel@tonic-gate 	 */
14820Sstevel@tonic-gate 	VM_STAT_ADD(page_create_not_enough);
14830Sstevel@tonic-gate 
14840Sstevel@tonic-gate 	ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1);
14850Sstevel@tonic-gate checkagain:
14860Sstevel@tonic-gate 	if ((flags & PG_NORELOC) &&
14870Sstevel@tonic-gate 	    kcage_freemem < kcage_throttlefree + npages)
14880Sstevel@tonic-gate 		(void) kcage_create_throttle(npages, flags);
14890Sstevel@tonic-gate 
14900Sstevel@tonic-gate 	if (freemem < npages + throttlefree)
14910Sstevel@tonic-gate 		if (!page_create_throttle(npages, flags))
14920Sstevel@tonic-gate 			return (0);
14930Sstevel@tonic-gate 
14940Sstevel@tonic-gate 	/*
14950Sstevel@tonic-gate 	 * Since page_create_va() looked at every
14960Sstevel@tonic-gate 	 * bucket, assume we are going to have to wait.
14970Sstevel@tonic-gate 	 * Get all of the pcf locks.
14980Sstevel@tonic-gate 	 */
14990Sstevel@tonic-gate 	total = 0;
15000Sstevel@tonic-gate 	p = pcf;
15010Sstevel@tonic-gate 	for (i = 0; i < PCF_FANOUT; i++) {
15020Sstevel@tonic-gate 		p->pcf_touch = 1;
15030Sstevel@tonic-gate 		mutex_enter(&p->pcf_lock);
15040Sstevel@tonic-gate 		total += p->pcf_count;
15050Sstevel@tonic-gate 		if (total >= npages) {
15060Sstevel@tonic-gate 			/*
15070Sstevel@tonic-gate 			 * Wow!  There are enough pages laying around
15080Sstevel@tonic-gate 			 * to satisfy the request.  Do the accounting,
15090Sstevel@tonic-gate 			 * drop the locks we acquired, and go back.
15100Sstevel@tonic-gate 			 *
15110Sstevel@tonic-gate 			 * freemem is not protected by any lock. So,
15120Sstevel@tonic-gate 			 * we cannot have any assertion containing
15130Sstevel@tonic-gate 			 * freemem.
15140Sstevel@tonic-gate 			 */
15150Sstevel@tonic-gate 			freemem -= npages;
15160Sstevel@tonic-gate 
15170Sstevel@tonic-gate 			while (p >= pcf) {
15180Sstevel@tonic-gate 				if (p->pcf_count <= npages) {
15190Sstevel@tonic-gate 					npages -= p->pcf_count;
15200Sstevel@tonic-gate 					p->pcf_count = 0;
15210Sstevel@tonic-gate 				} else {
15220Sstevel@tonic-gate 					p->pcf_count -= (uint_t)npages;
15230Sstevel@tonic-gate 					npages = 0;
15240Sstevel@tonic-gate 				}
15250Sstevel@tonic-gate 				mutex_exit(&p->pcf_lock);
15260Sstevel@tonic-gate 				p--;
15270Sstevel@tonic-gate 			}
15280Sstevel@tonic-gate 			ASSERT(npages == 0);
15290Sstevel@tonic-gate 			return (1);
15300Sstevel@tonic-gate 		}
15310Sstevel@tonic-gate 		p++;
15320Sstevel@tonic-gate 	}
15330Sstevel@tonic-gate 
15340Sstevel@tonic-gate 	/*
15350Sstevel@tonic-gate 	 * All of the pcf locks are held, there are not enough pages
15360Sstevel@tonic-gate 	 * to satisfy the request (npages < total).
15370Sstevel@tonic-gate 	 * Be sure to acquire the new_freemem_lock before dropping
15380Sstevel@tonic-gate 	 * the pcf locks.  This prevents dropping wakeups in page_free().
15390Sstevel@tonic-gate 	 * The order is always pcf_lock then new_freemem_lock.
15400Sstevel@tonic-gate 	 *
15410Sstevel@tonic-gate 	 * Since we hold all the pcf locks, it is a good time to set freemem.
15420Sstevel@tonic-gate 	 *
15430Sstevel@tonic-gate 	 * If the caller does not want to wait, return now.
15440Sstevel@tonic-gate 	 * Else turn the pageout daemon loose to find something
15450Sstevel@tonic-gate 	 * and wait till it does.
15460Sstevel@tonic-gate 	 *
15470Sstevel@tonic-gate 	 */
15480Sstevel@tonic-gate 	freemem = total;
15490Sstevel@tonic-gate 
15500Sstevel@tonic-gate 	if ((flags & PG_WAIT) == 0) {
15510Sstevel@tonic-gate 		pcf_release_all();
15520Sstevel@tonic-gate 
15530Sstevel@tonic-gate 		TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM,
15540Sstevel@tonic-gate 		"page_create_nomem:npages %ld freemem %ld", npages, freemem);
15550Sstevel@tonic-gate 		return (0);
15560Sstevel@tonic-gate 	}
15570Sstevel@tonic-gate 
15580Sstevel@tonic-gate 	ASSERT(proc_pageout != NULL);
15590Sstevel@tonic-gate 	cv_signal(&proc_pageout->p_cv);
15600Sstevel@tonic-gate 
15610Sstevel@tonic-gate 	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START,
15620Sstevel@tonic-gate 	    "page_create_sleep_start: freemem %ld needfree %ld",
15630Sstevel@tonic-gate 	    freemem, needfree);
15640Sstevel@tonic-gate 
15650Sstevel@tonic-gate 	/*
15660Sstevel@tonic-gate 	 * We are going to wait.
15670Sstevel@tonic-gate 	 * We currently hold all of the pcf_locks,
15680Sstevel@tonic-gate 	 * get the new_freemem_lock (it protects freemem_wait),
15690Sstevel@tonic-gate 	 * before dropping the pcf_locks.
15700Sstevel@tonic-gate 	 */
15710Sstevel@tonic-gate 	mutex_enter(&new_freemem_lock);
15720Sstevel@tonic-gate 
15730Sstevel@tonic-gate 	p = pcf;
15740Sstevel@tonic-gate 	for (i = 0; i < PCF_FANOUT; i++) {
15750Sstevel@tonic-gate 		p->pcf_wait++;
15760Sstevel@tonic-gate 		mutex_exit(&p->pcf_lock);
15770Sstevel@tonic-gate 		p++;
15780Sstevel@tonic-gate 	}
15790Sstevel@tonic-gate 
15800Sstevel@tonic-gate 	needfree += npages;
15810Sstevel@tonic-gate 	freemem_wait++;
15820Sstevel@tonic-gate 
15830Sstevel@tonic-gate 	cv_wait(&freemem_cv, &new_freemem_lock);
15840Sstevel@tonic-gate 
15850Sstevel@tonic-gate 	freemem_wait--;
15860Sstevel@tonic-gate 	needfree -= npages;
15870Sstevel@tonic-gate 
15880Sstevel@tonic-gate 	mutex_exit(&new_freemem_lock);
15890Sstevel@tonic-gate 
15900Sstevel@tonic-gate 	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END,
15910Sstevel@tonic-gate 	    "page_create_sleep_end: freemem %ld needfree %ld",
15920Sstevel@tonic-gate 	    freemem, needfree);
15930Sstevel@tonic-gate 
15940Sstevel@tonic-gate 	VM_STAT_ADD(page_create_not_enough_again);
15950Sstevel@tonic-gate 	goto checkagain;
15960Sstevel@tonic-gate }
15970Sstevel@tonic-gate 
15980Sstevel@tonic-gate /*
15990Sstevel@tonic-gate  * A routine to do the opposite of page_create_wait().
16000Sstevel@tonic-gate  */
16010Sstevel@tonic-gate void
16020Sstevel@tonic-gate page_create_putback(spgcnt_t npages)
16030Sstevel@tonic-gate {
16040Sstevel@tonic-gate 	struct pcf	*p;
16050Sstevel@tonic-gate 	pgcnt_t		lump;
16060Sstevel@tonic-gate 	uint_t		*which;
16070Sstevel@tonic-gate 
16080Sstevel@tonic-gate 	/*
16090Sstevel@tonic-gate 	 * When a contiguous lump is broken up, we have to
16100Sstevel@tonic-gate 	 * deal with lots of pages (min 64) so lets spread
16110Sstevel@tonic-gate 	 * the wealth around.
16120Sstevel@tonic-gate 	 */
16130Sstevel@tonic-gate 	lump = roundup(npages, PCF_FANOUT) / PCF_FANOUT;
16140Sstevel@tonic-gate 	freemem += npages;
16150Sstevel@tonic-gate 
16160Sstevel@tonic-gate 	for (p = pcf; (npages > 0) && (p < &pcf[PCF_FANOUT]); p++) {
16170Sstevel@tonic-gate 		which = &p->pcf_count;
16180Sstevel@tonic-gate 
16190Sstevel@tonic-gate 		mutex_enter(&p->pcf_lock);
16200Sstevel@tonic-gate 
16210Sstevel@tonic-gate 		if (p->pcf_block) {
16220Sstevel@tonic-gate 			which = &p->pcf_reserve;
16230Sstevel@tonic-gate 		}
16240Sstevel@tonic-gate 
16250Sstevel@tonic-gate 		if (lump < npages) {
16260Sstevel@tonic-gate 			*which += (uint_t)lump;
16270Sstevel@tonic-gate 			npages -= lump;
16280Sstevel@tonic-gate 		} else {
16290Sstevel@tonic-gate 			*which += (uint_t)npages;
16300Sstevel@tonic-gate 			npages = 0;
16310Sstevel@tonic-gate 		}
16320Sstevel@tonic-gate 
16330Sstevel@tonic-gate 		if (p->pcf_wait) {
16340Sstevel@tonic-gate 			mutex_enter(&new_freemem_lock);
16350Sstevel@tonic-gate 			/*
16360Sstevel@tonic-gate 			 * Check to see if some other thread
16370Sstevel@tonic-gate 			 * is actually waiting.  Another bucket
16380Sstevel@tonic-gate 			 * may have woken it up by now.  If there
16390Sstevel@tonic-gate 			 * are no waiters, then set our pcf_wait
16400Sstevel@tonic-gate 			 * count to zero to avoid coming in here
16410Sstevel@tonic-gate 			 * next time.
16420Sstevel@tonic-gate 			 */
16430Sstevel@tonic-gate 			if (freemem_wait) {
16440Sstevel@tonic-gate 				if (npages > 1) {
16450Sstevel@tonic-gate 					cv_broadcast(&freemem_cv);
16460Sstevel@tonic-gate 				} else {
16470Sstevel@tonic-gate 					cv_signal(&freemem_cv);
16480Sstevel@tonic-gate 				}
16490Sstevel@tonic-gate 				p->pcf_wait--;
16500Sstevel@tonic-gate 			} else {
16510Sstevel@tonic-gate 				p->pcf_wait = 0;
16520Sstevel@tonic-gate 			}
16530Sstevel@tonic-gate 			mutex_exit(&new_freemem_lock);
16540Sstevel@tonic-gate 		}
16550Sstevel@tonic-gate 		mutex_exit(&p->pcf_lock);
16560Sstevel@tonic-gate 	}
16570Sstevel@tonic-gate 	ASSERT(npages == 0);
16580Sstevel@tonic-gate }
16590Sstevel@tonic-gate 
16600Sstevel@tonic-gate /*
16610Sstevel@tonic-gate  * A helper routine for page_create_get_something.
16620Sstevel@tonic-gate  * The indenting got to deep down there.
16630Sstevel@tonic-gate  * Unblock the pcf counters.  Any pages freed after
16640Sstevel@tonic-gate  * pcf_block got set are moved to pcf_count and
16650Sstevel@tonic-gate  * wakeups (cv_broadcast() or cv_signal()) are done as needed.
16660Sstevel@tonic-gate  */
16670Sstevel@tonic-gate static void
16680Sstevel@tonic-gate pcgs_unblock(void)
16690Sstevel@tonic-gate {
16700Sstevel@tonic-gate 	int		i;
16710Sstevel@tonic-gate 	struct pcf	*p;
16720Sstevel@tonic-gate 
16730Sstevel@tonic-gate 	/* Update freemem while we're here. */
16740Sstevel@tonic-gate 	freemem = 0;
16750Sstevel@tonic-gate 	p = pcf;
16760Sstevel@tonic-gate 	for (i = 0; i < PCF_FANOUT; i++) {
16770Sstevel@tonic-gate 		mutex_enter(&p->pcf_lock);
16780Sstevel@tonic-gate 		ASSERT(p->pcf_count == 0);
16790Sstevel@tonic-gate 		p->pcf_count = p->pcf_reserve;
16800Sstevel@tonic-gate 		p->pcf_block = 0;
16810Sstevel@tonic-gate 		freemem += p->pcf_count;
16820Sstevel@tonic-gate 		if (p->pcf_wait) {
16830Sstevel@tonic-gate 			mutex_enter(&new_freemem_lock);
16840Sstevel@tonic-gate 			if (freemem_wait) {
16850Sstevel@tonic-gate 				if (p->pcf_reserve > 1) {
16860Sstevel@tonic-gate 					cv_broadcast(&freemem_cv);
16870Sstevel@tonic-gate 					p->pcf_wait = 0;
16880Sstevel@tonic-gate 				} else {
16890Sstevel@tonic-gate 					cv_signal(&freemem_cv);
16900Sstevel@tonic-gate 					p->pcf_wait--;
16910Sstevel@tonic-gate 				}
16920Sstevel@tonic-gate 			} else {
16930Sstevel@tonic-gate 				p->pcf_wait = 0;
16940Sstevel@tonic-gate 			}
16950Sstevel@tonic-gate 			mutex_exit(&new_freemem_lock);
16960Sstevel@tonic-gate 		}
16970Sstevel@tonic-gate 		p->pcf_reserve = 0;
16980Sstevel@tonic-gate 		mutex_exit(&p->pcf_lock);
16990Sstevel@tonic-gate 		p++;
17000Sstevel@tonic-gate 	}
17010Sstevel@tonic-gate }
17020Sstevel@tonic-gate 
17030Sstevel@tonic-gate /*
17040Sstevel@tonic-gate  * Called from page_create_va() when both the cache and free lists
17050Sstevel@tonic-gate  * have been checked once.
17060Sstevel@tonic-gate  *
17070Sstevel@tonic-gate  * Either returns a page or panics since the accounting was done
17080Sstevel@tonic-gate  * way before we got here.
17090Sstevel@tonic-gate  *
17100Sstevel@tonic-gate  * We don't come here often, so leave the accounting on permanently.
17110Sstevel@tonic-gate  */
17120Sstevel@tonic-gate 
17130Sstevel@tonic-gate #define	MAX_PCGS	100
17140Sstevel@tonic-gate 
17150Sstevel@tonic-gate #ifdef	DEBUG
17160Sstevel@tonic-gate #define	PCGS_TRIES	100
17170Sstevel@tonic-gate #else	/* DEBUG */
17180Sstevel@tonic-gate #define	PCGS_TRIES	10
17190Sstevel@tonic-gate #endif	/* DEBUG */
17200Sstevel@tonic-gate 
17210Sstevel@tonic-gate #ifdef	VM_STATS
17220Sstevel@tonic-gate uint_t	pcgs_counts[PCGS_TRIES];
17230Sstevel@tonic-gate uint_t	pcgs_too_many;
17240Sstevel@tonic-gate uint_t	pcgs_entered;
17250Sstevel@tonic-gate uint_t	pcgs_entered_noreloc;
17260Sstevel@tonic-gate uint_t	pcgs_locked;
17270Sstevel@tonic-gate uint_t	pcgs_cagelocked;
17280Sstevel@tonic-gate #endif	/* VM_STATS */
17290Sstevel@tonic-gate 
17300Sstevel@tonic-gate static page_t *
17310Sstevel@tonic-gate page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg,
17320Sstevel@tonic-gate     caddr_t vaddr, uint_t flags)
17330Sstevel@tonic-gate {
17340Sstevel@tonic-gate 	uint_t		count;
17350Sstevel@tonic-gate 	page_t		*pp;
17360Sstevel@tonic-gate 	uint_t		locked, i;
17370Sstevel@tonic-gate 	struct	pcf	*p;
17380Sstevel@tonic-gate 	lgrp_t		*lgrp;
17390Sstevel@tonic-gate 	int		cagelocked = 0;
17400Sstevel@tonic-gate 
17410Sstevel@tonic-gate 	VM_STAT_ADD(pcgs_entered);
17420Sstevel@tonic-gate 
17430Sstevel@tonic-gate 	/*
17440Sstevel@tonic-gate 	 * Tap any reserve freelists: if we fail now, we'll die
17450Sstevel@tonic-gate 	 * since the page(s) we're looking for have already been
17460Sstevel@tonic-gate 	 * accounted for.
17470Sstevel@tonic-gate 	 */
17480Sstevel@tonic-gate 	flags |= PG_PANIC;
17490Sstevel@tonic-gate 
17500Sstevel@tonic-gate 	if ((flags & PG_NORELOC) != 0) {
17510Sstevel@tonic-gate 		VM_STAT_ADD(pcgs_entered_noreloc);
17520Sstevel@tonic-gate 		/*
17530Sstevel@tonic-gate 		 * Requests for free pages from critical threads
17540Sstevel@tonic-gate 		 * such as pageout still won't throttle here, but
17550Sstevel@tonic-gate 		 * we must try again, to give the cageout thread
17560Sstevel@tonic-gate 		 * another chance to catch up. Since we already
17570Sstevel@tonic-gate 		 * accounted for the pages, we had better get them
17580Sstevel@tonic-gate 		 * this time.
17590Sstevel@tonic-gate 		 *
17600Sstevel@tonic-gate 		 * N.B. All non-critical threads acquire the pcgs_cagelock
17610Sstevel@tonic-gate 		 * to serialize access to the freelists. This implements a
17620Sstevel@tonic-gate 		 * turnstile-type synchornization to avoid starvation of
17630Sstevel@tonic-gate 		 * critical requests for PG_NORELOC memory by non-critical
17640Sstevel@tonic-gate 		 * threads: all non-critical threads must acquire a 'ticket'
17650Sstevel@tonic-gate 		 * before passing through, which entails making sure
17660Sstevel@tonic-gate 		 * kcage_freemem won't fall below minfree prior to grabbing
17670Sstevel@tonic-gate 		 * pages from the freelists.
17680Sstevel@tonic-gate 		 */
17690Sstevel@tonic-gate 		if (kcage_create_throttle(1, flags) == KCT_NONCRIT) {
17700Sstevel@tonic-gate 			mutex_enter(&pcgs_cagelock);
17710Sstevel@tonic-gate 			cagelocked = 1;
17720Sstevel@tonic-gate 			VM_STAT_ADD(pcgs_cagelocked);
17730Sstevel@tonic-gate 		}
17740Sstevel@tonic-gate 	}
17750Sstevel@tonic-gate 
17760Sstevel@tonic-gate 	/*
17770Sstevel@tonic-gate 	 * Time to get serious.
17780Sstevel@tonic-gate 	 * We failed to get a `correctly colored' page from both the
17790Sstevel@tonic-gate 	 * free and cache lists.
17800Sstevel@tonic-gate 	 * We escalate in stage.
17810Sstevel@tonic-gate 	 *
17820Sstevel@tonic-gate 	 * First try both lists without worring about color.
17830Sstevel@tonic-gate 	 *
17840Sstevel@tonic-gate 	 * Then, grab all page accounting locks (ie. pcf[]) and
17850Sstevel@tonic-gate 	 * steal any pages that they have and set the pcf_block flag to
17860Sstevel@tonic-gate 	 * stop deletions from the lists.  This will help because
17870Sstevel@tonic-gate 	 * a page can get added to the free list while we are looking
17880Sstevel@tonic-gate 	 * at the cache list, then another page could be added to the cache
17890Sstevel@tonic-gate 	 * list allowing the page on the free list to be removed as we
17900Sstevel@tonic-gate 	 * move from looking at the cache list to the free list. This
17910Sstevel@tonic-gate 	 * could happen over and over. We would never find the page
17920Sstevel@tonic-gate 	 * we have accounted for.
17930Sstevel@tonic-gate 	 *
17940Sstevel@tonic-gate 	 * Noreloc pages are a subset of the global (relocatable) page pool.
17950Sstevel@tonic-gate 	 * They are not tracked separately in the pcf bins, so it is
17960Sstevel@tonic-gate 	 * impossible to know when doing pcf accounting if the available
17970Sstevel@tonic-gate 	 * page(s) are noreloc pages or not. When looking for a noreloc page
17980Sstevel@tonic-gate 	 * it is quite easy to end up here even if the global (relocatable)
17990Sstevel@tonic-gate 	 * page pool has plenty of free pages but the noreloc pool is empty.
18000Sstevel@tonic-gate 	 *
18010Sstevel@tonic-gate 	 * When the noreloc pool is empty (or low), additional noreloc pages
18020Sstevel@tonic-gate 	 * are created by converting pages from the global page pool. This
18030Sstevel@tonic-gate 	 * process will stall during pcf accounting if the pcf bins are
18040Sstevel@tonic-gate 	 * already locked. Such is the case when a noreloc allocation is
18050Sstevel@tonic-gate 	 * looping here in page_create_get_something waiting for more noreloc
18060Sstevel@tonic-gate 	 * pages to appear.
18070Sstevel@tonic-gate 	 *
18080Sstevel@tonic-gate 	 * Short of adding a new field to the pcf bins to accurately track
18090Sstevel@tonic-gate 	 * the number of free noreloc pages, we instead do not grab the
18100Sstevel@tonic-gate 	 * pcgs_lock, do not set the pcf blocks and do not timeout when
18110Sstevel@tonic-gate 	 * allocating a noreloc page. This allows noreloc allocations to
18120Sstevel@tonic-gate 	 * loop without blocking global page pool allocations.
18130Sstevel@tonic-gate 	 *
18140Sstevel@tonic-gate 	 * NOTE: the behaviour of page_create_get_something has not changed
18150Sstevel@tonic-gate 	 * for the case of global page pool allocations.
18160Sstevel@tonic-gate 	 */
18170Sstevel@tonic-gate 
18180Sstevel@tonic-gate 	flags &= ~PG_MATCH_COLOR;
18190Sstevel@tonic-gate 	locked = 0;
18200Sstevel@tonic-gate #ifndef __sparc
18210Sstevel@tonic-gate 	/*
18220Sstevel@tonic-gate 	 * page_create_get_something may be called because 4g memory may be
18230Sstevel@tonic-gate 	 * depleted. Set flags to allow for relocation of base page below
18240Sstevel@tonic-gate 	 * 4g if necessary.
18250Sstevel@tonic-gate 	 */
18260Sstevel@tonic-gate 	if (physmax4g)
18270Sstevel@tonic-gate 		flags |= (PGI_PGCPSZC0 | PGI_PGCPHIPRI);
18280Sstevel@tonic-gate #endif
18290Sstevel@tonic-gate 
18300Sstevel@tonic-gate 	lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);
18310Sstevel@tonic-gate 
18320Sstevel@tonic-gate 	for (count = 0; kcage_on || count < MAX_PCGS; count++) {
18330Sstevel@tonic-gate 		pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
18340Sstevel@tonic-gate 		    flags, lgrp);
18350Sstevel@tonic-gate 		if (pp == NULL) {
18360Sstevel@tonic-gate 			pp = page_get_cachelist(vp, off, seg, vaddr,
18370Sstevel@tonic-gate 				flags, lgrp);
18380Sstevel@tonic-gate 		}
18390Sstevel@tonic-gate 		if (pp == NULL) {
18400Sstevel@tonic-gate 			/*
18410Sstevel@tonic-gate 			 * Serialize.  Don't fight with other pcgs().
18420Sstevel@tonic-gate 			 */
18430Sstevel@tonic-gate 			if (!locked && (!kcage_on || !(flags & PG_NORELOC))) {
18440Sstevel@tonic-gate 				mutex_enter(&pcgs_lock);
18450Sstevel@tonic-gate 				VM_STAT_ADD(pcgs_locked);
18460Sstevel@tonic-gate 				locked = 1;
18470Sstevel@tonic-gate 				p = pcf;
18480Sstevel@tonic-gate 				for (i = 0; i < PCF_FANOUT; i++) {
18490Sstevel@tonic-gate 					mutex_enter(&p->pcf_lock);
18500Sstevel@tonic-gate 					ASSERT(p->pcf_block == 0);
18510Sstevel@tonic-gate 					p->pcf_block = 1;
18520Sstevel@tonic-gate 					p->pcf_reserve = p->pcf_count;
18530Sstevel@tonic-gate 					p->pcf_count = 0;
18540Sstevel@tonic-gate 					mutex_exit(&p->pcf_lock);
18550Sstevel@tonic-gate 					p++;
18560Sstevel@tonic-gate 				}
18570Sstevel@tonic-gate 				freemem = 0;
18580Sstevel@tonic-gate 			}
18590Sstevel@tonic-gate 
18600Sstevel@tonic-gate 			if (count) {
18610Sstevel@tonic-gate 				/*
18620Sstevel@tonic-gate 				 * Since page_free() puts pages on
18630Sstevel@tonic-gate 				 * a list then accounts for it, we
18640Sstevel@tonic-gate 				 * just have to wait for page_free()
18650Sstevel@tonic-gate 				 * to unlock any page it was working
18660Sstevel@tonic-gate 				 * with. The page_lock()-page_reclaim()
18670Sstevel@tonic-gate 				 * path falls in the same boat.
18680Sstevel@tonic-gate 				 *
18690Sstevel@tonic-gate 				 * We don't need to check on the
18700Sstevel@tonic-gate 				 * PG_WAIT flag, we have already
18710Sstevel@tonic-gate 				 * accounted for the page we are
18720Sstevel@tonic-gate 				 * looking for in page_create_va().
18730Sstevel@tonic-gate 				 *
18740Sstevel@tonic-gate 				 * We just wait a moment to let any
18750Sstevel@tonic-gate 				 * locked pages on the lists free up,
18760Sstevel@tonic-gate 				 * then continue around and try again.
18770Sstevel@tonic-gate 				 *
18780Sstevel@tonic-gate 				 * Will be awakened by set_freemem().
18790Sstevel@tonic-gate 				 */
18800Sstevel@tonic-gate 				mutex_enter(&pcgs_wait_lock);
18810Sstevel@tonic-gate 				cv_wait(&pcgs_cv, &pcgs_wait_lock);
18820Sstevel@tonic-gate 				mutex_exit(&pcgs_wait_lock);
18830Sstevel@tonic-gate 			}
18840Sstevel@tonic-gate 		} else {
18850Sstevel@tonic-gate #ifdef VM_STATS
18860Sstevel@tonic-gate 			if (count >= PCGS_TRIES) {
18870Sstevel@tonic-gate 				VM_STAT_ADD(pcgs_too_many);
18880Sstevel@tonic-gate 			} else {
18890Sstevel@tonic-gate 				VM_STAT_ADD(pcgs_counts[count]);
18900Sstevel@tonic-gate 			}
18910Sstevel@tonic-gate #endif
18920Sstevel@tonic-gate 			if (locked) {
18930Sstevel@tonic-gate 				pcgs_unblock();
18940Sstevel@tonic-gate 				mutex_exit(&pcgs_lock);
18950Sstevel@tonic-gate 			}
18960Sstevel@tonic-gate 			if (cagelocked)
18970Sstevel@tonic-gate 				mutex_exit(&pcgs_cagelock);
18980Sstevel@tonic-gate 			return (pp);
18990Sstevel@tonic-gate 		}
19000Sstevel@tonic-gate 	}
19010Sstevel@tonic-gate 	/*
19020Sstevel@tonic-gate 	 * we go down holding the pcf locks.
19030Sstevel@tonic-gate 	 */
19040Sstevel@tonic-gate 	panic("no %spage found %d",
19050Sstevel@tonic-gate 	    ((flags & PG_NORELOC) ? "non-reloc " : ""), count);
19060Sstevel@tonic-gate 	/*NOTREACHED*/
19070Sstevel@tonic-gate }
19080Sstevel@tonic-gate 
19090Sstevel@tonic-gate /*
19100Sstevel@tonic-gate  * Create enough pages for "bytes" worth of data starting at
19110Sstevel@tonic-gate  * "off" in "vp".
19120Sstevel@tonic-gate  *
19130Sstevel@tonic-gate  *	Where flag must be one of:
19140Sstevel@tonic-gate  *
19150Sstevel@tonic-gate  *		PG_EXCL:	Exclusive create (fail if any page already
19160Sstevel@tonic-gate  *				exists in the page cache) which does not
19170Sstevel@tonic-gate  *				wait for memory to become available.
19180Sstevel@tonic-gate  *
19190Sstevel@tonic-gate  *		PG_WAIT:	Non-exclusive create which can wait for
19200Sstevel@tonic-gate  *				memory to become available.
19210Sstevel@tonic-gate  *
19220Sstevel@tonic-gate  *		PG_PHYSCONTIG:	Allocate physically contiguous pages.
19230Sstevel@tonic-gate  *				(Not Supported)
19240Sstevel@tonic-gate  *
19250Sstevel@tonic-gate  * A doubly linked list of pages is returned to the caller.  Each page
19260Sstevel@tonic-gate  * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock)
19270Sstevel@tonic-gate  * lock.
19280Sstevel@tonic-gate  *
19290Sstevel@tonic-gate  * Unable to change the parameters to page_create() in a minor release,
19300Sstevel@tonic-gate  * we renamed page_create() to page_create_va(), changed all known calls
19310Sstevel@tonic-gate  * from page_create() to page_create_va(), and created this wrapper.
19320Sstevel@tonic-gate  *
19330Sstevel@tonic-gate  * Upon a major release, we should break compatibility by deleting this
19340Sstevel@tonic-gate  * wrapper, and replacing all the strings "page_create_va", with "page_create".
19350Sstevel@tonic-gate  *
19360Sstevel@tonic-gate  * NOTE: There is a copy of this interface as page_create_io() in
19370Sstevel@tonic-gate  *	 i86/vm/vm_machdep.c. Any bugs fixed here should be applied
19380Sstevel@tonic-gate  *	 there.
19390Sstevel@tonic-gate  */
19400Sstevel@tonic-gate page_t *
19410Sstevel@tonic-gate page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags)
19420Sstevel@tonic-gate {
19430Sstevel@tonic-gate 	caddr_t random_vaddr;
19440Sstevel@tonic-gate 	struct seg kseg;
19450Sstevel@tonic-gate 
19460Sstevel@tonic-gate #ifdef DEBUG
19470Sstevel@tonic-gate 	cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p",
19480Sstevel@tonic-gate 	    (void *)caller());
19490Sstevel@tonic-gate #endif
19500Sstevel@tonic-gate 
19510Sstevel@tonic-gate 	random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^
19520Sstevel@tonic-gate 	    (uintptr_t)(off >> PAGESHIFT));
19530Sstevel@tonic-gate 	kseg.s_as = &kas;
19540Sstevel@tonic-gate 
19550Sstevel@tonic-gate 	return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr));
19560Sstevel@tonic-gate }
19570Sstevel@tonic-gate 
19580Sstevel@tonic-gate #ifdef DEBUG
19590Sstevel@tonic-gate uint32_t pg_alloc_pgs_mtbf = 0;
19600Sstevel@tonic-gate #endif
19610Sstevel@tonic-gate 
19620Sstevel@tonic-gate /*
19630Sstevel@tonic-gate  * Used for large page support. It will attempt to allocate
19640Sstevel@tonic-gate  * a large page(s) off the freelist.
19650Sstevel@tonic-gate  *
19660Sstevel@tonic-gate  * Returns non zero on failure.
19670Sstevel@tonic-gate  */
19680Sstevel@tonic-gate int
1969749Ssusans page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr,
1970749Ssusans     page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz)
19710Sstevel@tonic-gate {
19720Sstevel@tonic-gate 	pgcnt_t		npgs, curnpgs, totpgs;
19730Sstevel@tonic-gate 	size_t		pgsz;
19740Sstevel@tonic-gate 	page_t		*pplist = NULL, *pp;
19750Sstevel@tonic-gate 	int		err = 0;
19760Sstevel@tonic-gate 	lgrp_t		*lgrp;
19770Sstevel@tonic-gate 
19780Sstevel@tonic-gate 	ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1));
19790Sstevel@tonic-gate 
19800Sstevel@tonic-gate 	VM_STAT_ADD(alloc_pages[0]);
19810Sstevel@tonic-gate 
19820Sstevel@tonic-gate #ifdef DEBUG
19830Sstevel@tonic-gate 	if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) {
19840Sstevel@tonic-gate 		return (ENOMEM);
19850Sstevel@tonic-gate 	}
19860Sstevel@tonic-gate #endif
19870Sstevel@tonic-gate 
19880Sstevel@tonic-gate 	pgsz = page_get_pagesize(szc);
19890Sstevel@tonic-gate 	totpgs = curnpgs = npgs = pgsz >> PAGESHIFT;
19900Sstevel@tonic-gate 
19910Sstevel@tonic-gate 	ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0);
19920Sstevel@tonic-gate 	/*
19930Sstevel@tonic-gate 	 * One must be NULL but not both.
19940Sstevel@tonic-gate 	 * And one must be non NULL but not both.
19950Sstevel@tonic-gate 	 */
19960Sstevel@tonic-gate 	ASSERT(basepp != NULL || ppa != NULL);
19970Sstevel@tonic-gate 	ASSERT(basepp == NULL || ppa == NULL);
19980Sstevel@tonic-gate 
19990Sstevel@tonic-gate 	(void) page_create_wait(npgs, PG_WAIT);
20000Sstevel@tonic-gate 
20010Sstevel@tonic-gate 	while (npgs && szc) {
20020Sstevel@tonic-gate 		lgrp = lgrp_mem_choose(seg, addr, pgsz);
2003749Ssusans 		pp = page_get_freelist(vp, 0, seg, addr, pgsz, 0, lgrp);
20040Sstevel@tonic-gate 		if (pp != NULL) {
20050Sstevel@tonic-gate 			VM_STAT_ADD(alloc_pages[1]);
20060Sstevel@tonic-gate 			page_list_concat(&pplist, &pp);
20070Sstevel@tonic-gate 			ASSERT(npgs >= curnpgs);
20080Sstevel@tonic-gate 			npgs -= curnpgs;
20090Sstevel@tonic-gate 		} else if (anypgsz) {
20100Sstevel@tonic-gate 			VM_STAT_ADD(alloc_pages[2]);
20110Sstevel@tonic-gate 			szc--;
20120Sstevel@tonic-gate 			pgsz = page_get_pagesize(szc);
20130Sstevel@tonic-gate 			curnpgs = pgsz >> PAGESHIFT;
20140Sstevel@tonic-gate 		} else {
20150Sstevel@tonic-gate 			VM_STAT_ADD(alloc_pages[3]);
20160Sstevel@tonic-gate 			ASSERT(npgs == totpgs);
20170Sstevel@tonic-gate 			page_create_putback(npgs);
20180Sstevel@tonic-gate 			return (ENOMEM);
20190Sstevel@tonic-gate 		}
20200Sstevel@tonic-gate 	}
20210Sstevel@tonic-gate 	if (szc == 0) {
20220Sstevel@tonic-gate 		VM_STAT_ADD(alloc_pages[4]);
20230Sstevel@tonic-gate 		ASSERT(npgs != 0);
20240Sstevel@tonic-gate 		page_create_putback(npgs);
20250Sstevel@tonic-gate 		err = ENOMEM;
20260Sstevel@tonic-gate 	} else if (basepp != NULL) {
20270Sstevel@tonic-gate 		ASSERT(npgs == 0);
20280Sstevel@tonic-gate 		ASSERT(ppa == NULL);
20290Sstevel@tonic-gate 		*basepp = pplist;
20300Sstevel@tonic-gate 	}
20310Sstevel@tonic-gate 
20320Sstevel@tonic-gate 	npgs = totpgs - npgs;
20330Sstevel@tonic-gate 	pp = pplist;
20340Sstevel@tonic-gate 
20350Sstevel@tonic-gate 	/*
20360Sstevel@tonic-gate 	 * Clear the free and age bits. Also if we were passed in a ppa then
20370Sstevel@tonic-gate 	 * fill it in with all the constituent pages from the large page. But
20380Sstevel@tonic-gate 	 * if we failed to allocate all the pages just free what we got.
20390Sstevel@tonic-gate 	 */
20400Sstevel@tonic-gate 	while (npgs != 0) {
20410Sstevel@tonic-gate 		ASSERT(PP_ISFREE(pp));
20420Sstevel@tonic-gate 		ASSERT(PP_ISAGED(pp));
20430Sstevel@tonic-gate 		if (ppa != NULL || err != 0) {
20440Sstevel@tonic-gate 			if (err == 0) {
20450Sstevel@tonic-gate 				VM_STAT_ADD(alloc_pages[5]);
20460Sstevel@tonic-gate 				PP_CLRFREE(pp);
20470Sstevel@tonic-gate 				PP_CLRAGED(pp);
20480Sstevel@tonic-gate 				page_sub(&pplist, pp);
20490Sstevel@tonic-gate 				*ppa++ = pp;
20500Sstevel@tonic-gate 				npgs--;
20510Sstevel@tonic-gate 			} else {
20520Sstevel@tonic-gate 				VM_STAT_ADD(alloc_pages[6]);
20530Sstevel@tonic-gate 				ASSERT(pp->p_szc != 0);
20540Sstevel@tonic-gate 				curnpgs = page_get_pagecnt(pp->p_szc);
20550Sstevel@tonic-gate 				page_list_break(&pp, &pplist, curnpgs);
20560Sstevel@tonic-gate 				page_list_add_pages(pp, 0);
20570Sstevel@tonic-gate 				page_create_putback(curnpgs);
20580Sstevel@tonic-gate 				ASSERT(npgs >= curnpgs);
20590Sstevel@tonic-gate 				npgs -= curnpgs;
20600Sstevel@tonic-gate 			}
20610Sstevel@tonic-gate 			pp = pplist;
20620Sstevel@tonic-gate 		} else {
20630Sstevel@tonic-gate 			VM_STAT_ADD(alloc_pages[7]);
20640Sstevel@tonic-gate 			PP_CLRFREE(pp);
20650Sstevel@tonic-gate 			PP_CLRAGED(pp);
20660Sstevel@tonic-gate 			pp = pp->p_next;
20670Sstevel@tonic-gate 			npgs--;
20680Sstevel@tonic-gate 		}
20690Sstevel@tonic-gate 	}
20700Sstevel@tonic-gate 	return (err);
20710Sstevel@tonic-gate }
20720Sstevel@tonic-gate 
20730Sstevel@tonic-gate /*
20740Sstevel@tonic-gate  * Get a single large page off of the freelists, and set it up for use.
20750Sstevel@tonic-gate  * Number of bytes requested must be a supported page size.
20760Sstevel@tonic-gate  *
20770Sstevel@tonic-gate  * Note that this call may fail even if there is sufficient
20780Sstevel@tonic-gate  * memory available or PG_WAIT is set, so the caller must
20790Sstevel@tonic-gate  * be willing to fallback on page_create_va(), block and retry,
20800Sstevel@tonic-gate  * or fail the requester.
20810Sstevel@tonic-gate  */
20820Sstevel@tonic-gate page_t *
20830Sstevel@tonic-gate page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags,
20840Sstevel@tonic-gate     struct seg *seg, caddr_t vaddr, void *arg)
20850Sstevel@tonic-gate {
20860Sstevel@tonic-gate 	pgcnt_t		npages, pcftotal;
20870Sstevel@tonic-gate 	page_t		*pp;
20880Sstevel@tonic-gate 	page_t		*rootpp;
20890Sstevel@tonic-gate 	lgrp_t		*lgrp;
20900Sstevel@tonic-gate 	uint_t		enough;
20910Sstevel@tonic-gate 	uint_t		pcf_index;
20920Sstevel@tonic-gate 	uint_t		i;
20930Sstevel@tonic-gate 	struct pcf	*p;
20940Sstevel@tonic-gate 	struct pcf	*q;
20950Sstevel@tonic-gate 	lgrp_id_t	*lgrpid = (lgrp_id_t *)arg;
20960Sstevel@tonic-gate 
20970Sstevel@tonic-gate 	ASSERT(vp != NULL);
20980Sstevel@tonic-gate 
20990Sstevel@tonic-gate 	ASSERT((flags & ~(PG_EXCL | PG_WAIT |
21000Sstevel@tonic-gate 		    PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0);
21010Sstevel@tonic-gate 	/* but no others */
21020Sstevel@tonic-gate 
21030Sstevel@tonic-gate 	ASSERT((flags & PG_EXCL) == PG_EXCL);
21040Sstevel@tonic-gate 
21050Sstevel@tonic-gate 	npages = btop(bytes);
21060Sstevel@tonic-gate 
21070Sstevel@tonic-gate 	if (!kcage_on || panicstr) {
21080Sstevel@tonic-gate 		/*
21090Sstevel@tonic-gate 		 * Cage is OFF, or we are single threaded in
21100Sstevel@tonic-gate 		 * panic, so make everything a RELOC request.
21110Sstevel@tonic-gate 		 */
21120Sstevel@tonic-gate 		flags &= ~PG_NORELOC;
21130Sstevel@tonic-gate 	}
21140Sstevel@tonic-gate 
21150Sstevel@tonic-gate 	/*
21160Sstevel@tonic-gate 	 * Make sure there's adequate physical memory available.
21170Sstevel@tonic-gate 	 * Note: PG_WAIT is ignored here.
21180Sstevel@tonic-gate 	 */
21190Sstevel@tonic-gate 	if (freemem <= throttlefree + npages) {
21200Sstevel@tonic-gate 		VM_STAT_ADD(page_create_large_cnt[1]);
21210Sstevel@tonic-gate 		return (NULL);
21220Sstevel@tonic-gate 	}
21230Sstevel@tonic-gate 
21240Sstevel@tonic-gate 	/*
21250Sstevel@tonic-gate 	 * If cage is on, dampen draw from cage when available
21260Sstevel@tonic-gate 	 * cage space is low.
21270Sstevel@tonic-gate 	 */
21280Sstevel@tonic-gate 	if ((flags & (PG_NORELOC | PG_WAIT)) ==  (PG_NORELOC | PG_WAIT) &&
21290Sstevel@tonic-gate 	    kcage_freemem < kcage_throttlefree + npages) {
21300Sstevel@tonic-gate 
21310Sstevel@tonic-gate 		/*
21320Sstevel@tonic-gate 		 * The cage is on, the caller wants PG_NORELOC
21330Sstevel@tonic-gate 		 * pages and available cage memory is very low.
21340Sstevel@tonic-gate 		 * Call kcage_create_throttle() to attempt to
21350Sstevel@tonic-gate 		 * control demand on the cage.
21360Sstevel@tonic-gate 		 */
21370Sstevel@tonic-gate 		if (kcage_create_throttle(npages, flags) == KCT_FAILURE) {
21380Sstevel@tonic-gate 			VM_STAT_ADD(page_create_large_cnt[2]);
21390Sstevel@tonic-gate 			return (NULL);
21400Sstevel@tonic-gate 		}
21410Sstevel@tonic-gate 	}
21420Sstevel@tonic-gate 
21430Sstevel@tonic-gate 	enough = 0;
21440Sstevel@tonic-gate 	pcf_index = PCF_INDEX();
21450Sstevel@tonic-gate 	p = &pcf[pcf_index];
21460Sstevel@tonic-gate 	p->pcf_touch = 1;
21470Sstevel@tonic-gate 	q = &pcf[PCF_FANOUT];
21480Sstevel@tonic-gate 	for (pcftotal = 0, i = 0; i < PCF_FANOUT; i++) {
21490Sstevel@tonic-gate 		if (p->pcf_count > npages) {
21500Sstevel@tonic-gate 			/*
21510Sstevel@tonic-gate 			 * a good one to try.
21520Sstevel@tonic-gate 			 */
21530Sstevel@tonic-gate 			mutex_enter(&p->pcf_lock);
21540Sstevel@tonic-gate 			if (p->pcf_count > npages) {
21550Sstevel@tonic-gate 				p->pcf_count -= (uint_t)npages;
21560Sstevel@tonic-gate 				/*
21570Sstevel@tonic-gate 				 * freemem is not protected by any lock.
21580Sstevel@tonic-gate 				 * Thus, we cannot have any assertion
21590Sstevel@tonic-gate 				 * containing freemem here.
21600Sstevel@tonic-gate 				 */
21610Sstevel@tonic-gate 				freemem -= npages;
21620Sstevel@tonic-gate 				enough = 1;
21630Sstevel@tonic-gate 				mutex_exit(&p->pcf_lock);
21640Sstevel@tonic-gate 				break;
21650Sstevel@tonic-gate 			}
21660Sstevel@tonic-gate 			mutex_exit(&p->pcf_lock);
21670Sstevel@tonic-gate 		}
21680Sstevel@tonic-gate 		pcftotal += p->pcf_count;
21690Sstevel@tonic-gate 		p++;
21700Sstevel@tonic-gate 		if (p >= q) {
21710Sstevel@tonic-gate 			p = pcf;
21720Sstevel@tonic-gate 		}
21730Sstevel@tonic-gate 		p->pcf_touch = 1;
21740Sstevel@tonic-gate 	}
21750Sstevel@tonic-gate 
21760Sstevel@tonic-gate 	if (!enough) {
21770Sstevel@tonic-gate 		/* If there isn't enough memory available, give up. */
21780Sstevel@tonic-gate 		if (pcftotal < npages) {
21790Sstevel@tonic-gate 			VM_STAT_ADD(page_create_large_cnt[3]);
21800Sstevel@tonic-gate 			return (NULL);
21810Sstevel@tonic-gate 		}
21820Sstevel@tonic-gate 
21830Sstevel@tonic-gate 		/* try to collect pages from several pcf bins */
21840Sstevel@tonic-gate 		for (p = pcf, pcftotal = 0, i = 0; i < PCF_FANOUT; i++) {
21850Sstevel@tonic-gate 			p->pcf_touch = 1;
21860Sstevel@tonic-gate 			mutex_enter(&p->pcf_lock);
21870Sstevel@tonic-gate 			pcftotal += p->pcf_count;
21880Sstevel@tonic-gate 			if (pcftotal >= npages) {
21890Sstevel@tonic-gate 				/*
21900Sstevel@tonic-gate 				 * Wow!  There are enough pages laying around
21910Sstevel@tonic-gate 				 * to satisfy the request.  Do the accounting,
21920Sstevel@tonic-gate 				 * drop the locks we acquired, and go back.
21930Sstevel@tonic-gate 				 *
21940Sstevel@tonic-gate 				 * freemem is not protected by any lock. So,
21950Sstevel@tonic-gate 				 * we cannot have any assertion containing
21960Sstevel@tonic-gate 				 * freemem.
21970Sstevel@tonic-gate 				 */
21980Sstevel@tonic-gate 				pgcnt_t	tpages = npages;
21990Sstevel@tonic-gate 				freemem -= npages;
22000Sstevel@tonic-gate 				while (p >= pcf) {
22010Sstevel@tonic-gate 					if (p->pcf_count <= tpages) {
22020Sstevel@tonic-gate 						tpages -= p->pcf_count;
22030Sstevel@tonic-gate 						p->pcf_count = 0;
22040Sstevel@tonic-gate 					} else {
22050Sstevel@tonic-gate 						p->pcf_count -= (uint_t)tpages;
22060Sstevel@tonic-gate 						tpages = 0;
22070Sstevel@tonic-gate 					}
22080Sstevel@tonic-gate 					mutex_exit(&p->pcf_lock);
22090Sstevel@tonic-gate 					p--;
22100Sstevel@tonic-gate 				}
22110Sstevel@tonic-gate 				ASSERT(tpages == 0);
22120Sstevel@tonic-gate 				break;
22130Sstevel@tonic-gate 			}
22140Sstevel@tonic-gate 			p++;
22150Sstevel@tonic-gate 		}
22160Sstevel@tonic-gate 		if (i == PCF_FANOUT) {
22170Sstevel@tonic-gate 			/* failed to collect pages - release the locks */
22180Sstevel@tonic-gate 			while (--p >= pcf) {
22190Sstevel@tonic-gate 				mutex_exit(&p->pcf_lock);
22200Sstevel@tonic-gate 			}
22210Sstevel@tonic-gate 			VM_STAT_ADD(page_create_large_cnt[4]);
22220Sstevel@tonic-gate 			return (NULL);
22230Sstevel@tonic-gate 		}
22240Sstevel@tonic-gate 	}
22250Sstevel@tonic-gate 
22260Sstevel@tonic-gate 	/*
22270Sstevel@tonic-gate 	 * This is where this function behaves fundamentally differently
22280Sstevel@tonic-gate 	 * than page_create_va(); since we're intending to map the page
22290Sstevel@tonic-gate 	 * with a single TTE, we have to get it as a physically contiguous
22300Sstevel@tonic-gate 	 * hardware pagesize chunk.  If we can't, we fail.
22310Sstevel@tonic-gate 	 */
22320Sstevel@tonic-gate 	if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max &&
22330Sstevel@tonic-gate 		LGRP_EXISTS(lgrp_table[*lgrpid]))
22340Sstevel@tonic-gate 		lgrp = lgrp_table[*lgrpid];
22350Sstevel@tonic-gate 	else
22360Sstevel@tonic-gate 		lgrp = lgrp_mem_choose(seg, vaddr, bytes);
22370Sstevel@tonic-gate 
22380Sstevel@tonic-gate 	if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr,
22390Sstevel@tonic-gate 	    bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) {
22400Sstevel@tonic-gate 		page_create_putback(npages);
22410Sstevel@tonic-gate 		VM_STAT_ADD(page_create_large_cnt[5]);
22420Sstevel@tonic-gate 		return (NULL);
22430Sstevel@tonic-gate 	}
22440Sstevel@tonic-gate 
22450Sstevel@tonic-gate 	/*
22460Sstevel@tonic-gate 	 * if we got the page with the wrong mtype give it back this is a
22470Sstevel@tonic-gate 	 * workaround for CR 6249718. When CR 6249718 is fixed we never get
22480Sstevel@tonic-gate 	 * inside "if" and the workaround becomes just a nop
22490Sstevel@tonic-gate 	 */
22500Sstevel@tonic-gate 	if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) {
22510Sstevel@tonic-gate 		page_list_add_pages(rootpp, 0);
22520Sstevel@tonic-gate 		page_create_putback(npages);
22530Sstevel@tonic-gate 		VM_STAT_ADD(page_create_large_cnt[6]);
22540Sstevel@tonic-gate 		return (NULL);
22550Sstevel@tonic-gate 	}
22560Sstevel@tonic-gate 
22570Sstevel@tonic-gate 	/*
22580Sstevel@tonic-gate 	 * If satisfying this request has left us with too little
22590Sstevel@tonic-gate 	 * memory, start the wheels turning to get some back.  The
22600Sstevel@tonic-gate 	 * first clause of the test prevents waking up the pageout
22610Sstevel@tonic-gate 	 * daemon in situations where it would decide that there's
22620Sstevel@tonic-gate 	 * nothing to do.
22630Sstevel@tonic-gate 	 */
22640Sstevel@tonic-gate 	if (nscan < desscan && freemem < minfree) {
22650Sstevel@tonic-gate 		TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
22660Sstevel@tonic-gate 		    "pageout_cv_signal:freemem %ld", freemem);
22670Sstevel@tonic-gate 		cv_signal(&proc_pageout->p_cv);
22680Sstevel@tonic-gate 	}
22690Sstevel@tonic-gate 
22700Sstevel@tonic-gate 	pp = rootpp;
22710Sstevel@tonic-gate 	while (npages--) {
22720Sstevel@tonic-gate 		ASSERT(PAGE_EXCL(pp));
22730Sstevel@tonic-gate 		ASSERT(pp->p_vnode == NULL);
22740Sstevel@tonic-gate 		ASSERT(!hat_page_is_mapped(pp));
22750Sstevel@tonic-gate 		PP_CLRFREE(pp);
22760Sstevel@tonic-gate 		PP_CLRAGED(pp);
22770Sstevel@tonic-gate 		if (!page_hashin(pp, vp, off, NULL))
22780Sstevel@tonic-gate 			panic("page_create_large: hashin failed: page %p",
22790Sstevel@tonic-gate 			    (void *)pp);
22800Sstevel@tonic-gate 		page_io_lock(pp);
22810Sstevel@tonic-gate 		off += PAGESIZE;
22820Sstevel@tonic-gate 		pp = pp->p_next;
22830Sstevel@tonic-gate 	}
22840Sstevel@tonic-gate 
22850Sstevel@tonic-gate 	VM_STAT_ADD(page_create_large_cnt[0]);
22860Sstevel@tonic-gate 	return (rootpp);
22870Sstevel@tonic-gate }
22880Sstevel@tonic-gate 
22890Sstevel@tonic-gate page_t *
22900Sstevel@tonic-gate page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags,
22910Sstevel@tonic-gate     struct seg *seg, caddr_t vaddr)
22920Sstevel@tonic-gate {
22930Sstevel@tonic-gate 	page_t		*plist = NULL;
22940Sstevel@tonic-gate 	pgcnt_t		npages;
22950Sstevel@tonic-gate 	pgcnt_t		found_on_free = 0;
22960Sstevel@tonic-gate 	pgcnt_t		pages_req;
22970Sstevel@tonic-gate 	page_t		*npp = NULL;
22980Sstevel@tonic-gate 	uint_t		enough;
22990Sstevel@tonic-gate 	uint_t		i;
23000Sstevel@tonic-gate 	uint_t		pcf_index;
23010Sstevel@tonic-gate 	struct pcf	*p;
23020Sstevel@tonic-gate 	struct pcf	*q;
23030Sstevel@tonic-gate 	lgrp_t		*lgrp;
23040Sstevel@tonic-gate 
23050Sstevel@tonic-gate 	TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START,
23060Sstevel@tonic-gate 		"page_create_start:vp %p off %llx bytes %lu flags %x",
23070Sstevel@tonic-gate 		vp, off, bytes, flags);
23080Sstevel@tonic-gate 
23090Sstevel@tonic-gate 	ASSERT(bytes != 0 && vp != NULL);
23100Sstevel@tonic-gate 
23110Sstevel@tonic-gate 	if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) {
23120Sstevel@tonic-gate 		panic("page_create: invalid flags");
23130Sstevel@tonic-gate 		/*NOTREACHED*/
23140Sstevel@tonic-gate 	}
23150Sstevel@tonic-gate 	ASSERT((flags & ~(PG_EXCL | PG_WAIT |
23160Sstevel@tonic-gate 	    PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0);
23170Sstevel@tonic-gate 	    /* but no others */
23180Sstevel@tonic-gate 
23190Sstevel@tonic-gate 	pages_req = npages = btopr(bytes);
23200Sstevel@tonic-gate 	/*
23210Sstevel@tonic-gate 	 * Try to see whether request is too large to *ever* be
23220Sstevel@tonic-gate 	 * satisfied, in order to prevent deadlock.  We arbitrarily
23230Sstevel@tonic-gate 	 * decide to limit maximum size requests to max_page_get.
23240Sstevel@tonic-gate 	 */
23250Sstevel@tonic-gate 	if (npages >= max_page_get) {
23260Sstevel@tonic-gate 		if ((flags & PG_WAIT) == 0) {
23270Sstevel@tonic-gate 			TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG,
23280Sstevel@tonic-gate 			    "page_create_toobig:vp %p off %llx npages "
23290Sstevel@tonic-gate 			    "%lu max_page_get %lu",
23300Sstevel@tonic-gate 			    vp, off, npages, max_page_get);
23310Sstevel@tonic-gate 			return (NULL);
23320Sstevel@tonic-gate 		} else {
23330Sstevel@tonic-gate 			cmn_err(CE_WARN,
23340Sstevel@tonic-gate 			    "Request for too much kernel memory "
23350Sstevel@tonic-gate 			    "(%lu bytes), will hang forever", bytes);
23360Sstevel@tonic-gate 			for (;;)
23370Sstevel@tonic-gate 				delay(1000000000);
23380Sstevel@tonic-gate 		}
23390Sstevel@tonic-gate 	}
23400Sstevel@tonic-gate 
23410Sstevel@tonic-gate 	if (!kcage_on || panicstr) {
23420Sstevel@tonic-gate 		/*
23430Sstevel@tonic-gate 		 * Cage is OFF, or we are single threaded in
23440Sstevel@tonic-gate 		 * panic, so make everything a RELOC request.
23450Sstevel@tonic-gate 		 */
23460Sstevel@tonic-gate 		flags &= ~PG_NORELOC;
23470Sstevel@tonic-gate 	}
23480Sstevel@tonic-gate 
23490Sstevel@tonic-gate 	if (freemem <= throttlefree + npages)
23500Sstevel@tonic-gate 		if (!page_create_throttle(npages, flags))
23510Sstevel@tonic-gate 			return (NULL);
23520Sstevel@tonic-gate 
23530Sstevel@tonic-gate 	/*
23540Sstevel@tonic-gate 	 * If cage is on, dampen draw from cage when available
23550Sstevel@tonic-gate 	 * cage space is low.
23560Sstevel@tonic-gate 	 */
23570Sstevel@tonic-gate 	if ((flags & PG_NORELOC) &&
23580Sstevel@tonic-gate 		kcage_freemem < kcage_throttlefree + npages) {
23590Sstevel@tonic-gate 
23600Sstevel@tonic-gate 		/*
23610Sstevel@tonic-gate 		 * The cage is on, the caller wants PG_NORELOC
23620Sstevel@tonic-gate 		 * pages and available cage memory is very low.
23630Sstevel@tonic-gate 		 * Call kcage_create_throttle() to attempt to
23640Sstevel@tonic-gate 		 * control demand on the cage.
23650Sstevel@tonic-gate 		 */
23660Sstevel@tonic-gate 		if (kcage_create_throttle(npages, flags) == KCT_FAILURE)
23670Sstevel@tonic-gate 			return (NULL);
23680Sstevel@tonic-gate 	}
23690Sstevel@tonic-gate 
23700Sstevel@tonic-gate 	VM_STAT_ADD(page_create_cnt[0]);
23710Sstevel@tonic-gate 
23720Sstevel@tonic-gate 	enough = 0;
23730Sstevel@tonic-gate 	pcf_index = PCF_INDEX();
23740Sstevel@tonic-gate 
23750Sstevel@tonic-gate 	p = &pcf[pcf_index];
23760Sstevel@tonic-gate 	p->pcf_touch = 1;
23770Sstevel@tonic-gate 	q = &pcf[PCF_FANOUT];
23780Sstevel@tonic-gate 	for (i = 0; i < PCF_FANOUT; i++) {
23790Sstevel@tonic-gate 		if (p->pcf_count > npages) {
23800Sstevel@tonic-gate 			/*
23810Sstevel@tonic-gate 			 * a good one to try.
23820Sstevel@tonic-gate 			 */
23830Sstevel@tonic-gate 			mutex_enter(&p->pcf_lock);
23840Sstevel@tonic-gate 			if (p->pcf_count > npages) {
23850Sstevel@tonic-gate 				p->pcf_count -= (uint_t)npages;
23860Sstevel@tonic-gate 				/*
23870Sstevel@tonic-gate 				 * freemem is not protected by any lock.
23880Sstevel@tonic-gate 				 * Thus, we cannot have any assertion
23890Sstevel@tonic-gate 				 * containing freemem here.
23900Sstevel@tonic-gate 				 */
23910Sstevel@tonic-gate 				freemem -= npages;
23920Sstevel@tonic-gate 				enough = 1;
23930Sstevel@tonic-gate 				mutex_exit(&p->pcf_lock);
23940Sstevel@tonic-gate 				break;
23950Sstevel@tonic-gate 			}
23960Sstevel@tonic-gate 			mutex_exit(&p->pcf_lock);
23970Sstevel@tonic-gate 		}
23980Sstevel@tonic-gate 		p++;
23990Sstevel@tonic-gate 		if (p >= q) {
24000Sstevel@tonic-gate 			p = pcf;
24010Sstevel@tonic-gate 		}
24020Sstevel@tonic-gate 		p->pcf_touch = 1;
24030Sstevel@tonic-gate 	}
24040Sstevel@tonic-gate 
24050Sstevel@tonic-gate 	if (!enough) {
24060Sstevel@tonic-gate 		/*
24070Sstevel@tonic-gate 		 * Have to look harder.  If npages is greater than
24080Sstevel@tonic-gate 		 * one, then we might have to coalecse the counters.
24090Sstevel@tonic-gate 		 *
24100Sstevel@tonic-gate 		 * Go wait.  We come back having accounted
24110Sstevel@tonic-gate 		 * for the memory.
24120Sstevel@tonic-gate 		 */
24130Sstevel@tonic-gate 		VM_STAT_ADD(page_create_cnt[1]);
24140Sstevel@tonic-gate 		if (!page_create_wait(npages, flags)) {
24150Sstevel@tonic-gate 			VM_STAT_ADD(page_create_cnt[2]);
24160Sstevel@tonic-gate 			return (NULL);
24170Sstevel@tonic-gate 		}
24180Sstevel@tonic-gate 	}
24190Sstevel@tonic-gate 
24200Sstevel@tonic-gate 	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS,
24210Sstevel@tonic-gate 		"page_create_success:vp %p off %llx", vp, off);
24220Sstevel@tonic-gate 
24230Sstevel@tonic-gate 	/*
24240Sstevel@tonic-gate 	 * If satisfying this request has left us with too little
24250Sstevel@tonic-gate 	 * memory, start the wheels turning to get some back.  The
24260Sstevel@tonic-gate 	 * first clause of the test prevents waking up the pageout
24270Sstevel@tonic-gate 	 * daemon in situations where it would decide that there's
24280Sstevel@tonic-gate 	 * nothing to do.
24290Sstevel@tonic-gate 	 */
24300Sstevel@tonic-gate 	if (nscan < desscan && freemem < minfree) {
24310Sstevel@tonic-gate 		TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
24320Sstevel@tonic-gate 			"pageout_cv_signal:freemem %ld", freemem);
24330Sstevel@tonic-gate 		cv_signal(&proc_pageout->p_cv);
24340Sstevel@tonic-gate 	}
24350Sstevel@tonic-gate 
24360Sstevel@tonic-gate 	/*
24370Sstevel@tonic-gate 	 * Loop around collecting the requested number of pages.
24380Sstevel@tonic-gate 	 * Most of the time, we have to `create' a new page. With
24390Sstevel@tonic-gate 	 * this in mind, pull the page off the free list before
24400Sstevel@tonic-gate 	 * getting the hash lock.  This will minimize the hash
24410Sstevel@tonic-gate 	 * lock hold time, nesting, and the like.  If it turns
24420Sstevel@tonic-gate 	 * out we don't need the page, we put it back at the end.
24430Sstevel@tonic-gate 	 */
24440Sstevel@tonic-gate 	while (npages--) {
24450Sstevel@tonic-gate 		page_t		*pp;
24460Sstevel@tonic-gate 		kmutex_t	*phm = NULL;
24470Sstevel@tonic-gate 		ulong_t		index;
24480Sstevel@tonic-gate 
24490Sstevel@tonic-gate 		index = PAGE_HASH_FUNC(vp, off);
24500Sstevel@tonic-gate top:
24510Sstevel@tonic-gate 		ASSERT(phm == NULL);
24520Sstevel@tonic-gate 		ASSERT(index == PAGE_HASH_FUNC(vp, off));
24530Sstevel@tonic-gate 		ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
24540Sstevel@tonic-gate 
24550Sstevel@tonic-gate 		if (npp == NULL) {
24560Sstevel@tonic-gate 			/*
24570Sstevel@tonic-gate 			 * Try to get a page from the freelist (ie,
24580Sstevel@tonic-gate 			 * a page with no [vp, off] tag).  If that
24590Sstevel@tonic-gate 			 * fails, use the cachelist.
24600Sstevel@tonic-gate 			 *
24610Sstevel@tonic-gate 			 * During the first attempt at both the free
24620Sstevel@tonic-gate 			 * and cache lists we try for the correct color.
24630Sstevel@tonic-gate 			 */
24640Sstevel@tonic-gate 			/*
24650Sstevel@tonic-gate 			 * XXXX-how do we deal with virtual indexed
24660Sstevel@tonic-gate 			 * caches and and colors?
24670Sstevel@tonic-gate 			 */
24680Sstevel@tonic-gate 			VM_STAT_ADD(page_create_cnt[4]);
24690Sstevel@tonic-gate 			/*
24700Sstevel@tonic-gate 			 * Get lgroup to allocate next page of shared memory
24710Sstevel@tonic-gate 			 * from and use it to specify where to allocate
24720Sstevel@tonic-gate 			 * the physical memory
24730Sstevel@tonic-gate 			 */
24740Sstevel@tonic-gate 			lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);
24750Sstevel@tonic-gate 			npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
24760Sstevel@tonic-gate 			    flags | PG_MATCH_COLOR, lgrp);
24770Sstevel@tonic-gate 			if (npp == NULL) {
24780Sstevel@tonic-gate 				npp = page_get_cachelist(vp, off, seg,
24790Sstevel@tonic-gate 				    vaddr, flags | PG_MATCH_COLOR, lgrp);
24800Sstevel@tonic-gate 				if (npp == NULL) {
24810Sstevel@tonic-gate 					npp = page_create_get_something(vp,
24820Sstevel@tonic-gate 					    off, seg, vaddr,
24830Sstevel@tonic-gate 					    flags & ~PG_MATCH_COLOR);
24840Sstevel@tonic-gate 				}
24850Sstevel@tonic-gate 
24860Sstevel@tonic-gate 				if (PP_ISAGED(npp) == 0) {
24870Sstevel@tonic-gate 					/*
24880Sstevel@tonic-gate 					 * Since this page came from the
24890Sstevel@tonic-gate 					 * cachelist, we must destroy the
24900Sstevel@tonic-gate 					 * old vnode association.
24910Sstevel@tonic-gate 					 */
24920Sstevel@tonic-gate 					page_hashout(npp, NULL);
24930Sstevel@tonic-gate 				}
24940Sstevel@tonic-gate 			}
24950Sstevel@tonic-gate 		}
24960Sstevel@tonic-gate 
24970Sstevel@tonic-gate 		/*
24980Sstevel@tonic-gate 		 * We own this page!
24990Sstevel@tonic-gate 		 */
25000Sstevel@tonic-gate 		ASSERT(PAGE_EXCL(npp));
25010Sstevel@tonic-gate 		ASSERT(npp->p_vnode == NULL);
25020Sstevel@tonic-gate 		ASSERT(!hat_page_is_mapped(npp));
25030Sstevel@tonic-gate 		PP_CLRFREE(npp);
25040Sstevel@tonic-gate 		PP_CLRAGED(npp);
25050Sstevel@tonic-gate 
25060Sstevel@tonic-gate 		/*
25070Sstevel@tonic-gate 		 * Here we have a page in our hot little mits and are
25080Sstevel@tonic-gate 		 * just waiting to stuff it on the appropriate lists.
25090Sstevel@tonic-gate 		 * Get the mutex and check to see if it really does
25100Sstevel@tonic-gate 		 * not exist.
25110Sstevel@tonic-gate 		 */
25120Sstevel@tonic-gate 		phm = PAGE_HASH_MUTEX(index);
25130Sstevel@tonic-gate 		mutex_enter(phm);
25140Sstevel@tonic-gate 		PAGE_HASH_SEARCH(index, pp, vp, off);
25150Sstevel@tonic-gate 		if (pp == NULL) {
25160Sstevel@tonic-gate 			VM_STAT_ADD(page_create_new);
25170Sstevel@tonic-gate 			pp = npp;
25180Sstevel@tonic-gate 			npp = NULL;
25190Sstevel@tonic-gate 			if (!page_hashin(pp, vp, off, phm)) {
25200Sstevel@tonic-gate 				/*
25210Sstevel@tonic-gate 				 * Since we hold the page hash mutex and
25220Sstevel@tonic-gate 				 * just searched for this page, page_hashin
25230Sstevel@tonic-gate 				 * had better not fail.  If it does, that
25240Sstevel@tonic-gate 				 * means somethread did not follow the
25250Sstevel@tonic-gate 				 * page hash mutex rules.  Panic now and
25260Sstevel@tonic-gate 				 * get it over with.  As usual, go down
25270Sstevel@tonic-gate 				 * holding all the locks.
25280Sstevel@tonic-gate 				 */
25290Sstevel@tonic-gate 				ASSERT(MUTEX_HELD(phm));
25300Sstevel@tonic-gate 				panic("page_create: "
25310Sstevel@tonic-gate 				    "hashin failed %p %p %llx %p",
25320Sstevel@tonic-gate 				    (void *)pp, (void *)vp, off, (void *)phm);
25330Sstevel@tonic-gate 				/*NOTREACHED*/
25340Sstevel@tonic-gate 			}
25350Sstevel@tonic-gate 			ASSERT(MUTEX_HELD(phm));
25360Sstevel@tonic-gate 			mutex_exit(phm);
25370Sstevel@tonic-gate 			phm = NULL;
25380Sstevel@tonic-gate 
25390Sstevel@tonic-gate 			/*
25400Sstevel@tonic-gate 			 * Hat layer locking need not be done to set
25410Sstevel@tonic-gate 			 * the following bits since the page is not hashed
25420Sstevel@tonic-gate 			 * and was on the free list (i.e., had no mappings).
25430Sstevel@tonic-gate 			 *
25440Sstevel@tonic-gate 			 * Set the reference bit to protect
25450Sstevel@tonic-gate 			 * against immediate pageout
25460Sstevel@tonic-gate 			 *
25470Sstevel@tonic-gate 			 * XXXmh modify freelist code to set reference
25480Sstevel@tonic-gate 			 * bit so we don't have to do it here.
25490Sstevel@tonic-gate 			 */
25500Sstevel@tonic-gate 			page_set_props(pp, P_REF);
25510Sstevel@tonic-gate 			found_on_free++;
25520Sstevel@tonic-gate 		} else {
25530Sstevel@tonic-gate 			VM_STAT_ADD(page_create_exists);
25540Sstevel@tonic-gate 			if (flags & PG_EXCL) {
25550Sstevel@tonic-gate 				/*
25560Sstevel@tonic-gate 				 * Found an existing page, and the caller
25570Sstevel@tonic-gate 				 * wanted all new pages.  Undo all of the work
25580Sstevel@tonic-gate 				 * we have done.
25590Sstevel@tonic-gate 				 */
25600Sstevel@tonic-gate 				mutex_exit(phm);
25610Sstevel@tonic-gate 				phm = NULL;
25620Sstevel@tonic-gate 				while (plist != NULL) {
25630Sstevel@tonic-gate 					pp = plist;
25640Sstevel@tonic-gate 					page_sub(&plist, pp);
25650Sstevel@tonic-gate 					page_io_unlock(pp);
25660Sstevel@tonic-gate 					/* large pages should not end up here */
25670Sstevel@tonic-gate 					ASSERT(pp->p_szc == 0);
25680Sstevel@tonic-gate 					/*LINTED: constant in conditional ctx*/
25690Sstevel@tonic-gate 					VN_DISPOSE(pp, B_INVAL, 0, kcred);
25700Sstevel@tonic-gate 				}
25710Sstevel@tonic-gate 				VM_STAT_ADD(page_create_found_one);
25720Sstevel@tonic-gate 				goto fail;
25730Sstevel@tonic-gate 			}
25740Sstevel@tonic-gate 			ASSERT(flags & PG_WAIT);
25750Sstevel@tonic-gate 			if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) {
25760Sstevel@tonic-gate 				/*
25770Sstevel@tonic-gate 				 * Start all over again if we blocked trying
25780Sstevel@tonic-gate 				 * to lock the page.
25790Sstevel@tonic-gate 				 */
25800Sstevel@tonic-gate 				mutex_exit(phm);
25810Sstevel@tonic-gate 				VM_STAT_ADD(page_create_page_lock_failed);
25820Sstevel@tonic-gate 				phm = NULL;
25830Sstevel@tonic-gate 				goto top;
25840Sstevel@tonic-gate 			}
25850Sstevel@tonic-gate 			mutex_exit(phm);
25860Sstevel@tonic-gate 			phm = NULL;
25870Sstevel@tonic-gate 
25880Sstevel@tonic-gate 			if (PP_ISFREE(pp)) {
25890Sstevel@tonic-gate 				ASSERT(PP_ISAGED(pp) == 0);
25900Sstevel@tonic-gate 				VM_STAT_ADD(pagecnt.pc_get_cache);
25910Sstevel@tonic-gate 				page_list_sub(pp, PG_CACHE_LIST);
25920Sstevel@tonic-gate 				PP_CLRFREE(pp);
25930Sstevel@tonic-gate 				found_on_free++;
25940Sstevel@tonic-gate 			}
25950Sstevel@tonic-gate 		}
25960Sstevel@tonic-gate 
25970Sstevel@tonic-gate 		/*
25980Sstevel@tonic-gate 		 * Got a page!  It is locked.  Acquire the i/o
25990Sstevel@tonic-gate 		 * lock since we are going to use the p_next and
26000Sstevel@tonic-gate 		 * p_prev fields to link the requested pages together.
26010Sstevel@tonic-gate 		 */
26020Sstevel@tonic-gate 		page_io_lock(pp);
26030Sstevel@tonic-gate 		page_add(&plist, pp);
26040Sstevel@tonic-gate 		plist = plist->p_next;
26050Sstevel@tonic-gate 		off += PAGESIZE;
26060Sstevel@tonic-gate 		vaddr += PAGESIZE;
26070Sstevel@tonic-gate 	}
26080Sstevel@tonic-gate 
26090Sstevel@tonic-gate 	ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1);
26100Sstevel@tonic-gate fail:
26110Sstevel@tonic-gate 	if (npp != NULL) {
26120Sstevel@tonic-gate 		/*
26130Sstevel@tonic-gate 		 * Did not need this page after all.
26140Sstevel@tonic-gate 		 * Put it back on the free list.
26150Sstevel@tonic-gate 		 */
26160Sstevel@tonic-gate 		VM_STAT_ADD(page_create_putbacks);
26170Sstevel@tonic-gate 		PP_SETFREE(npp);
26180Sstevel@tonic-gate 		PP_SETAGED(npp);
26190Sstevel@tonic-gate 		npp->p_offset = (u_offset_t)-1;
26200Sstevel@tonic-gate 		page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL);
26210Sstevel@tonic-gate 		page_unlock(npp);
26220Sstevel@tonic-gate 
26230Sstevel@tonic-gate 	}
26240Sstevel@tonic-gate 
26250Sstevel@tonic-gate 	ASSERT(pages_req >= found_on_free);
26260Sstevel@tonic-gate 
26270Sstevel@tonic-gate 	{
26280Sstevel@tonic-gate 		uint_t overshoot = (uint_t)(pages_req - found_on_free);
26290Sstevel@tonic-gate 
26300Sstevel@tonic-gate 		if (overshoot) {
26310Sstevel@tonic-gate 			VM_STAT_ADD(page_create_overshoot);
26320Sstevel@tonic-gate 			p = &pcf[pcf_index];
26330Sstevel@tonic-gate 			p->pcf_touch = 1;
26340Sstevel@tonic-gate 			mutex_enter(&p->pcf_lock);
26350Sstevel@tonic-gate 			if (p->pcf_block) {
26360Sstevel@tonic-gate 				p->pcf_reserve += overshoot;
26370Sstevel@tonic-gate 			} else {
26380Sstevel@tonic-gate 				p->pcf_count += overshoot;
26390Sstevel@tonic-gate 				if (p->pcf_wait) {
26400Sstevel@tonic-gate 					mutex_enter(&new_freemem_lock);
26410Sstevel@tonic-gate 					if (freemem_wait) {
26420Sstevel@tonic-gate 						cv_signal(&freemem_cv);
26430Sstevel@tonic-gate 						p->pcf_wait--;
26440Sstevel@tonic-gate 					} else {
26450Sstevel@tonic-gate 						p->pcf_wait = 0;
26460Sstevel@tonic-gate 					}
26470Sstevel@tonic-gate 					mutex_exit(&new_freemem_lock);
26480Sstevel@tonic-gate 				}
26490Sstevel@tonic-gate 			}
26500Sstevel@tonic-gate 			mutex_exit(&p->pcf_lock);
26510Sstevel@tonic-gate 			/* freemem is approximate, so this test OK */
26520Sstevel@tonic-gate 			if (!p->pcf_block)
26530Sstevel@tonic-gate 				freemem += overshoot;
26540Sstevel@tonic-gate 		}
26550Sstevel@tonic-gate 	}
26560Sstevel@tonic-gate 
26570Sstevel@tonic-gate 	return (plist);
26580Sstevel@tonic-gate }
26590Sstevel@tonic-gate 
26600Sstevel@tonic-gate /*
26610Sstevel@tonic-gate  * One or more constituent pages of this large page has been marked
26620Sstevel@tonic-gate  * toxic. Simply demote the large page to PAGESIZE pages and let
26630Sstevel@tonic-gate  * page_free() handle it. This routine should only be called by
26640Sstevel@tonic-gate  * large page free routines (page_free_pages() and page_destroy_pages().
26650Sstevel@tonic-gate  * All pages are locked SE_EXCL and have already been marked free.
26660Sstevel@tonic-gate  */
26670Sstevel@tonic-gate static void
26680Sstevel@tonic-gate page_free_toxic_pages(page_t *rootpp)
26690Sstevel@tonic-gate {
26700Sstevel@tonic-gate 	page_t	*tpp;
26710Sstevel@tonic-gate 	pgcnt_t	i, pgcnt = page_get_pagecnt(rootpp->p_szc);
26720Sstevel@tonic-gate 	uint_t	szc = rootpp->p_szc;
26730Sstevel@tonic-gate 
26740Sstevel@tonic-gate 	for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) {
26750Sstevel@tonic-gate 		ASSERT(tpp->p_szc == szc);
26760Sstevel@tonic-gate 		ASSERT((PAGE_EXCL(tpp) &&
26770Sstevel@tonic-gate 		    !page_iolock_assert(tpp)) || panicstr);
26780Sstevel@tonic-gate 		tpp->p_szc = 0;
26790Sstevel@tonic-gate 	}
26800Sstevel@tonic-gate 
26810Sstevel@tonic-gate 	while (rootpp != NULL) {
26820Sstevel@tonic-gate 		tpp = rootpp;
26830Sstevel@tonic-gate 		page_sub(&rootpp, tpp);
26840Sstevel@tonic-gate 		ASSERT(PP_ISFREE(tpp));
26850Sstevel@tonic-gate 		PP_CLRFREE(tpp);
26860Sstevel@tonic-gate 		page_free(tpp, 1);
26870Sstevel@tonic-gate 	}
26880Sstevel@tonic-gate }
26890Sstevel@tonic-gate 
26900Sstevel@tonic-gate /*
26910Sstevel@tonic-gate  * Put page on the "free" list.
26920Sstevel@tonic-gate  * The free list is really two lists maintained by
26930Sstevel@tonic-gate  * the PSM of whatever machine we happen to be on.
26940Sstevel@tonic-gate  */
26950Sstevel@tonic-gate void
26960Sstevel@tonic-gate page_free(page_t *pp, int dontneed)
26970Sstevel@tonic-gate {
26980Sstevel@tonic-gate 	struct pcf	*p;
26990Sstevel@tonic-gate 	uint_t		pcf_index;
27000Sstevel@tonic-gate 
27010Sstevel@tonic-gate 	ASSERT((PAGE_EXCL(pp) &&
27020Sstevel@tonic-gate 	    !page_iolock_assert(pp)) || panicstr);
27030Sstevel@tonic-gate 
27040Sstevel@tonic-gate 	if (PP_ISFREE(pp)) {
27050Sstevel@tonic-gate 		panic("page_free: page %p is free", (void *)pp);
27060Sstevel@tonic-gate 	}
27070Sstevel@tonic-gate 
27080Sstevel@tonic-gate 	if (pp->p_szc != 0) {
27090Sstevel@tonic-gate 		if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) ||
27100Sstevel@tonic-gate 		    pp->p_vnode == &kvp) {
27110Sstevel@tonic-gate 			panic("page_free: anon or kernel "
27120Sstevel@tonic-gate 			    "or no vnode large page %p", (void *)pp);
27130Sstevel@tonic-gate 		}
27140Sstevel@tonic-gate 		page_demote_vp_pages(pp);
27150Sstevel@tonic-gate 		ASSERT(pp->p_szc == 0);
27160Sstevel@tonic-gate 	}
27170Sstevel@tonic-gate 
27180Sstevel@tonic-gate 	/*
27190Sstevel@tonic-gate 	 * The page_struct_lock need not be acquired to examine these
27200Sstevel@tonic-gate 	 * fields since the page has an "exclusive" lock.
27210Sstevel@tonic-gate 	 */
27220Sstevel@tonic-gate 	if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
27230Sstevel@tonic-gate 		panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d",
27240Sstevel@tonic-gate 		    pp, page_pptonum(pp), pp->p_lckcnt, pp->p_cowcnt);
27250Sstevel@tonic-gate 		/*NOTREACHED*/
27260Sstevel@tonic-gate 	}
27270Sstevel@tonic-gate 
27280Sstevel@tonic-gate 	ASSERT(!hat_page_getshare(pp));
27290Sstevel@tonic-gate 
27300Sstevel@tonic-gate 	PP_SETFREE(pp);
27310Sstevel@tonic-gate 	ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) ||
27320Sstevel@tonic-gate 	    !hat_ismod(pp));
27330Sstevel@tonic-gate 	page_clr_all_props(pp);
27340Sstevel@tonic-gate 	ASSERT(!hat_page_getshare(pp));
27350Sstevel@tonic-gate 
27360Sstevel@tonic-gate 	/*
27370Sstevel@tonic-gate 	 * Now we add the page to the head of the free list.
27380Sstevel@tonic-gate 	 * But if this page is associated with a paged vnode
27390Sstevel@tonic-gate 	 * then we adjust the head forward so that the page is
27400Sstevel@tonic-gate 	 * effectively at the end of the list.
27410Sstevel@tonic-gate 	 */
27420Sstevel@tonic-gate 	if (pp->p_vnode == NULL) {
27430Sstevel@tonic-gate 		/*
27440Sstevel@tonic-gate 		 * Page has no identity, put it on the free list.
27450Sstevel@tonic-gate 		 */
27460Sstevel@tonic-gate 		PP_SETAGED(pp);
27470Sstevel@tonic-gate 		pp->p_offset = (u_offset_t)-1;
27480Sstevel@tonic-gate 		page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
27490Sstevel@tonic-gate 		VM_STAT_ADD(pagecnt.pc_free_free);
27500Sstevel@tonic-gate 		TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE,
27510Sstevel@tonic-gate 		    "page_free_free:pp %p", pp);
27520Sstevel@tonic-gate 	} else {
27530Sstevel@tonic-gate 		PP_CLRAGED(pp);
27540Sstevel@tonic-gate 
27550Sstevel@tonic-gate 		if (!dontneed || nopageage) {
27560Sstevel@tonic-gate 			/* move it to the tail of the list */
27570Sstevel@tonic-gate 			page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL);
27580Sstevel@tonic-gate 
27590Sstevel@tonic-gate 			VM_STAT_ADD(pagecnt.pc_free_cache);
27600Sstevel@tonic-gate 			TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL,
27610Sstevel@tonic-gate 			    "page_free_cache_tail:pp %p", pp);
27620Sstevel@tonic-gate 		} else {
27630Sstevel@tonic-gate 			page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD);
27640Sstevel@tonic-gate 
27650Sstevel@tonic-gate 			VM_STAT_ADD(pagecnt.pc_free_dontneed);
27660Sstevel@tonic-gate 			TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD,
27670Sstevel@tonic-gate 			    "page_free_cache_head:pp %p", pp);
27680Sstevel@tonic-gate 		}
27690Sstevel@tonic-gate 	}
27700Sstevel@tonic-gate 	page_unlock(pp);
27710Sstevel@tonic-gate 
27720Sstevel@tonic-gate 	/*
27730Sstevel@tonic-gate 	 * Now do the `freemem' accounting.
27740Sstevel@tonic-gate 	 */
27750Sstevel@tonic-gate 	pcf_index = PCF_INDEX();
27760Sstevel@tonic-gate 	p = &pcf[pcf_index];
27770Sstevel@tonic-gate 	p->pcf_touch = 1;
27780Sstevel@tonic-gate 
27790Sstevel@tonic-gate 	mutex_enter(&p->pcf_lock);
27800Sstevel@tonic-gate 	if (p->pcf_block) {
27810Sstevel@tonic-gate 		p->pcf_reserve += 1;
27820Sstevel@tonic-gate 	} else {
27830Sstevel@tonic-gate 		p->pcf_count += 1;
27840Sstevel@tonic-gate 		if (p->pcf_wait) {
27850Sstevel@tonic-gate 			mutex_enter(&new_freemem_lock);
27860Sstevel@tonic-gate 			/*
27870Sstevel@tonic-gate 			 * Check to see if some other thread
27880Sstevel@tonic-gate 			 * is actually waiting.  Another bucket
27890Sstevel@tonic-gate 			 * may have woken it up by now.  If there
27900Sstevel@tonic-gate 			 * are no waiters, then set our pcf_wait
27910Sstevel@tonic-gate 			 * count to zero to avoid coming in here
27920Sstevel@tonic-gate 			 * next time.  Also, since only one page
27930Sstevel@tonic-gate 			 * was put on the free list, just wake
27940Sstevel@tonic-gate 			 * up one waiter.
27950Sstevel@tonic-gate 			 */
27960Sstevel@tonic-gate 			if (freemem_wait) {
27970Sstevel@tonic-gate 				cv_signal(&freemem_cv);
27980Sstevel@tonic-gate 				p->pcf_wait--;
27990Sstevel@tonic-gate 			} else {
28000Sstevel@tonic-gate 				p->pcf_wait = 0;
28010Sstevel@tonic-gate 			}
28020Sstevel@tonic-gate 			mutex_exit(&new_freemem_lock);
28030Sstevel@tonic-gate 		}
28040Sstevel@tonic-gate 	}
28050Sstevel@tonic-gate 	mutex_exit(&p->pcf_lock);
28060Sstevel@tonic-gate 
28070Sstevel@tonic-gate 	/* freemem is approximate, so this test OK */
28080Sstevel@tonic-gate 	if (!p->pcf_block)
28090Sstevel@tonic-gate 		freemem += 1;
28100Sstevel@tonic-gate }
28110Sstevel@tonic-gate 
28120Sstevel@tonic-gate /*
28130Sstevel@tonic-gate  * Put page on the "free" list during intial startup.
28140Sstevel@tonic-gate  * This happens during initial single threaded execution.
28150Sstevel@tonic-gate  */
28160Sstevel@tonic-gate void
28170Sstevel@tonic-gate page_free_at_startup(page_t *pp)
28180Sstevel@tonic-gate {
28190Sstevel@tonic-gate 	struct pcf	*p;
28200Sstevel@tonic-gate 	uint_t		pcf_index;
28210Sstevel@tonic-gate 
28220Sstevel@tonic-gate 	page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT);
28230Sstevel@tonic-gate 	VM_STAT_ADD(pagecnt.pc_free_free);
28240Sstevel@tonic-gate 
28250Sstevel@tonic-gate 	/*
28260Sstevel@tonic-gate 	 * Now do the `freemem' accounting.
28270Sstevel@tonic-gate 	 */
28280Sstevel@tonic-gate 	pcf_index = PCF_INDEX();
28290Sstevel@tonic-gate 	p = &pcf[pcf_index];
28300Sstevel@tonic-gate 	p->pcf_touch = 1;
28310Sstevel@tonic-gate 
28320Sstevel@tonic-gate 	ASSERT(p->pcf_block == 0);
28330Sstevel@tonic-gate 	ASSERT(p->pcf_wait == 0);
28340Sstevel@tonic-gate 	p->pcf_count += 1;
28350Sstevel@tonic-gate 
28360Sstevel@tonic-gate 	/* freemem is approximate, so this is OK */
28370Sstevel@tonic-gate 	freemem += 1;
28380Sstevel@tonic-gate }
28390Sstevel@tonic-gate 
28400Sstevel@tonic-gate void
28410Sstevel@tonic-gate page_free_pages(page_t *pp)
28420Sstevel@tonic-gate {
28430Sstevel@tonic-gate 	page_t	*tpp, *rootpp = NULL;
28440Sstevel@tonic-gate 	pgcnt_t	pgcnt = page_get_pagecnt(pp->p_szc);
28450Sstevel@tonic-gate 	pgcnt_t	i;
28460Sstevel@tonic-gate 	uint_t	szc = pp->p_szc;
28470Sstevel@tonic-gate 
28480Sstevel@tonic-gate 	VM_STAT_ADD(pagecnt.pc_free_pages);
28490Sstevel@tonic-gate 	TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE,
28500Sstevel@tonic-gate 	    "page_free_free:pp %p", pp);
28510Sstevel@tonic-gate 
28520Sstevel@tonic-gate 	ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes());
28530Sstevel@tonic-gate 	if ((page_pptonum(pp) & (pgcnt - 1)) != 0) {
28540Sstevel@tonic-gate 		panic("page_free_pages: not root page %p", (void *)pp);
28550Sstevel@tonic-gate 		/*NOTREACHED*/
28560Sstevel@tonic-gate 	}
28570Sstevel@tonic-gate 
2858414Skchow 	for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
28590Sstevel@tonic-gate 		ASSERT((PAGE_EXCL(tpp) &&
28600Sstevel@tonic-gate 		    !page_iolock_assert(tpp)) || panicstr);
28610Sstevel@tonic-gate 		if (PP_ISFREE(tpp)) {
28620Sstevel@tonic-gate 			panic("page_free_pages: page %p is free", (void *)tpp);
28630Sstevel@tonic-gate 			/*NOTREACHED*/
28640Sstevel@tonic-gate 		}
28650Sstevel@tonic-gate 		if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 ||
28660Sstevel@tonic-gate 		    tpp->p_cowcnt != 0) {
28670Sstevel@tonic-gate 			panic("page_free_pages %p", (void *)tpp);
28680Sstevel@tonic-gate 			/*NOTREACHED*/
28690Sstevel@tonic-gate 		}
28700Sstevel@tonic-gate 
28710Sstevel@tonic-gate 		ASSERT(!hat_page_getshare(tpp));
28720Sstevel@tonic-gate 		ASSERT(tpp->p_vnode == NULL);
28730Sstevel@tonic-gate 		ASSERT(tpp->p_szc == szc);
28740Sstevel@tonic-gate 
28750Sstevel@tonic-gate 		PP_SETFREE(tpp);
28760Sstevel@tonic-gate 		page_clr_all_props(tpp);
28770Sstevel@tonic-gate 		PP_SETAGED(tpp);
28780Sstevel@tonic-gate 		tpp->p_offset = (u_offset_t)-1;
28790Sstevel@tonic-gate 		ASSERT(tpp->p_next == tpp);
28800Sstevel@tonic-gate 		ASSERT(tpp->p_prev == tpp);
28810Sstevel@tonic-gate 		page_list_concat(&rootpp, &tpp);
28820Sstevel@tonic-gate 	}
28830Sstevel@tonic-gate 	ASSERT(rootpp == pp);
28840Sstevel@tonic-gate 
28850Sstevel@tonic-gate 	page_list_add_pages(rootpp, 0);
28860Sstevel@tonic-gate 	page_create_putback(pgcnt);
28870Sstevel@tonic-gate }
28880Sstevel@tonic-gate 
28890Sstevel@tonic-gate int free_pages = 1;
28900Sstevel@tonic-gate 
28910Sstevel@tonic-gate /*
28920Sstevel@tonic-gate  * This routine attempts to return pages to the cachelist via page_release().
28930Sstevel@tonic-gate  * It does not *have* to be successful in all cases, since the pageout scanner
28940Sstevel@tonic-gate  * will catch any pages it misses.  It does need to be fast and not introduce
28950Sstevel@tonic-gate  * too much overhead.
28960Sstevel@tonic-gate  *
28970Sstevel@tonic-gate  * If a page isn't found on the unlocked sweep of the page_hash bucket, we
28980Sstevel@tonic-gate  * don't lock and retry.  This is ok, since the page scanner will eventually
28990Sstevel@tonic-gate  * find any page we miss in free_vp_pages().
29000Sstevel@tonic-gate  */
29010Sstevel@tonic-gate void
29020Sstevel@tonic-gate free_vp_pages(vnode_t *vp, u_offset_t off, size_t len)
29030Sstevel@tonic-gate {
29040Sstevel@tonic-gate 	page_t *pp;
29050Sstevel@tonic-gate 	u_offset_t eoff;
29060Sstevel@tonic-gate 	extern int swap_in_range(vnode_t *, u_offset_t, size_t);
29070Sstevel@tonic-gate 
29080Sstevel@tonic-gate 	eoff = off + len;
29090Sstevel@tonic-gate 
29100Sstevel@tonic-gate 	if (free_pages == 0)
29110Sstevel@tonic-gate 		return;
29120Sstevel@tonic-gate 	if (swap_in_range(vp, off, len))
29130Sstevel@tonic-gate 		return;
29140Sstevel@tonic-gate 
29150Sstevel@tonic-gate 	for (; off < eoff; off += PAGESIZE) {
29160Sstevel@tonic-gate 
29170Sstevel@tonic-gate 		/*
29180Sstevel@tonic-gate 		 * find the page using a fast, but inexact search. It'll be OK
29190Sstevel@tonic-gate 		 * if a few pages slip through the cracks here.
29200Sstevel@tonic-gate 		 */
29210Sstevel@tonic-gate 		pp = page_exists(vp, off);
29220Sstevel@tonic-gate 
29230Sstevel@tonic-gate 		/*
29240Sstevel@tonic-gate 		 * If we didn't find the page (it may not exist), the page
29250Sstevel@tonic-gate 		 * is free, looks still in use (shared), or we can't lock it,
29260Sstevel@tonic-gate 		 * just give up.
29270Sstevel@tonic-gate 		 */
29280Sstevel@tonic-gate 		if (pp == NULL ||
29290Sstevel@tonic-gate 		    PP_ISFREE(pp) ||
29300Sstevel@tonic-gate 		    page_share_cnt(pp) > 0 ||
29310Sstevel@tonic-gate 		    !page_trylock(pp, SE_EXCL))
29320Sstevel@tonic-gate 			continue;
29330Sstevel@tonic-gate 
29340Sstevel@tonic-gate 		/*
29350Sstevel@tonic-gate 		 * Once we have locked pp, verify that it's still the
29360Sstevel@tonic-gate 		 * correct page and not already free
29370Sstevel@tonic-gate 		 */
29380Sstevel@tonic-gate 		ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL));
29390Sstevel@tonic-gate 		if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) {
29400Sstevel@tonic-gate 			page_unlock(pp);
29410Sstevel@tonic-gate 			continue;
29420Sstevel@tonic-gate 		}
29430Sstevel@tonic-gate 
29440Sstevel@tonic-gate 		/*
29450Sstevel@tonic-gate 		 * try to release the page...
29460Sstevel@tonic-gate 		 */
29470Sstevel@tonic-gate 		(void) page_release(pp, 1);
29480Sstevel@tonic-gate 	}
29490Sstevel@tonic-gate }
29500Sstevel@tonic-gate 
29510Sstevel@tonic-gate /*
29520Sstevel@tonic-gate  * Reclaim the given page from the free list.
29530Sstevel@tonic-gate  * Returns 1 on success or 0 on failure.
29540Sstevel@tonic-gate  *
29550Sstevel@tonic-gate  * The page is unlocked if it can't be reclaimed (when freemem == 0).
29560Sstevel@tonic-gate  * If `lock' is non-null, it will be dropped and re-acquired if
29570Sstevel@tonic-gate  * the routine must wait while freemem is 0.
29580Sstevel@tonic-gate  *
29590Sstevel@tonic-gate  * As it turns out, boot_getpages() does this.  It picks a page,
29600Sstevel@tonic-gate  * based on where OBP mapped in some address, gets its pfn, searches
29610Sstevel@tonic-gate  * the memsegs, locks the page, then pulls it off the free list!
29620Sstevel@tonic-gate  */
29630Sstevel@tonic-gate int
29640Sstevel@tonic-gate page_reclaim(page_t *pp, kmutex_t *lock)
29650Sstevel@tonic-gate {
29660Sstevel@tonic-gate 	struct pcf	*p;
29670Sstevel@tonic-gate 	uint_t		pcf_index;
29680Sstevel@tonic-gate 	struct cpu	*cpup;
29690Sstevel@tonic-gate 	uint_t		i;
29701074Smec 	pgcnt_t		npgs, need;
29711074Smec 	pgcnt_t		collected = 0;
29720Sstevel@tonic-gate 
29730Sstevel@tonic-gate 	ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1);
29740Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp));
2975917Selowe 
2976917Selowe 	npgs = page_get_pagecnt(pp->p_szc);
29770Sstevel@tonic-gate 
29780Sstevel@tonic-gate 	/*
29790Sstevel@tonic-gate 	 * If `freemem' is 0, we cannot reclaim this page from the
29800Sstevel@tonic-gate 	 * freelist, so release every lock we might hold: the page,
29810Sstevel@tonic-gate 	 * and the `lock' before blocking.
29820Sstevel@tonic-gate 	 *
29830Sstevel@tonic-gate 	 * The only way `freemem' can become 0 while there are pages
29840Sstevel@tonic-gate 	 * marked free (have their p->p_free bit set) is when the
29850Sstevel@tonic-gate 	 * system is low on memory and doing a page_create().  In
29860Sstevel@tonic-gate 	 * order to guarantee that once page_create() starts acquiring
29870Sstevel@tonic-gate 	 * pages it will be able to get all that it needs since `freemem'
29880Sstevel@tonic-gate 	 * was decreased by the requested amount.  So, we need to release
29890Sstevel@tonic-gate 	 * this page, and let page_create() have it.
29900Sstevel@tonic-gate 	 *
29910Sstevel@tonic-gate 	 * Since `freemem' being zero is not supposed to happen, just
29920Sstevel@tonic-gate 	 * use the usual hash stuff as a starting point.  If that bucket
29930Sstevel@tonic-gate 	 * is empty, then assume the worst, and start at the beginning
29940Sstevel@tonic-gate 	 * of the pcf array.  If we always start at the beginning
29950Sstevel@tonic-gate 	 * when acquiring more than one pcf lock, there won't be any
29960Sstevel@tonic-gate 	 * deadlock problems.
29970Sstevel@tonic-gate 	 */
29980Sstevel@tonic-gate 
29990Sstevel@tonic-gate 	/* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */
30000Sstevel@tonic-gate 
3001973Selowe 	if (freemem <= throttlefree && !page_create_throttle(npgs, 0)) {
30020Sstevel@tonic-gate 		pcf_acquire_all();
30030Sstevel@tonic-gate 		goto page_reclaim_nomem;
30040Sstevel@tonic-gate 	}
30050Sstevel@tonic-gate 
30060Sstevel@tonic-gate 	pcf_index = PCF_INDEX();
30070Sstevel@tonic-gate 	p = &pcf[pcf_index];
30080Sstevel@tonic-gate 	p->pcf_touch = 1;
30090Sstevel@tonic-gate 	mutex_enter(&p->pcf_lock);
3010917Selowe 	if (p->pcf_count >= npgs) {
3011917Selowe 		collected = npgs;
3012917Selowe 		p->pcf_count -= npgs;
30130Sstevel@tonic-gate 	}
30140Sstevel@tonic-gate 	mutex_exit(&p->pcf_lock);
3015917Selowe 	need = npgs - collected;
3016917Selowe 
3017917Selowe 	if (need > 0) {
30180Sstevel@tonic-gate 		VM_STAT_ADD(page_reclaim_zero);
30190Sstevel@tonic-gate 		/*
30200Sstevel@tonic-gate 		 * Check again. Its possible that some other thread
30210Sstevel@tonic-gate 		 * could have been right behind us, and added one
30220Sstevel@tonic-gate 		 * to a list somewhere.  Acquire each of the pcf locks
30230Sstevel@tonic-gate 		 * until we find a page.
30240Sstevel@tonic-gate 		 */
30250Sstevel@tonic-gate 		p = pcf;
30260Sstevel@tonic-gate 		for (i = 0; i < PCF_FANOUT; i++) {
30270Sstevel@tonic-gate 			p->pcf_touch = 1;
30280Sstevel@tonic-gate 			mutex_enter(&p->pcf_lock);
3029917Selowe 			if (p->pcf_count) {
3030917Selowe 				if (p->pcf_count >= need) {
3031917Selowe 					p->pcf_count -= need;
3032917Selowe 					collected += need;
3033917Selowe 					need = 0;
3034917Selowe 					break;
3035917Selowe 				} else if (p->pcf_count) {
3036917Selowe 					collected += p->pcf_count;
3037917Selowe 					need -= p->pcf_count;
3038917Selowe 					p->pcf_count = 0;
3039917Selowe 				}
30400Sstevel@tonic-gate 			}
30410Sstevel@tonic-gate 			p++;
30420Sstevel@tonic-gate 		}
30430Sstevel@tonic-gate 
3044917Selowe 		if (need > 0) {
30450Sstevel@tonic-gate page_reclaim_nomem:
30460Sstevel@tonic-gate 			/*
30470Sstevel@tonic-gate 			 * We really can't have page `pp'.
30480Sstevel@tonic-gate 			 * Time for the no-memory dance with
30490Sstevel@tonic-gate 			 * page_free().  This is just like
30500Sstevel@tonic-gate 			 * page_create_wait().  Plus the added
30510Sstevel@tonic-gate 			 * attraction of releasing whatever mutex
30520Sstevel@tonic-gate 			 * we held when we were called with in `lock'.
30530Sstevel@tonic-gate 			 * Page_unlock() will wakeup any thread
30540Sstevel@tonic-gate 			 * waiting around for this page.
30550Sstevel@tonic-gate 			 */
30560Sstevel@tonic-gate 			if (lock) {
30570Sstevel@tonic-gate 				VM_STAT_ADD(page_reclaim_zero_locked);
30580Sstevel@tonic-gate 				mutex_exit(lock);
30590Sstevel@tonic-gate 			}
30600Sstevel@tonic-gate 			page_unlock(pp);
30610Sstevel@tonic-gate 
30620Sstevel@tonic-gate 			/*
30630Sstevel@tonic-gate 			 * get this before we drop all the pcf locks.
30640Sstevel@tonic-gate 			 */
30650Sstevel@tonic-gate 			mutex_enter(&new_freemem_lock);
30660Sstevel@tonic-gate 
30670Sstevel@tonic-gate 			p = pcf;
3068917Selowe 			p->pcf_count += collected;
30690Sstevel@tonic-gate 			for (i = 0; i < PCF_FANOUT; i++) {
30700Sstevel@tonic-gate 				p->pcf_wait++;
30710Sstevel@tonic-gate 				mutex_exit(&p->pcf_lock);
30720Sstevel@tonic-gate 				p++;
30730Sstevel@tonic-gate 			}
30740Sstevel@tonic-gate 
30750Sstevel@tonic-gate 			freemem_wait++;
30760Sstevel@tonic-gate 			cv_wait(&freemem_cv, &new_freemem_lock);
30770Sstevel@tonic-gate 			freemem_wait--;
30780Sstevel@tonic-gate 
30790Sstevel@tonic-gate 			mutex_exit(&new_freemem_lock);
30800Sstevel@tonic-gate 
30810Sstevel@tonic-gate 			if (lock) {
30820Sstevel@tonic-gate 				mutex_enter(lock);
30830Sstevel@tonic-gate 			}
30840Sstevel@tonic-gate 			return (0);
30850Sstevel@tonic-gate 		}
30860Sstevel@tonic-gate 
30870Sstevel@tonic-gate 		/*
3088917Selowe 		 * We beat the PCF bins over the head until
3089917Selowe 		 * we got the memory that we wanted.
30900Sstevel@tonic-gate 		 * The pcf accounting has been done,
30910Sstevel@tonic-gate 		 * though none of the pcf_wait flags have been set,
30920Sstevel@tonic-gate 		 * drop the locks and continue on.
30930Sstevel@tonic-gate 		 */
3094917Selowe 		ASSERT(collected == npgs);
30950Sstevel@tonic-gate 		while (p >= pcf) {
30960Sstevel@tonic-gate 			mutex_exit(&p->pcf_lock);
30970Sstevel@tonic-gate 			p--;
30980Sstevel@tonic-gate 		}
30990Sstevel@tonic-gate 	}
31000Sstevel@tonic-gate 
31010Sstevel@tonic-gate 	/*
31020Sstevel@tonic-gate 	 * freemem is not protected by any lock. Thus, we cannot
31030Sstevel@tonic-gate 	 * have any assertion containing freemem here.
31040Sstevel@tonic-gate 	 */
3105917Selowe 	freemem -= npgs;
31060Sstevel@tonic-gate 
31070Sstevel@tonic-gate 	VM_STAT_ADD(pagecnt.pc_reclaim);
31080Sstevel@tonic-gate 	if (PP_ISAGED(pp)) {
3109917Selowe 		if (npgs > 1) {
3110917Selowe 			page_list_sub_pages(pp, pp->p_szc);
3111917Selowe 		} else {
3112917Selowe 			page_list_sub(pp, PG_FREE_LIST);
3113917Selowe 		}
31140Sstevel@tonic-gate 		TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE,
31150Sstevel@tonic-gate 		    "page_reclaim_free:pp %p", pp);
31160Sstevel@tonic-gate 	} else {
3117917Selowe 		ASSERT(npgs == 1);
31180Sstevel@tonic-gate 		page_list_sub(pp, PG_CACHE_LIST);
31190Sstevel@tonic-gate 		TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE,
31200Sstevel@tonic-gate 		    "page_reclaim_cache:pp %p", pp);
31210Sstevel@tonic-gate 	}
31220Sstevel@tonic-gate 
31230Sstevel@tonic-gate 	/*
31240Sstevel@tonic-gate 	 * clear the p_free & p_age bits since this page is no longer
31250Sstevel@tonic-gate 	 * on the free list.  Notice that there was a brief time where
31260Sstevel@tonic-gate 	 * a page is marked as free, but is not on the list.
31270Sstevel@tonic-gate 	 *
31280Sstevel@tonic-gate 	 * Set the reference bit to protect against immediate pageout.
31290Sstevel@tonic-gate 	 */
3130973Selowe 	for (i = 0; i < npgs; i++, pp++) {
3131917Selowe 		PP_CLRFREE(pp);
3132917Selowe 		PP_CLRAGED(pp);
3133917Selowe 		page_set_props(pp, P_REF);
3134917Selowe 	}
31350Sstevel@tonic-gate 
31360Sstevel@tonic-gate 	CPU_STATS_ENTER_K();
31370Sstevel@tonic-gate 	cpup = CPU;	/* get cpup now that CPU cannot change */
31380Sstevel@tonic-gate 	CPU_STATS_ADDQ(cpup, vm, pgrec, 1);
31390Sstevel@tonic-gate 	CPU_STATS_ADDQ(cpup, vm, pgfrec, 1);
31400Sstevel@tonic-gate 	CPU_STATS_EXIT_K();
31410Sstevel@tonic-gate 
31420Sstevel@tonic-gate 	return (1);
31430Sstevel@tonic-gate }
31440Sstevel@tonic-gate 
31450Sstevel@tonic-gate 
31460Sstevel@tonic-gate 
31470Sstevel@tonic-gate /*
31480Sstevel@tonic-gate  * Destroy identity of the page and put it back on
31490Sstevel@tonic-gate  * the page free list.  Assumes that the caller has
31500Sstevel@tonic-gate  * acquired the "exclusive" lock on the page.
31510Sstevel@tonic-gate  */
31520Sstevel@tonic-gate void
31530Sstevel@tonic-gate page_destroy(page_t *pp, int dontfree)
31540Sstevel@tonic-gate {
31550Sstevel@tonic-gate 	ASSERT((PAGE_EXCL(pp) &&
31560Sstevel@tonic-gate 	    !page_iolock_assert(pp)) || panicstr);
31570Sstevel@tonic-gate 
31580Sstevel@tonic-gate 	if (pp->p_szc != 0) {
31590Sstevel@tonic-gate 		if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) ||
31600Sstevel@tonic-gate 		    pp->p_vnode == &kvp) {
31610Sstevel@tonic-gate 			panic("page_destroy: anon or kernel or no vnode "
31620Sstevel@tonic-gate 			    "large page %p", (void *)pp);
31630Sstevel@tonic-gate 		}
31640Sstevel@tonic-gate 		page_demote_vp_pages(pp);
31650Sstevel@tonic-gate 		ASSERT(pp->p_szc == 0);
31660Sstevel@tonic-gate 	}
31670Sstevel@tonic-gate 
31680Sstevel@tonic-gate 	TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp);
31690Sstevel@tonic-gate 
31700Sstevel@tonic-gate 	/*
31710Sstevel@tonic-gate 	 * Unload translations, if any, then hash out the
31720Sstevel@tonic-gate 	 * page to erase its identity.
31730Sstevel@tonic-gate 	 */
31740Sstevel@tonic-gate 	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
31750Sstevel@tonic-gate 	page_hashout(pp, NULL);
31760Sstevel@tonic-gate 
31770Sstevel@tonic-gate 	if (!dontfree) {
31780Sstevel@tonic-gate 		/*
31790Sstevel@tonic-gate 		 * Acquire the "freemem_lock" for availrmem.
31800Sstevel@tonic-gate 		 * The page_struct_lock need not be acquired for lckcnt
31810Sstevel@tonic-gate 		 * and cowcnt since the page has an "exclusive" lock.
31820Sstevel@tonic-gate 		 */
31830Sstevel@tonic-gate 		if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) {
31840Sstevel@tonic-gate 			mutex_enter(&freemem_lock);
31850Sstevel@tonic-gate 			if (pp->p_lckcnt != 0) {
31860Sstevel@tonic-gate 				availrmem++;
31870Sstevel@tonic-gate 				pp->p_lckcnt = 0;
31880Sstevel@tonic-gate 			}
31890Sstevel@tonic-gate 			if (pp->p_cowcnt != 0) {
31900Sstevel@tonic-gate 				availrmem += pp->p_cowcnt;
31910Sstevel@tonic-gate 				pp->p_cowcnt = 0;
31920Sstevel@tonic-gate 			}
31930Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
31940Sstevel@tonic-gate 		}
31950Sstevel@tonic-gate 		/*
31960Sstevel@tonic-gate 		 * Put the page on the "free" list.
31970Sstevel@tonic-gate 		 */
31980Sstevel@tonic-gate 		page_free(pp, 0);
31990Sstevel@tonic-gate 	}
32000Sstevel@tonic-gate }
32010Sstevel@tonic-gate 
32020Sstevel@tonic-gate void
32030Sstevel@tonic-gate page_destroy_pages(page_t *pp)
32040Sstevel@tonic-gate {
32050Sstevel@tonic-gate 
32060Sstevel@tonic-gate 	page_t	*tpp, *rootpp = NULL;
32070Sstevel@tonic-gate 	pgcnt_t	pgcnt = page_get_pagecnt(pp->p_szc);
32080Sstevel@tonic-gate 	pgcnt_t	i, pglcks = 0;
32090Sstevel@tonic-gate 	uint_t	szc = pp->p_szc;
32100Sstevel@tonic-gate 
32110Sstevel@tonic-gate 	ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes());
32120Sstevel@tonic-gate 
32130Sstevel@tonic-gate 	VM_STAT_ADD(pagecnt.pc_destroy_pages);
32140Sstevel@tonic-gate 
32150Sstevel@tonic-gate 	TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp);
32160Sstevel@tonic-gate 
32170Sstevel@tonic-gate 	if ((page_pptonum(pp) & (pgcnt - 1)) != 0) {
32180Sstevel@tonic-gate 		panic("page_destroy_pages: not root page %p", (void *)pp);
32190Sstevel@tonic-gate 		/*NOTREACHED*/
32200Sstevel@tonic-gate 	}
32210Sstevel@tonic-gate 
3222414Skchow 	for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
32230Sstevel@tonic-gate 		ASSERT((PAGE_EXCL(tpp) &&
32240Sstevel@tonic-gate 		    !page_iolock_assert(tpp)) || panicstr);
32250Sstevel@tonic-gate 		(void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
32260Sstevel@tonic-gate 		page_hashout(tpp, NULL);
32270Sstevel@tonic-gate 		ASSERT(tpp->p_offset == (u_offset_t)-1);
32280Sstevel@tonic-gate 		if (tpp->p_lckcnt != 0) {
32290Sstevel@tonic-gate 			pglcks++;
32300Sstevel@tonic-gate 			tpp->p_lckcnt = 0;
32310Sstevel@tonic-gate 		} else if (tpp->p_cowcnt != 0) {
32320Sstevel@tonic-gate 			pglcks += tpp->p_cowcnt;
32330Sstevel@tonic-gate 			tpp->p_cowcnt = 0;
32340Sstevel@tonic-gate 		}
32350Sstevel@tonic-gate 		ASSERT(!hat_page_getshare(tpp));
32360Sstevel@tonic-gate 		ASSERT(tpp->p_vnode == NULL);
32370Sstevel@tonic-gate 		ASSERT(tpp->p_szc == szc);
32380Sstevel@tonic-gate 
32390Sstevel@tonic-gate 		PP_SETFREE(tpp);
32400Sstevel@tonic-gate 		page_clr_all_props(tpp);
32410Sstevel@tonic-gate 		PP_SETAGED(tpp);
32420Sstevel@tonic-gate 		ASSERT(tpp->p_next == tpp);
32430Sstevel@tonic-gate 		ASSERT(tpp->p_prev == tpp);
32440Sstevel@tonic-gate 		page_list_concat(&rootpp, &tpp);
32450Sstevel@tonic-gate 	}
32460Sstevel@tonic-gate 
32470Sstevel@tonic-gate 	ASSERT(rootpp == pp);
32480Sstevel@tonic-gate 	if (pglcks != 0) {
32490Sstevel@tonic-gate 		mutex_enter(&freemem_lock);
32500Sstevel@tonic-gate 		availrmem += pglcks;
32510Sstevel@tonic-gate 		mutex_exit(&freemem_lock);
32520Sstevel@tonic-gate 	}
32530Sstevel@tonic-gate 
32540Sstevel@tonic-gate 	page_list_add_pages(rootpp, 0);
32550Sstevel@tonic-gate 	page_create_putback(pgcnt);
32560Sstevel@tonic-gate }
32570Sstevel@tonic-gate 
32580Sstevel@tonic-gate /*
32590Sstevel@tonic-gate  * Similar to page_destroy(), but destroys pages which are
32600Sstevel@tonic-gate  * locked and known to be on the page free list.  Since
32610Sstevel@tonic-gate  * the page is known to be free and locked, no one can access
32620Sstevel@tonic-gate  * it.
32630Sstevel@tonic-gate  *
32640Sstevel@tonic-gate  * Also, the number of free pages does not change.
32650Sstevel@tonic-gate  */
32660Sstevel@tonic-gate void
32670Sstevel@tonic-gate page_destroy_free(page_t *pp)
32680Sstevel@tonic-gate {
32690Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp));
32700Sstevel@tonic-gate 	ASSERT(PP_ISFREE(pp));
32710Sstevel@tonic-gate 	ASSERT(pp->p_vnode);
32720Sstevel@tonic-gate 	ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0);
32730Sstevel@tonic-gate 	ASSERT(!hat_page_is_mapped(pp));
32740Sstevel@tonic-gate 	ASSERT(PP_ISAGED(pp) == 0);
32750Sstevel@tonic-gate 	ASSERT(pp->p_szc == 0);
32760Sstevel@tonic-gate 
32770Sstevel@tonic-gate 	VM_STAT_ADD(pagecnt.pc_destroy_free);
32780Sstevel@tonic-gate 	page_list_sub(pp, PG_CACHE_LIST);
32790Sstevel@tonic-gate 
32800Sstevel@tonic-gate 	page_hashout(pp, NULL);
32810Sstevel@tonic-gate 	ASSERT(pp->p_vnode == NULL);
32820Sstevel@tonic-gate 	ASSERT(pp->p_offset == (u_offset_t)-1);
32830Sstevel@tonic-gate 	ASSERT(pp->p_hash == NULL);
32840Sstevel@tonic-gate 
32850Sstevel@tonic-gate 	PP_SETAGED(pp);
32860Sstevel@tonic-gate 	page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
32870Sstevel@tonic-gate 	page_unlock(pp);
32880Sstevel@tonic-gate 
32890Sstevel@tonic-gate 	mutex_enter(&new_freemem_lock);
32900Sstevel@tonic-gate 	if (freemem_wait) {
32910Sstevel@tonic-gate 		cv_signal(&freemem_cv);
32920Sstevel@tonic-gate 	}
32930Sstevel@tonic-gate 	mutex_exit(&new_freemem_lock);
32940Sstevel@tonic-gate }
32950Sstevel@tonic-gate 
32960Sstevel@tonic-gate /*
32970Sstevel@tonic-gate  * Rename the page "opp" to have an identity specified
32980Sstevel@tonic-gate  * by [vp, off].  If a page already exists with this name
32990Sstevel@tonic-gate  * it is locked and destroyed.  Note that the page's
33000Sstevel@tonic-gate  * translations are not unloaded during the rename.
33010Sstevel@tonic-gate  *
33020Sstevel@tonic-gate  * This routine is used by the anon layer to "steal" the
33030Sstevel@tonic-gate  * original page and is not unlike destroying a page and
33040Sstevel@tonic-gate  * creating a new page using the same page frame.
33050Sstevel@tonic-gate  *
33060Sstevel@tonic-gate  * XXX -- Could deadlock if caller 1 tries to rename A to B while
33070Sstevel@tonic-gate  * caller 2 tries to rename B to A.
33080Sstevel@tonic-gate  */
33090Sstevel@tonic-gate void
33100Sstevel@tonic-gate page_rename(page_t *opp, vnode_t *vp, u_offset_t off)
33110Sstevel@tonic-gate {
33120Sstevel@tonic-gate 	page_t		*pp;
33130Sstevel@tonic-gate 	int		olckcnt = 0;
33140Sstevel@tonic-gate 	int		ocowcnt = 0;
33150Sstevel@tonic-gate 	kmutex_t	*phm;
33160Sstevel@tonic-gate 	ulong_t		index;
33170Sstevel@tonic-gate 
33180Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp));
33190Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
33200Sstevel@tonic-gate 	ASSERT(PP_ISFREE(opp) == 0);
33210Sstevel@tonic-gate 
33220Sstevel@tonic-gate 	VM_STAT_ADD(page_rename_count);
33230Sstevel@tonic-gate 
33240Sstevel@tonic-gate 	TRACE_3(TR_FAC_VM, TR_PAGE_RENAME,
33250Sstevel@tonic-gate 		"page rename:pp %p vp %p off %llx", opp, vp, off);
33260Sstevel@tonic-gate 
332763Saguzovsk 	/*
332863Saguzovsk 	 * CacheFS may call page_rename for a large NFS page
332963Saguzovsk 	 * when both CacheFS and NFS mount points are used
333063Saguzovsk 	 * by applications. Demote this large page before
333163Saguzovsk 	 * renaming it, to ensure that there are no "partial"
333263Saguzovsk 	 * large pages left lying around.
333363Saguzovsk 	 */
333463Saguzovsk 	if (opp->p_szc != 0) {
333563Saguzovsk 		vnode_t *ovp = opp->p_vnode;
333663Saguzovsk 		ASSERT(ovp != NULL);
333763Saguzovsk 		ASSERT(!IS_SWAPFSVP(ovp));
333863Saguzovsk 		ASSERT(ovp != &kvp);
333963Saguzovsk 		page_demote_vp_pages(opp);
334063Saguzovsk 		ASSERT(opp->p_szc == 0);
334163Saguzovsk 	}
334263Saguzovsk 
33430Sstevel@tonic-gate 	page_hashout(opp, NULL);
33440Sstevel@tonic-gate 	PP_CLRAGED(opp);
33450Sstevel@tonic-gate 
33460Sstevel@tonic-gate 	/*
33470Sstevel@tonic-gate 	 * Acquire the appropriate page hash lock, since
33480Sstevel@tonic-gate 	 * we're going to rename the page.
33490Sstevel@tonic-gate 	 */
33500Sstevel@tonic-gate 	index = PAGE_HASH_FUNC(vp, off);
33510Sstevel@tonic-gate 	phm = PAGE_HASH_MUTEX(index);
33520Sstevel@tonic-gate 	mutex_enter(phm);
33530Sstevel@tonic-gate top:
33540Sstevel@tonic-gate 	/*
33550Sstevel@tonic-gate 	 * Look for an existing page with this name and destroy it if found.
33560Sstevel@tonic-gate 	 * By holding the page hash lock all the way to the page_hashin()
33570Sstevel@tonic-gate 	 * call, we are assured that no page can be created with this
33580Sstevel@tonic-gate 	 * identity.  In the case when the phm lock is dropped to undo any
33590Sstevel@tonic-gate 	 * hat layer mappings, the existing page is held with an "exclusive"
33600Sstevel@tonic-gate 	 * lock, again preventing another page from being created with
33610Sstevel@tonic-gate 	 * this identity.
33620Sstevel@tonic-gate 	 */
33630Sstevel@tonic-gate 	PAGE_HASH_SEARCH(index, pp, vp, off);
33640Sstevel@tonic-gate 	if (pp != NULL) {
33650Sstevel@tonic-gate 		VM_STAT_ADD(page_rename_exists);
33660Sstevel@tonic-gate 
33670Sstevel@tonic-gate 		/*
33680Sstevel@tonic-gate 		 * As it turns out, this is one of only two places where
33690Sstevel@tonic-gate 		 * page_lock() needs to hold the passed in lock in the
33700Sstevel@tonic-gate 		 * successful case.  In all of the others, the lock could
33710Sstevel@tonic-gate 		 * be dropped as soon as the attempt is made to lock
33720Sstevel@tonic-gate 		 * the page.  It is tempting to add yet another arguement,
33730Sstevel@tonic-gate 		 * PL_KEEP or PL_DROP, to let page_lock know what to do.
33740Sstevel@tonic-gate 		 */
33750Sstevel@tonic-gate 		if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) {
33760Sstevel@tonic-gate 			/*
33770Sstevel@tonic-gate 			 * Went to sleep because the page could not
33780Sstevel@tonic-gate 			 * be locked.  We were woken up when the page
33790Sstevel@tonic-gate 			 * was unlocked, or when the page was destroyed.
33800Sstevel@tonic-gate 			 * In either case, `phm' was dropped while we
33810Sstevel@tonic-gate 			 * slept.  Hence we should not just roar through
33820Sstevel@tonic-gate 			 * this loop.
33830Sstevel@tonic-gate 			 */
33840Sstevel@tonic-gate 			goto top;
33850Sstevel@tonic-gate 		}
33860Sstevel@tonic-gate 
338763Saguzovsk 		/*
338863Saguzovsk 		 * If an existing page is a large page, then demote
338963Saguzovsk 		 * it to ensure that no "partial" large pages are
339063Saguzovsk 		 * "created" after page_rename. An existing page
339163Saguzovsk 		 * can be a CacheFS page, and can't belong to swapfs.
339263Saguzovsk 		 */
33930Sstevel@tonic-gate 		if (hat_page_is_mapped(pp)) {
33940Sstevel@tonic-gate 			/*
33950Sstevel@tonic-gate 			 * Unload translations.  Since we hold the
33960Sstevel@tonic-gate 			 * exclusive lock on this page, the page
33970Sstevel@tonic-gate 			 * can not be changed while we drop phm.
33980Sstevel@tonic-gate 			 * This is also not a lock protocol violation,
33990Sstevel@tonic-gate 			 * but rather the proper way to do things.
34000Sstevel@tonic-gate 			 */
34010Sstevel@tonic-gate 			mutex_exit(phm);
34020Sstevel@tonic-gate 			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
340363Saguzovsk 			if (pp->p_szc != 0) {
340463Saguzovsk 				ASSERT(!IS_SWAPFSVP(vp));
340563Saguzovsk 				ASSERT(vp != &kvp);
340663Saguzovsk 				page_demote_vp_pages(pp);
340763Saguzovsk 				ASSERT(pp->p_szc == 0);
340863Saguzovsk 			}
340963Saguzovsk 			mutex_enter(phm);
341063Saguzovsk 		} else if (pp->p_szc != 0) {
341163Saguzovsk 			ASSERT(!IS_SWAPFSVP(vp));
341263Saguzovsk 			ASSERT(vp != &kvp);
341363Saguzovsk 			mutex_exit(phm);
341463Saguzovsk 			page_demote_vp_pages(pp);
341563Saguzovsk 			ASSERT(pp->p_szc == 0);
34160Sstevel@tonic-gate 			mutex_enter(phm);
34170Sstevel@tonic-gate 		}
34180Sstevel@tonic-gate 		page_hashout(pp, phm);
34190Sstevel@tonic-gate 	}
34200Sstevel@tonic-gate 	/*
34210Sstevel@tonic-gate 	 * Hash in the page with the new identity.
34220Sstevel@tonic-gate 	 */
34230Sstevel@tonic-gate 	if (!page_hashin(opp, vp, off, phm)) {
34240Sstevel@tonic-gate 		/*
34250Sstevel@tonic-gate 		 * We were holding phm while we searched for [vp, off]
34260Sstevel@tonic-gate 		 * and only dropped phm if we found and locked a page.
34270Sstevel@tonic-gate 		 * If we can't create this page now, then some thing
34280Sstevel@tonic-gate 		 * is really broken.
34290Sstevel@tonic-gate 		 */
34300Sstevel@tonic-gate 		panic("page_rename: Can't hash in page: %p", (void *)pp);
34310Sstevel@tonic-gate 		/*NOTREACHED*/
34320Sstevel@tonic-gate 	}
34330Sstevel@tonic-gate 
34340Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(phm));
34350Sstevel@tonic-gate 	mutex_exit(phm);
34360Sstevel@tonic-gate 
34370Sstevel@tonic-gate 	/*
34380Sstevel@tonic-gate 	 * Now that we have dropped phm, lets get around to finishing up
34390Sstevel@tonic-gate 	 * with pp.
34400Sstevel@tonic-gate 	 */
34410Sstevel@tonic-gate 	if (pp != NULL) {
34420Sstevel@tonic-gate 		ASSERT(!hat_page_is_mapped(pp));
34430Sstevel@tonic-gate 		/* for now large pages should not end up here */
34440Sstevel@tonic-gate 		ASSERT(pp->p_szc == 0);
34450Sstevel@tonic-gate 		/*
34460Sstevel@tonic-gate 		 * Save the locks for transfer to the new page and then
34470Sstevel@tonic-gate 		 * clear them so page_free doesn't think they're important.
34480Sstevel@tonic-gate 		 * The page_struct_lock need not be acquired for lckcnt and
34490Sstevel@tonic-gate 		 * cowcnt since the page has an "exclusive" lock.
34500Sstevel@tonic-gate 		 */
34510Sstevel@tonic-gate 		olckcnt = pp->p_lckcnt;
34520Sstevel@tonic-gate 		ocowcnt = pp->p_cowcnt;
34530Sstevel@tonic-gate 		pp->p_lckcnt = pp->p_cowcnt = 0;
34540Sstevel@tonic-gate 
34550Sstevel@tonic-gate 		/*
34560Sstevel@tonic-gate 		 * Put the page on the "free" list after we drop
34570Sstevel@tonic-gate 		 * the lock.  The less work under the lock the better.
34580Sstevel@tonic-gate 		 */
34590Sstevel@tonic-gate 		/*LINTED: constant in conditional context*/
34600Sstevel@tonic-gate 		VN_DISPOSE(pp, B_FREE, 0, kcred);
34610Sstevel@tonic-gate 	}
34620Sstevel@tonic-gate 
34630Sstevel@tonic-gate 	/*
34640Sstevel@tonic-gate 	 * Transfer the lock count from the old page (if any).
34650Sstevel@tonic-gate 	 * The page_struct_lock need not be acquired for lckcnt and
34660Sstevel@tonic-gate 	 * cowcnt since the page has an "exclusive" lock.
34670Sstevel@tonic-gate 	 */
34680Sstevel@tonic-gate 	opp->p_lckcnt += olckcnt;
34690Sstevel@tonic-gate 	opp->p_cowcnt += ocowcnt;
34700Sstevel@tonic-gate }
34710Sstevel@tonic-gate 
34720Sstevel@tonic-gate /*
34730Sstevel@tonic-gate  * low level routine to add page `pp' to the hash and vp chains for [vp, offset]
34740Sstevel@tonic-gate  *
34750Sstevel@tonic-gate  * Pages are normally inserted at the start of a vnode's v_pages list.
34760Sstevel@tonic-gate  * If the vnode is VMODSORT and the page is modified, it goes at the end.
34770Sstevel@tonic-gate  * This can happen when a modified page is relocated for DR.
34780Sstevel@tonic-gate  *
34790Sstevel@tonic-gate  * Returns 1 on success and 0 on failure.
34800Sstevel@tonic-gate  */
34810Sstevel@tonic-gate static int
34820Sstevel@tonic-gate page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset)
34830Sstevel@tonic-gate {
34840Sstevel@tonic-gate 	page_t		**listp;
34850Sstevel@tonic-gate 	page_t		*tp;
34860Sstevel@tonic-gate 	ulong_t		index;
34870Sstevel@tonic-gate 
34880Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp));
34890Sstevel@tonic-gate 	ASSERT(vp != NULL);
34900Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));
34910Sstevel@tonic-gate 
34920Sstevel@tonic-gate 	/*
34930Sstevel@tonic-gate 	 * Be sure to set these up before the page is inserted on the hash
34940Sstevel@tonic-gate 	 * list.  As soon as the page is placed on the list some other
34950Sstevel@tonic-gate 	 * thread might get confused and wonder how this page could
34960Sstevel@tonic-gate 	 * possibly hash to this list.
34970Sstevel@tonic-gate 	 */
34980Sstevel@tonic-gate 	pp->p_vnode = vp;
34990Sstevel@tonic-gate 	pp->p_offset = offset;
35000Sstevel@tonic-gate 
35010Sstevel@tonic-gate 	/*
35020Sstevel@tonic-gate 	 * record if this page is on a swap vnode
35030Sstevel@tonic-gate 	 */
35040Sstevel@tonic-gate 	if ((vp->v_flag & VISSWAP) != 0)
35050Sstevel@tonic-gate 		PP_SETSWAP(pp);
35060Sstevel@tonic-gate 
35070Sstevel@tonic-gate 	index = PAGE_HASH_FUNC(vp, offset);
35080Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index)));
35090Sstevel@tonic-gate 	listp = &page_hash[index];
35100Sstevel@tonic-gate 
35110Sstevel@tonic-gate 	/*
35120Sstevel@tonic-gate 	 * If this page is already hashed in, fail this attempt to add it.
35130Sstevel@tonic-gate 	 */
35140Sstevel@tonic-gate 	for (tp = *listp; tp != NULL; tp = tp->p_hash) {
35150Sstevel@tonic-gate 		if (tp->p_vnode == vp && tp->p_offset == offset) {
35160Sstevel@tonic-gate 			pp->p_vnode = NULL;
35170Sstevel@tonic-gate 			pp->p_offset = (u_offset_t)(-1);
35180Sstevel@tonic-gate 			return (0);
35190Sstevel@tonic-gate 		}
35200Sstevel@tonic-gate 	}
35210Sstevel@tonic-gate 	pp->p_hash = *listp;
35220Sstevel@tonic-gate 	*listp = pp;
35230Sstevel@tonic-gate 
35240Sstevel@tonic-gate 	/*
35250Sstevel@tonic-gate 	 * Add the page to the vnode's list of pages
35260Sstevel@tonic-gate 	 */
35270Sstevel@tonic-gate 	if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp))
35280Sstevel@tonic-gate 		listp = &vp->v_pages->p_vpprev->p_vpnext;
35290Sstevel@tonic-gate 	else
35300Sstevel@tonic-gate 		listp = &vp->v_pages;
35310Sstevel@tonic-gate 
35320Sstevel@tonic-gate 	page_vpadd(listp, pp);
35330Sstevel@tonic-gate 
35340Sstevel@tonic-gate 	return (1);
35350Sstevel@tonic-gate }
35360Sstevel@tonic-gate 
35370Sstevel@tonic-gate /*
35380Sstevel@tonic-gate  * Add page `pp' to both the hash and vp chains for [vp, offset].
35390Sstevel@tonic-gate  *
35400Sstevel@tonic-gate  * Returns 1 on success and 0 on failure.
35410Sstevel@tonic-gate  * If hold is passed in, it is not dropped.
35420Sstevel@tonic-gate  */
35430Sstevel@tonic-gate int
35440Sstevel@tonic-gate page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold)
35450Sstevel@tonic-gate {
35460Sstevel@tonic-gate 	kmutex_t	*phm = NULL;
35470Sstevel@tonic-gate 	kmutex_t	*vphm;
35480Sstevel@tonic-gate 	int		rc;
35490Sstevel@tonic-gate 
35500Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
35510Sstevel@tonic-gate 
35520Sstevel@tonic-gate 	TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN,
35530Sstevel@tonic-gate 		"page_hashin:pp %p vp %p offset %llx",
35540Sstevel@tonic-gate 		pp, vp, offset);
35550Sstevel@tonic-gate 
35560Sstevel@tonic-gate 	VM_STAT_ADD(hashin_count);
35570Sstevel@tonic-gate 
35580Sstevel@tonic-gate 	if (hold != NULL)
35590Sstevel@tonic-gate 		phm = hold;
35600Sstevel@tonic-gate 	else {
35610Sstevel@tonic-gate 		VM_STAT_ADD(hashin_not_held);
35620Sstevel@tonic-gate 		phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset));
35630Sstevel@tonic-gate 		mutex_enter(phm);
35640Sstevel@tonic-gate 	}
35650Sstevel@tonic-gate 
35660Sstevel@tonic-gate 	vphm = page_vnode_mutex(vp);
35670Sstevel@tonic-gate 	mutex_enter(vphm);
35680Sstevel@tonic-gate 	rc = page_do_hashin(pp, vp, offset);
35690Sstevel@tonic-gate 	mutex_exit(vphm);
35700Sstevel@tonic-gate 	if (hold == NULL)
35710Sstevel@tonic-gate 		mutex_exit(phm);
35720Sstevel@tonic-gate 	if (rc == 0)
35730Sstevel@tonic-gate 		VM_STAT_ADD(hashin_already);
35740Sstevel@tonic-gate 	return (rc);
35750Sstevel@tonic-gate }
35760Sstevel@tonic-gate 
35770Sstevel@tonic-gate /*
35780Sstevel@tonic-gate  * Remove page ``pp'' from the hash and vp chains and remove vp association.
35790Sstevel@tonic-gate  * All mutexes must be held
35800Sstevel@tonic-gate  */
35810Sstevel@tonic-gate static void
35820Sstevel@tonic-gate page_do_hashout(page_t *pp)
35830Sstevel@tonic-gate {
35840Sstevel@tonic-gate 	page_t	**hpp;
35850Sstevel@tonic-gate 	page_t	*hp;
35860Sstevel@tonic-gate 	vnode_t	*vp = pp->p_vnode;
35870Sstevel@tonic-gate 
35880Sstevel@tonic-gate 	ASSERT(vp != NULL);
35890Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));
35900Sstevel@tonic-gate 
35910Sstevel@tonic-gate 	/*
35920Sstevel@tonic-gate 	 * First, take pp off of its hash chain.
35930Sstevel@tonic-gate 	 */
35940Sstevel@tonic-gate 	hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)];
35950Sstevel@tonic-gate 
35960Sstevel@tonic-gate 	for (;;) {
35970Sstevel@tonic-gate 		hp = *hpp;
35980Sstevel@tonic-gate 		if (hp == pp)
35990Sstevel@tonic-gate 			break;
36000Sstevel@tonic-gate 		if (hp == NULL) {
36010Sstevel@tonic-gate 			panic("page_do_hashout");
36020Sstevel@tonic-gate 			/*NOTREACHED*/
36030Sstevel@tonic-gate 		}
36040Sstevel@tonic-gate 		hpp = &hp->p_hash;
36050Sstevel@tonic-gate 	}
36060Sstevel@tonic-gate 	*hpp = pp->p_hash;
36070Sstevel@tonic-gate 
36080Sstevel@tonic-gate 	/*
36090Sstevel@tonic-gate 	 * Now remove it from its associated vnode.
36100Sstevel@tonic-gate 	 */
36110Sstevel@tonic-gate 	if (vp->v_pages)
36120Sstevel@tonic-gate 		page_vpsub(&vp->v_pages, pp);
36130Sstevel@tonic-gate 
36140Sstevel@tonic-gate 	pp->p_hash = NULL;
36150Sstevel@tonic-gate 	page_clr_all_props(pp);
36160Sstevel@tonic-gate 	PP_CLRSWAP(pp);
36170Sstevel@tonic-gate 	pp->p_vnode = NULL;
36180Sstevel@tonic-gate 	pp->p_offset = (u_offset_t)-1;
36190Sstevel@tonic-gate }
36200Sstevel@tonic-gate 
36210Sstevel@tonic-gate /*
36220Sstevel@tonic-gate  * Remove page ``pp'' from the hash and vp chains and remove vp association.
36230Sstevel@tonic-gate  *
36240Sstevel@tonic-gate  * When `phm' is non-NULL it contains the address of the mutex protecting the
36250Sstevel@tonic-gate  * hash list pp is on.  It is not dropped.
36260Sstevel@tonic-gate  */
36270Sstevel@tonic-gate void
36280Sstevel@tonic-gate page_hashout(page_t *pp, kmutex_t *phm)
36290Sstevel@tonic-gate {
36300Sstevel@tonic-gate 	vnode_t		*vp;
36310Sstevel@tonic-gate 	ulong_t		index;
36320Sstevel@tonic-gate 	kmutex_t	*nphm;
36330Sstevel@tonic-gate 	kmutex_t	*vphm;
36340Sstevel@tonic-gate 	kmutex_t	*sep;
36350Sstevel@tonic-gate 
36360Sstevel@tonic-gate 	ASSERT(phm != NULL ? MUTEX_HELD(phm) : 1);
36370Sstevel@tonic-gate 	ASSERT(pp->p_vnode != NULL);
36380Sstevel@tonic-gate 	ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr);
36390Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode)));
36400Sstevel@tonic-gate 
36410Sstevel@tonic-gate 	vp = pp->p_vnode;
36420Sstevel@tonic-gate 
36430Sstevel@tonic-gate 	TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT,
36440Sstevel@tonic-gate 		"page_hashout:pp %p vp %p", pp, vp);
36450Sstevel@tonic-gate 
36460Sstevel@tonic-gate 	/* Kernel probe */
36470Sstevel@tonic-gate 	TNF_PROBE_2(page_unmap, "vm pagefault", /* CSTYLED */,
36480Sstevel@tonic-gate 	    tnf_opaque, vnode, vp,
36490Sstevel@tonic-gate 	    tnf_offset, offset, pp->p_offset);
36500Sstevel@tonic-gate 
36510Sstevel@tonic-gate 	/*
36520Sstevel@tonic-gate 	 *
36530Sstevel@tonic-gate 	 */
36540Sstevel@tonic-gate 	VM_STAT_ADD(hashout_count);
36550Sstevel@tonic-gate 	index = PAGE_HASH_FUNC(vp, pp->p_offset);
36560Sstevel@tonic-gate 	if (phm == NULL) {
36570Sstevel@tonic-gate 		VM_STAT_ADD(hashout_not_held);
36580Sstevel@tonic-gate 		nphm = PAGE_HASH_MUTEX(index);
36590Sstevel@tonic-gate 		mutex_enter(nphm);
36600Sstevel@tonic-gate 	}
36610Sstevel@tonic-gate 	ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1);
36620Sstevel@tonic-gate 
36630Sstevel@tonic-gate 
36640Sstevel@tonic-gate 	/*
36650Sstevel@tonic-gate 	 * grab page vnode mutex and remove it...
36660Sstevel@tonic-gate 	 */
36670Sstevel@tonic-gate 	vphm = page_vnode_mutex(vp);
36680Sstevel@tonic-gate 	mutex_enter(vphm);
36690Sstevel@tonic-gate 
36700Sstevel@tonic-gate 	page_do_hashout(pp);
36710Sstevel@tonic-gate 
36720Sstevel@tonic-gate 	mutex_exit(vphm);
36730Sstevel@tonic-gate 	if (phm == NULL)
36740Sstevel@tonic-gate 		mutex_exit(nphm);
36750Sstevel@tonic-gate 
36760Sstevel@tonic-gate 	/*
36770Sstevel@tonic-gate 	 * Wake up processes waiting for this page.  The page's
36780Sstevel@tonic-gate 	 * identity has been changed, and is probably not the
36790Sstevel@tonic-gate 	 * desired page any longer.
36800Sstevel@tonic-gate 	 */
36810Sstevel@tonic-gate 	sep = page_se_mutex(pp);
36820Sstevel@tonic-gate 	mutex_enter(sep);
3683800Sstans 	pp->p_selock &= ~SE_EWANTED;
36840Sstevel@tonic-gate 	if (CV_HAS_WAITERS(&pp->p_cv))
36850Sstevel@tonic-gate 		cv_broadcast(&pp->p_cv);
36860Sstevel@tonic-gate 	mutex_exit(sep);
36870Sstevel@tonic-gate }
36880Sstevel@tonic-gate 
36890Sstevel@tonic-gate /*
36900Sstevel@tonic-gate  * Add the page to the front of a linked list of pages
36910Sstevel@tonic-gate  * using the p_next & p_prev pointers for the list.
36920Sstevel@tonic-gate  * The caller is responsible for protecting the list pointers.
36930Sstevel@tonic-gate  */
36940Sstevel@tonic-gate void
36950Sstevel@tonic-gate page_add(page_t **ppp, page_t *pp)
36960Sstevel@tonic-gate {
36970Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));
36980Sstevel@tonic-gate 
36990Sstevel@tonic-gate 	page_add_common(ppp, pp);
37000Sstevel@tonic-gate }
37010Sstevel@tonic-gate 
37020Sstevel@tonic-gate 
37030Sstevel@tonic-gate 
37040Sstevel@tonic-gate /*
37050Sstevel@tonic-gate  *  Common code for page_add() and mach_page_add()
37060Sstevel@tonic-gate  */
37070Sstevel@tonic-gate void
37080Sstevel@tonic-gate page_add_common(page_t **ppp, page_t *pp)
37090Sstevel@tonic-gate {
37100Sstevel@tonic-gate 	if (*ppp == NULL) {
37110Sstevel@tonic-gate 		pp->p_next = pp->p_prev = pp;
37120Sstevel@tonic-gate 	} else {
37130Sstevel@tonic-gate 		pp->p_next = *ppp;
37140Sstevel@tonic-gate 		pp->p_prev = (*ppp)->p_prev;
37150Sstevel@tonic-gate 		(*ppp)->p_prev = pp;
37160Sstevel@tonic-gate 		pp->p_prev->p_next = pp;
37170Sstevel@tonic-gate 	}
37180Sstevel@tonic-gate 	*ppp = pp;
37190Sstevel@tonic-gate }
37200Sstevel@tonic-gate 
37210Sstevel@tonic-gate 
37220Sstevel@tonic-gate /*
37230Sstevel@tonic-gate  * Remove this page from a linked list of pages
37240Sstevel@tonic-gate  * using the p_next & p_prev pointers for the list.
37250Sstevel@tonic-gate  *
37260Sstevel@tonic-gate  * The caller is responsible for protecting the list pointers.
37270Sstevel@tonic-gate  */
37280Sstevel@tonic-gate void
37290Sstevel@tonic-gate page_sub(page_t **ppp, page_t *pp)
37300Sstevel@tonic-gate {
37310Sstevel@tonic-gate 	ASSERT((PP_ISFREE(pp)) ? 1 :
37320Sstevel@tonic-gate 	    (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));
37330Sstevel@tonic-gate 
37340Sstevel@tonic-gate 	if (*ppp == NULL || pp == NULL) {
37350Sstevel@tonic-gate 		panic("page_sub: bad arg(s): pp %p, *ppp %p",
37360Sstevel@tonic-gate 		    (void *)pp, (void *)(*ppp));
37370Sstevel@tonic-gate 		/*NOTREACHED*/
37380Sstevel@tonic-gate 	}
37390Sstevel@tonic-gate 
37400Sstevel@tonic-gate 	page_sub_common(ppp, pp);
37410Sstevel@tonic-gate }
37420Sstevel@tonic-gate 
37430Sstevel@tonic-gate 
37440Sstevel@tonic-gate /*
37450Sstevel@tonic-gate  *  Common code for page_sub() and mach_page_sub()
37460Sstevel@tonic-gate  */
37470Sstevel@tonic-gate void
37480Sstevel@tonic-gate page_sub_common(page_t **ppp, page_t *pp)
37490Sstevel@tonic-gate {
37500Sstevel@tonic-gate 	if (*ppp == pp)
37510Sstevel@tonic-gate 		*ppp = pp->p_next;		/* go to next page */
37520Sstevel@tonic-gate 
37530Sstevel@tonic-gate 	if (*ppp == pp)
37540Sstevel@tonic-gate 		*ppp = NULL;			/* page list is gone */
37550Sstevel@tonic-gate 	else {
37560Sstevel@tonic-gate 		pp->p_prev->p_next = pp->p_next;
37570Sstevel@tonic-gate 		pp->p_next->p_prev = pp->p_prev;
37580Sstevel@tonic-gate 	}
37590Sstevel@tonic-gate 	pp->p_prev = pp->p_next = pp;		/* make pp a list of one */
37600Sstevel@tonic-gate }
37610Sstevel@tonic-gate 
37620Sstevel@tonic-gate 
37630Sstevel@tonic-gate /*
37640Sstevel@tonic-gate  * Break page list cppp into two lists with npages in the first list.
37650Sstevel@tonic-gate  * The tail is returned in nppp.
37660Sstevel@tonic-gate  */
37670Sstevel@tonic-gate void
37680Sstevel@tonic-gate page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages)
37690Sstevel@tonic-gate {
37700Sstevel@tonic-gate 	page_t *s1pp = *oppp;
37710Sstevel@tonic-gate 	page_t *s2pp;
37720Sstevel@tonic-gate 	page_t *e1pp, *e2pp;
37730Sstevel@tonic-gate 	long n = 0;
37740Sstevel@tonic-gate 
37750Sstevel@tonic-gate 	if (s1pp == NULL) {
37760Sstevel@tonic-gate 		*nppp = NULL;
37770Sstevel@tonic-gate 		return;
37780Sstevel@tonic-gate 	}
37790Sstevel@tonic-gate 	if (npages == 0) {
37800Sstevel@tonic-gate 		*nppp = s1pp;
37810Sstevel@tonic-gate 		*oppp = NULL;
37820Sstevel@tonic-gate 		return;
37830Sstevel@tonic-gate 	}
37840Sstevel@tonic-gate 	for (n = 0, s2pp = *oppp; n < npages; n++) {
37850Sstevel@tonic-gate 		s2pp = s2pp->p_next;
37860Sstevel@tonic-gate 	}
37870Sstevel@tonic-gate 	/* Fix head and tail of new lists */
37880Sstevel@tonic-gate 	e1pp = s2pp->p_prev;
37890Sstevel@tonic-gate 	e2pp = s1pp->p_prev;
37900Sstevel@tonic-gate 	s1pp->p_prev = e1pp;
37910Sstevel@tonic-gate 	e1pp->p_next = s1pp;
37920Sstevel@tonic-gate 	s2pp->p_prev = e2pp;
37930Sstevel@tonic-gate 	e2pp->p_next = s2pp;
37940Sstevel@tonic-gate 
37950Sstevel@tonic-gate 	/* second list empty */
37960Sstevel@tonic-gate 	if (s2pp == s1pp) {
37970Sstevel@tonic-gate 		*oppp = s1pp;
37980Sstevel@tonic-gate 		*nppp = NULL;
37990Sstevel@tonic-gate 	} else {
38000Sstevel@tonic-gate 		*oppp = s1pp;
38010Sstevel@tonic-gate 		*nppp = s2pp;
38020Sstevel@tonic-gate 	}
38030Sstevel@tonic-gate }
38040Sstevel@tonic-gate 
38050Sstevel@tonic-gate /*
38060Sstevel@tonic-gate  * Concatenate page list nppp onto the end of list ppp.
38070Sstevel@tonic-gate  */
38080Sstevel@tonic-gate void
38090Sstevel@tonic-gate page_list_concat(page_t **ppp, page_t **nppp)
38100Sstevel@tonic-gate {
38110Sstevel@tonic-gate 	page_t *s1pp, *s2pp, *e1pp, *e2pp;
38120Sstevel@tonic-gate 
38130Sstevel@tonic-gate 	if (*nppp == NULL) {
38140Sstevel@tonic-gate 		return;
38150Sstevel@tonic-gate 	}
38160Sstevel@tonic-gate 	if (*ppp == NULL) {
38170Sstevel@tonic-gate 		*ppp = *nppp;
38180Sstevel@tonic-gate 		return;
38190Sstevel@tonic-gate 	}
38200Sstevel@tonic-gate 	s1pp = *ppp;
38210Sstevel@tonic-gate 	e1pp =  s1pp->p_prev;
38220Sstevel@tonic-gate 	s2pp = *nppp;
38230Sstevel@tonic-gate 	e2pp = s2pp->p_prev;
38240Sstevel@tonic-gate 	s1pp->p_prev = e2pp;
38250Sstevel@tonic-gate 	e2pp->p_next = s1pp;
38260Sstevel@tonic-gate 	e1pp->p_next = s2pp;
38270Sstevel@tonic-gate 	s2pp->p_prev = e1pp;
38280Sstevel@tonic-gate }
38290Sstevel@tonic-gate 
38300Sstevel@tonic-gate /*
38310Sstevel@tonic-gate  * return the next page in the page list
38320Sstevel@tonic-gate  */
38330Sstevel@tonic-gate page_t *
38340Sstevel@tonic-gate page_list_next(page_t *pp)
38350Sstevel@tonic-gate {
38360Sstevel@tonic-gate 	return (pp->p_next);
38370Sstevel@tonic-gate }
38380Sstevel@tonic-gate 
38390Sstevel@tonic-gate 
38400Sstevel@tonic-gate /*
38410Sstevel@tonic-gate  * Add the page to the front of the linked list of pages
38420Sstevel@tonic-gate  * using p_vpnext/p_vpprev pointers for the list.
38430Sstevel@tonic-gate  *
38440Sstevel@tonic-gate  * The caller is responsible for protecting the lists.
38450Sstevel@tonic-gate  */
38460Sstevel@tonic-gate void
38470Sstevel@tonic-gate page_vpadd(page_t **ppp, page_t *pp)
38480Sstevel@tonic-gate {
38490Sstevel@tonic-gate 	if (*ppp == NULL) {
38500Sstevel@tonic-gate 		pp->p_vpnext = pp->p_vpprev = pp;
38510Sstevel@tonic-gate 	} else {
38520Sstevel@tonic-gate 		pp->p_vpnext = *ppp;
38530Sstevel@tonic-gate 		pp->p_vpprev = (*ppp)->p_vpprev;
38540Sstevel@tonic-gate 		(*ppp)->p_vpprev = pp;
38550Sstevel@tonic-gate 		pp->p_vpprev->p_vpnext = pp;
38560Sstevel@tonic-gate 	}
38570Sstevel@tonic-gate 	*ppp = pp;
38580Sstevel@tonic-gate }
38590Sstevel@tonic-gate 
38600Sstevel@tonic-gate /*
38610Sstevel@tonic-gate  * Remove this page from the linked list of pages
38620Sstevel@tonic-gate  * using p_vpnext/p_vpprev pointers for the list.
38630Sstevel@tonic-gate  *
38640Sstevel@tonic-gate  * The caller is responsible for protecting the lists.
38650Sstevel@tonic-gate  */
38660Sstevel@tonic-gate void
38670Sstevel@tonic-gate page_vpsub(page_t **ppp, page_t *pp)
38680Sstevel@tonic-gate {
38690Sstevel@tonic-gate 	if (*ppp == NULL || pp == NULL) {
38700Sstevel@tonic-gate 		panic("page_vpsub: bad arg(s): pp %p, *ppp %p",
38710Sstevel@tonic-gate 		    (void *)pp, (void *)(*ppp));
38720Sstevel@tonic-gate 		/*NOTREACHED*/
38730Sstevel@tonic-gate 	}
38740Sstevel@tonic-gate 
38750Sstevel@tonic-gate 	if (*ppp == pp)
38760Sstevel@tonic-gate 		*ppp = pp->p_vpnext;		/* go to next page */
38770Sstevel@tonic-gate 
38780Sstevel@tonic-gate 	if (*ppp == pp)
38790Sstevel@tonic-gate 		*ppp = NULL;			/* page list is gone */
38800Sstevel@tonic-gate 	else {
38810Sstevel@tonic-gate 		pp->p_vpprev->p_vpnext = pp->p_vpnext;
38820Sstevel@tonic-gate 		pp->p_vpnext->p_vpprev = pp->p_vpprev;
38830Sstevel@tonic-gate 	}
38840Sstevel@tonic-gate 	pp->p_vpprev = pp->p_vpnext = pp;	/* make pp a list of one */
38850Sstevel@tonic-gate }
38860Sstevel@tonic-gate 
38870Sstevel@tonic-gate /*
38880Sstevel@tonic-gate  * Lock a physical page into memory "long term".  Used to support "lock
38890Sstevel@tonic-gate  * in memory" functions.  Accepts the page to be locked, and a cow variable
38900Sstevel@tonic-gate  * to indicate whether a the lock will travel to the new page during
38910Sstevel@tonic-gate  * a potential copy-on-write.
38920Sstevel@tonic-gate  */
38930Sstevel@tonic-gate int
38940Sstevel@tonic-gate page_pp_lock(
38950Sstevel@tonic-gate 	page_t *pp,			/* page to be locked */
38960Sstevel@tonic-gate 	int cow,			/* cow lock */
38970Sstevel@tonic-gate 	int kernel)			/* must succeed -- ignore checking */
38980Sstevel@tonic-gate {
38990Sstevel@tonic-gate 	int r = 0;			/* result -- assume failure */
39000Sstevel@tonic-gate 
39010Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(pp));
39020Sstevel@tonic-gate 
39030Sstevel@tonic-gate 	page_struct_lock(pp);
39040Sstevel@tonic-gate 	/*
39050Sstevel@tonic-gate 	 * Acquire the "freemem_lock" for availrmem.
39060Sstevel@tonic-gate 	 */
39070Sstevel@tonic-gate 	if (cow) {
39080Sstevel@tonic-gate 		mutex_enter(&freemem_lock);
39090Sstevel@tonic-gate 		if ((availrmem > pages_pp_maximum) &&
39100Sstevel@tonic-gate 		    (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
39110Sstevel@tonic-gate 			availrmem--;
39120Sstevel@tonic-gate 			pages_locked++;
39130Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
39140Sstevel@tonic-gate 			r = 1;
39150Sstevel@tonic-gate 			if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
39160Sstevel@tonic-gate 				cmn_err(CE_WARN,
39170Sstevel@tonic-gate 				    "COW lock limit reached on pfn 0x%lx",
39180Sstevel@tonic-gate 				    page_pptonum(pp));
39190Sstevel@tonic-gate 			}
39200Sstevel@tonic-gate 		} else
39210Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
39220Sstevel@tonic-gate 	} else {
39230Sstevel@tonic-gate 		if (pp->p_lckcnt) {
39240Sstevel@tonic-gate 			if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
39250Sstevel@tonic-gate 				r = 1;
39260Sstevel@tonic-gate 				if (++pp->p_lckcnt ==
39270Sstevel@tonic-gate 				    (ushort_t)PAGE_LOCK_MAXIMUM) {
39280Sstevel@tonic-gate 					cmn_err(CE_WARN, "Page lock limit "
39290Sstevel@tonic-gate 					    "reached on pfn 0x%lx",
39300Sstevel@tonic-gate 					    page_pptonum(pp));
39310Sstevel@tonic-gate 				}
39320Sstevel@tonic-gate 			}
39330Sstevel@tonic-gate 		} else {
39340Sstevel@tonic-gate 			if (kernel) {
39350Sstevel@tonic-gate 				/* availrmem accounting done by caller */
39360Sstevel@tonic-gate 				++pp->p_lckcnt;
39370Sstevel@tonic-gate 				r = 1;
39380Sstevel@tonic-gate 			} else {
39390Sstevel@tonic-gate 				mutex_enter(&freemem_lock);
39400Sstevel@tonic-gate 				if (availrmem > pages_pp_maximum) {
39410Sstevel@tonic-gate 					availrmem--;
39420Sstevel@tonic-gate 					pages_locked++;
39430Sstevel@tonic-gate 					++pp->p_lckcnt;
39440Sstevel@tonic-gate 					r = 1;
39450Sstevel@tonic-gate 				}
39460Sstevel@tonic-gate 				mutex_exit(&freemem_lock);
39470Sstevel@tonic-gate 			}
39480Sstevel@tonic-gate 		}
39490Sstevel@tonic-gate 	}
39500Sstevel@tonic-gate 	page_struct_unlock(pp);
39510Sstevel@tonic-gate 	return (r);
39520Sstevel@tonic-gate }
39530Sstevel@tonic-gate 
39540Sstevel@tonic-gate /*
39550Sstevel@tonic-gate  * Decommit a lock on a physical page frame.  Account for cow locks if
39560Sstevel@tonic-gate  * appropriate.
39570Sstevel@tonic-gate  */
39580Sstevel@tonic-gate void
39590Sstevel@tonic-gate page_pp_unlock(
39600Sstevel@tonic-gate 	page_t *pp,			/* page to be unlocked */
39610Sstevel@tonic-gate 	int cow,			/* expect cow lock */
39620Sstevel@tonic-gate 	int kernel)			/* this was a kernel lock */
39630Sstevel@tonic-gate {
39640Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(pp));
39650Sstevel@tonic-gate 
39660Sstevel@tonic-gate 	page_struct_lock(pp);
39670Sstevel@tonic-gate 	/*
39680Sstevel@tonic-gate 	 * Acquire the "freemem_lock" for availrmem.
39690Sstevel@tonic-gate 	 * If cowcnt or lcknt is already 0 do nothing; i.e., we
39700Sstevel@tonic-gate 	 * could be called to unlock even if nothing is locked. This could
39710Sstevel@tonic-gate 	 * happen if locked file pages were truncated (removing the lock)
39720Sstevel@tonic-gate 	 * and the file was grown again and new pages faulted in; the new
39730Sstevel@tonic-gate 	 * pages are unlocked but the segment still thinks they're locked.
39740Sstevel@tonic-gate 	 */
39750Sstevel@tonic-gate 	if (cow) {
39760Sstevel@tonic-gate 		if (pp->p_cowcnt) {
39770Sstevel@tonic-gate 			mutex_enter(&freemem_lock);
39780Sstevel@tonic-gate 			pp->p_cowcnt--;
39790Sstevel@tonic-gate 			availrmem++;
39800Sstevel@tonic-gate 			pages_locked--;
39810Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
39820Sstevel@tonic-gate 		}
39830Sstevel@tonic-gate 	} else {
39840Sstevel@tonic-gate 		if (pp->p_lckcnt && --pp->p_lckcnt == 0) {
39850Sstevel@tonic-gate 			if (!kernel) {
39860Sstevel@tonic-gate 				mutex_enter(&freemem_lock);
39870Sstevel@tonic-gate 				availrmem++;
39880Sstevel@tonic-gate 				pages_locked--;
39890Sstevel@tonic-gate 				mutex_exit(&freemem_lock);
39900Sstevel@tonic-gate 			}
39910Sstevel@tonic-gate 		}
39920Sstevel@tonic-gate 	}
39930Sstevel@tonic-gate 	page_struct_unlock(pp);
39940Sstevel@tonic-gate }
39950Sstevel@tonic-gate 
39960Sstevel@tonic-gate /*
39970Sstevel@tonic-gate  * This routine reserves availrmem for npages;
39980Sstevel@tonic-gate  * 	flags: KM_NOSLEEP or KM_SLEEP
39990Sstevel@tonic-gate  * 	returns 1 on success or 0 on failure
40000Sstevel@tonic-gate  */
40010Sstevel@tonic-gate int
40020Sstevel@tonic-gate page_resv(pgcnt_t npages, uint_t flags)
40030Sstevel@tonic-gate {
40040Sstevel@tonic-gate 	mutex_enter(&freemem_lock);
40050Sstevel@tonic-gate 	while (availrmem < tune.t_minarmem + npages) {
40060Sstevel@tonic-gate 		if (flags & KM_NOSLEEP) {
40070Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
40080Sstevel@tonic-gate 			return (0);
40090Sstevel@tonic-gate 		}
40100Sstevel@tonic-gate 		mutex_exit(&freemem_lock);
40110Sstevel@tonic-gate 		page_needfree(npages);
40120Sstevel@tonic-gate 		kmem_reap();
40130Sstevel@tonic-gate 		delay(hz >> 2);
40140Sstevel@tonic-gate 		page_needfree(-(spgcnt_t)npages);
40150Sstevel@tonic-gate 		mutex_enter(&freemem_lock);
40160Sstevel@tonic-gate 	}
40170Sstevel@tonic-gate 	availrmem -= npages;
40180Sstevel@tonic-gate 	mutex_exit(&freemem_lock);
40190Sstevel@tonic-gate 	return (1);
40200Sstevel@tonic-gate }
40210Sstevel@tonic-gate 
40220Sstevel@tonic-gate /*
40230Sstevel@tonic-gate  * This routine unreserves availrmem for npages;
40240Sstevel@tonic-gate  */
40250Sstevel@tonic-gate void
40260Sstevel@tonic-gate page_unresv(pgcnt_t npages)
40270Sstevel@tonic-gate {
40280Sstevel@tonic-gate 	mutex_enter(&freemem_lock);
40290Sstevel@tonic-gate 	availrmem += npages;
40300Sstevel@tonic-gate 	mutex_exit(&freemem_lock);
40310Sstevel@tonic-gate }
40320Sstevel@tonic-gate 
40330Sstevel@tonic-gate /*
40340Sstevel@tonic-gate  * See Statement at the beginning of segvn_lockop() regarding
40350Sstevel@tonic-gate  * the way we handle cowcnts and lckcnts.
40360Sstevel@tonic-gate  *
40370Sstevel@tonic-gate  * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage
40380Sstevel@tonic-gate  * that breaks COW has PROT_WRITE.
40390Sstevel@tonic-gate  *
40400Sstevel@tonic-gate  * Note that, we may also break COW in case we are softlocking
40410Sstevel@tonic-gate  * on read access during physio;
40420Sstevel@tonic-gate  * in this softlock case, the vpage may not have PROT_WRITE.
40430Sstevel@tonic-gate  * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp'
40440Sstevel@tonic-gate  * if the vpage doesn't have PROT_WRITE.
40450Sstevel@tonic-gate  *
40460Sstevel@tonic-gate  * This routine is never called if we are stealing a page
40470Sstevel@tonic-gate  * in anon_private.
40480Sstevel@tonic-gate  *
40490Sstevel@tonic-gate  * The caller subtracted from availrmem for read only mapping.
40500Sstevel@tonic-gate  * if lckcnt is 1 increment availrmem.
40510Sstevel@tonic-gate  */
40520Sstevel@tonic-gate void
40530Sstevel@tonic-gate page_pp_useclaim(
40540Sstevel@tonic-gate 	page_t *opp,		/* original page frame losing lock */
40550Sstevel@tonic-gate 	page_t *npp,		/* new page frame gaining lock */
40560Sstevel@tonic-gate 	uint_t	write_perm) 	/* set if vpage has PROT_WRITE */
40570Sstevel@tonic-gate {
40580Sstevel@tonic-gate 	int payback = 0;
40590Sstevel@tonic-gate 
40600Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(opp));
40610Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(npp));
40620Sstevel@tonic-gate 
40630Sstevel@tonic-gate 	page_struct_lock(opp);
40640Sstevel@tonic-gate 
40650Sstevel@tonic-gate 	ASSERT(npp->p_cowcnt == 0);
40660Sstevel@tonic-gate 	ASSERT(npp->p_lckcnt == 0);
40670Sstevel@tonic-gate 
40680Sstevel@tonic-gate 	/* Don't use claim if nothing is locked (see page_pp_unlock above) */
40690Sstevel@tonic-gate 	if ((write_perm && opp->p_cowcnt != 0) ||
40700Sstevel@tonic-gate 	    (!write_perm && opp->p_lckcnt != 0)) {
40710Sstevel@tonic-gate 
40720Sstevel@tonic-gate 		if (write_perm) {
40730Sstevel@tonic-gate 			npp->p_cowcnt++;
40740Sstevel@tonic-gate 			ASSERT(opp->p_cowcnt != 0);
40750Sstevel@tonic-gate 			opp->p_cowcnt--;
40760Sstevel@tonic-gate 		} else {
40770Sstevel@tonic-gate 
40780Sstevel@tonic-gate 			ASSERT(opp->p_lckcnt != 0);
40790Sstevel@tonic-gate 
40800Sstevel@tonic-gate 			/*
40810Sstevel@tonic-gate 			 * We didn't need availrmem decremented if p_lckcnt on
40820Sstevel@tonic-gate 			 * original page is 1. Here, we are unlocking
40830Sstevel@tonic-gate 			 * read-only copy belonging to original page and
40840Sstevel@tonic-gate 			 * are locking a copy belonging to new page.
40850Sstevel@tonic-gate 			 */
40860Sstevel@tonic-gate 			if (opp->p_lckcnt == 1)
40870Sstevel@tonic-gate 				payback = 1;
40880Sstevel@tonic-gate 
40890Sstevel@tonic-gate 			npp->p_lckcnt++;
40900Sstevel@tonic-gate 			opp->p_lckcnt--;
40910Sstevel@tonic-gate 		}
40920Sstevel@tonic-gate 	}
40930Sstevel@tonic-gate 	if (payback) {
40940Sstevel@tonic-gate 		mutex_enter(&freemem_lock);
40950Sstevel@tonic-gate 		availrmem++;
40960Sstevel@tonic-gate 		pages_useclaim--;
40970Sstevel@tonic-gate 		mutex_exit(&freemem_lock);
40980Sstevel@tonic-gate 	}
40990Sstevel@tonic-gate 	page_struct_unlock(opp);
41000Sstevel@tonic-gate }
41010Sstevel@tonic-gate 
41020Sstevel@tonic-gate /*
41030Sstevel@tonic-gate  * Simple claim adjust functions -- used to support changes in
41040Sstevel@tonic-gate  * claims due to changes in access permissions.  Used by segvn_setprot().
41050Sstevel@tonic-gate  */
41060Sstevel@tonic-gate int
41070Sstevel@tonic-gate page_addclaim(page_t *pp)
41080Sstevel@tonic-gate {
41090Sstevel@tonic-gate 	int r = 0;			/* result */
41100Sstevel@tonic-gate 
41110Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(pp));
41120Sstevel@tonic-gate 
41130Sstevel@tonic-gate 	page_struct_lock(pp);
41140Sstevel@tonic-gate 	ASSERT(pp->p_lckcnt != 0);
41150Sstevel@tonic-gate 
41160Sstevel@tonic-gate 	if (pp->p_lckcnt == 1) {
41170Sstevel@tonic-gate 		if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
41180Sstevel@tonic-gate 			--pp->p_lckcnt;
41190Sstevel@tonic-gate 			r = 1;
41200Sstevel@tonic-gate 			if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
41210Sstevel@tonic-gate 				cmn_err(CE_WARN,
41220Sstevel@tonic-gate 				    "COW lock limit reached on pfn 0x%lx",
41230Sstevel@tonic-gate 				    page_pptonum(pp));
41240Sstevel@tonic-gate 			}
41250Sstevel@tonic-gate 		}
41260Sstevel@tonic-gate 	} else {
41270Sstevel@tonic-gate 		mutex_enter(&freemem_lock);
41280Sstevel@tonic-gate 		if ((availrmem > pages_pp_maximum) &&
41290Sstevel@tonic-gate 		    (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
41300Sstevel@tonic-gate 			--availrmem;
41310Sstevel@tonic-gate 			++pages_claimed;
41320Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
41330Sstevel@tonic-gate 			--pp->p_lckcnt;
41340Sstevel@tonic-gate 			r = 1;
41350Sstevel@tonic-gate 			if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
41360Sstevel@tonic-gate 				cmn_err(CE_WARN,
41370Sstevel@tonic-gate 				    "COW lock limit reached on pfn 0x%lx",
41380Sstevel@tonic-gate 				    page_pptonum(pp));
41390Sstevel@tonic-gate 			}
41400Sstevel@tonic-gate 		} else
41410Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
41420Sstevel@tonic-gate 	}
41430Sstevel@tonic-gate 	page_struct_unlock(pp);
41440Sstevel@tonic-gate 	return (r);
41450Sstevel@tonic-gate }
41460Sstevel@tonic-gate 
41470Sstevel@tonic-gate int
41480Sstevel@tonic-gate page_subclaim(page_t *pp)
41490Sstevel@tonic-gate {
41500Sstevel@tonic-gate 	int r = 0;
41510Sstevel@tonic-gate 
41520Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(pp));
41530Sstevel@tonic-gate 
41540Sstevel@tonic-gate 	page_struct_lock(pp);
41550Sstevel@tonic-gate 	ASSERT(pp->p_cowcnt != 0);
41560Sstevel@tonic-gate 
41570Sstevel@tonic-gate 	if (pp->p_lckcnt) {
41580Sstevel@tonic-gate 		if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
41590Sstevel@tonic-gate 			r = 1;
41600Sstevel@tonic-gate 			/*
41610Sstevel@tonic-gate 			 * for availrmem
41620Sstevel@tonic-gate 			 */
41630Sstevel@tonic-gate 			mutex_enter(&freemem_lock);
41640Sstevel@tonic-gate 			availrmem++;
41650Sstevel@tonic-gate 			pages_claimed--;
41660Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
41670Sstevel@tonic-gate 
41680Sstevel@tonic-gate 			pp->p_cowcnt--;
41690Sstevel@tonic-gate 
41700Sstevel@tonic-gate 			if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
41710Sstevel@tonic-gate 				cmn_err(CE_WARN,
41720Sstevel@tonic-gate 				    "Page lock limit reached on pfn 0x%lx",
41730Sstevel@tonic-gate 				    page_pptonum(pp));
41740Sstevel@tonic-gate 			}
41750Sstevel@tonic-gate 		}
41760Sstevel@tonic-gate 	} else {
41770Sstevel@tonic-gate 		r = 1;
41780Sstevel@tonic-gate 		pp->p_cowcnt--;
41790Sstevel@tonic-gate 		pp->p_lckcnt++;
41800Sstevel@tonic-gate 	}
41810Sstevel@tonic-gate 	page_struct_unlock(pp);
41820Sstevel@tonic-gate 	return (r);
41830Sstevel@tonic-gate }
41840Sstevel@tonic-gate 
41850Sstevel@tonic-gate int
41860Sstevel@tonic-gate page_addclaim_pages(page_t  **ppa)
41870Sstevel@tonic-gate {
41880Sstevel@tonic-gate 
41890Sstevel@tonic-gate 	pgcnt_t	lckpgs = 0, pg_idx;
41900Sstevel@tonic-gate 
41910Sstevel@tonic-gate 	VM_STAT_ADD(pagecnt.pc_addclaim_pages);
41920Sstevel@tonic-gate 
41930Sstevel@tonic-gate 	mutex_enter(&page_llock);
41940Sstevel@tonic-gate 	for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
41950Sstevel@tonic-gate 
41960Sstevel@tonic-gate 		ASSERT(PAGE_LOCKED(ppa[pg_idx]));
41970Sstevel@tonic-gate 		ASSERT(ppa[pg_idx]->p_lckcnt != 0);
41980Sstevel@tonic-gate 		if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
41990Sstevel@tonic-gate 			mutex_exit(&page_llock);
42000Sstevel@tonic-gate 			return (0);
42010Sstevel@tonic-gate 		}
42020Sstevel@tonic-gate 		if (ppa[pg_idx]->p_lckcnt > 1)
42030Sstevel@tonic-gate 			lckpgs++;
42040Sstevel@tonic-gate 	}
42050Sstevel@tonic-gate 
42060Sstevel@tonic-gate 	if (lckpgs != 0) {
42070Sstevel@tonic-gate 		mutex_enter(&freemem_lock);
42080Sstevel@tonic-gate 		if (availrmem >= pages_pp_maximum + lckpgs) {
42090Sstevel@tonic-gate 			availrmem -= lckpgs;
42100Sstevel@tonic-gate 			pages_claimed += lckpgs;
42110Sstevel@tonic-gate 		} else {
42120Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
42130Sstevel@tonic-gate 			mutex_exit(&page_llock);
42140Sstevel@tonic-gate 			return (0);
42150Sstevel@tonic-gate 		}
42160Sstevel@tonic-gate 		mutex_exit(&freemem_lock);
42170Sstevel@tonic-gate 	}
42180Sstevel@tonic-gate 
42190Sstevel@tonic-gate 	for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
42200Sstevel@tonic-gate 		ppa[pg_idx]->p_lckcnt--;
42210Sstevel@tonic-gate 		ppa[pg_idx]->p_cowcnt++;
42220Sstevel@tonic-gate 	}
42230Sstevel@tonic-gate 	mutex_exit(&page_llock);
42240Sstevel@tonic-gate 	return (1);
42250Sstevel@tonic-gate }
42260Sstevel@tonic-gate 
42270Sstevel@tonic-gate int
42280Sstevel@tonic-gate page_subclaim_pages(page_t  **ppa)
42290Sstevel@tonic-gate {
42300Sstevel@tonic-gate 	pgcnt_t	ulckpgs = 0, pg_idx;
42310Sstevel@tonic-gate 
42320Sstevel@tonic-gate 	VM_STAT_ADD(pagecnt.pc_subclaim_pages);
42330Sstevel@tonic-gate 
42340Sstevel@tonic-gate 	mutex_enter(&page_llock);
42350Sstevel@tonic-gate 	for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
42360Sstevel@tonic-gate 
42370Sstevel@tonic-gate 		ASSERT(PAGE_LOCKED(ppa[pg_idx]));
42380Sstevel@tonic-gate 		ASSERT(ppa[pg_idx]->p_cowcnt != 0);
42390Sstevel@tonic-gate 		if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
42400Sstevel@tonic-gate 			mutex_exit(&page_llock);
42410Sstevel@tonic-gate 			return (0);
42420Sstevel@tonic-gate 		}
42430Sstevel@tonic-gate 		if (ppa[pg_idx]->p_lckcnt != 0)
42440Sstevel@tonic-gate 			ulckpgs++;
42450Sstevel@tonic-gate 	}
42460Sstevel@tonic-gate 
42470Sstevel@tonic-gate 	if (ulckpgs != 0) {
42480Sstevel@tonic-gate 		mutex_enter(&freemem_lock);
42490Sstevel@tonic-gate 		availrmem += ulckpgs;
42500Sstevel@tonic-gate 		pages_claimed -= ulckpgs;
42510Sstevel@tonic-gate 		mutex_exit(&freemem_lock);
42520Sstevel@tonic-gate 	}
42530Sstevel@tonic-gate 
42540Sstevel@tonic-gate 	for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
42550Sstevel@tonic-gate 		ppa[pg_idx]->p_cowcnt--;
42560Sstevel@tonic-gate 		ppa[pg_idx]->p_lckcnt++;
42570Sstevel@tonic-gate 
42580Sstevel@tonic-gate 	}
42590Sstevel@tonic-gate 	mutex_exit(&page_llock);
42600Sstevel@tonic-gate 	return (1);
42610Sstevel@tonic-gate }
42620Sstevel@tonic-gate 
42630Sstevel@tonic-gate page_t *
42640Sstevel@tonic-gate page_numtopp(pfn_t pfnum, se_t se)
42650Sstevel@tonic-gate {
42660Sstevel@tonic-gate 	page_t *pp;
42670Sstevel@tonic-gate 
42680Sstevel@tonic-gate retry:
42690Sstevel@tonic-gate 	pp = page_numtopp_nolock(pfnum);
42700Sstevel@tonic-gate 	if (pp == NULL) {
42710Sstevel@tonic-gate 		return ((page_t *)NULL);
42720Sstevel@tonic-gate 	}
42730Sstevel@tonic-gate 
42740Sstevel@tonic-gate 	/*
42750Sstevel@tonic-gate 	 * Acquire the appropriate lock on the page.
42760Sstevel@tonic-gate 	 */
42770Sstevel@tonic-gate 	while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) {
42780Sstevel@tonic-gate 		if (page_pptonum(pp) != pfnum)
42790Sstevel@tonic-gate 			goto retry;
42800Sstevel@tonic-gate 		continue;
42810Sstevel@tonic-gate 	}
42820Sstevel@tonic-gate 
42830Sstevel@tonic-gate 	if (page_pptonum(pp) != pfnum) {
42840Sstevel@tonic-gate 		page_unlock(pp);
42850Sstevel@tonic-gate 		goto retry;
42860Sstevel@tonic-gate 	}
42870Sstevel@tonic-gate 
42880Sstevel@tonic-gate 	return (pp);
42890Sstevel@tonic-gate }
42900Sstevel@tonic-gate 
42910Sstevel@tonic-gate page_t *
42920Sstevel@tonic-gate page_numtopp_noreclaim(pfn_t pfnum, se_t se)
42930Sstevel@tonic-gate {
42940Sstevel@tonic-gate 	page_t *pp;
42950Sstevel@tonic-gate 
42960Sstevel@tonic-gate retry:
42970Sstevel@tonic-gate 	pp = page_numtopp_nolock(pfnum);
42980Sstevel@tonic-gate 	if (pp == NULL) {
42990Sstevel@tonic-gate 		return ((page_t *)NULL);
43000Sstevel@tonic-gate 	}
43010Sstevel@tonic-gate 
43020Sstevel@tonic-gate 	/*
43030Sstevel@tonic-gate 	 * Acquire the appropriate lock on the page.
43040Sstevel@tonic-gate 	 */
43050Sstevel@tonic-gate 	while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) {
43060Sstevel@tonic-gate 		if (page_pptonum(pp) != pfnum)
43070Sstevel@tonic-gate 			goto retry;
43080Sstevel@tonic-gate 		continue;
43090Sstevel@tonic-gate 	}
43100Sstevel@tonic-gate 
43110Sstevel@tonic-gate 	if (page_pptonum(pp) != pfnum) {
43120Sstevel@tonic-gate 		page_unlock(pp);
43130Sstevel@tonic-gate 		goto retry;
43140Sstevel@tonic-gate 	}
43150Sstevel@tonic-gate 
43160Sstevel@tonic-gate 	return (pp);
43170Sstevel@tonic-gate }
43180Sstevel@tonic-gate 
43190Sstevel@tonic-gate /*
43200Sstevel@tonic-gate  * This routine is like page_numtopp, but will only return page structs
43210Sstevel@tonic-gate  * for pages which are ok for loading into hardware using the page struct.
43220Sstevel@tonic-gate  */
43230Sstevel@tonic-gate page_t *
43240Sstevel@tonic-gate page_numtopp_nowait(pfn_t pfnum, se_t se)
43250Sstevel@tonic-gate {
43260Sstevel@tonic-gate 	page_t *pp;
43270Sstevel@tonic-gate 
43280Sstevel@tonic-gate retry:
43290Sstevel@tonic-gate 	pp = page_numtopp_nolock(pfnum);
43300Sstevel@tonic-gate 	if (pp == NULL) {
43310Sstevel@tonic-gate 		return ((page_t *)NULL);
43320Sstevel@tonic-gate 	}
43330Sstevel@tonic-gate 
43340Sstevel@tonic-gate 	/*
43350Sstevel@tonic-gate 	 * Try to acquire the appropriate lock on the page.
43360Sstevel@tonic-gate 	 */
43370Sstevel@tonic-gate 	if (PP_ISFREE(pp))
43380Sstevel@tonic-gate 		pp = NULL;
43390Sstevel@tonic-gate 	else {
43400Sstevel@tonic-gate 		if (!page_trylock(pp, se))
43410Sstevel@tonic-gate 			pp = NULL;
43420Sstevel@tonic-gate 		else {
43430Sstevel@tonic-gate 			if (page_pptonum(pp) != pfnum) {
43440Sstevel@tonic-gate 				page_unlock(pp);
43450Sstevel@tonic-gate 				goto retry;
43460Sstevel@tonic-gate 			}
43470Sstevel@tonic-gate 			if (PP_ISFREE(pp)) {
43480Sstevel@tonic-gate 				page_unlock(pp);
43490Sstevel@tonic-gate 				pp = NULL;
43500Sstevel@tonic-gate 			}
43510Sstevel@tonic-gate 		}
43520Sstevel@tonic-gate 	}
43530Sstevel@tonic-gate 	return (pp);
43540Sstevel@tonic-gate }
43550Sstevel@tonic-gate 
43560Sstevel@tonic-gate /*
43570Sstevel@tonic-gate  * Returns a count of dirty pages that are in the process
43580Sstevel@tonic-gate  * of being written out.  If 'cleanit' is set, try to push the page.
43590Sstevel@tonic-gate  */
43600Sstevel@tonic-gate pgcnt_t
43610Sstevel@tonic-gate page_busy(int cleanit)
43620Sstevel@tonic-gate {
43630Sstevel@tonic-gate 	page_t *page0 = page_first();
43640Sstevel@tonic-gate 	page_t *pp = page0;
43650Sstevel@tonic-gate 	pgcnt_t nppbusy = 0;
43660Sstevel@tonic-gate 	u_offset_t off;
43670Sstevel@tonic-gate 
43680Sstevel@tonic-gate 	do {
43690Sstevel@tonic-gate 		vnode_t *vp = pp->p_vnode;
43700Sstevel@tonic-gate 
43710Sstevel@tonic-gate 		/*
43720Sstevel@tonic-gate 		 * A page is a candidate for syncing if it is:
43730Sstevel@tonic-gate 		 *
43740Sstevel@tonic-gate 		 * (a)	On neither the freelist nor the cachelist
43750Sstevel@tonic-gate 		 * (b)	Hashed onto a vnode
43760Sstevel@tonic-gate 		 * (c)	Not a kernel page
43770Sstevel@tonic-gate 		 * (d)	Dirty
43780Sstevel@tonic-gate 		 * (e)	Not part of a swapfile
43790Sstevel@tonic-gate 		 * (f)	a page which belongs to a real vnode; eg has a non-null
43800Sstevel@tonic-gate 		 *	v_vfsp pointer.
43810Sstevel@tonic-gate 		 * (g)	Backed by a filesystem which doesn't have a
43820Sstevel@tonic-gate 		 *	stubbed-out sync operation
43830Sstevel@tonic-gate 		 */
43840Sstevel@tonic-gate 		if (!PP_ISFREE(pp) && vp != NULL && vp != &kvp &&
43850Sstevel@tonic-gate 		    hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL &&
43860Sstevel@tonic-gate 		    vfs_can_sync(vp->v_vfsp)) {
43870Sstevel@tonic-gate 			nppbusy++;
43880Sstevel@tonic-gate 			vfs_syncprogress();
43890Sstevel@tonic-gate 
43900Sstevel@tonic-gate 			if (!cleanit)
43910Sstevel@tonic-gate 				continue;
43920Sstevel@tonic-gate 			if (!page_trylock(pp, SE_EXCL))
43930Sstevel@tonic-gate 				continue;
43940Sstevel@tonic-gate 
43950Sstevel@tonic-gate 			if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) ||
43960Sstevel@tonic-gate 			    pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
43970Sstevel@tonic-gate 			    !(hat_pagesync(pp,
43980Sstevel@tonic-gate 			    HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) {
43990Sstevel@tonic-gate 				page_unlock(pp);
44000Sstevel@tonic-gate 				continue;
44010Sstevel@tonic-gate 			}
44020Sstevel@tonic-gate 			off = pp->p_offset;
44030Sstevel@tonic-gate 			VN_HOLD(vp);
44040Sstevel@tonic-gate 			page_unlock(pp);
44050Sstevel@tonic-gate 			(void) VOP_PUTPAGE(vp, off, PAGESIZE,
44060Sstevel@tonic-gate 			    B_ASYNC | B_FREE, kcred);
44070Sstevel@tonic-gate 			VN_RELE(vp);
44080Sstevel@tonic-gate 		}
44090Sstevel@tonic-gate 	} while ((pp = page_next(pp)) != page0);
44100Sstevel@tonic-gate 
44110Sstevel@tonic-gate 	return (nppbusy);
44120Sstevel@tonic-gate }
44130Sstevel@tonic-gate 
44140Sstevel@tonic-gate void page_invalidate_pages(void);
44150Sstevel@tonic-gate 
44160Sstevel@tonic-gate /*
44170Sstevel@tonic-gate  * callback handler to vm sub-system
44180Sstevel@tonic-gate  *
44190Sstevel@tonic-gate  * callers make sure no recursive entries to this func.
44200Sstevel@tonic-gate  */
44210Sstevel@tonic-gate /*ARGSUSED*/
44220Sstevel@tonic-gate boolean_t
44230Sstevel@tonic-gate callb_vm_cpr(void *arg, int code)
44240Sstevel@tonic-gate {
44250Sstevel@tonic-gate 	if (code == CB_CODE_CPR_CHKPT)
44260Sstevel@tonic-gate 		page_invalidate_pages();
44270Sstevel@tonic-gate 	return (B_TRUE);
44280Sstevel@tonic-gate }
44290Sstevel@tonic-gate 
44300Sstevel@tonic-gate /*
44310Sstevel@tonic-gate  * Invalidate all pages of the system.
44320Sstevel@tonic-gate  * It shouldn't be called until all user page activities are all stopped.
44330Sstevel@tonic-gate  */
44340Sstevel@tonic-gate void
44350Sstevel@tonic-gate page_invalidate_pages()
44360Sstevel@tonic-gate {
44370Sstevel@tonic-gate 	page_t *pp;
44380Sstevel@tonic-gate 	page_t *page0;
44390Sstevel@tonic-gate 	pgcnt_t nbusypages;
44400Sstevel@tonic-gate 	int retry = 0;
44410Sstevel@tonic-gate 	const int MAXRETRIES = 4;
44420Sstevel@tonic-gate #if defined(__sparc)
44430Sstevel@tonic-gate 	extern struct vnode prom_ppages;
44440Sstevel@tonic-gate #endif /* __sparc */
44450Sstevel@tonic-gate 
44460Sstevel@tonic-gate top:
44470Sstevel@tonic-gate 	/*
44480Sstevel@tonic-gate 	 * Flush dirty pages and destory the clean ones.
44490Sstevel@tonic-gate 	 */
44500Sstevel@tonic-gate 	nbusypages = 0;
44510Sstevel@tonic-gate 
44520Sstevel@tonic-gate 	pp = page0 = page_first();
44530Sstevel@tonic-gate 	do {
44540Sstevel@tonic-gate 		struct vnode	*vp;
44550Sstevel@tonic-gate 		u_offset_t	offset;
44560Sstevel@tonic-gate 		int		mod;
44570Sstevel@tonic-gate 
44580Sstevel@tonic-gate 		/*
44590Sstevel@tonic-gate 		 * skip the page if it has no vnode or the page associated
44600Sstevel@tonic-gate 		 * with the kernel vnode or prom allocated kernel mem.
44610Sstevel@tonic-gate 		 */
44620Sstevel@tonic-gate #if defined(__sparc)
44630Sstevel@tonic-gate 		if ((vp = pp->p_vnode) == NULL || vp == &kvp ||
44640Sstevel@tonic-gate 		    vp == &prom_ppages)
44650Sstevel@tonic-gate #else /* x86 doesn't have prom or prom_ppage */
44660Sstevel@tonic-gate 		if ((vp = pp->p_vnode) == NULL || vp == &kvp)
44670Sstevel@tonic-gate #endif /* __sparc */
44680Sstevel@tonic-gate 			continue;
44690Sstevel@tonic-gate 
44700Sstevel@tonic-gate 		/*
44710Sstevel@tonic-gate 		 * skip the page which is already free invalidated.
44720Sstevel@tonic-gate 		 */
44730Sstevel@tonic-gate 		if (PP_ISFREE(pp) && PP_ISAGED(pp))
44740Sstevel@tonic-gate 			continue;
44750Sstevel@tonic-gate 
44760Sstevel@tonic-gate 		/*
44770Sstevel@tonic-gate 		 * skip pages that are already locked or can't be "exclusively"
44780Sstevel@tonic-gate 		 * locked or are already free.  After we lock the page, check
44790Sstevel@tonic-gate 		 * the free and age bits again to be sure it's not destroied
44800Sstevel@tonic-gate 		 * yet.
44810Sstevel@tonic-gate 		 * To achieve max. parallelization, we use page_trylock instead
44820Sstevel@tonic-gate 		 * of page_lock so that we don't get block on individual pages
44830Sstevel@tonic-gate 		 * while we have thousands of other pages to process.
44840Sstevel@tonic-gate 		 */
44850Sstevel@tonic-gate 		if (!page_trylock(pp, SE_EXCL)) {
44860Sstevel@tonic-gate 			nbusypages++;
44870Sstevel@tonic-gate 			continue;
44880Sstevel@tonic-gate 		} else if (PP_ISFREE(pp)) {
44890Sstevel@tonic-gate 			if (!PP_ISAGED(pp)) {
44900Sstevel@tonic-gate 				page_destroy_free(pp);
44910Sstevel@tonic-gate 			} else {
44920Sstevel@tonic-gate 				page_unlock(pp);
44930Sstevel@tonic-gate 			}
44940Sstevel@tonic-gate 			continue;
44950Sstevel@tonic-gate 		}
44960Sstevel@tonic-gate 		/*
44970Sstevel@tonic-gate 		 * Is this page involved in some I/O? shared?
44980Sstevel@tonic-gate 		 *
44990Sstevel@tonic-gate 		 * The page_struct_lock need not be acquired to
45000Sstevel@tonic-gate 		 * examine these fields since the page has an
45010Sstevel@tonic-gate 		 * "exclusive" lock.
45020Sstevel@tonic-gate 		 */
45030Sstevel@tonic-gate 		if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
45040Sstevel@tonic-gate 			page_unlock(pp);
45050Sstevel@tonic-gate 			continue;
45060Sstevel@tonic-gate 		}
45070Sstevel@tonic-gate 
45080Sstevel@tonic-gate 		if (vp->v_type == VCHR) {
45090Sstevel@tonic-gate 			panic("vp->v_type == VCHR");
45100Sstevel@tonic-gate 			/*NOTREACHED*/
45110Sstevel@tonic-gate 		}
45120Sstevel@tonic-gate 
45130Sstevel@tonic-gate 		if (!page_try_demote_pages(pp)) {
45140Sstevel@tonic-gate 			page_unlock(pp);
45150Sstevel@tonic-gate 			continue;
45160Sstevel@tonic-gate 		}
45170Sstevel@tonic-gate 
45180Sstevel@tonic-gate 		/*
45190Sstevel@tonic-gate 		 * Check the modified bit. Leave the bits alone in hardware
45200Sstevel@tonic-gate 		 * (they will be modified if we do the putpage).
45210Sstevel@tonic-gate 		 */
45220Sstevel@tonic-gate 		mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD)
45230Sstevel@tonic-gate 			& P_MOD);
45240Sstevel@tonic-gate 		if (mod) {
45250Sstevel@tonic-gate 			offset = pp->p_offset;
45260Sstevel@tonic-gate 			/*
45270Sstevel@tonic-gate 			 * Hold the vnode before releasing the page lock
45280Sstevel@tonic-gate 			 * to prevent it from being freed and re-used by
45290Sstevel@tonic-gate 			 * some other thread.
45300Sstevel@tonic-gate 			 */
45310Sstevel@tonic-gate 			VN_HOLD(vp);
45320Sstevel@tonic-gate 			page_unlock(pp);
45330Sstevel@tonic-gate 			/*
45340Sstevel@tonic-gate 			 * No error return is checked here. Callers such as
45350Sstevel@tonic-gate 			 * cpr deals with the dirty pages at the dump time
45360Sstevel@tonic-gate 			 * if this putpage fails.
45370Sstevel@tonic-gate 			 */
45380Sstevel@tonic-gate 			(void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL,
45390Sstevel@tonic-gate 			    kcred);
45400Sstevel@tonic-gate 			VN_RELE(vp);
45410Sstevel@tonic-gate 		} else {
45420Sstevel@tonic-gate 			page_destroy(pp, 0);
45430Sstevel@tonic-gate 		}
45440Sstevel@tonic-gate 	} while ((pp = page_next(pp)) != page0);
45450Sstevel@tonic-gate 	if (nbusypages && retry++ < MAXRETRIES) {
45460Sstevel@tonic-gate 		delay(1);
45470Sstevel@tonic-gate 		goto top;
45480Sstevel@tonic-gate 	}
45490Sstevel@tonic-gate }
45500Sstevel@tonic-gate 
45510Sstevel@tonic-gate /*
45520Sstevel@tonic-gate  * Replace the page "old" with the page "new" on the page hash and vnode lists
45530Sstevel@tonic-gate  *
45540Sstevel@tonic-gate  * the replacemnt must be done in place, ie the equivalent sequence:
45550Sstevel@tonic-gate  *
45560Sstevel@tonic-gate  *	vp = old->p_vnode;
45570Sstevel@tonic-gate  *	off = old->p_offset;
45580Sstevel@tonic-gate  *	page_do_hashout(old)
45590Sstevel@tonic-gate  *	page_do_hashin(new, vp, off)
45600Sstevel@tonic-gate  *
45610Sstevel@tonic-gate  * doesn't work, since
45620Sstevel@tonic-gate  *  1) if old is the only page on the vnode, the v_pages list has a window
45630Sstevel@tonic-gate  *     where it looks empty. This will break file system assumptions.
45640Sstevel@tonic-gate  * and
45650Sstevel@tonic-gate  *  2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list.
45660Sstevel@tonic-gate  */
45670Sstevel@tonic-gate static void
45680Sstevel@tonic-gate page_do_relocate_hash(page_t *new, page_t *old)
45690Sstevel@tonic-gate {
45700Sstevel@tonic-gate 	page_t	**hash_list;
45710Sstevel@tonic-gate 	vnode_t	*vp = old->p_vnode;
45720Sstevel@tonic-gate 	kmutex_t *sep;
45730Sstevel@tonic-gate 
45740Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(old));
45750Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(new));
45760Sstevel@tonic-gate 	ASSERT(vp != NULL);
45770Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));
45780Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset))));
45790Sstevel@tonic-gate 
45800Sstevel@tonic-gate 	/*
45810Sstevel@tonic-gate 	 * First find old page on the page hash list
45820Sstevel@tonic-gate 	 */
45830Sstevel@tonic-gate 	hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)];
45840Sstevel@tonic-gate 
45850Sstevel@tonic-gate 	for (;;) {
45860Sstevel@tonic-gate 		if (*hash_list == old)
45870Sstevel@tonic-gate 			break;
45880Sstevel@tonic-gate 		if (*hash_list == NULL) {
45890Sstevel@tonic-gate 			panic("page_do_hashout");
45900Sstevel@tonic-gate 			/*NOTREACHED*/
45910Sstevel@tonic-gate 		}
45920Sstevel@tonic-gate 		hash_list = &(*hash_list)->p_hash;
45930Sstevel@tonic-gate 	}
45940Sstevel@tonic-gate 
45950Sstevel@tonic-gate 	/*
45960Sstevel@tonic-gate 	 * update new and replace old with new on the page hash list
45970Sstevel@tonic-gate 	 */
45980Sstevel@tonic-gate 	new->p_vnode = old->p_vnode;
45990Sstevel@tonic-gate 	new->p_offset = old->p_offset;
46000Sstevel@tonic-gate 	new->p_hash = old->p_hash;
46010Sstevel@tonic-gate 	*hash_list = new;
46020Sstevel@tonic-gate 
46030Sstevel@tonic-gate 	if ((new->p_vnode->v_flag & VISSWAP) != 0)
46040Sstevel@tonic-gate 		PP_SETSWAP(new);
46050Sstevel@tonic-gate 
46060Sstevel@tonic-gate 	/*
46070Sstevel@tonic-gate 	 * replace old with new on the vnode's page list
46080Sstevel@tonic-gate 	 */
46090Sstevel@tonic-gate 	if (old->p_vpnext == old) {
46100Sstevel@tonic-gate 		new->p_vpnext = new;
46110Sstevel@tonic-gate 		new->p_vpprev = new;
46120Sstevel@tonic-gate 	} else {
46130Sstevel@tonic-gate 		new->p_vpnext = old->p_vpnext;
46140Sstevel@tonic-gate 		new->p_vpprev = old->p_vpprev;
46150Sstevel@tonic-gate 		new->p_vpnext->p_vpprev = new;
46160Sstevel@tonic-gate 		new->p_vpprev->p_vpnext = new;
46170Sstevel@tonic-gate 	}
46180Sstevel@tonic-gate 	if (vp->v_pages == old)
46190Sstevel@tonic-gate 		vp->v_pages = new;
46200Sstevel@tonic-gate 
46210Sstevel@tonic-gate 	/*
46220Sstevel@tonic-gate 	 * clear out the old page
46230Sstevel@tonic-gate 	 */
46240Sstevel@tonic-gate 	old->p_hash = NULL;
46250Sstevel@tonic-gate 	old->p_vpnext = NULL;
46260Sstevel@tonic-gate 	old->p_vpprev = NULL;
46270Sstevel@tonic-gate 	old->p_vnode = NULL;
46280Sstevel@tonic-gate 	PP_CLRSWAP(old);
46290Sstevel@tonic-gate 	old->p_offset = (u_offset_t)-1;
46300Sstevel@tonic-gate 	page_clr_all_props(old);
46310Sstevel@tonic-gate 
46320Sstevel@tonic-gate 	/*
46330Sstevel@tonic-gate 	 * Wake up processes waiting for this page.  The page's
46340Sstevel@tonic-gate 	 * identity has been changed, and is probably not the
46350Sstevel@tonic-gate 	 * desired page any longer.
46360Sstevel@tonic-gate 	 */
46370Sstevel@tonic-gate 	sep = page_se_mutex(old);
46380Sstevel@tonic-gate 	mutex_enter(sep);
4639800Sstans 	old->p_selock &= ~SE_EWANTED;
46400Sstevel@tonic-gate 	if (CV_HAS_WAITERS(&old->p_cv))
46410Sstevel@tonic-gate 		cv_broadcast(&old->p_cv);
46420Sstevel@tonic-gate 	mutex_exit(sep);
46430Sstevel@tonic-gate }
46440Sstevel@tonic-gate 
46450Sstevel@tonic-gate /*
46460Sstevel@tonic-gate  * This function moves the identity of page "pp_old" to page "pp_new".
46470Sstevel@tonic-gate  * Both pages must be locked on entry.  "pp_new" is free, has no identity,
46480Sstevel@tonic-gate  * and need not be hashed out from anywhere.
46490Sstevel@tonic-gate  */
46500Sstevel@tonic-gate void
46510Sstevel@tonic-gate page_relocate_hash(page_t *pp_new, page_t *pp_old)
46520Sstevel@tonic-gate {
46530Sstevel@tonic-gate 	vnode_t *vp = pp_old->p_vnode;
46540Sstevel@tonic-gate 	u_offset_t off = pp_old->p_offset;
46550Sstevel@tonic-gate 	kmutex_t *phm, *vphm;
46560Sstevel@tonic-gate 
46570Sstevel@tonic-gate 	/*
46580Sstevel@tonic-gate 	 * Rehash two pages
46590Sstevel@tonic-gate 	 */
46600Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp_old));
46610Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp_new));
46620Sstevel@tonic-gate 	ASSERT(vp != NULL);
46630Sstevel@tonic-gate 	ASSERT(pp_new->p_vnode == NULL);
46640Sstevel@tonic-gate 
46650Sstevel@tonic-gate 	/*
46660Sstevel@tonic-gate 	 * hashout then hashin while holding the mutexes
46670Sstevel@tonic-gate 	 */
46680Sstevel@tonic-gate 	phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off));
46690Sstevel@tonic-gate 	mutex_enter(phm);
46700Sstevel@tonic-gate 	vphm = page_vnode_mutex(vp);
46710Sstevel@tonic-gate 	mutex_enter(vphm);
46720Sstevel@tonic-gate 
46730Sstevel@tonic-gate 	page_do_relocate_hash(pp_new, pp_old);
46740Sstevel@tonic-gate 
46750Sstevel@tonic-gate 	mutex_exit(vphm);
46760Sstevel@tonic-gate 	mutex_exit(phm);
46770Sstevel@tonic-gate 
46780Sstevel@tonic-gate 	/*
46790Sstevel@tonic-gate 	 * The page_struct_lock need not be acquired for lckcnt and
46800Sstevel@tonic-gate 	 * cowcnt since the page has an "exclusive" lock.
46810Sstevel@tonic-gate 	 */
46820Sstevel@tonic-gate 	ASSERT(pp_new->p_lckcnt == 0);
46830Sstevel@tonic-gate 	ASSERT(pp_new->p_cowcnt == 0);
46840Sstevel@tonic-gate 	pp_new->p_lckcnt = pp_old->p_lckcnt;
46850Sstevel@tonic-gate 	pp_new->p_cowcnt = pp_old->p_cowcnt;
46860Sstevel@tonic-gate 	pp_old->p_lckcnt = pp_old->p_cowcnt = 0;
46870Sstevel@tonic-gate 
46880Sstevel@tonic-gate 	/* The following comment preserved from page_flip(). */
46890Sstevel@tonic-gate 	/* XXX - Do we need to protect fsdata? */
46900Sstevel@tonic-gate 	pp_new->p_fsdata = pp_old->p_fsdata;
46910Sstevel@tonic-gate }
46920Sstevel@tonic-gate 
46930Sstevel@tonic-gate /*
46940Sstevel@tonic-gate  * Helper routine used to lock all remaining members of a
46950Sstevel@tonic-gate  * large page. The caller is responsible for passing in a locked
46960Sstevel@tonic-gate  * pp. If pp is a large page, then it succeeds in locking all the
46970Sstevel@tonic-gate  * remaining constituent pages or it returns with only the
46980Sstevel@tonic-gate  * original page locked.
46990Sstevel@tonic-gate  *
47000Sstevel@tonic-gate  * Returns 1 on success, 0 on failure.
47010Sstevel@tonic-gate  *
47020Sstevel@tonic-gate  * If success is returned this routine gurantees p_szc for all constituent
47030Sstevel@tonic-gate  * pages of a large page pp belongs to can't change. To achieve this we
47040Sstevel@tonic-gate  * recheck szc of pp after locking all constituent pages and retry if szc
47050Sstevel@tonic-gate  * changed (it could only decrease). Since hat_page_demote() needs an EXCL
47060Sstevel@tonic-gate  * lock on one of constituent pages it can't be running after all constituent
47070Sstevel@tonic-gate  * pages are locked.  hat_page_demote() with a lock on a constituent page
47080Sstevel@tonic-gate  * outside of this large page (i.e. pp belonged to a larger large page) is
47090Sstevel@tonic-gate  * already done with all constituent pages of pp since the root's p_szc is
47100Sstevel@tonic-gate  * changed last. Thefore no need to synchronize with hat_page_demote() that
47110Sstevel@tonic-gate  * locked a constituent page outside of pp's current large page.
47120Sstevel@tonic-gate  */
47130Sstevel@tonic-gate #ifdef DEBUG
47140Sstevel@tonic-gate uint32_t gpg_trylock_mtbf = 0;
47150Sstevel@tonic-gate #endif
47160Sstevel@tonic-gate 
47170Sstevel@tonic-gate int
47180Sstevel@tonic-gate group_page_trylock(page_t *pp, se_t se)
47190Sstevel@tonic-gate {
47200Sstevel@tonic-gate 	page_t  *tpp;
47210Sstevel@tonic-gate 	pgcnt_t	npgs, i, j;
47220Sstevel@tonic-gate 	uint_t pszc = pp->p_szc;
47230Sstevel@tonic-gate 
47240Sstevel@tonic-gate #ifdef DEBUG
47250Sstevel@tonic-gate 	if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) {
47260Sstevel@tonic-gate 		return (0);
47270Sstevel@tonic-gate 	}
47280Sstevel@tonic-gate #endif
47290Sstevel@tonic-gate 
47300Sstevel@tonic-gate 	if (pp != PP_GROUPLEADER(pp, pszc)) {
47310Sstevel@tonic-gate 		return (0);
47320Sstevel@tonic-gate 	}
47330Sstevel@tonic-gate 
47340Sstevel@tonic-gate retry:
47350Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED_SE(pp, se));
47360Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
47370Sstevel@tonic-gate 	if (pszc == 0) {
47380Sstevel@tonic-gate 		return (1);
47390Sstevel@tonic-gate 	}
47400Sstevel@tonic-gate 	npgs = page_get_pagecnt(pszc);
47410Sstevel@tonic-gate 	tpp = pp + 1;
47420Sstevel@tonic-gate 	for (i = 1; i < npgs; i++, tpp++) {
47430Sstevel@tonic-gate 		if (!page_trylock(tpp, se)) {
47440Sstevel@tonic-gate 			tpp = pp + 1;
47450Sstevel@tonic-gate 			for (j = 1; j < i; j++, tpp++) {
47460Sstevel@tonic-gate 				page_unlock(tpp);
47470Sstevel@tonic-gate 			}
47480Sstevel@tonic-gate 			return (0);
47490Sstevel@tonic-gate 		}
47500Sstevel@tonic-gate 	}
47510Sstevel@tonic-gate 	if (pp->p_szc != pszc) {
47520Sstevel@tonic-gate 		ASSERT(pp->p_szc < pszc);
47530Sstevel@tonic-gate 		ASSERT(pp->p_vnode != NULL && pp->p_vnode != &kvp &&
47540Sstevel@tonic-gate 		    !IS_SWAPFSVP(pp->p_vnode));
47550Sstevel@tonic-gate 		tpp = pp + 1;
47560Sstevel@tonic-gate 		for (i = 1; i < npgs; i++, tpp++) {
47570Sstevel@tonic-gate 			page_unlock(tpp);
47580Sstevel@tonic-gate 		}
47590Sstevel@tonic-gate 		pszc = pp->p_szc;
47600Sstevel@tonic-gate 		goto retry;
47610Sstevel@tonic-gate 	}
47620Sstevel@tonic-gate 	return (1);
47630Sstevel@tonic-gate }
47640Sstevel@tonic-gate 
47650Sstevel@tonic-gate void
47660Sstevel@tonic-gate group_page_unlock(page_t *pp)
47670Sstevel@tonic-gate {
47680Sstevel@tonic-gate 	page_t *tpp;
47690Sstevel@tonic-gate 	pgcnt_t	npgs, i;
47700Sstevel@tonic-gate 
47710Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(pp));
47720Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
47730Sstevel@tonic-gate 	ASSERT(pp == PP_PAGEROOT(pp));
47740Sstevel@tonic-gate 	npgs = page_get_pagecnt(pp->p_szc);
47750Sstevel@tonic-gate 	for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) {
47760Sstevel@tonic-gate 		page_unlock(tpp);
47770Sstevel@tonic-gate 	}
47780Sstevel@tonic-gate }
47790Sstevel@tonic-gate 
47800Sstevel@tonic-gate /*
47810Sstevel@tonic-gate  * returns
47820Sstevel@tonic-gate  * 0 		: on success and *nrelocp is number of relocated PAGESIZE pages
47830Sstevel@tonic-gate  * ERANGE	: this is not a base page
47840Sstevel@tonic-gate  * EBUSY	: failure to get locks on the page/pages
47850Sstevel@tonic-gate  * ENOMEM	: failure to obtain replacement pages
47860Sstevel@tonic-gate  * EAGAIN	: OBP has not yet completed its boot-time handoff to the kernel
47870Sstevel@tonic-gate  *
47880Sstevel@tonic-gate  * Return with all constituent members of target and replacement
47890Sstevel@tonic-gate  * SE_EXCL locked. It is the callers responsibility to drop the
47900Sstevel@tonic-gate  * locks.
47910Sstevel@tonic-gate  */
47920Sstevel@tonic-gate int
47930Sstevel@tonic-gate do_page_relocate(
47940Sstevel@tonic-gate 	page_t **target,
47950Sstevel@tonic-gate 	page_t **replacement,
47960Sstevel@tonic-gate 	int grouplock,
47970Sstevel@tonic-gate 	spgcnt_t *nrelocp,
47980Sstevel@tonic-gate 	lgrp_t *lgrp)
47990Sstevel@tonic-gate {
48000Sstevel@tonic-gate #ifdef DEBUG
48010Sstevel@tonic-gate 	page_t *first_repl;
48020Sstevel@tonic-gate #endif /* DEBUG */
48030Sstevel@tonic-gate 	page_t *repl;
48040Sstevel@tonic-gate 	page_t *targ;
48050Sstevel@tonic-gate 	page_t *pl = NULL;
48060Sstevel@tonic-gate 	uint_t ppattr;
48070Sstevel@tonic-gate 	pfn_t   pfn, repl_pfn;
48080Sstevel@tonic-gate 	uint_t	szc;
48090Sstevel@tonic-gate 	spgcnt_t npgs, i;
48100Sstevel@tonic-gate 	int repl_contig = 0;
48110Sstevel@tonic-gate 	uint_t flags = 0;
48120Sstevel@tonic-gate 	spgcnt_t dofree = 0;
48130Sstevel@tonic-gate 
48140Sstevel@tonic-gate 	*nrelocp = 0;
48150Sstevel@tonic-gate 
48160Sstevel@tonic-gate #if defined(__sparc)
48170Sstevel@tonic-gate 	/*
48180Sstevel@tonic-gate 	 * We need to wait till OBP has completed
48190Sstevel@tonic-gate 	 * its boot-time handoff of its resources to the kernel
48200Sstevel@tonic-gate 	 * before we allow page relocation
48210Sstevel@tonic-gate 	 */
48220Sstevel@tonic-gate 	if (page_relocate_ready == 0) {
48230Sstevel@tonic-gate 		return (EAGAIN);
48240Sstevel@tonic-gate 	}
48250Sstevel@tonic-gate #endif
48260Sstevel@tonic-gate 
48270Sstevel@tonic-gate 	/*
48280Sstevel@tonic-gate 	 * If this is not a base page,
48290Sstevel@tonic-gate 	 * just return with 0x0 pages relocated.
48300Sstevel@tonic-gate 	 */
48310Sstevel@tonic-gate 	targ = *target;
48320Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(targ));
48330Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(targ));
48340Sstevel@tonic-gate 	szc = targ->p_szc;
48350Sstevel@tonic-gate 	ASSERT(szc < mmu_page_sizes);
48360Sstevel@tonic-gate 	VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]);
48370Sstevel@tonic-gate 	pfn = targ->p_pagenum;
48380Sstevel@tonic-gate 	if (pfn != PFN_BASE(pfn, szc)) {
48390Sstevel@tonic-gate 		VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]);
48400Sstevel@tonic-gate 		return (ERANGE);
48410Sstevel@tonic-gate 	}
48420Sstevel@tonic-gate 
48430Sstevel@tonic-gate 	if ((repl = *replacement) != NULL && repl->p_szc >= szc) {
48440Sstevel@tonic-gate 		repl_pfn = repl->p_pagenum;
48450Sstevel@tonic-gate 		if (repl_pfn != PFN_BASE(repl_pfn, szc)) {
48460Sstevel@tonic-gate 			VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]);
48470Sstevel@tonic-gate 			return (ERANGE);
48480Sstevel@tonic-gate 		}
48490Sstevel@tonic-gate 		repl_contig = 1;
48500Sstevel@tonic-gate 	}
48510Sstevel@tonic-gate 
48520Sstevel@tonic-gate 	/*
48530Sstevel@tonic-gate 	 * We must lock all members of this large page or we cannot
48540Sstevel@tonic-gate 	 * relocate any part of it.
48550Sstevel@tonic-gate 	 */
48560Sstevel@tonic-gate 	if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) {
48570Sstevel@tonic-gate 		VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]);
48580Sstevel@tonic-gate 		return (EBUSY);
48590Sstevel@tonic-gate 	}
48600Sstevel@tonic-gate 
48610Sstevel@tonic-gate 	/*
48620Sstevel@tonic-gate 	 * reread szc it could have been decreased before
48630Sstevel@tonic-gate 	 * group_page_trylock() was done.
48640Sstevel@tonic-gate 	 */
48650Sstevel@tonic-gate 	szc = targ->p_szc;
48660Sstevel@tonic-gate 	ASSERT(szc < mmu_page_sizes);
48670Sstevel@tonic-gate 	VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]);
48680Sstevel@tonic-gate 	ASSERT(pfn == PFN_BASE(pfn, szc));
48690Sstevel@tonic-gate 
48700Sstevel@tonic-gate 	npgs = page_get_pagecnt(targ->p_szc);
48710Sstevel@tonic-gate 
48720Sstevel@tonic-gate 	if (repl == NULL) {
48730Sstevel@tonic-gate 		dofree = npgs;		/* Size of target page in MMU pages */
48740Sstevel@tonic-gate 		if (!page_create_wait(dofree, 0)) {
48750Sstevel@tonic-gate 			if (grouplock != 0) {
48760Sstevel@tonic-gate 				group_page_unlock(targ);
48770Sstevel@tonic-gate 			}
48780Sstevel@tonic-gate 			VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]);
48790Sstevel@tonic-gate 			return (ENOMEM);
48800Sstevel@tonic-gate 		}
48810Sstevel@tonic-gate 
48820Sstevel@tonic-gate 		/*
48830Sstevel@tonic-gate 		 * seg kmem pages require that the target and replacement
48840Sstevel@tonic-gate 		 * page be the same pagesize.
48850Sstevel@tonic-gate 		 */
48860Sstevel@tonic-gate 		flags = (targ->p_vnode == &kvp) ? PGR_SAMESZC : 0;
48870Sstevel@tonic-gate 		repl = page_get_replacement_page(targ, lgrp, flags);
48880Sstevel@tonic-gate 		if (repl == NULL) {
48890Sstevel@tonic-gate 			if (grouplock != 0) {
48900Sstevel@tonic-gate 				group_page_unlock(targ);
48910Sstevel@tonic-gate 			}
48920Sstevel@tonic-gate 			page_create_putback(dofree);
48930Sstevel@tonic-gate 			VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]);
48940Sstevel@tonic-gate 			return (ENOMEM);
48950Sstevel@tonic-gate 		}
48960Sstevel@tonic-gate 	}
48970Sstevel@tonic-gate #ifdef DEBUG
48980Sstevel@tonic-gate 	else {
48990Sstevel@tonic-gate 		ASSERT(PAGE_LOCKED(repl));
49000Sstevel@tonic-gate 	}
49010Sstevel@tonic-gate #endif /* DEBUG */
49020Sstevel@tonic-gate 
49030Sstevel@tonic-gate #if defined(__sparc)
49040Sstevel@tonic-gate 	/*
49050Sstevel@tonic-gate 	 * Let hat_page_relocate() complete the relocation if it's kernel page
49060Sstevel@tonic-gate 	 */
49070Sstevel@tonic-gate 	if (targ->p_vnode == &kvp) {
49080Sstevel@tonic-gate 		*replacement = repl;
49090Sstevel@tonic-gate 		if (hat_page_relocate(target, replacement, nrelocp) != 0) {
49100Sstevel@tonic-gate 			if (grouplock != 0) {
49110Sstevel@tonic-gate 				group_page_unlock(targ);
49120Sstevel@tonic-gate 			}
49130Sstevel@tonic-gate 			if (dofree) {
49140Sstevel@tonic-gate 				*replacement = NULL;
49150Sstevel@tonic-gate 				page_free_replacement_page(repl);
49160Sstevel@tonic-gate 				page_create_putback(dofree);
49170Sstevel@tonic-gate 			}
49180Sstevel@tonic-gate 			VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]);
49190Sstevel@tonic-gate 			return (EAGAIN);
49200Sstevel@tonic-gate 		}
49210Sstevel@tonic-gate 		VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]);
49220Sstevel@tonic-gate 		return (0);
49230Sstevel@tonic-gate 	}
49240Sstevel@tonic-gate #else
49250Sstevel@tonic-gate #if defined(lint)
49260Sstevel@tonic-gate 	dofree = dofree;
49270Sstevel@tonic-gate #endif
49280Sstevel@tonic-gate #endif
49290Sstevel@tonic-gate 
49300Sstevel@tonic-gate #ifdef DEBUG
49310Sstevel@tonic-gate 	first_repl = repl;
49320Sstevel@tonic-gate #endif /* DEBUG */
49330Sstevel@tonic-gate 
49340Sstevel@tonic-gate 	for (i = 0; i < npgs; i++) {
49350Sstevel@tonic-gate 		ASSERT(PAGE_EXCL(targ));
49360Sstevel@tonic-gate 
49370Sstevel@tonic-gate 		(void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD);
49380Sstevel@tonic-gate 
49390Sstevel@tonic-gate 		ASSERT(hat_page_getshare(targ) == 0);
49400Sstevel@tonic-gate 		ASSERT(!PP_ISFREE(targ));
49410Sstevel@tonic-gate 		ASSERT(targ->p_pagenum == (pfn + i));
49420Sstevel@tonic-gate 		ASSERT(repl_contig == 0 ||
49430Sstevel@tonic-gate 		    repl->p_pagenum == (repl_pfn + i));
49440Sstevel@tonic-gate 
49450Sstevel@tonic-gate 		/*
49460Sstevel@tonic-gate 		 * Copy the page contents and attributes then
49470Sstevel@tonic-gate 		 * relocate the page in the page hash.
49480Sstevel@tonic-gate 		 */
49490Sstevel@tonic-gate 		ppcopy(targ, repl);
49500Sstevel@tonic-gate 		ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO));
49510Sstevel@tonic-gate 		page_clr_all_props(repl);
49520Sstevel@tonic-gate 		page_set_props(repl, ppattr);
49530Sstevel@tonic-gate 		page_relocate_hash(repl, targ);
49540Sstevel@tonic-gate 
49550Sstevel@tonic-gate 		ASSERT(hat_page_getshare(targ) == 0);
49560Sstevel@tonic-gate 		ASSERT(hat_page_getshare(repl) == 0);
49570Sstevel@tonic-gate 		/*
49580Sstevel@tonic-gate 		 * Now clear the props on targ, after the
49590Sstevel@tonic-gate 		 * page_relocate_hash(), they no longer
49600Sstevel@tonic-gate 		 * have any meaning.
49610Sstevel@tonic-gate 		 */
49620Sstevel@tonic-gate 		page_clr_all_props(targ);
49630Sstevel@tonic-gate 		ASSERT(targ->p_next == targ);
49640Sstevel@tonic-gate 		ASSERT(targ->p_prev == targ);
49650Sstevel@tonic-gate 		page_list_concat(&pl, &targ);
49660Sstevel@tonic-gate 
49670Sstevel@tonic-gate 		targ++;
49680Sstevel@tonic-gate 		if (repl_contig != 0) {
49690Sstevel@tonic-gate 			repl++;
49700Sstevel@tonic-gate 		} else {
49710Sstevel@tonic-gate 			repl = repl->p_next;
49720Sstevel@tonic-gate 		}
49730Sstevel@tonic-gate 	}
49740Sstevel@tonic-gate 	/* assert that we have come full circle with repl */
49750Sstevel@tonic-gate 	ASSERT(repl_contig == 1 || first_repl == repl);
49760Sstevel@tonic-gate 
49770Sstevel@tonic-gate 	*target = pl;
49780Sstevel@tonic-gate 	if (*replacement == NULL) {
49790Sstevel@tonic-gate 		ASSERT(first_repl == repl);
49800Sstevel@tonic-gate 		*replacement = repl;
49810Sstevel@tonic-gate 	}
49820Sstevel@tonic-gate 	VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]);
49830Sstevel@tonic-gate 	*nrelocp = npgs;
49840Sstevel@tonic-gate 	return (0);
49850Sstevel@tonic-gate }
49860Sstevel@tonic-gate /*
49870Sstevel@tonic-gate  * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated.
49880Sstevel@tonic-gate  */
49890Sstevel@tonic-gate int
49900Sstevel@tonic-gate page_relocate(
49910Sstevel@tonic-gate 	page_t **target,
49920Sstevel@tonic-gate 	page_t **replacement,
49930Sstevel@tonic-gate 	int grouplock,
49940Sstevel@tonic-gate 	int freetarget,
49950Sstevel@tonic-gate 	spgcnt_t *nrelocp,
49960Sstevel@tonic-gate 	lgrp_t *lgrp)
49970Sstevel@tonic-gate {
49980Sstevel@tonic-gate 	spgcnt_t ret;
49990Sstevel@tonic-gate 
50000Sstevel@tonic-gate 	/* do_page_relocate returns 0 on success or errno value */
50010Sstevel@tonic-gate 	ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp);
50020Sstevel@tonic-gate 
50030Sstevel@tonic-gate 	if (ret != 0 || freetarget == 0) {
50040Sstevel@tonic-gate 		return (ret);
50050Sstevel@tonic-gate 	}
50060Sstevel@tonic-gate 	if (*nrelocp == 1) {
50070Sstevel@tonic-gate 		ASSERT(*target != NULL);
50080Sstevel@tonic-gate 		page_free(*target, 1);
50090Sstevel@tonic-gate 	} else {
50100Sstevel@tonic-gate 		page_t *tpp = *target;
50110Sstevel@tonic-gate 		uint_t szc = tpp->p_szc;
50120Sstevel@tonic-gate 		pgcnt_t npgs = page_get_pagecnt(szc);
50130Sstevel@tonic-gate 		ASSERT(npgs > 1);
50140Sstevel@tonic-gate 		ASSERT(szc != 0);
50150Sstevel@tonic-gate 		do {
50160Sstevel@tonic-gate 			ASSERT(PAGE_EXCL(tpp));
50170Sstevel@tonic-gate 			ASSERT(!hat_page_is_mapped(tpp));
50180Sstevel@tonic-gate 			ASSERT(tpp->p_szc == szc);
50190Sstevel@tonic-gate 			PP_SETFREE(tpp);
50200Sstevel@tonic-gate 			PP_SETAGED(tpp);
50210Sstevel@tonic-gate 			npgs--;
50220Sstevel@tonic-gate 		} while ((tpp = tpp->p_next) != *target);
50230Sstevel@tonic-gate 		ASSERT(npgs == 0);
50240Sstevel@tonic-gate 		page_list_add_pages(*target, 0);
50250Sstevel@tonic-gate 		npgs = page_get_pagecnt(szc);
50260Sstevel@tonic-gate 		page_create_putback(npgs);
50270Sstevel@tonic-gate 	}
50280Sstevel@tonic-gate 	return (ret);
50290Sstevel@tonic-gate }
50300Sstevel@tonic-gate 
50310Sstevel@tonic-gate /*
50320Sstevel@tonic-gate  * it is up to the caller to deal with pcf accounting.
50330Sstevel@tonic-gate  */
50340Sstevel@tonic-gate void
50350Sstevel@tonic-gate page_free_replacement_page(page_t *pplist)
50360Sstevel@tonic-gate {
50370Sstevel@tonic-gate 	page_t *pp;
50380Sstevel@tonic-gate 
50390Sstevel@tonic-gate 	while (pplist != NULL) {
50400Sstevel@tonic-gate 		/*
50410Sstevel@tonic-gate 		 * pp_targ is a linked list.
50420Sstevel@tonic-gate 		 */
50430Sstevel@tonic-gate 		pp = pplist;
50440Sstevel@tonic-gate 		if (pp->p_szc == 0) {
50450Sstevel@tonic-gate 			page_sub(&pplist, pp);
50460Sstevel@tonic-gate 			page_clr_all_props(pp);
50470Sstevel@tonic-gate 			PP_SETFREE(pp);
50480Sstevel@tonic-gate 			PP_SETAGED(pp);
50490Sstevel@tonic-gate 			page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
50500Sstevel@tonic-gate 			page_unlock(pp);
50510Sstevel@tonic-gate 			VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]);
50520Sstevel@tonic-gate 		} else {
50530Sstevel@tonic-gate 			spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc);
50540Sstevel@tonic-gate 			page_t *tpp;
50550Sstevel@tonic-gate 			page_list_break(&pp, &pplist, curnpgs);
50560Sstevel@tonic-gate 			tpp = pp;
50570Sstevel@tonic-gate 			do {
50580Sstevel@tonic-gate 				ASSERT(PAGE_EXCL(tpp));
50590Sstevel@tonic-gate 				ASSERT(!hat_page_is_mapped(tpp));
50600Sstevel@tonic-gate 				page_clr_all_props(pp);
50610Sstevel@tonic-gate 				PP_SETFREE(tpp);
50620Sstevel@tonic-gate 				PP_SETAGED(tpp);
50630Sstevel@tonic-gate 			} while ((tpp = tpp->p_next) != pp);
50640Sstevel@tonic-gate 			page_list_add_pages(pp, 0);
50650Sstevel@tonic-gate 			VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]);
50660Sstevel@tonic-gate 		}
50670Sstevel@tonic-gate 	}
50680Sstevel@tonic-gate }
50690Sstevel@tonic-gate 
50700Sstevel@tonic-gate /*
50710Sstevel@tonic-gate  * Relocate target to non-relocatable replacement page.
50720Sstevel@tonic-gate  */
50730Sstevel@tonic-gate int
50740Sstevel@tonic-gate page_relocate_cage(page_t **target, page_t **replacement)
50750Sstevel@tonic-gate {
50760Sstevel@tonic-gate 	page_t *tpp, *rpp;
50770Sstevel@tonic-gate 	spgcnt_t pgcnt, npgs;
50780Sstevel@tonic-gate 	int result;
50790Sstevel@tonic-gate 
50800Sstevel@tonic-gate 	tpp = *target;
50810Sstevel@tonic-gate 
50820Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(tpp));
50830Sstevel@tonic-gate 	ASSERT(tpp->p_szc == 0);
50840Sstevel@tonic-gate 
50850Sstevel@tonic-gate 	pgcnt = btop(page_get_pagesize(tpp->p_szc));
50860Sstevel@tonic-gate 
50870Sstevel@tonic-gate 	do {
50880Sstevel@tonic-gate 		(void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC);
50890Sstevel@tonic-gate 		rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC);
50900Sstevel@tonic-gate 		if (rpp == NULL) {
50910Sstevel@tonic-gate 			page_create_putback(pgcnt);
50920Sstevel@tonic-gate 			kcage_cageout_wakeup();
50930Sstevel@tonic-gate 		}
50940Sstevel@tonic-gate 	} while (rpp == NULL);
50950Sstevel@tonic-gate 
50960Sstevel@tonic-gate 	ASSERT(PP_ISNORELOC(rpp));
50970Sstevel@tonic-gate 
50980Sstevel@tonic-gate 	result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL);
50990Sstevel@tonic-gate 
51000Sstevel@tonic-gate 	if (result == 0) {
51010Sstevel@tonic-gate 		*replacement = rpp;
51020Sstevel@tonic-gate 		if (pgcnt != npgs)
51030Sstevel@tonic-gate 			panic("page_relocate_cage: partial relocation");
51040Sstevel@tonic-gate 	}
51050Sstevel@tonic-gate 
51060Sstevel@tonic-gate 	return (result);
51070Sstevel@tonic-gate }
51080Sstevel@tonic-gate 
51090Sstevel@tonic-gate /*
51100Sstevel@tonic-gate  * Release the page lock on a page, place on cachelist
51110Sstevel@tonic-gate  * tail if no longer mapped. Caller can let us know if
51120Sstevel@tonic-gate  * the page is known to be clean.
51130Sstevel@tonic-gate  */
51140Sstevel@tonic-gate int
51150Sstevel@tonic-gate page_release(page_t *pp, int checkmod)
51160Sstevel@tonic-gate {
51170Sstevel@tonic-gate 	int status;
51180Sstevel@tonic-gate 
51190Sstevel@tonic-gate 	ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) &&
51200Sstevel@tonic-gate 		(pp->p_vnode != NULL));
51210Sstevel@tonic-gate 
51220Sstevel@tonic-gate 	if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) &&
51230Sstevel@tonic-gate 	    ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) &&
51240Sstevel@tonic-gate 	    pp->p_lckcnt == 0 && pp->p_cowcnt == 0 &&
51250Sstevel@tonic-gate 	    !hat_page_is_mapped(pp)) {
51260Sstevel@tonic-gate 
51270Sstevel@tonic-gate 		/*
51280Sstevel@tonic-gate 		 * If page is modified, unlock it
51290Sstevel@tonic-gate 		 *
51300Sstevel@tonic-gate 		 * (p_nrm & P_MOD) bit has the latest stuff because:
51310Sstevel@tonic-gate 		 * (1) We found that this page doesn't have any mappings
51320Sstevel@tonic-gate 		 *	_after_ holding SE_EXCL and
51330Sstevel@tonic-gate 		 * (2) We didn't drop SE_EXCL lock after the check in (1)
51340Sstevel@tonic-gate 		 */
51350Sstevel@tonic-gate 		if (checkmod && hat_ismod(pp)) {
51360Sstevel@tonic-gate 			page_unlock(pp);
51370Sstevel@tonic-gate 			status = PGREL_MOD;
51380Sstevel@tonic-gate 		} else {
51390Sstevel@tonic-gate 			/*LINTED: constant in conditional context*/
51400Sstevel@tonic-gate 			VN_DISPOSE(pp, B_FREE, 0, kcred);
51410Sstevel@tonic-gate 			status = PGREL_CLEAN;
51420Sstevel@tonic-gate 		}
51430Sstevel@tonic-gate 	} else {
51440Sstevel@tonic-gate 		page_unlock(pp);
51450Sstevel@tonic-gate 		status = PGREL_NOTREL;
51460Sstevel@tonic-gate 	}
51470Sstevel@tonic-gate 	return (status);
51480Sstevel@tonic-gate }
51490Sstevel@tonic-gate 
5150917Selowe /*
5151917Selowe  * Given a constituent page, try to demote the large page on the freelist.
5152917Selowe  *
5153917Selowe  * Returns nonzero if the page could be demoted successfully. Returns with
5154917Selowe  * the constituent page still locked.
5155917Selowe  */
5156917Selowe int
5157917Selowe page_try_demote_free_pages(page_t *pp)
5158917Selowe {
5159917Selowe 	page_t *rootpp = pp;
5160917Selowe 	pfn_t	pfn = page_pptonum(pp);
5161917Selowe 	spgcnt_t npgs;
5162917Selowe 	uint_t	szc = pp->p_szc;
5163917Selowe 
5164917Selowe 	ASSERT(PP_ISFREE(pp));
5165917Selowe 	ASSERT(PAGE_EXCL(pp));
5166917Selowe 
5167917Selowe 	/*
5168917Selowe 	 * Adjust rootpp and lock it, if `pp' is not the base
5169917Selowe 	 * constituent page.
5170917Selowe 	 */
5171917Selowe 	npgs = page_get_pagecnt(pp->p_szc);
5172917Selowe 	if (npgs == 1) {
5173917Selowe 		return (0);
5174917Selowe 	}
5175917Selowe 
5176917Selowe 	if (!IS_P2ALIGNED(pfn, npgs)) {
5177917Selowe 		pfn = P2ALIGN(pfn, npgs);
5178917Selowe 		rootpp = page_numtopp_nolock(pfn);
5179917Selowe 	}
5180917Selowe 
5181917Selowe 	if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) {
5182917Selowe 		return (0);
5183917Selowe 	}
5184917Selowe 
5185917Selowe 	if (rootpp->p_szc != szc) {
5186917Selowe 		if (pp != rootpp)
5187917Selowe 			page_unlock(rootpp);
5188917Selowe 		return (0);
5189917Selowe 	}
5190917Selowe 
5191917Selowe 	page_demote_free_pages(rootpp);
5192917Selowe 
5193917Selowe 	if (pp != rootpp)
5194917Selowe 		page_unlock(rootpp);
5195917Selowe 
5196917Selowe 	ASSERT(PP_ISFREE(pp));
5197917Selowe 	ASSERT(PAGE_EXCL(pp));
5198917Selowe 	return (1);
5199917Selowe }
5200917Selowe 
5201917Selowe /*
5202917Selowe  * Given a constituent page, try to demote the large page.
5203917Selowe  *
5204917Selowe  * Returns nonzero if the page could be demoted successfully. Returns with
5205917Selowe  * the constituent page still locked.
5206917Selowe  */
52070Sstevel@tonic-gate int
52080Sstevel@tonic-gate page_try_demote_pages(page_t *pp)
52090Sstevel@tonic-gate {
52100Sstevel@tonic-gate 	page_t *tpp, *rootpp = pp;
52110Sstevel@tonic-gate 	pfn_t	pfn = page_pptonum(pp);
52120Sstevel@tonic-gate 	spgcnt_t i, npgs;
52130Sstevel@tonic-gate 	uint_t	szc = pp->p_szc;
52140Sstevel@tonic-gate 	vnode_t *vp = pp->p_vnode;
52150Sstevel@tonic-gate 
5216917Selowe 	ASSERT(PAGE_EXCL(pp));
52170Sstevel@tonic-gate 
52180Sstevel@tonic-gate 	VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]);
52190Sstevel@tonic-gate 
5220917Selowe 	if (pp->p_szc == 0) {
52210Sstevel@tonic-gate 		VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]);
52220Sstevel@tonic-gate 		return (1);
52230Sstevel@tonic-gate 	}
52240Sstevel@tonic-gate 
52250Sstevel@tonic-gate 	if (vp != NULL && !IS_SWAPFSVP(vp) && vp != &kvp) {
52260Sstevel@tonic-gate 		VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]);
5227917Selowe 		page_demote_vp_pages(pp);
52280Sstevel@tonic-gate 		ASSERT(pp->p_szc == 0);
52290Sstevel@tonic-gate 		return (1);
52300Sstevel@tonic-gate 	}
52310Sstevel@tonic-gate 
52320Sstevel@tonic-gate 	/*
5233917Selowe 	 * Adjust rootpp if passed in is not the base
52340Sstevel@tonic-gate 	 * constituent page.
52350Sstevel@tonic-gate 	 */
5236917Selowe 	npgs = page_get_pagecnt(pp->p_szc);
52370Sstevel@tonic-gate 	ASSERT(npgs > 1);
52380Sstevel@tonic-gate 	if (!IS_P2ALIGNED(pfn, npgs)) {
52390Sstevel@tonic-gate 		pfn = P2ALIGN(pfn, npgs);
52400Sstevel@tonic-gate 		rootpp = page_numtopp_nolock(pfn);
52410Sstevel@tonic-gate 		VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]);
52420Sstevel@tonic-gate 		ASSERT(rootpp->p_vnode != NULL);
52430Sstevel@tonic-gate 		ASSERT(rootpp->p_szc == szc);
52440Sstevel@tonic-gate 	}
52450Sstevel@tonic-gate 
52460Sstevel@tonic-gate 	/*
52470Sstevel@tonic-gate 	 * We can't demote kernel pages since we can't hat_unload()
52480Sstevel@tonic-gate 	 * the mappings.
52490Sstevel@tonic-gate 	 */
52500Sstevel@tonic-gate 	if (rootpp->p_vnode == &kvp)
52510Sstevel@tonic-gate 		return (0);
52520Sstevel@tonic-gate 
52530Sstevel@tonic-gate 	/*
52540Sstevel@tonic-gate 	 * Attempt to lock all constituent pages except the page passed
52550Sstevel@tonic-gate 	 * in since it's already locked.
52560Sstevel@tonic-gate 	 */
5257414Skchow 	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
52580Sstevel@tonic-gate 		ASSERT(!PP_ISFREE(tpp));
52590Sstevel@tonic-gate 		ASSERT(tpp->p_vnode != NULL);
52600Sstevel@tonic-gate 
52610Sstevel@tonic-gate 		if (tpp != pp && !page_trylock(tpp, SE_EXCL))
52620Sstevel@tonic-gate 			break;
52630Sstevel@tonic-gate 		ASSERT(tpp->p_szc == rootpp->p_szc);
52640Sstevel@tonic-gate 		ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i);
52650Sstevel@tonic-gate 	}
52660Sstevel@tonic-gate 
52670Sstevel@tonic-gate 	/*
5268917Selowe 	 * If we failed to lock them all then unlock what we have
5269917Selowe 	 * locked so far and bail.
52700Sstevel@tonic-gate 	 */
52710Sstevel@tonic-gate 	if (i < npgs) {
52720Sstevel@tonic-gate 		tpp = rootpp;
52730Sstevel@tonic-gate 		while (i-- > 0) {
52740Sstevel@tonic-gate 			if (tpp != pp)
52750Sstevel@tonic-gate 				page_unlock(tpp);
5276414Skchow 			tpp++;
52770Sstevel@tonic-gate 		}
52780Sstevel@tonic-gate 		VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]);
52790Sstevel@tonic-gate 		return (0);
52800Sstevel@tonic-gate 	}
52810Sstevel@tonic-gate 
5282414Skchow 	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
52830Sstevel@tonic-gate 		ASSERT(PAGE_EXCL(tpp));
5284917Selowe 		(void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
52850Sstevel@tonic-gate 		tpp->p_szc = 0;
52860Sstevel@tonic-gate 	}
52870Sstevel@tonic-gate 
52880Sstevel@tonic-gate 	/*
52890Sstevel@tonic-gate 	 * Unlock all pages except the page passed in.
52900Sstevel@tonic-gate 	 */
5291414Skchow 	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
52920Sstevel@tonic-gate 		ASSERT(!hat_page_is_mapped(tpp));
52930Sstevel@tonic-gate 		if (tpp != pp)
52940Sstevel@tonic-gate 			page_unlock(tpp);
52950Sstevel@tonic-gate 	}
5296917Selowe 
52970Sstevel@tonic-gate 	VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]);
52980Sstevel@tonic-gate 	return (1);
52990Sstevel@tonic-gate }
53000Sstevel@tonic-gate 
53010Sstevel@tonic-gate /*
53020Sstevel@tonic-gate  * Called by page_free() and page_destroy() to demote the page size code
53030Sstevel@tonic-gate  * (p_szc) to 0 (since we can't just put a single PAGESIZE page with non zero
53040Sstevel@tonic-gate  * p_szc on free list, neither can we just clear p_szc of a single page_t
53050Sstevel@tonic-gate  * within a large page since it will break other code that relies on p_szc
53060Sstevel@tonic-gate  * being the same for all page_t's of a large page). Anonymous pages should
53070Sstevel@tonic-gate  * never end up here because anon_map_getpages() cannot deal with p_szc
53080Sstevel@tonic-gate  * changes after a single constituent page is locked.  While anonymous or
53090Sstevel@tonic-gate  * kernel large pages are demoted or freed the entire large page at a time
53100Sstevel@tonic-gate  * with all constituent pages locked EXCL for the file system pages we
53110Sstevel@tonic-gate  * have to be able to demote a large page (i.e. decrease all constituent pages
53120Sstevel@tonic-gate  * p_szc) with only just an EXCL lock on one of constituent pages. The reason
53130Sstevel@tonic-gate  * we can easily deal with anonymous page demotion the entire large page at a
53140Sstevel@tonic-gate  * time is that those operation originate at address space level and concern
53150Sstevel@tonic-gate  * the entire large page region with actual demotion only done when pages are
53160Sstevel@tonic-gate  * not shared with any other processes (therefore we can always get EXCL lock
53170Sstevel@tonic-gate  * on all anonymous constituent pages after clearing segment page
53180Sstevel@tonic-gate  * cache). However file system pages can be truncated or invalidated at a
53190Sstevel@tonic-gate  * PAGESIZE level from the file system side and end up in page_free() or
53200Sstevel@tonic-gate  * page_destroy() (we also allow only part of the large page to be SOFTLOCKed
53210Sstevel@tonic-gate  * and therfore pageout should be able to demote a large page by EXCL locking
53220Sstevel@tonic-gate  * any constituent page that is not under SOFTLOCK). In those cases we cannot
53230Sstevel@tonic-gate  * rely on being able to lock EXCL all constituent pages.
53240Sstevel@tonic-gate  *
53250Sstevel@tonic-gate  * To prevent szc changes on file system pages one has to lock all constituent
53260Sstevel@tonic-gate  * pages at least SHARED (or call page_szc_lock()). The only subsystem that
53270Sstevel@tonic-gate  * doesn't rely on locking all constituent pages (or using page_szc_lock()) to
53280Sstevel@tonic-gate  * prevent szc changes is hat layer that uses its own page level mlist
53290Sstevel@tonic-gate  * locks. hat assumes that szc doesn't change after mlist lock for a page is
53300Sstevel@tonic-gate  * taken. Therefore we need to change szc under hat level locks if we only
53310Sstevel@tonic-gate  * have an EXCL lock on a single constituent page and hat still references any
53320Sstevel@tonic-gate  * of constituent pages.  (Note we can't "ignore" hat layer by simply
53330Sstevel@tonic-gate  * hat_pageunload() all constituent pages without having EXCL locks on all of
53340Sstevel@tonic-gate  * constituent pages). We use hat_page_demote() call to safely demote szc of
53350Sstevel@tonic-gate  * all constituent pages under hat locks when we only have an EXCL lock on one
53360Sstevel@tonic-gate  * of constituent pages.
53370Sstevel@tonic-gate  *
53380Sstevel@tonic-gate  * This routine calls page_szc_lock() before calling hat_page_demote() to
53390Sstevel@tonic-gate  * allow segvn in one special case not to lock all constituent pages SHARED
53400Sstevel@tonic-gate  * before calling hat_memload_array() that relies on p_szc not changeing even
53410Sstevel@tonic-gate  * before hat level mlist lock is taken.  In that case segvn uses
53420Sstevel@tonic-gate  * page_szc_lock() to prevent hat_page_demote() changeing p_szc values.
53430Sstevel@tonic-gate  *
53440Sstevel@tonic-gate  * Anonymous or kernel page demotion still has to lock all pages exclusively
53450Sstevel@tonic-gate  * and do hat_pageunload() on all constituent pages before demoting the page
53460Sstevel@tonic-gate  * therefore there's no need for anonymous or kernel page demotion to use
53470Sstevel@tonic-gate  * hat_page_demote() mechanism.
53480Sstevel@tonic-gate  *
53490Sstevel@tonic-gate  * hat_page_demote() removes all large mappings that map pp and then decreases
53500Sstevel@tonic-gate  * p_szc starting from the last constituent page of the large page. By working
53510Sstevel@tonic-gate  * from the tail of a large page in pfn decreasing order allows one looking at
53520Sstevel@tonic-gate  * the root page to know that hat_page_demote() is done for root's szc area.
53530Sstevel@tonic-gate  * e.g. if a root page has szc 1 one knows it only has to lock all constituent
53540Sstevel@tonic-gate  * pages within szc 1 area to prevent szc changes because hat_page_demote()
53550Sstevel@tonic-gate  * that started on this page when it had szc > 1 is done for this szc 1 area.
53560Sstevel@tonic-gate  *
53570Sstevel@tonic-gate  * We are guranteed that all constituent pages of pp's large page belong to
53580Sstevel@tonic-gate  * the same vnode with the consecutive offsets increasing in the direction of
53590Sstevel@tonic-gate  * the pfn i.e. the identity of constituent pages can't change until their
53600Sstevel@tonic-gate  * p_szc is decreased. Therefore it's safe for hat_page_demote() to remove
53610Sstevel@tonic-gate  * large mappings to pp even though we don't lock any constituent page except
53620Sstevel@tonic-gate  * pp (i.e. we won't unload e.g. kernel locked page).
53630Sstevel@tonic-gate  */
53640Sstevel@tonic-gate static void
53650Sstevel@tonic-gate page_demote_vp_pages(page_t *pp)
53660Sstevel@tonic-gate {
53670Sstevel@tonic-gate 	kmutex_t *mtx;
53680Sstevel@tonic-gate 
53690Sstevel@tonic-gate 	ASSERT(PAGE_EXCL(pp));
53700Sstevel@tonic-gate 	ASSERT(!PP_ISFREE(pp));
53710Sstevel@tonic-gate 	ASSERT(pp->p_vnode != NULL);
53720Sstevel@tonic-gate 	ASSERT(!IS_SWAPFSVP(pp->p_vnode));
53730Sstevel@tonic-gate 	ASSERT(pp->p_vnode != &kvp);
53740Sstevel@tonic-gate 
53750Sstevel@tonic-gate 	VM_STAT_ADD(pagecnt.pc_demote_pages[0]);
53760Sstevel@tonic-gate 
53770Sstevel@tonic-gate 	mtx = page_szc_lock(pp);
53780Sstevel@tonic-gate 	if (mtx != NULL) {
53790Sstevel@tonic-gate 		hat_page_demote(pp);
53800Sstevel@tonic-gate 		mutex_exit(mtx);
53810Sstevel@tonic-gate 	}
53820Sstevel@tonic-gate 	ASSERT(pp->p_szc == 0);
53830Sstevel@tonic-gate }
53840Sstevel@tonic-gate 
53850Sstevel@tonic-gate /*
53860Sstevel@tonic-gate  * Mark any existing pages for migration in the given range
53870Sstevel@tonic-gate  */
53880Sstevel@tonic-gate void
53890Sstevel@tonic-gate page_mark_migrate(struct seg *seg, caddr_t addr, size_t len,
53900Sstevel@tonic-gate     struct anon_map *amp, ulong_t anon_index, vnode_t *vp,
53910Sstevel@tonic-gate     u_offset_t vnoff, int rflag)
53920Sstevel@tonic-gate {
53930Sstevel@tonic-gate 	struct anon	*ap;
53940Sstevel@tonic-gate 	vnode_t		*curvp;
53950Sstevel@tonic-gate 	lgrp_t		*from;
53960Sstevel@tonic-gate 	pgcnt_t		i;
53970Sstevel@tonic-gate 	pgcnt_t		nlocked;
53980Sstevel@tonic-gate 	u_offset_t	off;
53990Sstevel@tonic-gate 	pfn_t		pfn;
54000Sstevel@tonic-gate 	size_t		pgsz;
54010Sstevel@tonic-gate 	size_t		segpgsz;
54020Sstevel@tonic-gate 	pgcnt_t		pages;
54030Sstevel@tonic-gate 	uint_t		pszc;
54040Sstevel@tonic-gate 	page_t		**ppa;
54050Sstevel@tonic-gate 	pgcnt_t		ppa_nentries;
54060Sstevel@tonic-gate 	page_t		*pp;
54070Sstevel@tonic-gate 	caddr_t		va;
54080Sstevel@tonic-gate 	ulong_t		an_idx;
54090Sstevel@tonic-gate 	anon_sync_obj_t	cookie;
54100Sstevel@tonic-gate 
54110Sstevel@tonic-gate 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
54120Sstevel@tonic-gate 
54130Sstevel@tonic-gate 	/*
54140Sstevel@tonic-gate 	 * Don't do anything if don't need to do lgroup optimizations
54150Sstevel@tonic-gate 	 * on this system
54160Sstevel@tonic-gate 	 */
54170Sstevel@tonic-gate 	if (!lgrp_optimizations())
54180Sstevel@tonic-gate 		return;
54190Sstevel@tonic-gate 
54200Sstevel@tonic-gate 	/*
54210Sstevel@tonic-gate 	 * Align address and length to (potentially large) page boundary
54220Sstevel@tonic-gate 	 */
54230Sstevel@tonic-gate 	segpgsz = page_get_pagesize(seg->s_szc);
54240Sstevel@tonic-gate 	addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz);
54250Sstevel@tonic-gate 	if (rflag)
54260Sstevel@tonic-gate 		len = P2ROUNDUP(len, segpgsz);
54270Sstevel@tonic-gate 
54280Sstevel@tonic-gate 	/*
54290Sstevel@tonic-gate 	 * Allocate page array to accomodate largest page size
54300Sstevel@tonic-gate 	 */
54310Sstevel@tonic-gate 	pgsz = page_get_pagesize(page_num_pagesizes() - 1);
54320Sstevel@tonic-gate 	ppa_nentries = btop(pgsz);
54330Sstevel@tonic-gate 	ppa = kmem_zalloc(ppa_nentries * sizeof (page_t *), KM_SLEEP);
54340Sstevel@tonic-gate 
54350Sstevel@tonic-gate 	/*
54360Sstevel@tonic-gate 	 * Do one (large) page at a time
54370Sstevel@tonic-gate 	 */
54380Sstevel@tonic-gate 	va = addr;
54390Sstevel@tonic-gate 	while (va < addr + len) {
54400Sstevel@tonic-gate 		/*
54410Sstevel@tonic-gate 		 * Lookup (root) page for vnode and offset corresponding to
54420Sstevel@tonic-gate 		 * this virtual address
54430Sstevel@tonic-gate 		 * Try anonmap first since there may be copy-on-write
54440Sstevel@tonic-gate 		 * pages, but initialize vnode pointer and offset using
54450Sstevel@tonic-gate 		 * vnode arguments just in case there isn't an amp.
54460Sstevel@tonic-gate 		 */
54470Sstevel@tonic-gate 		curvp = vp;
54480Sstevel@tonic-gate 		off = vnoff + va - seg->s_base;
54490Sstevel@tonic-gate 		if (amp) {
54500Sstevel@tonic-gate 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
54510Sstevel@tonic-gate 			an_idx = anon_index + seg_page(seg, va);
54520Sstevel@tonic-gate 			anon_array_enter(amp, an_idx, &cookie);
54530Sstevel@tonic-gate 			ap = anon_get_ptr(amp->ahp, an_idx);
54540Sstevel@tonic-gate 			if (ap)
54550Sstevel@tonic-gate 				swap_xlate(ap, &curvp, &off);
54560Sstevel@tonic-gate 			anon_array_exit(&cookie);
54570Sstevel@tonic-gate 			ANON_LOCK_EXIT(&amp->a_rwlock);
54580Sstevel@tonic-gate 		}
54590Sstevel@tonic-gate 
54600Sstevel@tonic-gate 		pp = NULL;
54610Sstevel@tonic-gate 		if (curvp)
54620Sstevel@tonic-gate 			pp = page_lookup(curvp, off, SE_SHARED);
54630Sstevel@tonic-gate 
54640Sstevel@tonic-gate 		/*
54650Sstevel@tonic-gate 		 * If there isn't a page at this virtual address,
54660Sstevel@tonic-gate 		 * skip to next page
54670Sstevel@tonic-gate 		 */
54680Sstevel@tonic-gate 		if (pp == NULL) {
54690Sstevel@tonic-gate 			va += PAGESIZE;
54700Sstevel@tonic-gate 			continue;
54710Sstevel@tonic-gate 		}
54720Sstevel@tonic-gate 
54730Sstevel@tonic-gate 		/*
54740Sstevel@tonic-gate 		 * Figure out which lgroup this page is in for kstats
54750Sstevel@tonic-gate 		 */
54760Sstevel@tonic-gate 		pfn = page_pptonum(pp);
54770Sstevel@tonic-gate 		from = lgrp_pfn_to_lgrp(pfn);
54780Sstevel@tonic-gate 
54790Sstevel@tonic-gate 		/*
54800Sstevel@tonic-gate 		 * Get page size, and round up and skip to next page boundary
54810Sstevel@tonic-gate 		 * if unaligned address
54820Sstevel@tonic-gate 		 */
54830Sstevel@tonic-gate 		pszc = pp->p_szc;
54840Sstevel@tonic-gate 		pgsz = page_get_pagesize(pszc);
54850Sstevel@tonic-gate 		pages = btop(pgsz);
54860Sstevel@tonic-gate 		if (!IS_P2ALIGNED(va, pgsz) ||
54870Sstevel@tonic-gate 		    !IS_P2ALIGNED(pfn, pages) ||
54880Sstevel@tonic-gate 		    pgsz > segpgsz) {
54890Sstevel@tonic-gate 			pgsz = MIN(pgsz, segpgsz);
54900Sstevel@tonic-gate 			page_unlock(pp);
54910Sstevel@tonic-gate 			i = btop(P2END((uintptr_t)va, pgsz) -
54920Sstevel@tonic-gate 			    (uintptr_t)va);
54930Sstevel@tonic-gate 			va = (caddr_t)P2END((uintptr_t)va, pgsz);
54940Sstevel@tonic-gate 			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, i);
54950Sstevel@tonic-gate 			continue;
54960Sstevel@tonic-gate 		}
54970Sstevel@tonic-gate 
54980Sstevel@tonic-gate 		/*
54990Sstevel@tonic-gate 		 * Upgrade to exclusive lock on page
55000Sstevel@tonic-gate 		 */
55010Sstevel@tonic-gate 		if (!page_tryupgrade(pp)) {
55020Sstevel@tonic-gate 			page_unlock(pp);
55030Sstevel@tonic-gate 			va += pgsz;
55040Sstevel@tonic-gate 			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
55050Sstevel@tonic-gate 			    btop(pgsz));
55060Sstevel@tonic-gate 			continue;
55070Sstevel@tonic-gate 		}
55080Sstevel@tonic-gate 
55090Sstevel@tonic-gate 		/*
55100Sstevel@tonic-gate 		 * Remember pages locked exclusively and how many
55110Sstevel@tonic-gate 		 */
55120Sstevel@tonic-gate 		ppa[0] = pp;
55130Sstevel@tonic-gate 		nlocked = 1;
55140Sstevel@tonic-gate 
55150Sstevel@tonic-gate 		/*
55160Sstevel@tonic-gate 		 * Lock constituent pages if this is large page
55170Sstevel@tonic-gate 		 */
55180Sstevel@tonic-gate 		if (pages > 1) {
55190Sstevel@tonic-gate 			/*
55200Sstevel@tonic-gate 			 * Lock all constituents except root page, since it
55210Sstevel@tonic-gate 			 * should be locked already.
55220Sstevel@tonic-gate 			 */
55230Sstevel@tonic-gate 			for (i = 1; i < pages; i++) {
5524414Skchow 				pp++;
55250Sstevel@tonic-gate 				if (!page_trylock(pp, SE_EXCL)) {
55260Sstevel@tonic-gate 					break;
55270Sstevel@tonic-gate 				}
55280Sstevel@tonic-gate 				if (PP_ISFREE(pp) ||
55290Sstevel@tonic-gate 				    pp->p_szc != pszc) {
55300Sstevel@tonic-gate 					/*
55310Sstevel@tonic-gate 					 * hat_page_demote() raced in with us.
55320Sstevel@tonic-gate 					 */
55330Sstevel@tonic-gate 					ASSERT(!IS_SWAPFSVP(curvp));
55340Sstevel@tonic-gate 					page_unlock(pp);
55350Sstevel@tonic-gate 					break;
55360Sstevel@tonic-gate 				}
55370Sstevel@tonic-gate 				ppa[nlocked] = pp;
55380Sstevel@tonic-gate 				nlocked++;
55390Sstevel@tonic-gate 			}
55400Sstevel@tonic-gate 		}
55410Sstevel@tonic-gate 
55420Sstevel@tonic-gate 		/*
55430Sstevel@tonic-gate 		 * If all constituent pages couldn't be locked,
55440Sstevel@tonic-gate 		 * unlock pages locked so far and skip to next page.
55450Sstevel@tonic-gate 		 */
55460Sstevel@tonic-gate 		if (nlocked != pages) {
55470Sstevel@tonic-gate 			for (i = 0; i < nlocked; i++)
55480Sstevel@tonic-gate 				page_unlock(ppa[i]);
55490Sstevel@tonic-gate 			va += pgsz;
55500Sstevel@tonic-gate 			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
55510Sstevel@tonic-gate 			    btop(pgsz));
55520Sstevel@tonic-gate 			continue;
55530Sstevel@tonic-gate 		}
55540Sstevel@tonic-gate 
55550Sstevel@tonic-gate 		/*
55560Sstevel@tonic-gate 		 * hat_page_demote() can no longer happen
55570Sstevel@tonic-gate 		 * since last cons page had the right p_szc after
55580Sstevel@tonic-gate 		 * all cons pages were locked. all cons pages
55590Sstevel@tonic-gate 		 * should now have the same p_szc.
55600Sstevel@tonic-gate 		 */
55610Sstevel@tonic-gate 
55620Sstevel@tonic-gate 		/*
55630Sstevel@tonic-gate 		 * All constituent pages locked successfully, so mark
55640Sstevel@tonic-gate 		 * large page for migration and unload the mappings of
55650Sstevel@tonic-gate 		 * constituent pages, so a fault will occur on any part of the
55660Sstevel@tonic-gate 		 * large page
55670Sstevel@tonic-gate 		 */
55680Sstevel@tonic-gate 		PP_SETMIGRATE(ppa[0]);
55690Sstevel@tonic-gate 		for (i = 0; i < nlocked; i++) {
55700Sstevel@tonic-gate 			pp = ppa[i];
55710Sstevel@tonic-gate 			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
55720Sstevel@tonic-gate 			ASSERT(hat_page_getshare(pp) == 0);
55730Sstevel@tonic-gate 			page_unlock(pp);
55740Sstevel@tonic-gate 		}
55750Sstevel@tonic-gate 		lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked);
55760Sstevel@tonic-gate 
55770Sstevel@tonic-gate 		va += pgsz;
55780Sstevel@tonic-gate 	}
55790Sstevel@tonic-gate 	kmem_free(ppa, ppa_nentries * sizeof (page_t *));
55800Sstevel@tonic-gate }
55810Sstevel@tonic-gate 
55820Sstevel@tonic-gate /*
55830Sstevel@tonic-gate  * Migrate any pages that have been marked for migration in the given range
55840Sstevel@tonic-gate  */
55850Sstevel@tonic-gate void
55860Sstevel@tonic-gate page_migrate(
55870Sstevel@tonic-gate 	struct seg	*seg,
55880Sstevel@tonic-gate 	caddr_t		addr,
55890Sstevel@tonic-gate 	page_t		**ppa,
55900Sstevel@tonic-gate 	pgcnt_t		npages)
55910Sstevel@tonic-gate {
55920Sstevel@tonic-gate 	lgrp_t		*from;
55930Sstevel@tonic-gate 	lgrp_t		*to;
55940Sstevel@tonic-gate 	page_t		*newpp;
55950Sstevel@tonic-gate 	page_t		*pp;
55960Sstevel@tonic-gate 	pfn_t		pfn;
55970Sstevel@tonic-gate 	size_t		pgsz;
55980Sstevel@tonic-gate 	spgcnt_t	page_cnt;
55990Sstevel@tonic-gate 	spgcnt_t	i;
56000Sstevel@tonic-gate 	uint_t		pszc;
56010Sstevel@tonic-gate 
56020Sstevel@tonic-gate 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
56030Sstevel@tonic-gate 
56040Sstevel@tonic-gate 	while (npages > 0) {
56050Sstevel@tonic-gate 		pp = *ppa;
56060Sstevel@tonic-gate 		pszc = pp->p_szc;
56070Sstevel@tonic-gate 		pgsz = page_get_pagesize(pszc);
56080Sstevel@tonic-gate 		page_cnt = btop(pgsz);
56090Sstevel@tonic-gate 
56100Sstevel@tonic-gate 		/*
56110Sstevel@tonic-gate 		 * Check to see whether this page is marked for migration
56120Sstevel@tonic-gate 		 *
56130Sstevel@tonic-gate 		 * Assume that root page of large page is marked for
56140Sstevel@tonic-gate 		 * migration and none of the other constituent pages
56150Sstevel@tonic-gate 		 * are marked.  This really simplifies clearing the
56160Sstevel@tonic-gate 		 * migrate bit by not having to clear it from each
56170Sstevel@tonic-gate 		 * constituent page.
56180Sstevel@tonic-gate 		 *
56190Sstevel@tonic-gate 		 * note we don't want to relocate an entire large page if
56200Sstevel@tonic-gate 		 * someone is only using one subpage.
56210Sstevel@tonic-gate 		 */
56220Sstevel@tonic-gate 		if (npages < page_cnt)
56230Sstevel@tonic-gate 			break;
56240Sstevel@tonic-gate 
56250Sstevel@tonic-gate 		/*
56260Sstevel@tonic-gate 		 * Is it marked for migration?
56270Sstevel@tonic-gate 		 */
56280Sstevel@tonic-gate 		if (!PP_ISMIGRATE(pp))
56290Sstevel@tonic-gate 			goto next;
56300Sstevel@tonic-gate 
56310Sstevel@tonic-gate 		/*
56320Sstevel@tonic-gate 		 * Determine lgroups that page is being migrated between
56330Sstevel@tonic-gate 		 */
56340Sstevel@tonic-gate 		pfn = page_pptonum(pp);
56350Sstevel@tonic-gate 		if (!IS_P2ALIGNED(pfn, page_cnt)) {
56360Sstevel@tonic-gate 			break;
56370Sstevel@tonic-gate 		}
56380Sstevel@tonic-gate 		from = lgrp_pfn_to_lgrp(pfn);
56390Sstevel@tonic-gate 		to = lgrp_mem_choose(seg, addr, pgsz);
56400Sstevel@tonic-gate 
56410Sstevel@tonic-gate 		/*
56420Sstevel@tonic-gate 		 * Check to see whether we are trying to migrate page to lgroup
56430Sstevel@tonic-gate 		 * where it is allocated already
56440Sstevel@tonic-gate 		 */
56450Sstevel@tonic-gate 		if (to == from) {
56460Sstevel@tonic-gate 			PP_CLRMIGRATE(pp);
56470Sstevel@tonic-gate 			goto next;
56480Sstevel@tonic-gate 		}
56490Sstevel@tonic-gate 
56500Sstevel@tonic-gate 		/*
56510Sstevel@tonic-gate 		 * Need to get exclusive lock's to migrate
56520Sstevel@tonic-gate 		 */
56530Sstevel@tonic-gate 		for (i = 0; i < page_cnt; i++) {
56540Sstevel@tonic-gate 			ASSERT(PAGE_LOCKED(ppa[i]));
56550Sstevel@tonic-gate 			if (page_pptonum(ppa[i]) != pfn + i ||
56560Sstevel@tonic-gate 			    ppa[i]->p_szc != pszc) {
56570Sstevel@tonic-gate 				break;
56580Sstevel@tonic-gate 			}
56590Sstevel@tonic-gate 			if (!page_tryupgrade(ppa[i])) {
56600Sstevel@tonic-gate 				lgrp_stat_add(from->lgrp_id,
56610Sstevel@tonic-gate 				    LGRP_PM_FAIL_LOCK_PGS,
56620Sstevel@tonic-gate 				    page_cnt);
56630Sstevel@tonic-gate 				break;
56640Sstevel@tonic-gate 			}
56650Sstevel@tonic-gate 		}
56660Sstevel@tonic-gate 		if (i != page_cnt) {
56670Sstevel@tonic-gate 			while (--i != -1) {
56680Sstevel@tonic-gate 				page_downgrade(ppa[i]);
56690Sstevel@tonic-gate 			}
56700Sstevel@tonic-gate 			goto next;
56710Sstevel@tonic-gate 		}
56720Sstevel@tonic-gate 
56730Sstevel@tonic-gate 		(void) page_create_wait(page_cnt, PG_WAIT);
56740Sstevel@tonic-gate 		newpp = page_get_replacement_page(pp, to, PGR_SAMESZC);
56750Sstevel@tonic-gate 		if (newpp == NULL) {
56760Sstevel@tonic-gate 			page_create_putback(page_cnt);
56770Sstevel@tonic-gate 			for (i = 0; i < page_cnt; i++) {
56780Sstevel@tonic-gate 				page_downgrade(ppa[i]);
56790Sstevel@tonic-gate 			}
56800Sstevel@tonic-gate 			lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS,
56810Sstevel@tonic-gate 			    page_cnt);
56820Sstevel@tonic-gate 			goto next;
56830Sstevel@tonic-gate 		}
56840Sstevel@tonic-gate 		ASSERT(newpp->p_szc == pszc);
56850Sstevel@tonic-gate 		/*
56860Sstevel@tonic-gate 		 * Clear migrate bit and relocate page
56870Sstevel@tonic-gate 		 */
56880Sstevel@tonic-gate 		PP_CLRMIGRATE(pp);
56890Sstevel@tonic-gate 		if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) {
56900Sstevel@tonic-gate 			panic("page_migrate: page_relocate failed");
56910Sstevel@tonic-gate 		}
56920Sstevel@tonic-gate 		ASSERT(page_cnt * PAGESIZE == pgsz);
56930Sstevel@tonic-gate 
56940Sstevel@tonic-gate 		/*
56950Sstevel@tonic-gate 		 * Keep stats for number of pages migrated from and to
56960Sstevel@tonic-gate 		 * each lgroup
56970Sstevel@tonic-gate 		 */
56980Sstevel@tonic-gate 		lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt);
56990Sstevel@tonic-gate 		lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt);
57000Sstevel@tonic-gate 		/*
57010Sstevel@tonic-gate 		 * update the page_t array we were passed in and
57020Sstevel@tonic-gate 		 * unlink constituent pages of a large page.
57030Sstevel@tonic-gate 		 */
57040Sstevel@tonic-gate 		for (i = 0; i < page_cnt; ++i, ++pp) {
57050Sstevel@tonic-gate 			ASSERT(PAGE_EXCL(newpp));
57060Sstevel@tonic-gate 			ASSERT(newpp->p_szc == pszc);
57070Sstevel@tonic-gate 			ppa[i] = newpp;
57080Sstevel@tonic-gate 			pp = newpp;
57090Sstevel@tonic-gate 			page_sub(&newpp, pp);
57100Sstevel@tonic-gate 			page_downgrade(pp);
57110Sstevel@tonic-gate 		}
57120Sstevel@tonic-gate 		ASSERT(newpp == NULL);
57130Sstevel@tonic-gate next:
57140Sstevel@tonic-gate 		addr += pgsz;
57150Sstevel@tonic-gate 		ppa += page_cnt;
57160Sstevel@tonic-gate 		npages -= page_cnt;
57170Sstevel@tonic-gate 	}
57180Sstevel@tonic-gate }
57190Sstevel@tonic-gate 
57200Sstevel@tonic-gate ulong_t mem_waiters 	= 0;
57210Sstevel@tonic-gate ulong_t	max_count 	= 20;
57220Sstevel@tonic-gate #define	MAX_DELAY	0x1ff
57230Sstevel@tonic-gate 
57240Sstevel@tonic-gate /*
57250Sstevel@tonic-gate  * Check if enough memory is available to proceed.
57260Sstevel@tonic-gate  * Depending on system configuration and how much memory is
57270Sstevel@tonic-gate  * reserved for swap we need to check against two variables.
57280Sstevel@tonic-gate  * e.g. on systems with little physical swap availrmem can be
57290Sstevel@tonic-gate  * more reliable indicator of how much memory is available.
57300Sstevel@tonic-gate  * On systems with large phys swap freemem can be better indicator.
57310Sstevel@tonic-gate  * If freemem drops below threshold level don't return an error
57320Sstevel@tonic-gate  * immediately but wake up pageout to free memory and block.
57330Sstevel@tonic-gate  * This is done number of times. If pageout is not able to free
57340Sstevel@tonic-gate  * memory within certain time return an error.
57350Sstevel@tonic-gate  * The same applies for availrmem but kmem_reap is used to
57360Sstevel@tonic-gate  * free memory.
57370Sstevel@tonic-gate  */
57380Sstevel@tonic-gate int
57390Sstevel@tonic-gate page_mem_avail(pgcnt_t npages)
57400Sstevel@tonic-gate {
57410Sstevel@tonic-gate 	ulong_t count;
57420Sstevel@tonic-gate 
57430Sstevel@tonic-gate #if defined(__i386)
57440Sstevel@tonic-gate 	if (freemem > desfree + npages &&
57450Sstevel@tonic-gate 	    availrmem > swapfs_reserve + npages &&
57460Sstevel@tonic-gate 	    btop(vmem_size(heap_arena, VMEM_FREE)) > tune.t_minarmem +
57470Sstevel@tonic-gate 	    npages)
57480Sstevel@tonic-gate 		return (1);
57490Sstevel@tonic-gate #else
57500Sstevel@tonic-gate 	if (freemem > desfree + npages &&
57510Sstevel@tonic-gate 	    availrmem > swapfs_reserve + npages)
57520Sstevel@tonic-gate 		return (1);
57530Sstevel@tonic-gate #endif
57540Sstevel@tonic-gate 
57550Sstevel@tonic-gate 	count = max_count;
57560Sstevel@tonic-gate 	atomic_add_long(&mem_waiters, 1);
57570Sstevel@tonic-gate 
57580Sstevel@tonic-gate 	while (freemem < desfree + npages && --count) {
57590Sstevel@tonic-gate 		cv_signal(&proc_pageout->p_cv);
57600Sstevel@tonic-gate 		if (delay_sig(hz + (mem_waiters & MAX_DELAY))) {
57610Sstevel@tonic-gate 			atomic_add_long(&mem_waiters, -1);
57620Sstevel@tonic-gate 			return (0);
57630Sstevel@tonic-gate 		}
57640Sstevel@tonic-gate 	}
57650Sstevel@tonic-gate 	if (count == 0) {
57660Sstevel@tonic-gate 		atomic_add_long(&mem_waiters, -1);
57670Sstevel@tonic-gate 		return (0);
57680Sstevel@tonic-gate 	}
57690Sstevel@tonic-gate 
57700Sstevel@tonic-gate 	count = max_count;
57710Sstevel@tonic-gate 	while (availrmem < swapfs_reserve + npages && --count) {
57720Sstevel@tonic-gate 		kmem_reap();
57730Sstevel@tonic-gate 		if (delay_sig(hz + (mem_waiters & MAX_DELAY))) {
57740Sstevel@tonic-gate 			atomic_add_long(&mem_waiters, -1);
57750Sstevel@tonic-gate 			return (0);
57760Sstevel@tonic-gate 		}
57770Sstevel@tonic-gate 	}
57780Sstevel@tonic-gate 	atomic_add_long(&mem_waiters, -1);
57790Sstevel@tonic-gate 	if (count == 0)
57800Sstevel@tonic-gate 		return (0);
57810Sstevel@tonic-gate 
57820Sstevel@tonic-gate #if defined(__i386)
57830Sstevel@tonic-gate 	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
57840Sstevel@tonic-gate 	    tune.t_minarmem + npages)
57850Sstevel@tonic-gate 		return (0);
57860Sstevel@tonic-gate #endif
57870Sstevel@tonic-gate 	return (1);
57880Sstevel@tonic-gate }
57890Sstevel@tonic-gate 
57900Sstevel@tonic-gate 
57910Sstevel@tonic-gate /*
57920Sstevel@tonic-gate  * Search the memory segments to locate the desired page.  Within a
57930Sstevel@tonic-gate  * segment, pages increase linearly with one page structure per
57940Sstevel@tonic-gate  * physical page frame (size PAGESIZE).  The search begins
57950Sstevel@tonic-gate  * with the segment that was accessed last, to take advantage of locality.
57960Sstevel@tonic-gate  * If the hint misses, we start from the beginning of the sorted memseg list
57970Sstevel@tonic-gate  */
57980Sstevel@tonic-gate 
57990Sstevel@tonic-gate 
58000Sstevel@tonic-gate /*
58010Sstevel@tonic-gate  * Some data structures for pfn to pp lookup.
58020Sstevel@tonic-gate  */
58030Sstevel@tonic-gate ulong_t mhash_per_slot;
58040Sstevel@tonic-gate struct memseg *memseg_hash[N_MEM_SLOTS];
58050Sstevel@tonic-gate 
58060Sstevel@tonic-gate page_t *
58070Sstevel@tonic-gate page_numtopp_nolock(pfn_t pfnum)
58080Sstevel@tonic-gate {
58090Sstevel@tonic-gate 	struct memseg *seg;
58100Sstevel@tonic-gate 	page_t *pp;
5811414Skchow 	vm_cpu_data_t *vc = CPU->cpu_vm_data;
5812414Skchow 
5813414Skchow 	ASSERT(vc != NULL);
58140Sstevel@tonic-gate 
58150Sstevel@tonic-gate 	MEMSEG_STAT_INCR(nsearch);
58160Sstevel@tonic-gate 
58170Sstevel@tonic-gate 	/* Try last winner first */
5818414Skchow 	if (((seg = vc->vc_pnum_memseg) != NULL) &&
58190Sstevel@tonic-gate 		(pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
58200Sstevel@tonic-gate 		MEMSEG_STAT_INCR(nlastwon);
58210Sstevel@tonic-gate 		pp = seg->pages + (pfnum - seg->pages_base);
58220Sstevel@tonic-gate 		if (pp->p_pagenum == pfnum)
58230Sstevel@tonic-gate 			return ((page_t *)pp);
58240Sstevel@tonic-gate 	}
58250Sstevel@tonic-gate 
58260Sstevel@tonic-gate 	/* Else Try hash */
58270Sstevel@tonic-gate 	if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
58280Sstevel@tonic-gate 		(pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
58290Sstevel@tonic-gate 		MEMSEG_STAT_INCR(nhashwon);
5830414Skchow 		vc->vc_pnum_memseg = seg;
58310Sstevel@tonic-gate 		pp = seg->pages + (pfnum - seg->pages_base);
58320Sstevel@tonic-gate 		if (pp->p_pagenum == pfnum)
58330Sstevel@tonic-gate 			return ((page_t *)pp);
58340Sstevel@tonic-gate 	}
58350Sstevel@tonic-gate 
58360Sstevel@tonic-gate 	/* Else Brute force */
58370Sstevel@tonic-gate 	for (seg = memsegs; seg != NULL; seg = seg->next) {
58380Sstevel@tonic-gate 		if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
5839414Skchow 			vc->vc_pnum_memseg = seg;
58400Sstevel@tonic-gate 			pp = seg->pages + (pfnum - seg->pages_base);
58410Sstevel@tonic-gate 			return ((page_t *)pp);
58420Sstevel@tonic-gate 		}
58430Sstevel@tonic-gate 	}
5844414Skchow 	vc->vc_pnum_memseg = NULL;
58450Sstevel@tonic-gate 	MEMSEG_STAT_INCR(nnotfound);
58460Sstevel@tonic-gate 	return ((page_t *)NULL);
58470Sstevel@tonic-gate 
58480Sstevel@tonic-gate }
58490Sstevel@tonic-gate 
58500Sstevel@tonic-gate struct memseg *
58510Sstevel@tonic-gate page_numtomemseg_nolock(pfn_t pfnum)
58520Sstevel@tonic-gate {
58530Sstevel@tonic-gate 	struct memseg *seg;
58540Sstevel@tonic-gate 	page_t *pp;
58550Sstevel@tonic-gate 
58560Sstevel@tonic-gate 	/* Try hash */
58570Sstevel@tonic-gate 	if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
58580Sstevel@tonic-gate 		(pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
58590Sstevel@tonic-gate 		pp = seg->pages + (pfnum - seg->pages_base);
58600Sstevel@tonic-gate 		if (pp->p_pagenum == pfnum)
58610Sstevel@tonic-gate 			return (seg);
58620Sstevel@tonic-gate 	}
58630Sstevel@tonic-gate 
58640Sstevel@tonic-gate 	/* Else Brute force */
58650Sstevel@tonic-gate 	for (seg = memsegs; seg != NULL; seg = seg->next) {
58660Sstevel@tonic-gate 		if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
58670Sstevel@tonic-gate 			return (seg);
58680Sstevel@tonic-gate 		}
58690Sstevel@tonic-gate 	}
58700Sstevel@tonic-gate 	return ((struct memseg *)NULL);
58710Sstevel@tonic-gate }
58720Sstevel@tonic-gate 
58730Sstevel@tonic-gate /*
58740Sstevel@tonic-gate  * Given a page and a count return the page struct that is
58750Sstevel@tonic-gate  * n structs away from the current one in the global page
58760Sstevel@tonic-gate  * list.
58770Sstevel@tonic-gate  *
58780Sstevel@tonic-gate  * This function wraps to the first page upon
58790Sstevel@tonic-gate  * reaching the end of the memseg list.
58800Sstevel@tonic-gate  */
58810Sstevel@tonic-gate page_t *
58820Sstevel@tonic-gate page_nextn(page_t *pp, ulong_t n)
58830Sstevel@tonic-gate {
58840Sstevel@tonic-gate 	struct memseg *seg;
58850Sstevel@tonic-gate 	page_t *ppn;
5886414Skchow 	vm_cpu_data_t *vc = (vm_cpu_data_t *)CPU->cpu_vm_data;
5887414Skchow 
5888414Skchow 	ASSERT(vc != NULL);
5889414Skchow 
5890414Skchow 	if (((seg = vc->vc_pnext_memseg) == NULL) ||
58910Sstevel@tonic-gate 	    (seg->pages_base == seg->pages_end) ||
58920Sstevel@tonic-gate 	    !(pp >= seg->pages && pp < seg->epages)) {
58930Sstevel@tonic-gate 
58940Sstevel@tonic-gate 		for (seg = memsegs; seg; seg = seg->next) {
58950Sstevel@tonic-gate 			if (pp >= seg->pages && pp < seg->epages)
58960Sstevel@tonic-gate 				break;
58970Sstevel@tonic-gate 		}
58980Sstevel@tonic-gate 
58990Sstevel@tonic-gate 		if (seg == NULL) {
59000Sstevel@tonic-gate 			/* Memory delete got in, return something valid. */
59010Sstevel@tonic-gate 			/* TODO: fix me. */
59020Sstevel@tonic-gate 			seg = memsegs;
59030Sstevel@tonic-gate 			pp = seg->pages;
59040Sstevel@tonic-gate 		}
59050Sstevel@tonic-gate 	}
59060Sstevel@tonic-gate 
59070Sstevel@tonic-gate 	/* check for wraparound - possible if n is large */
59080Sstevel@tonic-gate 	while ((ppn = (pp + n)) >= seg->epages || ppn < pp) {
59090Sstevel@tonic-gate 		n -= seg->epages - pp;
59100Sstevel@tonic-gate 		seg = seg->next;
59110Sstevel@tonic-gate 		if (seg == NULL)
59120Sstevel@tonic-gate 			seg = memsegs;
59130Sstevel@tonic-gate 		pp = seg->pages;
59140Sstevel@tonic-gate 	}
5915414Skchow 	vc->vc_pnext_memseg = seg;
59160Sstevel@tonic-gate 	return (ppn);
59170Sstevel@tonic-gate }
59180Sstevel@tonic-gate 
59190Sstevel@tonic-gate /*
59200Sstevel@tonic-gate  * Initialize for a loop using page_next_scan_large().
59210Sstevel@tonic-gate  */
59220Sstevel@tonic-gate page_t *
59230Sstevel@tonic-gate page_next_scan_init(void **cookie)
59240Sstevel@tonic-gate {
59250Sstevel@tonic-gate 	ASSERT(cookie != NULL);
59260Sstevel@tonic-gate 	*cookie = (void *)memsegs;
59270Sstevel@tonic-gate 	return ((page_t *)memsegs->pages);
59280Sstevel@tonic-gate }
59290Sstevel@tonic-gate 
59300Sstevel@tonic-gate /*
59310Sstevel@tonic-gate  * Return the next page in a scan of page_t's, assuming we want
59320Sstevel@tonic-gate  * to skip over sub-pages within larger page sizes.
59330Sstevel@tonic-gate  *
59340Sstevel@tonic-gate  * The cookie is used to keep track of the current memseg.
59350Sstevel@tonic-gate  */
59360Sstevel@tonic-gate page_t *
59370Sstevel@tonic-gate page_next_scan_large(
59380Sstevel@tonic-gate 	page_t		*pp,
59390Sstevel@tonic-gate 	ulong_t		*n,
59400Sstevel@tonic-gate 	void		**cookie)
59410Sstevel@tonic-gate {
59420Sstevel@tonic-gate 	struct memseg	*seg = (struct memseg *)*cookie;
59430Sstevel@tonic-gate 	page_t		*new_pp;
59440Sstevel@tonic-gate 	ulong_t		cnt;
59450Sstevel@tonic-gate 	pfn_t		pfn;
59460Sstevel@tonic-gate 
59470Sstevel@tonic-gate 
59480Sstevel@tonic-gate 	/*
59490Sstevel@tonic-gate 	 * get the count of page_t's to skip based on the page size
59500Sstevel@tonic-gate 	 */
59510Sstevel@tonic-gate 	ASSERT(pp != NULL);
59520Sstevel@tonic-gate 	if (pp->p_szc == 0) {
59530Sstevel@tonic-gate 		cnt = 1;
59540Sstevel@tonic-gate 	} else {
59550Sstevel@tonic-gate 		pfn = page_pptonum(pp);
59560Sstevel@tonic-gate 		cnt = page_get_pagecnt(pp->p_szc);
59570Sstevel@tonic-gate 		cnt -= pfn & (cnt - 1);
59580Sstevel@tonic-gate 	}
59590Sstevel@tonic-gate 	*n += cnt;
59600Sstevel@tonic-gate 	new_pp = pp + cnt;
59610Sstevel@tonic-gate 
59620Sstevel@tonic-gate 	/*
59630Sstevel@tonic-gate 	 * Catch if we went past the end of the current memory segment. If so,
59640Sstevel@tonic-gate 	 * just move to the next segment with pages.
59650Sstevel@tonic-gate 	 */
59660Sstevel@tonic-gate 	if (new_pp >= seg->epages) {
59670Sstevel@tonic-gate 		do {
59680Sstevel@tonic-gate 			seg = seg->next;
59690Sstevel@tonic-gate 			if (seg == NULL)
59700Sstevel@tonic-gate 				seg = memsegs;
59710Sstevel@tonic-gate 		} while (seg->pages == seg->epages);
59720Sstevel@tonic-gate 		new_pp = seg->pages;
59730Sstevel@tonic-gate 		*cookie = (void *)seg;
59740Sstevel@tonic-gate 	}
59750Sstevel@tonic-gate 
59760Sstevel@tonic-gate 	return (new_pp);
59770Sstevel@tonic-gate }
59780Sstevel@tonic-gate 
59790Sstevel@tonic-gate 
59800Sstevel@tonic-gate /*
59810Sstevel@tonic-gate  * Returns next page in list. Note: this function wraps
59820Sstevel@tonic-gate  * to the first page in the list upon reaching the end
59830Sstevel@tonic-gate  * of the list. Callers should be aware of this fact.
59840Sstevel@tonic-gate  */
59850Sstevel@tonic-gate 
59860Sstevel@tonic-gate /* We should change this be a #define */
59870Sstevel@tonic-gate 
59880Sstevel@tonic-gate page_t *
59890Sstevel@tonic-gate page_next(page_t *pp)
59900Sstevel@tonic-gate {
59910Sstevel@tonic-gate 	return (page_nextn(pp, 1));
59920Sstevel@tonic-gate }
59930Sstevel@tonic-gate 
59940Sstevel@tonic-gate page_t *
59950Sstevel@tonic-gate page_first()
59960Sstevel@tonic-gate {
59970Sstevel@tonic-gate 	return ((page_t *)memsegs->pages);
59980Sstevel@tonic-gate }
59990Sstevel@tonic-gate 
60000Sstevel@tonic-gate 
60010Sstevel@tonic-gate /*
60020Sstevel@tonic-gate  * This routine is called at boot with the initial memory configuration
60030Sstevel@tonic-gate  * and when memory is added or removed.
60040Sstevel@tonic-gate  */
60050Sstevel@tonic-gate void
60060Sstevel@tonic-gate build_pfn_hash()
60070Sstevel@tonic-gate {
60080Sstevel@tonic-gate 	pfn_t cur;
60090Sstevel@tonic-gate 	pgcnt_t index;
60100Sstevel@tonic-gate 	struct memseg *pseg;
60110Sstevel@tonic-gate 	int	i;
60120Sstevel@tonic-gate 
60130Sstevel@tonic-gate 	/*
60140Sstevel@tonic-gate 	 * Clear memseg_hash array.
60150Sstevel@tonic-gate 	 * Since memory add/delete is designed to operate concurrently
60160Sstevel@tonic-gate 	 * with normal operation, the hash rebuild must be able to run
60170Sstevel@tonic-gate 	 * concurrently with page_numtopp_nolock(). To support this
60180Sstevel@tonic-gate 	 * functionality, assignments to memseg_hash array members must
60190Sstevel@tonic-gate 	 * be done atomically.
60200Sstevel@tonic-gate 	 *
60210Sstevel@tonic-gate 	 * NOTE: bzero() does not currently guarantee this for kernel
60220Sstevel@tonic-gate 	 * threads, and cannot be used here.
60230Sstevel@tonic-gate 	 */
60240Sstevel@tonic-gate 	for (i = 0; i < N_MEM_SLOTS; i++)
60250Sstevel@tonic-gate 		memseg_hash[i] = NULL;
60260Sstevel@tonic-gate 
60270Sstevel@tonic-gate 	hat_kpm_mseghash_clear(N_MEM_SLOTS);
60280Sstevel@tonic-gate 
60290Sstevel@tonic-gate 	/*
60300Sstevel@tonic-gate 	 * Physmax is the last valid pfn.
60310Sstevel@tonic-gate 	 */
60320Sstevel@tonic-gate 	mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT;
60330Sstevel@tonic-gate 	for (pseg = memsegs; pseg != NULL; pseg = pseg->next) {
60340Sstevel@tonic-gate 		index = MEMSEG_PFN_HASH(pseg->pages_base);
60350Sstevel@tonic-gate 		cur = pseg->pages_base;
60360Sstevel@tonic-gate 		do {
60370Sstevel@tonic-gate 			if (index >= N_MEM_SLOTS)
60380Sstevel@tonic-gate 				index = MEMSEG_PFN_HASH(cur);
60390Sstevel@tonic-gate 
60400Sstevel@tonic-gate 			if (memseg_hash[index] == NULL ||
60410Sstevel@tonic-gate 			    memseg_hash[index]->pages_base > pseg->pages_base) {
60420Sstevel@tonic-gate 				memseg_hash[index] = pseg;
60430Sstevel@tonic-gate 				hat_kpm_mseghash_update(index, pseg);
60440Sstevel@tonic-gate 			}
60450Sstevel@tonic-gate 			cur += mhash_per_slot;
60460Sstevel@tonic-gate 			index++;
60470Sstevel@tonic-gate 		} while (cur < pseg->pages_end);
60480Sstevel@tonic-gate 	}
60490Sstevel@tonic-gate }
60500Sstevel@tonic-gate 
60510Sstevel@tonic-gate /*
60520Sstevel@tonic-gate  * Return the pagenum for the pp
60530Sstevel@tonic-gate  */
60540Sstevel@tonic-gate pfn_t
60550Sstevel@tonic-gate page_pptonum(page_t *pp)
60560Sstevel@tonic-gate {
60570Sstevel@tonic-gate 	return (pp->p_pagenum);
60580Sstevel@tonic-gate }
60590Sstevel@tonic-gate 
60600Sstevel@tonic-gate /*
60610Sstevel@tonic-gate  * interface to the referenced and modified etc bits
60620Sstevel@tonic-gate  * in the PSM part of the page struct
60630Sstevel@tonic-gate  * when no locking is desired.
60640Sstevel@tonic-gate  */
60650Sstevel@tonic-gate void
60660Sstevel@tonic-gate page_set_props(page_t *pp, uint_t flags)
60670Sstevel@tonic-gate {
60680Sstevel@tonic-gate 	ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0);
60690Sstevel@tonic-gate 	pp->p_nrm |= (uchar_t)flags;
60700Sstevel@tonic-gate }
60710Sstevel@tonic-gate 
60720Sstevel@tonic-gate void
60730Sstevel@tonic-gate page_clr_all_props(page_t *pp)
60740Sstevel@tonic-gate {
60750Sstevel@tonic-gate 	pp->p_nrm = 0;
60760Sstevel@tonic-gate }
60770Sstevel@tonic-gate 
60780Sstevel@tonic-gate /*
6079917Selowe  * Clear p_lckcnt and p_cowcnt, adjusting freemem if required.
6080917Selowe  */
6081917Selowe int
6082917Selowe page_clear_lck_cow(page_t *pp, int adjust)
6083917Selowe {
6084917Selowe 	int	f_amount;
6085917Selowe 
6086917Selowe 	ASSERT(PAGE_EXCL(pp));
6087917Selowe 
6088917Selowe 	/*
6089917Selowe 	 * The page_struct_lock need not be acquired here since
6090917Selowe 	 * we require the caller hold the page exclusively locked.
6091917Selowe 	 */
6092917Selowe 	f_amount = 0;
6093917Selowe 	if (pp->p_lckcnt) {
6094917Selowe 		f_amount = 1;
6095917Selowe 		pp->p_lckcnt = 0;
6096917Selowe 	}
6097917Selowe 	if (pp->p_cowcnt) {
6098917Selowe 		f_amount += pp->p_cowcnt;
6099917Selowe 		pp->p_cowcnt = 0;
6100917Selowe 	}
6101917Selowe 
6102917Selowe 	if (adjust && f_amount) {
6103917Selowe 		mutex_enter(&freemem_lock);
6104917Selowe 		availrmem += f_amount;
6105917Selowe 		mutex_exit(&freemem_lock);
6106917Selowe 	}
6107917Selowe 
6108917Selowe 	return (f_amount);
6109917Selowe }
6110917Selowe 
6111917Selowe /*
61120Sstevel@tonic-gate  * The following functions is called from free_vp_pages()
61130Sstevel@tonic-gate  * for an inexact estimate of a newly free'd page...
61140Sstevel@tonic-gate  */
61150Sstevel@tonic-gate ulong_t
61160Sstevel@tonic-gate page_share_cnt(page_t *pp)
61170Sstevel@tonic-gate {
61180Sstevel@tonic-gate 	return (hat_page_getshare(pp));
61190Sstevel@tonic-gate }
61200Sstevel@tonic-gate 
61210Sstevel@tonic-gate int
61220Sstevel@tonic-gate page_isshared(page_t *pp)
61230Sstevel@tonic-gate {
61240Sstevel@tonic-gate 	return (hat_page_getshare(pp) > 1);
61250Sstevel@tonic-gate }
61260Sstevel@tonic-gate 
61270Sstevel@tonic-gate int
61280Sstevel@tonic-gate page_isfree(page_t *pp)
61290Sstevel@tonic-gate {
61300Sstevel@tonic-gate 	return (PP_ISFREE(pp));
61310Sstevel@tonic-gate }
61320Sstevel@tonic-gate 
61330Sstevel@tonic-gate int
61340Sstevel@tonic-gate page_isref(page_t *pp)
61350Sstevel@tonic-gate {
61360Sstevel@tonic-gate 	return (hat_page_getattr(pp, P_REF));
61370Sstevel@tonic-gate }
61380Sstevel@tonic-gate 
61390Sstevel@tonic-gate int
61400Sstevel@tonic-gate page_ismod(page_t *pp)
61410Sstevel@tonic-gate {
61420Sstevel@tonic-gate 	return (hat_page_getattr(pp, P_MOD));
61430Sstevel@tonic-gate }
6144