/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - anonymous pages.
 *
 * This layer sits immediately above the vm_swap layer.  It manages
 * physical pages that have no permanent identity in the file system
 * name space, using the services of the vm_swap layer to allocate
 * backing storage for these pages.  Since these pages have no external
 * identity, they are discarded when the last reference is removed.
 *
 * An important function of this layer is to manage low-level sharing
 * of pages that are logically distinct but that happen to be
 * physically identical (e.g., the corresponding pages of the processes
 * resulting from a fork before one process or the other changes their
 * contents).  This pseudo-sharing is present only as an optimization
 * and is not to be confused with true sharing in which multiple
 * address spaces deliberately contain references to the same object;
 * such sharing is managed at a higher level.
 *
 * The key data structure here is the anon struct, which contains a
 * reference count for its associated physical page and a hint about
 * the identity of that page.  Anon structs typically live in arrays,
 * with an instance's position in its array determining where the
 * corresponding backing storage is allocated; however, the swap_xlate()
 * routine abstracts away this representation information so that the
 * rest of the anon layer need not know it.  (See the swap layer for
 * more details on anon struct layout.)
 *
 * In future versions of the system, the association between an
 * anon struct and its position on backing store will change so that
 * we don't require backing store for all anonymous pages in the
 * system.  This is an important consideration for large memory systems.
 * We can also use this technique to delay binding physical locations
 * to anonymous pages until pageout/swapout time where we can make
 * smarter allocation decisions to improve anonymous klustering.
 *
 * Many of the routines defined here take a (struct anon **) argument,
 * which allows the code at this level to manage anon pages directly,
 * so that callers can regard anon structs as opaque objects and not be
 * concerned with assigning or inspecting their contents.
 *
 * Clients of this layer refer to anon pages indirectly.  That is, they
 * maintain arrays of pointers to anon structs rather than maintaining
 * anon structs themselves.  The (struct anon **) arguments mentioned
 * above are pointers to entries in these arrays.  It is these arrays
 * that capture the mapping between offsets within a given segment and
 * the corresponding anonymous backing storage address.
 */
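
/*
 * For illustration only: a minimal sketch of how a client of this
 * layer might use the anon array interfaces defined below.  The
 * surrounding segment code and the index an_idx are hypothetical;
 * error handling and locking are omitted.  See anon_create(),
 * anon_set_ptr(), anon_get_ptr() and anon_release() for the real
 * contracts.
 *
 *	struct anon_hdr *ahp;
 *	struct anon *ap;
 *
 *	ahp = anon_create(npages, ANON_SLEEP);	(array of anon pointers)
 *	ap = anon_alloc(NULL, 0);		(new slot, swap-backed)
 *	(void) anon_set_ptr(ahp, an_idx, ap, ANON_SLEEP);
 *	...
 *	ap = anon_get_ptr(ahp, an_idx);		(look the slot up again)
 *	anon_release(ahp, npages);		(tear the array down)
 */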

#ifdef DEBUG
#define	ANON_DEBUG
#endif

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/cred.h>
#include <sys/thread.h>
#include <sys/vnode.h>
#include <sys/cpuvar.h>
#include <sys/swap.h>
#include <sys/cmn_err.h>
#include <sys/vtrace.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/bitmap.h>
#include <sys/vmsystm.h>
#include <sys/tuneable.h>
#include <sys/debug.h>
#include <sys/fs/swapnode.h>
#include <sys/tnf_probe.h>
#include <sys/lgrp.h>
#include <sys/policy.h>
#include <sys/condvar_impl.h>
#include <sys/mutex_impl.h>
#include <sys/rctl.h>

#include <vm/as.h>
#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/vpage.h>
#include <vm/seg.h>
#include <vm/rm.h>

#include <fs/fs_subr.h>

struct vnode *anon_vp;

int anon_debug;

kmutex_t	anoninfo_lock;
struct		k_anoninfo k_anoninfo;
ani_free_t	ani_free_pool[ANI_MAX_POOL];
pad_mutex_t	anon_array_lock[ANON_LOCKSIZE];
kcondvar_t	anon_array_cv[ANON_LOCKSIZE];

/*
 * Global hash table for (vp, off) -> anon slot
 */
extern	int swap_maxcontig;
size_t	anon_hash_size;
struct anon **anon_hash;

static struct kmem_cache *anon_cache;
static struct kmem_cache *anonmap_cache;

pad_mutex_t	*anonhash_lock;

/*
 * Used to make the increment of all refcnts of all anon slots of a large
 * page appear to be atomic.  The lock is grabbed for the first anon slot of
 * a large page.
 */
pad_mutex_t	*anonpages_hash_lock;

#define	APH_MUTEX(vp, off)				\
	(&anonpages_hash_lock[(ANON_HASH((vp), (off)) &	\
	    (AH_LOCK_SIZE - 1))].pad_mutex)
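
/*
 * Illustrative sketch of the locking pattern this enables: to make
 * the refcnt updates on all constituent anon slots of a large page
 * appear atomic, callers serialize on the APH_MUTEX of the root
 * (first) anon slot before touching the remaining slots, e.g.
 *
 *	ahmpages = APH_MUTEX(ap->an_vp, ap->an_off);
 *	mutex_enter(ahmpages);
 *	(examine or adjust an_refcnt of each slot of the large page)
 *	mutex_exit(ahmpages);
 *
 * anon_szcshare() and anon_decref_pages() below follow this pattern.
 */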

#ifdef VM_STATS
static struct anonvmstats_str {
	ulong_t getpages[30];
	ulong_t privatepages[10];
	ulong_t demotepages[9];
	ulong_t decrefpages[9];
	ulong_t	dupfillholes[4];
	ulong_t freepages[1];
} anonvmstats;
#endif /* VM_STATS */

/*ARGSUSED*/
static int
anonmap_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct anon_map *amp = buf;

	rw_init(&amp->a_rwlock, NULL, RW_DEFAULT, NULL);
	cv_init(&amp->a_purgecv, NULL, CV_DEFAULT, NULL);
	mutex_init(&amp->a_pmtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&amp->a_purgemtx, NULL, MUTEX_DEFAULT, NULL);
	return (0);
}

/*ARGSUSED1*/
static void
anonmap_cache_destructor(void *buf, void *cdrarg)
{
	struct anon_map *amp = buf;

	rw_destroy(&amp->a_rwlock);
	cv_destroy(&amp->a_purgecv);
	mutex_destroy(&amp->a_pmtx);
	mutex_destroy(&amp->a_purgemtx);
}

void
anon_init(void)
{
	int i;
	pad_mutex_t *tmp;

	/* These both need to be powers of 2 so round up to the next power */
	anon_hash_size = 1L << highbit((physmem / ANON_HASHAVELEN) - 1);

	/*
	 * We need to align the anonhash_lock and anonpages_hash_lock arrays
	 * to a 64B boundary to avoid false sharing.  We add 63B to our
	 * allocation so that we can get a 64B aligned address to use.
	 * We allocate both of these together to avoid wasting an additional
	 * 63B.
	 */
	tmp = kmem_zalloc((2 * AH_LOCK_SIZE * sizeof (pad_mutex_t)) + 63,
	    KM_SLEEP);
	anonhash_lock = (pad_mutex_t *)P2ROUNDUP((uintptr_t)tmp, 64);
	anonpages_hash_lock = anonhash_lock + AH_LOCK_SIZE;

	for (i = 0; i < AH_LOCK_SIZE; i++) {
		mutex_init(&anonhash_lock[i].pad_mutex, NULL, MUTEX_DEFAULT,
		    NULL);
		mutex_init(&anonpages_hash_lock[i].pad_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
	}

	for (i = 0; i < ANON_LOCKSIZE; i++) {
		mutex_init(&anon_array_lock[i].pad_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
		cv_init(&anon_array_cv[i], NULL, CV_DEFAULT, NULL);
	}

	anon_hash = (struct anon **)
	    kmem_zalloc(sizeof (struct anon *) * anon_hash_size, KM_SLEEP);
	anon_cache = kmem_cache_create("anon_cache", sizeof (struct anon),
	    AN_CACHE_ALIGN, NULL, NULL, NULL, NULL, NULL, KMC_PREFILL);
	anonmap_cache = kmem_cache_create("anonmap_cache",
	    sizeof (struct anon_map), 0,
	    anonmap_cache_constructor, anonmap_cache_destructor, NULL,
	    NULL, NULL, 0);
	swap_maxcontig = (1024 * 1024) >> PAGESHIFT;	/* 1MB of pages */

	anon_vp = vn_alloc(KM_SLEEP);
	vn_setops(anon_vp, swap_vnodeops);
	anon_vp->v_type = VREG;
	anon_vp->v_flag |= (VISSWAP|VISSWAPFS);
}

/*
 * Global anon slot hash table manipulation.
 */

static void
anon_addhash(struct anon *ap)
{
	int index;

	ASSERT(MUTEX_HELD(AH_MUTEX(ap->an_vp, ap->an_off)));
	index = ANON_HASH(ap->an_vp, ap->an_off);
	ap->an_hash = anon_hash[index];
	anon_hash[index] = ap;
}

static void
anon_rmhash(struct anon *ap)
{
	struct anon **app;

	ASSERT(MUTEX_HELD(AH_MUTEX(ap->an_vp, ap->an_off)));

	for (app = &anon_hash[ANON_HASH(ap->an_vp, ap->an_off)];
	    *app; app = &((*app)->an_hash)) {
		if (*app == ap) {
			*app = ap->an_hash;
			break;
		}
	}
}

/*
 * The anon array interfaces. Functions for allocating and freeing
 * arrays of pointers, and for getting and setting entries in the
 * array of pointers for a given offset.
 *
 * Create the list of pointers.
 */
struct anon_hdr *
anon_create(pgcnt_t npages, int flags)
{
	struct anon_hdr *ahp;
	ulong_t nchunks;
	int kmemflags = (flags & ANON_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;

	if ((ahp = kmem_zalloc(sizeof (struct anon_hdr), kmemflags)) == NULL) {
		return (NULL);
	}

	mutex_init(&ahp->serial_lock, NULL, MUTEX_DEFAULT, NULL);
	/*
	 * Single level case.
	 */
	ahp->size = npages;
	if (npages <= ANON_CHUNK_SIZE || (flags & ANON_ALLOC_FORCE)) {

		if (flags & ANON_ALLOC_FORCE)
			ahp->flags |= ANON_ALLOC_FORCE;

		ahp->array_chunk = kmem_zalloc(
		    ahp->size * sizeof (struct anon *), kmemflags);

		if (ahp->array_chunk == NULL) {
			kmem_free(ahp, sizeof (struct anon_hdr));
			return (NULL);
		}
	} else {
		/*
		 * 2 Level case.
		 * anon hdr size needs to be rounded off to be a multiple
		 * of ANON_CHUNK_SIZE. This is important as various anon
		 * related functions depend on this.
		 * NOTE -
		 * anon_grow() makes anon hdr size a multiple of
		 * ANON_CHUNK_SIZE.
		 * amp size is <= anon hdr size.
		 * anon_index + seg_pgs <= anon hdr size.
		 */
		ahp->size = P2ROUNDUP(npages, ANON_CHUNK_SIZE);
		nchunks = ahp->size >> ANON_CHUNK_SHIFT;

		ahp->array_chunk = kmem_zalloc(nchunks * sizeof (ulong_t *),
		    kmemflags);

		if (ahp->array_chunk == NULL) {
			kmem_free(ahp, sizeof (struct anon_hdr));
			return (NULL);
		}
	}
	return (ahp);
}
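
/*
 * Worked example for the sizing above (illustrative; assumes
 * ANON_CHUNK_SIZE is PAGESIZE / sizeof (struct anon *), i.e. 512
 * with 4K pages on a 64-bit kernel): a request for npages == 1000
 * exceeds ANON_CHUNK_SIZE, so anon_create() rounds ahp->size up to
 * P2ROUNDUP(1000, 512) == 1024 and allocates a top level with
 * nchunks == 1024 >> ANON_CHUNK_SHIFT == 2 entries; the second-level
 * chunks themselves are allocated lazily by anon_set_ptr().
 */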

/*
 * Free the array of pointers
 */
void
anon_release(struct anon_hdr *ahp, pgcnt_t npages)
{
	ulong_t i;
	void **ppp;
	ulong_t nchunks;

	ASSERT(npages <= ahp->size);

	/*
	 * Single level case.
	 */
	if (npages <= ANON_CHUNK_SIZE || (ahp->flags & ANON_ALLOC_FORCE)) {
		kmem_free(ahp->array_chunk, ahp->size * sizeof (struct anon *));
	} else {
		/*
		 * 2 level case.
		 */
		nchunks = ahp->size >> ANON_CHUNK_SHIFT;
		for (i = 0; i < nchunks; i++) {
			ppp = &ahp->array_chunk[i];
			if (*ppp != NULL)
				kmem_free(*ppp, PAGESIZE);
		}
		kmem_free(ahp->array_chunk, nchunks * sizeof (ulong_t *));
	}
	mutex_destroy(&ahp->serial_lock);
	kmem_free(ahp, sizeof (struct anon_hdr));
}

/*
 * Return the pointer from the list for a
 * specified anon index.
 */
struct anon *
anon_get_ptr(struct anon_hdr *ahp, ulong_t an_idx)
{
	struct anon **app;

	ASSERT(an_idx < ahp->size);

	/*
	 * Single level case.
	 */
	if ((ahp->size <= ANON_CHUNK_SIZE) || (ahp->flags & ANON_ALLOC_FORCE)) {
		return ((struct anon *)
		    ((uintptr_t)ahp->array_chunk[an_idx] & ANON_PTRMASK));
	} else {

		/*
		 * 2 level case.
		 */
		app = ahp->array_chunk[an_idx >> ANON_CHUNK_SHIFT];
		if (app) {
			return ((struct anon *)
			    ((uintptr_t)app[an_idx & ANON_CHUNK_OFF] &
			    ANON_PTRMASK));
		} else {
			return (NULL);
		}
	}
}

/*
 * Return the anon pointer for the first valid entry in the anon list,
 * starting from the given index.
 */
struct anon *
anon_get_next_ptr(struct anon_hdr *ahp, ulong_t *index)
{
	struct anon *ap;
	struct anon **app;
	ulong_t chunkoff;
	ulong_t i;
	ulong_t j;
	pgcnt_t size;

	i = *index;
	size = ahp->size;

	ASSERT(i < size);

	if ((size <= ANON_CHUNK_SIZE) || (ahp->flags & ANON_ALLOC_FORCE)) {
		/*
		 * 1 level case
		 */
		while (i < size) {
			ap = (struct anon *)
			    ((uintptr_t)ahp->array_chunk[i] & ANON_PTRMASK);
			if (ap) {
				*index = i;
				return (ap);
			}
			i++;
		}
	} else {
		/*
		 * 2 level case
		 */
		chunkoff = i & ANON_CHUNK_OFF;
		while (i < size) {
			app = ahp->array_chunk[i >> ANON_CHUNK_SHIFT];
			if (app)
				for (j = chunkoff; j < ANON_CHUNK_SIZE; j++) {
					ap = (struct anon *)
					    ((uintptr_t)app[j] & ANON_PTRMASK);
					if (ap) {
						*index = i + (j - chunkoff);
						return (ap);
					}
				}
			chunkoff = 0;
			i = (i + ANON_CHUNK_SIZE) & ~ANON_CHUNK_OFF;
		}
	}
	*index = size;
	return (NULL);
}
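
/*
 * Illustrative usage (a sketch, not taken from a real caller): a
 * typical scan over every in-use slot of an anon array, skipping
 * unallocated holes, might look like
 *
 *	ulong_t idx = 0;
 *	struct anon *ap;
 *
 *	while (idx < ahp->size &&
 *	    (ap = anon_get_next_ptr(ahp, &idx)) != NULL) {
 *		(operate on ap, which lives at index idx)
 *		idx++;
 *	}
 */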

/*
 * Set list entry with a given pointer for a specified offset
 */
int
anon_set_ptr(struct anon_hdr *ahp, ulong_t an_idx, struct anon *ap, int flags)
{
	void		**ppp;
	struct anon	**app;
	int kmemflags = (flags & ANON_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
	uintptr_t	*ap_addr;

	ASSERT(an_idx < ahp->size);

	/*
	 * Single level case.
	 */
	if (ahp->size <= ANON_CHUNK_SIZE || (ahp->flags & ANON_ALLOC_FORCE)) {
		ap_addr = (uintptr_t *)&ahp->array_chunk[an_idx];
	} else {

		/*
		 * 2 level case.
		 */
		ppp = &ahp->array_chunk[an_idx >> ANON_CHUNK_SHIFT];

		ASSERT(ppp != NULL);
		if (*ppp == NULL) {
			mutex_enter(&ahp->serial_lock);
			ppp = &ahp->array_chunk[an_idx >> ANON_CHUNK_SHIFT];
			if (*ppp == NULL) {
				*ppp = kmem_zalloc(PAGESIZE, kmemflags);
				if (*ppp == NULL) {
					mutex_exit(&ahp->serial_lock);
					return (ENOMEM);
				}
			}
			mutex_exit(&ahp->serial_lock);
		}
		app = *ppp;
		ap_addr = (uintptr_t *)&app[an_idx & ANON_CHUNK_OFF];
	}
	*ap_addr = (*ap_addr & ~ANON_PTRMASK) | (uintptr_t)ap;
	return (0);
}

/*
 * Copy anon array into a given new anon array
 */
int
anon_copy_ptr(struct anon_hdr *sahp, ulong_t s_idx,
	struct anon_hdr *dahp, ulong_t d_idx,
	pgcnt_t npages, int flags)
{
	void **sapp, **dapp;
	void *ap;
	int kmemflags = (flags & ANON_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;

	ASSERT((s_idx < sahp->size) && (d_idx < dahp->size));
	ASSERT((npages <= sahp->size) && (npages <= dahp->size));

	/*
	 * Both arrays are 1 level.
	 */
	if (((sahp->size <= ANON_CHUNK_SIZE) &&
	    (dahp->size <= ANON_CHUNK_SIZE)) ||
	    ((sahp->flags & ANON_ALLOC_FORCE) &&
	    (dahp->flags & ANON_ALLOC_FORCE))) {

		bcopy(&sahp->array_chunk[s_idx], &dahp->array_chunk[d_idx],
		    npages * sizeof (struct anon *));
		return (0);
	}

	/*
	 * Both arrays are 2 levels.
	 */
	if (sahp->size > ANON_CHUNK_SIZE &&
	    dahp->size > ANON_CHUNK_SIZE &&
	    ((sahp->flags & ANON_ALLOC_FORCE) == 0) &&
	    ((dahp->flags & ANON_ALLOC_FORCE) == 0)) {

		ulong_t sapidx, dapidx;
		ulong_t *sap, *dap;
		ulong_t chknp;

		while (npages != 0) {

			sapidx = s_idx & ANON_CHUNK_OFF;
			dapidx = d_idx & ANON_CHUNK_OFF;
			chknp = ANON_CHUNK_SIZE - MAX(sapidx, dapidx);
			if (chknp > npages)
				chknp = npages;

			sapp = &sahp->array_chunk[s_idx >> ANON_CHUNK_SHIFT];
			if ((sap = *sapp) != NULL) {
				dapp = &dahp->array_chunk[d_idx
				    >> ANON_CHUNK_SHIFT];
				if ((dap = *dapp) == NULL) {
					*dapp = kmem_zalloc(PAGESIZE,
					    kmemflags);
					if ((dap = *dapp) == NULL)
						return (ENOMEM);
				}
				bcopy((sap + sapidx), (dap + dapidx),
				    chknp << ANON_PTRSHIFT);
			}
			s_idx += chknp;
			d_idx += chknp;
			npages -= chknp;
		}
		return (0);
	}

	/*
	 * At least one of the arrays is 2 level.
	 */
	while (npages--) {
		if ((ap = anon_get_ptr(sahp, s_idx)) != NULL) {
			ASSERT(!ANON_ISBUSY(anon_get_slot(sahp, s_idx)));
			if (anon_set_ptr(dahp, d_idx, ap, flags) == ENOMEM)
				return (ENOMEM);
		}
		s_idx++;
		d_idx++;
	}
	return (0);
}


/*
 * ANON_INITBUF is a convenience macro for anon_grow() below. It
 * takes a buffer dst, which is at least as large as buffer src. It
 * does a bcopy from src into dst, and then bzeros the extra bytes
 * of dst. If tail is set, the data in src is tail aligned within
 * dst instead of head aligned.
 */

#define	ANON_INITBUF(src, srclen, dst, dstsize, tail)			      \
	if (tail) {							      \
		bzero((dst), (dstsize) - (srclen));			      \
		bcopy((src), (char *)(dst) + (dstsize) - (srclen), (srclen)); \
	} else {							      \
		bcopy((src), (dst), (srclen));				      \
		bzero((char *)(dst) + (srclen), (dstsize) - (srclen));	      \
	}
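
/*
 * Worked example (illustrative, 8-byte pointers): copying a level-1
 * array holding 2 entries into a buffer sized for 8 entries,
 * ANON_INITBUF(src, 2 * 8, dst, 8 * 8, tail) leaves
 *
 *	tail == 0:	dst = { s0, s1, 0, 0, 0, 0, 0, 0 }
 *	tail != 0:	dst = { 0, 0, 0, 0, 0, 0, s0, s1 }
 *
 * anon_grow() below uses the tail-aligned form for ANON_GROWDOWN
 * (stack-like) segments so existing slots keep their positions
 * relative to the end of the array.
 */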

#define	ANON_1_LEVEL_INC	(ANON_CHUNK_SIZE / 8)
#define	ANON_2_LEVEL_INC	(ANON_1_LEVEL_INC * ANON_CHUNK_SIZE)

/*
 * anon_grow() is used to efficiently extend an existing anon array.
 * startidx_p points to the index into the anon array of the first page
 * that is in use. oldseg_pgs is the number of pages in use, starting at
 * *startidx_p. newseg_pgs is the number of additional pages desired.
 *
 * If startidx_p == NULL, startidx is taken to be 0 and cannot be changed.
 *
 * The growth is done by creating a new top level of the anon array,
 * and (if the array is 2-level) reusing the existing second level arrays.
 *
 * flags can be used to specify ANON_NOSLEEP and ANON_GROWDOWN.
 *
 * Returns the new number of pages in the anon array.
 */
pgcnt_t
anon_grow(struct anon_hdr *ahp, ulong_t *startidx_p, pgcnt_t oldseg_pgs,
    pgcnt_t newseg_pgs, int flags)
{
	ulong_t startidx = startidx_p ? *startidx_p : 0;
	pgcnt_t oldamp_pgs = ahp->size, newamp_pgs;
	pgcnt_t oelems, nelems, totpages;
	void **level1;
	int kmemflags = (flags & ANON_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
	int growdown = (flags & ANON_GROWDOWN);
	size_t newarrsz, oldarrsz;
	void *level2;

	ASSERT(!(startidx_p == NULL && growdown));
	ASSERT(startidx + oldseg_pgs <= ahp->size);

	/*
	 * Determine the total number of pages needed in the new
	 * anon array. If growing down, totpages is all pages from
	 * startidx through the end of the array, plus <newseg_pgs>
	 * pages. If growing up, keep all pages from page 0 through
	 * the last page currently in use, plus <newseg_pgs> pages.
	 */
	if (growdown)
		totpages = oldamp_pgs - startidx + newseg_pgs;
	else
		totpages = startidx + oldseg_pgs + newseg_pgs;

	/* If the array is already large enough, just return. */

	if (oldamp_pgs >= totpages) {
		if (growdown)
			*startidx_p = oldamp_pgs - totpages;
		return (oldamp_pgs);
	}

	/*
	 * oldamp_pgs/newamp_pgs are the total numbers of pages represented
	 * by the corresponding arrays.
	 * oelems/nelems are the number of pointers in the top level arrays
	 * which may be either level 1 or level 2.
	 * Will the new anon array be one level or two levels?
	 */
	if (totpages <= ANON_CHUNK_SIZE || (ahp->flags & ANON_ALLOC_FORCE)) {
		newamp_pgs = P2ROUNDUP(totpages, ANON_1_LEVEL_INC);
		oelems = oldamp_pgs;
		nelems = newamp_pgs;
	} else {
		newamp_pgs = P2ROUNDUP(totpages, ANON_2_LEVEL_INC);
		oelems = (oldamp_pgs + ANON_CHUNK_OFF) >> ANON_CHUNK_SHIFT;
		nelems = newamp_pgs >> ANON_CHUNK_SHIFT;
	}

	newarrsz = nelems * sizeof (void *);
	level1 = kmem_alloc(newarrsz, kmemflags);
	if (level1 == NULL)
		return (0);

	/* Are we converting from a one level to a two level anon array? */

	if (newamp_pgs > ANON_CHUNK_SIZE && oldamp_pgs <= ANON_CHUNK_SIZE &&
	    !(ahp->flags & ANON_ALLOC_FORCE)) {

		/*
		 * Yes, we're converting to a two level. Reuse old level 1
		 * as new level 2 if it is exactly PAGESIZE. Otherwise
		 * alloc a new level 2 and copy the old level 1 data into it.
		 */
		if (oldamp_pgs == ANON_CHUNK_SIZE) {
			level2 = (void *)ahp->array_chunk;
		} else {
			level2 = kmem_alloc(PAGESIZE, kmemflags);
			if (level2 == NULL) {
				kmem_free(level1, newarrsz);
				return (0);
			}
			oldarrsz = oldamp_pgs * sizeof (void *);

			ANON_INITBUF(ahp->array_chunk, oldarrsz,
			    level2, PAGESIZE, growdown);
			kmem_free(ahp->array_chunk, oldarrsz);
		}
		bzero(level1, newarrsz);
		if (growdown)
			level1[nelems - 1] = level2;
		else
			level1[0] = level2;
	} else {
		oldarrsz = oelems * sizeof (void *);

		ANON_INITBUF(ahp->array_chunk, oldarrsz,
		    level1, newarrsz, growdown);
		kmem_free(ahp->array_chunk, oldarrsz);
	}

	ahp->array_chunk = level1;
	ahp->size = newamp_pgs;
	if (growdown)
		*startidx_p = newamp_pgs - totpages;

	return (newamp_pgs);
}
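
/*
 * Worked example for anon_grow() (illustrative numbers): growing a
 * stack-like (ANON_GROWDOWN) segment whose array has ahp->size ==
 * 1024 pages all in use (*startidx_p == 0, oldseg_pgs == 1024) by
 * newseg_pgs == 512 computes totpages = 1024 - 0 + 512 = 1536,
 * rounds the new array up to newamp_pgs = P2ROUNDUP(1536,
 * ANON_2_LEVEL_INC), tail-aligns the old top-level pointers within
 * the new top level (so existing slots keep their backing store),
 * and rebases *startidx_p to newamp_pgs - totpages.
 */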

/*
 * Called from clock handler to sync ani_free value.
 */

void
set_anoninfo(void)
{
	int	ix;
	pgcnt_t	total = 0;

	for (ix = 0; ix < ANI_MAX_POOL; ix++) {
		total += ani_free_pool[ix].ani_count;
	}
	k_anoninfo.ani_free = total;
}

/*
 * Reserve anon space.
 *
 * It's no longer simply a matter of incrementing ani_resv to
 * reserve swap space; we need to check memory-based as well
 * as disk-backed (physical) swap.  The following algorithm
 * is used:
 *	Check the space on physical swap
 *		i.e. amount needed < ani_max - ani_phys_resv
 *	If we are swapping on swapfs check
 *		amount needed < (availrmem - swapfs_minfree)
 * Since the algorithm to check for the quantity of swap space is
 * almost the same as that for reserving it, we'll just use anon_resvmem
 * with a flag to decrement availrmem.
 *
 * Return non-zero on success.
 */
int
anon_resvmem(size_t size, boolean_t takemem, zone_t *zone, int tryhard)
{
	pgcnt_t npages = btopr(size);
	pgcnt_t mswap_pages = 0;
	pgcnt_t pswap_pages = 0;
	proc_t *p = curproc;

	if (zone != NULL && takemem) {
		/* test zone.max-swap resource control */
		mutex_enter(&p->p_lock);
		if (rctl_incr_swap(p, zone, ptob(npages)) != 0) {
			mutex_exit(&p->p_lock);
			return (0);
		}
		mutex_exit(&p->p_lock);
	}
	mutex_enter(&anoninfo_lock);

	/*
	 * pswap_pages is the number of pages we can take from
	 * physical (i.e. disk-backed) swap.
	 */
	ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
	pswap_pages = k_anoninfo.ani_max - k_anoninfo.ani_phys_resv;

	ANON_PRINT(A_RESV,
	    ("anon_resvmem: npages %lu takemem %u pswap %lu caller %p\n",
	    npages, takemem, pswap_pages, (void *)caller()));

	if (npages <= pswap_pages) {
		/*
		 * we have enough space on a physical swap
		 */
		if (takemem)
			k_anoninfo.ani_phys_resv += npages;
		mutex_exit(&anoninfo_lock);
		return (1);
	} else if (pswap_pages != 0) {
		/*
		 * we have some space on a physical swap
		 */
		if (takemem) {
			/*
			 * use up remainder of phys swap
			 */
			k_anoninfo.ani_phys_resv += pswap_pages;
			ASSERT(k_anoninfo.ani_phys_resv == k_anoninfo.ani_max);
		}
	}
	/*
	 * since (npages > pswap_pages) we need mem swap
	 * mswap_pages is the number of pages needed from availrmem
	 */
	ASSERT(npages > pswap_pages);
	mswap_pages = npages - pswap_pages;

	ANON_PRINT(A_RESV, ("anon_resvmem: need %ld pages from memory\n",
	    mswap_pages));

	/*
	 * priv processes can reserve memory as swap as long as availrmem
	 * remains greater than swapfs_minfree; in the case of non-priv
	 * processes, memory can be reserved as swap only if availrmem
	 * doesn't fall below (swapfs_minfree + swapfs_reserve). Thus,
	 * swapfs_reserve amount of memswap is not available to non-priv
	 * processes. This protects daemons such as automounter from dying
	 * as a result of application processes eating away almost the
	 * entire memory-based swap. This safeguard becomes useless if
	 * apps are run with root access.
	 *
	 * swapfs_reserve is the minimum of 4MB and 1/16 of physmem.
	 *
	 */
	if (tryhard) {
		pgcnt_t floor_pages;

		if (secpolicy_resource_anon_mem(CRED())) {
			floor_pages = swapfs_minfree;
		} else {
			floor_pages = swapfs_minfree + swapfs_reserve;
		}

		mutex_exit(&anoninfo_lock);
		(void) page_reclaim_mem(mswap_pages, floor_pages, 0);
		mutex_enter(&anoninfo_lock);
	}

	mutex_enter(&freemem_lock);
	if (availrmem > (swapfs_minfree + swapfs_reserve + mswap_pages) ||
	    (availrmem > (swapfs_minfree + mswap_pages) &&
	    secpolicy_resource(CRED()) == 0)) {

		if (takemem) {
			/*
			 * Take the memory from the rest of the system.
			 */
			availrmem -= mswap_pages;
			mutex_exit(&freemem_lock);
			k_anoninfo.ani_mem_resv += mswap_pages;
			ANI_ADD(mswap_pages);
			ANON_PRINT((A_RESV | A_MRESV),
			    ("anon_resvmem: took %ld pages of availrmem\n",
			    mswap_pages));
		} else {
			mutex_exit(&freemem_lock);
		}

		ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
		mutex_exit(&anoninfo_lock);
		return (1);
	} else {
		/*
		 * Fail if not enough memory
		 */
		if (takemem) {
			k_anoninfo.ani_phys_resv -= pswap_pages;
		}

		mutex_exit(&freemem_lock);
		mutex_exit(&anoninfo_lock);
		ANON_PRINT(A_RESV,
		    ("anon_resvmem: not enough space from swapfs\n"));
		if (zone != NULL && takemem)
			rctl_decr_swap(zone, ptob(npages));
		return (0);
	}
}
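
/*
 * Worked example of the arithmetic above (illustrative numbers):
 * reserving npages == 100 when only pswap_pages == 40 unreserved
 * physical swap slots remain consumes all 40 (ani_phys_resv reaches
 * ani_max) and then needs mswap_pages == 60 pages of memory-based
 * swap; that part succeeds only if availrmem can give up 60 pages
 * without falling below swapfs_minfree (plus swapfs_reserve for
 * unprivileged callers).
 */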

/*
 * Give back an anon reservation.
 */
void
anon_unresvmem(size_t size, zone_t *zone)
{
	pgcnt_t npages = btopr(size);
	spgcnt_t mem_free_pages = 0;
	pgcnt_t phys_free_slots;
#ifdef	ANON_DEBUG
	pgcnt_t mem_resv;
#endif
	if (zone != NULL)
		rctl_decr_swap(zone, ptob(npages));

	mutex_enter(&anoninfo_lock);

	ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);

	/*
	 * If some of this reservation belonged to swapfs
	 * give it back to availrmem.
	 * ani_mem_resv is the amount of availrmem swapfs has reserved,
	 * but some of that memory could be locked by segspt, so we can
	 * only return the non-locked portion of ani_mem_resv to availrmem.
	 */
	if (k_anoninfo.ani_mem_resv > k_anoninfo.ani_locked_swap) {
		ANON_PRINT((A_RESV | A_MRESV),
		    ("anon_unresv: growing availrmem by %ld pages\n",
		    MIN(k_anoninfo.ani_mem_resv, npages)));

		mem_free_pages = MIN((spgcnt_t)(k_anoninfo.ani_mem_resv -
		    k_anoninfo.ani_locked_swap), npages);
		mutex_enter(&freemem_lock);
		availrmem += mem_free_pages;
		mutex_exit(&freemem_lock);
		k_anoninfo.ani_mem_resv -= mem_free_pages;

		ANI_ADD(-mem_free_pages);
	}
	/*
	 * The remainder of the pages is returned to phys swap
	 */
	ASSERT(npages >= mem_free_pages);
	phys_free_slots = npages - mem_free_pages;

	if (phys_free_slots) {
		k_anoninfo.ani_phys_resv -= phys_free_slots;
	}

#ifdef	ANON_DEBUG
	mem_resv = k_anoninfo.ani_mem_resv;
#endif

	ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
	ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);

	mutex_exit(&anoninfo_lock);

	ANON_PRINT(A_RESV, ("anon_unresv: %lu, tot %lu, caller %p\n",
	    npages, mem_resv, (void *)caller()));
}

/*
 * Allocate an anon slot, add it to the (vp, off) hash, and return it.
 */
struct anon *
anon_alloc(struct vnode *vp, anoff_t off)
{
	struct anon	*ap;
	kmutex_t	*ahm;

	ap = kmem_cache_alloc(anon_cache, KM_SLEEP);
	if (vp == NULL) {
		swap_alloc(ap);
	} else {
		ap->an_vp = vp;
		ap->an_off = off;
	}
	ap->an_refcnt = 1;
	ap->an_pvp = NULL;
	ap->an_poff = 0;
	ahm = AH_MUTEX(ap->an_vp, ap->an_off);
	mutex_enter(ahm);
	anon_addhash(ap);
	mutex_exit(ahm);
	ANI_ADD(-1);
	ANON_PRINT(A_ANON, ("anon_alloc: returning ap %p, vp %p\n",
	    (void *)ap, (ap ? (void *)ap->an_vp : NULL)));
	return (ap);
}

/*
 * Called for pages locked in memory via softlock/pagelock/mlock to make sure
 * such pages don't consume any physical swap resources needed for swapping
 * unlocked pages.
 */
void
anon_swap_free(struct anon *ap, page_t *pp)
{
	kmutex_t *ahm;

	ASSERT(ap != NULL);
	ASSERT(pp != NULL);
	ASSERT(PAGE_LOCKED(pp));
	ASSERT(pp->p_vnode != NULL);
	ASSERT(IS_SWAPFSVP(pp->p_vnode));
	ASSERT(ap->an_refcnt != 0);
	ASSERT(pp->p_vnode == ap->an_vp);
	ASSERT(pp->p_offset == ap->an_off);

	if (ap->an_pvp == NULL)
		return;

	page_io_lock(pp);
	ahm = AH_MUTEX(ap->an_vp, ap->an_off);
	mutex_enter(ahm);

	ASSERT(ap->an_refcnt != 0);
	ASSERT(pp->p_vnode == ap->an_vp);
	ASSERT(pp->p_offset == ap->an_off);

	if (ap->an_pvp != NULL) {
		swap_phys_free(ap->an_pvp, ap->an_poff, PAGESIZE);
		ap->an_pvp = NULL;
		ap->an_poff = 0;
		mutex_exit(ahm);
		hat_setmod(pp);
	} else {
		mutex_exit(ahm);
	}
	page_io_unlock(pp);
}

/*
 * Decrement the reference count of an anon page.
 * If reference count goes to zero, free it and
 * its associated page (if any).
 */
void
anon_decref(struct anon *ap)
{
	page_t *pp;
	struct vnode *vp;
	anoff_t off;
	kmutex_t *ahm;

	ahm = AH_MUTEX(ap->an_vp, ap->an_off);
	mutex_enter(ahm);
	ASSERT(ap->an_refcnt != 0);
	if (ap->an_refcnt == 0)
		panic("anon_decref: slot count 0");
	if (--ap->an_refcnt == 0) {
		swap_xlate(ap, &vp, &off);
		anon_rmhash(ap);
		if (ap->an_pvp != NULL)
			swap_phys_free(ap->an_pvp, ap->an_poff, PAGESIZE);
		mutex_exit(ahm);

		/*
		 * If there is a page for this anon slot we will need to
		 * call VN_DISPOSE to get rid of the vp association and
		 * put the page back on the free list as really free.
		 * Acquire the "exclusive" lock to ensure that any
		 * pending i/o always completes before the swap slot
		 * is freed.
		 */
		pp = page_lookup(vp, (u_offset_t)off, SE_EXCL);
		if (pp != NULL) {
			/*LINTED: constant in conditional context */
			VN_DISPOSE(pp, B_INVAL, 0, kcred);
		}
		ANON_PRINT(A_ANON, ("anon_decref: free ap %p, vp %p\n",
		    (void *)ap, (void *)ap->an_vp));

		kmem_cache_free(anon_cache, ap);

		ANI_ADD(1);
	} else {
		mutex_exit(ahm);
	}
}


/*
 * check an_refcnt of the root anon slot (anon_index argument is aligned at
 * seg->s_szc level) to determine whether COW processing is required.
 * anonpages_hash_lock[] held on the root ap ensures that if root's
 * refcnt is 1 all other refcnt's are 1 as well (and they can't increase
 * later since this process can't fork while its AS lock is held).
 *
 * returns 1 if the root anon slot has a refcnt > 1 otherwise returns 0.
 */
int
anon_szcshare(struct anon_hdr *ahp, ulong_t anon_index)
{
	struct anon	*ap;
	kmutex_t	*ahmpages = NULL;

	ap = anon_get_ptr(ahp, anon_index);
	if (ap == NULL)
		return (0);

	ahmpages = APH_MUTEX(ap->an_vp, ap->an_off);
	mutex_enter(ahmpages);
	ASSERT(ap->an_refcnt >= 1);
	if (ap->an_refcnt == 1) {
		mutex_exit(ahmpages);
		return (0);
	}
	mutex_exit(ahmpages);
	return (1);
}

/*
 * Check 'nslots' anon slots for refcnt > 1.
 *
 * returns 1 if any of the 'nslots' anon slots has a refcnt > 1 otherwise
 * returns 0.
 */
static int
anon_share(struct anon_hdr *ahp, ulong_t anon_index, pgcnt_t nslots)
{
	struct anon *ap;

	while (nslots-- > 0) {
		if ((ap = anon_get_ptr(ahp, anon_index)) != NULL &&
		    ap->an_refcnt > 1)
			return (1);
		anon_index++;
	}

	return (0);
}

static void
anon_decref_pages(
	struct anon_hdr *ahp,
	ulong_t an_idx,
	uint_t szc)
{
	struct anon *ap = anon_get_ptr(ahp, an_idx);
	kmutex_t *ahmpages = NULL;
	page_t *pp;
	pgcnt_t pgcnt = page_get_pagecnt(szc);
	pgcnt_t i;
	struct vnode *vp;
	anoff_t   off;
	kmutex_t *ahm;
#ifdef DEBUG
	int refcnt = 1;
#endif

	ASSERT(szc != 0);
	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
	ASSERT(IS_P2ALIGNED(an_idx, pgcnt));
	ASSERT(an_idx < ahp->size);

	if (ahp->size - an_idx < pgcnt) {
		/*
		 * In case of shared mappings total anon map size may not be
		 * the largest page size aligned.
		 */
		pgcnt = ahp->size - an_idx;
	}

	VM_STAT_ADD(anonvmstats.decrefpages[0]);

	if (ap != NULL) {
		ahmpages = APH_MUTEX(ap->an_vp, ap->an_off);
		mutex_enter(ahmpages);
		ASSERT((refcnt = ap->an_refcnt) != 0);
		VM_STAT_ADD(anonvmstats.decrefpages[1]);
		if (ap->an_refcnt == 1) {
			VM_STAT_ADD(anonvmstats.decrefpages[2]);
			ASSERT(!anon_share(ahp, an_idx, pgcnt));
			mutex_exit(ahmpages);
			ahmpages = NULL;
		}
	}

	i = 0;
	while (i < pgcnt) {
		if ((ap = anon_get_ptr(ahp, an_idx + i)) == NULL) {
			ASSERT(refcnt == 1 && ahmpages == NULL);
			i++;
			continue;
		}
		ASSERT(ap->an_refcnt == refcnt);
		ASSERT(ahmpages != NULL || ap->an_refcnt == 1);
		ASSERT(ahmpages == NULL || ap->an_refcnt > 1);

		if (ahmpages == NULL) {
			swap_xlate(ap, &vp, &off);
			pp = page_lookup(vp, (u_offset_t)off, SE_EXCL);
			if (pp == NULL || pp->p_szc == 0) {
				VM_STAT_ADD(anonvmstats.decrefpages[3]);
				ahm = AH_MUTEX(ap->an_vp, ap->an_off);
				(void) anon_set_ptr(ahp, an_idx + i, NULL,
				    ANON_SLEEP);
				mutex_enter(ahm);
				ap->an_refcnt--;
				ASSERT(ap->an_refcnt == 0);
				anon_rmhash(ap);
				if (ap->an_pvp)
					swap_phys_free(ap->an_pvp, ap->an_poff,
					    PAGESIZE);
				mutex_exit(ahm);
				if (pp == NULL) {
					pp = page_lookup(vp, (u_offset_t)off,
					    SE_EXCL);
					ASSERT(pp == NULL || pp->p_szc == 0);
				}
				if (pp != NULL) {
					VM_STAT_ADD(anonvmstats.decrefpages[4]);
					/*LINTED*/
					VN_DISPOSE(pp, B_INVAL, 0, kcred);
				}
				kmem_cache_free(anon_cache, ap);
				ANI_ADD(1);
				i++;
			} else {
				pgcnt_t j;
				pgcnt_t curpgcnt =
				    page_get_pagecnt(pp->p_szc);
				size_t ppasize = curpgcnt * sizeof (page_t *);
				page_t **ppa = kmem_alloc(ppasize, KM_SLEEP);
				int dispose = 0;

				VM_STAT_ADD(anonvmstats.decrefpages[5]);

				ASSERT(pp->p_szc <= szc);
				ASSERT(IS_P2ALIGNED(curpgcnt, curpgcnt));
				ASSERT(IS_P2ALIGNED(i, curpgcnt));
				ASSERT(i + curpgcnt <= pgcnt);
				ASSERT(!(page_pptonum(pp) & (curpgcnt - 1)));
				ppa[0] = pp;
				for (j = i + 1; j < i + curpgcnt; j++) {
					ap = anon_get_ptr(ahp, an_idx + j);
					ASSERT(ap != NULL &&
					    ap->an_refcnt == 1);
					swap_xlate(ap, &vp, &off);
					pp = page_lookup(vp, (u_offset_t)off,
					    SE_EXCL);
					if (pp == NULL)
						panic("anon_decref_pages: "
						    "no page");

					(void) hat_pageunload(pp,
					    HAT_FORCE_PGUNLOAD);
					ASSERT(pp->p_szc == ppa[0]->p_szc);
12390Sstevel@tonic-gate 					ASSERT(page_pptonum(pp) - 1 ==
12400Sstevel@tonic-gate 					    page_pptonum(ppa[j - i - 1]));
12410Sstevel@tonic-gate 					ppa[j - i] = pp;
12420Sstevel@tonic-gate 					if (ap->an_pvp != NULL &&
12430Sstevel@tonic-gate 					    !vn_matchopval(ap->an_pvp,
12445466Skchow 					    VOPNAME_DISPOSE,
12455466Skchow 					    (fs_generic_func_p)fs_dispose))
12460Sstevel@tonic-gate 						dispose = 1;
12470Sstevel@tonic-gate 				}
12480Sstevel@tonic-gate 				for (j = i; j < i + curpgcnt; j++) {
12490Sstevel@tonic-gate 					ap = anon_get_ptr(ahp, an_idx + j);
12500Sstevel@tonic-gate 					ASSERT(ap != NULL &&
12510Sstevel@tonic-gate 					    ap->an_refcnt == 1);
1252*12173SMichael.Corcoran@Sun.COM 					ahm = AH_MUTEX(ap->an_vp, ap->an_off);
12530Sstevel@tonic-gate 					(void) anon_set_ptr(ahp, an_idx + j,
12540Sstevel@tonic-gate 					    NULL, ANON_SLEEP);
12550Sstevel@tonic-gate 					mutex_enter(ahm);
12560Sstevel@tonic-gate 					ap->an_refcnt--;
12570Sstevel@tonic-gate 					ASSERT(ap->an_refcnt == 0);
12580Sstevel@tonic-gate 					anon_rmhash(ap);
12590Sstevel@tonic-gate 					if (ap->an_pvp)
12600Sstevel@tonic-gate 						swap_phys_free(ap->an_pvp,
12615466Skchow 						    ap->an_poff, PAGESIZE);
12620Sstevel@tonic-gate 					mutex_exit(ahm);
12630Sstevel@tonic-gate 					kmem_cache_free(anon_cache, ap);
12640Sstevel@tonic-gate 					ANI_ADD(1);
12650Sstevel@tonic-gate 				}
12665665Sstans 				if (!dispose) {
12675665Sstans 					VM_STAT_ADD(anonvmstats.decrefpages[6]);
12685665Sstans 					page_destroy_pages(ppa[0]);
12695665Sstans 				} else {
12705665Sstans 					VM_STAT_ADD(anonvmstats.decrefpages[7]);
12715665Sstans 					for (j = 0; j < curpgcnt; j++) {
12725665Sstans 						ASSERT(PAGE_EXCL(ppa[j]));
12735665Sstans 						ppa[j]->p_szc = 0;
12745665Sstans 					}
12755665Sstans 					for (j = 0; j < curpgcnt; j++) {
12765665Sstans 						ASSERT(!hat_page_is_mapped(
12775665Sstans 						    ppa[j]));
12785665Sstans 						/*LINTED*/
12795665Sstans 						VN_DISPOSE(ppa[j], B_INVAL, 0,
12805665Sstans 						    kcred);
12815665Sstans 					}
12825665Sstans 				}
12835665Sstans 				kmem_free(ppa, ppasize);
12840Sstevel@tonic-gate 				i += curpgcnt;
12850Sstevel@tonic-gate 			}
12860Sstevel@tonic-gate 		} else {
12870Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.decrefpages[8]);
12880Sstevel@tonic-gate 			(void) anon_set_ptr(ahp, an_idx + i, NULL, ANON_SLEEP);
1289*12173SMichael.Corcoran@Sun.COM 			ahm = AH_MUTEX(ap->an_vp, ap->an_off);
12900Sstevel@tonic-gate 			mutex_enter(ahm);
12910Sstevel@tonic-gate 			ap->an_refcnt--;
12920Sstevel@tonic-gate 			mutex_exit(ahm);
12930Sstevel@tonic-gate 			i++;
12940Sstevel@tonic-gate 		}
12950Sstevel@tonic-gate 	}
12960Sstevel@tonic-gate 
12970Sstevel@tonic-gate 	if (ahmpages != NULL) {
12980Sstevel@tonic-gate 		mutex_exit(ahmpages);
12990Sstevel@tonic-gate 	}
13000Sstevel@tonic-gate }
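
/*
 * A minimal sketch of the locking protocol used above (illustrative
 * only): holding the APH_MUTEX of the first constituent slot keeps the
 * refcnts of every slot in the large page stable, so the per-slot
 * decrements taken under each slot's AH_MUTEX appear atomic to other
 * sharers:
 *
 *	ahmpages = APH_MUTEX(ap->an_vp, ap->an_off);
 *	mutex_enter(ahmpages);
 *	... decrement each slot's an_refcnt under its AH_MUTEX ...
 *	mutex_exit(ahmpages);
 */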
13010Sstevel@tonic-gate 
13020Sstevel@tonic-gate /*
13030Sstevel@tonic-gate  * Duplicate references to size bytes' worth of anon pages.
13040Sstevel@tonic-gate  * Used when duplicating a segment that contains private anon pages.
13050Sstevel@tonic-gate  * This code assumes that the procedure calling this one has already used
13060Sstevel@tonic-gate  * hat_chgprot() to disable write access to the range of addresses that
13070Sstevel@tonic-gate  * *old actually refers to.
13080Sstevel@tonic-gate  */
13090Sstevel@tonic-gate void
13100Sstevel@tonic-gate anon_dup(struct anon_hdr *old, ulong_t old_idx, struct anon_hdr *new,
13110Sstevel@tonic-gate 			ulong_t new_idx, size_t size)
13120Sstevel@tonic-gate {
13130Sstevel@tonic-gate 	spgcnt_t npages;
13140Sstevel@tonic-gate 	kmutex_t *ahm;
13150Sstevel@tonic-gate 	struct anon *ap;
13160Sstevel@tonic-gate 	ulong_t off;
13170Sstevel@tonic-gate 	ulong_t index;
13180Sstevel@tonic-gate 
13190Sstevel@tonic-gate 	npages = btopr(size);
13200Sstevel@tonic-gate 	while (npages > 0) {
13210Sstevel@tonic-gate 		index = old_idx;
13220Sstevel@tonic-gate 		if ((ap = anon_get_next_ptr(old, &index)) == NULL)
13230Sstevel@tonic-gate 			break;
13240Sstevel@tonic-gate 
13250Sstevel@tonic-gate 		ASSERT(!ANON_ISBUSY(anon_get_slot(old, index)));
13260Sstevel@tonic-gate 		off = index - old_idx;
13270Sstevel@tonic-gate 		npages -= off;
13280Sstevel@tonic-gate 		if (npages <= 0)
13290Sstevel@tonic-gate 			break;
13300Sstevel@tonic-gate 
13310Sstevel@tonic-gate 		(void) anon_set_ptr(new, new_idx + off, ap, ANON_SLEEP);
1332*12173SMichael.Corcoran@Sun.COM 		ahm = AH_MUTEX(ap->an_vp, ap->an_off);
13330Sstevel@tonic-gate 
13340Sstevel@tonic-gate 		mutex_enter(ahm);
13350Sstevel@tonic-gate 		ap->an_refcnt++;
13360Sstevel@tonic-gate 		mutex_exit(ahm);
13370Sstevel@tonic-gate 
13380Sstevel@tonic-gate 		off++;
13390Sstevel@tonic-gate 		new_idx += off;
13400Sstevel@tonic-gate 		old_idx += off;
13410Sstevel@tonic-gate 		npages--;
13420Sstevel@tonic-gate 	}
13430Sstevel@tonic-gate }
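
/*
 * Usage sketch for anon_dup() (hypothetical names, illustrative only):
 * a driver duplicating a private segment at fork time would first
 * write-protect the parent's translations, as the comment above
 * requires, and then share the slots:
 *
 *	hat_chgprot(oldseg->s_as->a_hat, oldseg->s_base,
 *	    oldseg->s_size, prot & ~PROT_WRITE);
 *	anon_dup(old_amp->ahp, old_idx, new_amp->ahp, new_idx, size);
 *
 * Both processes then fault and privatize pages on first write.
 */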
13440Sstevel@tonic-gate 
13450Sstevel@tonic-gate /*
13460Sstevel@tonic-gate  * Just like anon_dup but also guarantees there are no holes (unallocated anon
13470Sstevel@tonic-gate  * slots) within any large page region. That means if a large page region is
13480Sstevel@tonic-gate  * empty in the old array it will be skipped. If there are one or more valid
13490Sstevel@tonic-gate  * slots in a large page region of the old array it will make sure to fill in
13500Sstevel@tonic-gate  * any unallocated ones and also copy them to the new array. If noalloc is 1,
13510Sstevel@tonic-gate  * each large page region should either have no valid anon slots or all slots
13520Sstevel@tonic-gate  * should be valid.
13530Sstevel@tonic-gate  */
13540Sstevel@tonic-gate void
13550Sstevel@tonic-gate anon_dup_fill_holes(
13560Sstevel@tonic-gate 	struct anon_hdr *old,
13570Sstevel@tonic-gate 	ulong_t old_idx,
13580Sstevel@tonic-gate 	struct anon_hdr *new,
13590Sstevel@tonic-gate 	ulong_t new_idx,
13600Sstevel@tonic-gate 	size_t size,
13610Sstevel@tonic-gate 	uint_t szc,
13620Sstevel@tonic-gate 	int noalloc)
13630Sstevel@tonic-gate {
13640Sstevel@tonic-gate 	struct anon	*ap;
13650Sstevel@tonic-gate 	spgcnt_t	npages;
13660Sstevel@tonic-gate 	kmutex_t	*ahm, *ahmpages = NULL;
13670Sstevel@tonic-gate 	pgcnt_t		pgcnt, i;
13680Sstevel@tonic-gate 	ulong_t		index, off;
13690Sstevel@tonic-gate #ifdef DEBUG
13700Sstevel@tonic-gate 	int		refcnt;
13710Sstevel@tonic-gate #endif
13720Sstevel@tonic-gate 
13730Sstevel@tonic-gate 	ASSERT(szc != 0);
13740Sstevel@tonic-gate 	pgcnt = page_get_pagecnt(szc);
13750Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
13760Sstevel@tonic-gate 	npages = btopr(size);
13770Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(npages, pgcnt));
13780Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(old_idx, pgcnt));
13790Sstevel@tonic-gate 
13800Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.dupfillholes[0]);
13810Sstevel@tonic-gate 
13820Sstevel@tonic-gate 	while (npages > 0) {
13830Sstevel@tonic-gate 		index = old_idx;
13840Sstevel@tonic-gate 
13850Sstevel@tonic-gate 		/*
13860Sstevel@tonic-gate 		 * Find the next valid slot.
13870Sstevel@tonic-gate 		 */
13880Sstevel@tonic-gate 		if (anon_get_next_ptr(old, &index) == NULL)
13890Sstevel@tonic-gate 			break;
13900Sstevel@tonic-gate 
13910Sstevel@tonic-gate 		ASSERT(!ANON_ISBUSY(anon_get_slot(old, index)));
13920Sstevel@tonic-gate 		/*
13930Sstevel@tonic-gate 		 * Now back up index to the beginning of the
13940Sstevel@tonic-gate 		 * current large page region of the old array.
13950Sstevel@tonic-gate 		 */
13960Sstevel@tonic-gate 		index = P2ALIGN(index, pgcnt);
13970Sstevel@tonic-gate 		off = index - old_idx;
13980Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(off, pgcnt));
13990Sstevel@tonic-gate 		npages -= off;
14000Sstevel@tonic-gate 		if (npages <= 0)
14010Sstevel@tonic-gate 			break;
14020Sstevel@tonic-gate 
14030Sstevel@tonic-gate 		/*
14040Sstevel@tonic-gate 		 * Fill and copy a large page region's worth
14050Sstevel@tonic-gate 		 * of anon slots.
14060Sstevel@tonic-gate 		 */
14070Sstevel@tonic-gate 		for (i = 0; i < pgcnt; i++) {
14080Sstevel@tonic-gate 			if ((ap = anon_get_ptr(old, index + i)) == NULL) {
14090Sstevel@tonic-gate 				if (noalloc) {
14100Sstevel@tonic-gate 					panic("anon_dup_fill_holes: "
14110Sstevel@tonic-gate 					    "empty anon slot\n");
14120Sstevel@tonic-gate 				}
14130Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.dupfillholes[1]);
14140Sstevel@tonic-gate 				ap = anon_alloc(NULL, 0);
14150Sstevel@tonic-gate 				(void) anon_set_ptr(old, index + i, ap,
14160Sstevel@tonic-gate 				    ANON_SLEEP);
14170Sstevel@tonic-gate 			} else if (i == 0) {
14180Sstevel@tonic-gate 				/*
14190Sstevel@tonic-gate 				 * Make the increment of all refcnts of all
14200Sstevel@tonic-gate 				 * anon slots of a large page appear atomic by
14210Sstevel@tonic-gate 				 * taking the APH_MUTEX for the first anon
14220Sstevel@tonic-gate 				 * slot of the large page.
14230Sstevel@tonic-gate 				 */
14240Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.dupfillholes[2]);
14250Sstevel@tonic-gate 
1426*12173SMichael.Corcoran@Sun.COM 				ahmpages = APH_MUTEX(ap->an_vp, ap->an_off);
14270Sstevel@tonic-gate 				mutex_enter(ahmpages);
14280Sstevel@tonic-gate 				/*LINTED*/
14290Sstevel@tonic-gate 				ASSERT(refcnt = ap->an_refcnt);
14300Sstevel@tonic-gate 
14310Sstevel@tonic-gate 				VM_STAT_COND_ADD(ap->an_refcnt > 1,
14320Sstevel@tonic-gate 				    anonvmstats.dupfillholes[3]);
14330Sstevel@tonic-gate 			}
14340Sstevel@tonic-gate 			(void) anon_set_ptr(new, new_idx + off + i, ap,
14350Sstevel@tonic-gate 			    ANON_SLEEP);
1436*12173SMichael.Corcoran@Sun.COM 			ahm = AH_MUTEX(ap->an_vp, ap->an_off);
14370Sstevel@tonic-gate 			mutex_enter(ahm);
14380Sstevel@tonic-gate 			ASSERT(ahmpages != NULL || ap->an_refcnt == 1);
14390Sstevel@tonic-gate 			ASSERT(i == 0 || ahmpages == NULL ||
14400Sstevel@tonic-gate 			    refcnt == ap->an_refcnt);
14410Sstevel@tonic-gate 			ap->an_refcnt++;
14420Sstevel@tonic-gate 			mutex_exit(ahm);
14430Sstevel@tonic-gate 		}
14440Sstevel@tonic-gate 		if (ahmpages != NULL) {
14450Sstevel@tonic-gate 			mutex_exit(ahmpages);
14460Sstevel@tonic-gate 			ahmpages = NULL;
14470Sstevel@tonic-gate 		}
14480Sstevel@tonic-gate 		off += pgcnt;
14490Sstevel@tonic-gate 		new_idx += off;
14500Sstevel@tonic-gate 		old_idx += off;
14510Sstevel@tonic-gate 		npages -= pgcnt;
14520Sstevel@tonic-gate 	}
14530Sstevel@tonic-gate }
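
/*
 * Worked example of the region arithmetic above (illustrative only):
 * with szc giving pgcnt == 8 and the first valid slot found at a
 * relative index of 13,
 *
 *	index = P2ALIGN(13, 8);		index is now 8
 *	off = index - old_idx;		slots skipped so far
 *
 * so processing restarts at the 8-slot boundary containing the valid
 * slot, and a large page region is always filled in its entirety or
 * not at all.
 */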
14540Sstevel@tonic-gate 
14550Sstevel@tonic-gate /*
14560Sstevel@tonic-gate  * Used when a segment with a vnode changes szc. Similarly to
14570Sstevel@tonic-gate  * anon_dup_fill_holes() it makes sure each large page region either has no
14580Sstevel@tonic-gate  * anon slots or all of them, but new slots are created by COWing the file
14590Sstevel@tonic-gate  * pages. On entry no anon slots should be shared.
14600Sstevel@tonic-gate  */
14610Sstevel@tonic-gate int
14620Sstevel@tonic-gate anon_fill_cow_holes(
14630Sstevel@tonic-gate 	struct seg *seg,
14640Sstevel@tonic-gate 	caddr_t addr,
14650Sstevel@tonic-gate 	struct anon_hdr *ahp,
14660Sstevel@tonic-gate 	ulong_t an_idx,
14670Sstevel@tonic-gate 	struct vnode *vp,
14680Sstevel@tonic-gate 	u_offset_t vp_off,
14690Sstevel@tonic-gate 	size_t size,
14700Sstevel@tonic-gate 	uint_t szc,
14710Sstevel@tonic-gate 	uint_t prot,
14720Sstevel@tonic-gate 	struct vpage vpage[],
14730Sstevel@tonic-gate 	struct cred *cred)
14740Sstevel@tonic-gate {
14750Sstevel@tonic-gate 	struct anon	*ap;
14760Sstevel@tonic-gate 	spgcnt_t	npages;
14770Sstevel@tonic-gate 	pgcnt_t		pgcnt, i;
14780Sstevel@tonic-gate 	ulong_t		index, off;
14790Sstevel@tonic-gate 	int		err = 0;
14800Sstevel@tonic-gate 	int		pageflags = 0;
14810Sstevel@tonic-gate 
14820Sstevel@tonic-gate 	ASSERT(szc != 0);
14830Sstevel@tonic-gate 	pgcnt = page_get_pagecnt(szc);
14840Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
14850Sstevel@tonic-gate 	npages = btopr(size);
14860Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(npages, pgcnt));
14870Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(an_idx, pgcnt));
14880Sstevel@tonic-gate 
14890Sstevel@tonic-gate 	while (npages > 0) {
14900Sstevel@tonic-gate 		index = an_idx;
14910Sstevel@tonic-gate 
14920Sstevel@tonic-gate 		/*
14930Sstevel@tonic-gate 		 * Find the next valid slot.
14940Sstevel@tonic-gate 		 */
14950Sstevel@tonic-gate 		if (anon_get_next_ptr(ahp, &index) == NULL) {
14960Sstevel@tonic-gate 			break;
14970Sstevel@tonic-gate 		}
14980Sstevel@tonic-gate 
14990Sstevel@tonic-gate 		ASSERT(!ANON_ISBUSY(anon_get_slot(ahp, index)));
15000Sstevel@tonic-gate 		/*
15010Sstevel@tonic-gate 		 * Now back up index to the beginning of the
15020Sstevel@tonic-gate 		 * current large page region of the anon array.
15030Sstevel@tonic-gate 		 */
15040Sstevel@tonic-gate 		index = P2ALIGN(index, pgcnt);
15050Sstevel@tonic-gate 		off = index - an_idx;
15060Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(off, pgcnt));
15070Sstevel@tonic-gate 		npages -= off;
15080Sstevel@tonic-gate 		if (npages <= 0)
15090Sstevel@tonic-gate 			break;
15100Sstevel@tonic-gate 		an_idx += off;
15110Sstevel@tonic-gate 		vp_off += ptob(off);
15120Sstevel@tonic-gate 		addr += ptob(off);
15130Sstevel@tonic-gate 		if (vpage != NULL) {
15140Sstevel@tonic-gate 			vpage += off;
15150Sstevel@tonic-gate 		}
15160Sstevel@tonic-gate 
15170Sstevel@tonic-gate 		for (i = 0; i < pgcnt; i++, an_idx++, vp_off += PAGESIZE) {
15180Sstevel@tonic-gate 			if ((ap = anon_get_ptr(ahp, an_idx)) == NULL) {
15190Sstevel@tonic-gate 				page_t *pl[1 + 1];
15200Sstevel@tonic-gate 				page_t *pp;
15210Sstevel@tonic-gate 
15220Sstevel@tonic-gate 				err = VOP_GETPAGE(vp, vp_off, PAGESIZE, NULL,
15235331Samw 				    pl, PAGESIZE, seg, addr, S_READ, cred,
15245331Samw 				    NULL);
15250Sstevel@tonic-gate 				if (err) {
15260Sstevel@tonic-gate 					break;
15270Sstevel@tonic-gate 				}
15280Sstevel@tonic-gate 				if (vpage != NULL) {
15290Sstevel@tonic-gate 					prot = VPP_PROT(vpage);
15300Sstevel@tonic-gate 					pageflags = VPP_ISPPLOCK(vpage) ?
15310Sstevel@tonic-gate 					    LOCK_PAGE : 0;
15320Sstevel@tonic-gate 				}
15330Sstevel@tonic-gate 				pp = anon_private(&ap, seg, addr, prot, pl[0],
15345466Skchow 				    pageflags, cred);
15350Sstevel@tonic-gate 				if (pp == NULL) {
15360Sstevel@tonic-gate 					err = ENOMEM;
15370Sstevel@tonic-gate 					break;
15380Sstevel@tonic-gate 				}
15390Sstevel@tonic-gate 				(void) anon_set_ptr(ahp, an_idx, ap,
15400Sstevel@tonic-gate 				    ANON_SLEEP);
15410Sstevel@tonic-gate 				page_unlock(pp);
15420Sstevel@tonic-gate 			}
15430Sstevel@tonic-gate 			ASSERT(ap->an_refcnt == 1);
15440Sstevel@tonic-gate 			addr += PAGESIZE;
15450Sstevel@tonic-gate 			if (vpage != NULL) {
15460Sstevel@tonic-gate 				vpage++;
15470Sstevel@tonic-gate 			}
15480Sstevel@tonic-gate 		}
15490Sstevel@tonic-gate 		npages -= pgcnt;
15500Sstevel@tonic-gate 	}
15510Sstevel@tonic-gate 
15520Sstevel@tonic-gate 	return (err);
15530Sstevel@tonic-gate }
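
/*
 * Calling sketch (illustrative only; svd is a hypothetical segvn-style
 * private data pointer): the per-page protections consumed above come
 * from the segment's vpage array when one exists:
 *
 *	err = anon_fill_cow_holes(seg, seg->s_base, amp->ahp, 0,
 *	    svd->vp, svd->offset, seg->s_size, seg->s_szc,
 *	    svd->prot, svd->vpage, cred);
 *
 * A NULL vpage simply applies 'prot' to every page in the range.
 */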
15540Sstevel@tonic-gate 
15550Sstevel@tonic-gate /*
15560Sstevel@tonic-gate  * Free a group of "size" anon pages, size in bytes,
15570Sstevel@tonic-gate  * and clear out the pointers to the anon entries.
15580Sstevel@tonic-gate  */
15590Sstevel@tonic-gate void
15600Sstevel@tonic-gate anon_free(struct anon_hdr *ahp, ulong_t index, size_t size)
15610Sstevel@tonic-gate {
15620Sstevel@tonic-gate 	spgcnt_t npages;
15630Sstevel@tonic-gate 	struct anon *ap;
15640Sstevel@tonic-gate 	ulong_t old;
15650Sstevel@tonic-gate 
15660Sstevel@tonic-gate 	npages = btopr(size);
15670Sstevel@tonic-gate 
15680Sstevel@tonic-gate 	while (npages > 0) {
15690Sstevel@tonic-gate 		old = index;
15700Sstevel@tonic-gate 		if ((ap = anon_get_next_ptr(ahp, &index)) == NULL)
15710Sstevel@tonic-gate 			break;
15720Sstevel@tonic-gate 
15730Sstevel@tonic-gate 		ASSERT(!ANON_ISBUSY(anon_get_slot(ahp, index)));
15740Sstevel@tonic-gate 		npages -= index - old;
15750Sstevel@tonic-gate 		if (npages <= 0)
15760Sstevel@tonic-gate 			break;
15770Sstevel@tonic-gate 
15780Sstevel@tonic-gate 		(void) anon_set_ptr(ahp, index, NULL, ANON_SLEEP);
15790Sstevel@tonic-gate 		anon_decref(ap);
15800Sstevel@tonic-gate 		/*
15810Sstevel@tonic-gate 		 * Bump index and decrement page count
15820Sstevel@tonic-gate 		 */
15830Sstevel@tonic-gate 		index++;
15840Sstevel@tonic-gate 		npages--;
15850Sstevel@tonic-gate 	}
15860Sstevel@tonic-gate }
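
/*
 * A minimal sketch of the sparse-walk idiom used above (illustrative
 * only): anon_get_next_ptr() advances 'index' past any holes, so the
 * loop only has to charge the slots it skipped against 'npages':
 *
 *	old = index;
 *	ap = anon_get_next_ptr(ahp, &index);
 *	npages -= index - old;
 *
 * The same pattern appears in anon_free_pages() and anon_disclaim()
 * below.
 */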
15870Sstevel@tonic-gate 
15880Sstevel@tonic-gate void
15890Sstevel@tonic-gate anon_free_pages(
15900Sstevel@tonic-gate 	struct anon_hdr *ahp,
15910Sstevel@tonic-gate 	ulong_t an_idx,
15920Sstevel@tonic-gate 	size_t size,
15930Sstevel@tonic-gate 	uint_t szc)
15940Sstevel@tonic-gate {
15950Sstevel@tonic-gate 	spgcnt_t	npages;
15960Sstevel@tonic-gate 	pgcnt_t		pgcnt;
15970Sstevel@tonic-gate 	ulong_t		index, off;
15980Sstevel@tonic-gate 
15990Sstevel@tonic-gate 	ASSERT(szc != 0);
16000Sstevel@tonic-gate 	pgcnt = page_get_pagecnt(szc);
16010Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
16020Sstevel@tonic-gate 	npages = btopr(size);
16030Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(npages, pgcnt));
16040Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(an_idx, pgcnt));
16052414Saguzovsk 	ASSERT(an_idx < ahp->size);
16060Sstevel@tonic-gate 
16070Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.freepages[0]);
16080Sstevel@tonic-gate 
16090Sstevel@tonic-gate 	while (npages > 0) {
16100Sstevel@tonic-gate 		index = an_idx;
16110Sstevel@tonic-gate 
16120Sstevel@tonic-gate 		/*
16130Sstevel@tonic-gate 		 * Find the next valid slot.
16140Sstevel@tonic-gate 		 */
16150Sstevel@tonic-gate 		if (anon_get_next_ptr(ahp, &index) == NULL)
16160Sstevel@tonic-gate 			break;
16170Sstevel@tonic-gate 
16180Sstevel@tonic-gate 		ASSERT(!ANON_ISBUSY(anon_get_slot(ahp, index)));
16190Sstevel@tonic-gate 		/*
16200Sstevel@tonic-gate 		 * Now back up index to the beginning of the
16210Sstevel@tonic-gate 		 * current large page region of the old array.
16220Sstevel@tonic-gate 		 */
16230Sstevel@tonic-gate 		index = P2ALIGN(index, pgcnt);
16240Sstevel@tonic-gate 		off = index - an_idx;
16250Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(off, pgcnt));
16260Sstevel@tonic-gate 		npages -= off;
16270Sstevel@tonic-gate 		if (npages <= 0)
16280Sstevel@tonic-gate 			break;
16290Sstevel@tonic-gate 
16300Sstevel@tonic-gate 		anon_decref_pages(ahp, index, szc);
16310Sstevel@tonic-gate 
16320Sstevel@tonic-gate 		off += pgcnt;
16330Sstevel@tonic-gate 		an_idx += off;
16340Sstevel@tonic-gate 		npages -= pgcnt;
16350Sstevel@tonic-gate 	}
16360Sstevel@tonic-gate }
16370Sstevel@tonic-gate 
16380Sstevel@tonic-gate /*
16390Sstevel@tonic-gate  * Make anonymous pages discardable
16400Sstevel@tonic-gate  */
16410Sstevel@tonic-gate void
16425224Smec anon_disclaim(struct anon_map *amp, ulong_t index, size_t size)
16430Sstevel@tonic-gate {
16440Sstevel@tonic-gate 	spgcnt_t npages = btopr(size);
16450Sstevel@tonic-gate 	struct anon *ap;
16460Sstevel@tonic-gate 	struct vnode *vp;
16470Sstevel@tonic-gate 	anoff_t off;
16480Sstevel@tonic-gate 	page_t *pp, *root_pp;
16490Sstevel@tonic-gate 	kmutex_t *ahm;
16500Sstevel@tonic-gate 	pgcnt_t pgcnt;
16510Sstevel@tonic-gate 	ulong_t old_idx, idx, i;
16520Sstevel@tonic-gate 	struct anon_hdr *ahp = amp->ahp;
16530Sstevel@tonic-gate 	anon_sync_obj_t cookie;
16540Sstevel@tonic-gate 
16550Sstevel@tonic-gate 	ASSERT(RW_READ_HELD(&amp->a_rwlock));
16560Sstevel@tonic-gate 	pgcnt = 1;
16575224Smec 	for (; npages > 0; index = (pgcnt == 1) ? index + 1 :
16585224Smec 	    P2ROUNDUP(index + 1, pgcnt), npages -= pgcnt) {
16590Sstevel@tonic-gate 
16600Sstevel@tonic-gate 		/*
16610Sstevel@tonic-gate 		 * get anon pointer and index for the first valid entry
16620Sstevel@tonic-gate 		 * in the anon list, starting from "index"
16630Sstevel@tonic-gate 		 */
16640Sstevel@tonic-gate 		old_idx = index;
16650Sstevel@tonic-gate 		if ((ap = anon_get_next_ptr(ahp, &index)) == NULL)
16660Sstevel@tonic-gate 			break;
16670Sstevel@tonic-gate 
16680Sstevel@tonic-gate 		/*
16690Sstevel@tonic-gate 		 * decrement npages by number of NULL anon slots we skipped
16700Sstevel@tonic-gate 		 */
16710Sstevel@tonic-gate 		npages -= index - old_idx;
16720Sstevel@tonic-gate 		if (npages <= 0)
16730Sstevel@tonic-gate 			break;
16740Sstevel@tonic-gate 
16750Sstevel@tonic-gate 		anon_array_enter(amp, index, &cookie);
16760Sstevel@tonic-gate 		ap = anon_get_ptr(ahp, index);
16770Sstevel@tonic-gate 		ASSERT(ap != NULL);
16780Sstevel@tonic-gate 
16790Sstevel@tonic-gate 		/*
16800Sstevel@tonic-gate 		 * Get anonymous page and try to lock it SE_EXCL;
16815224Smec 		 * if we can't grab the lock we skip to the next page.
16820Sstevel@tonic-gate 		 */
16830Sstevel@tonic-gate 		swap_xlate(ap, &vp, &off);
16845224Smec 		pp = page_lookup_nowait(vp, (u_offset_t)off, SE_EXCL);
16850Sstevel@tonic-gate 		if (pp == NULL) {
16860Sstevel@tonic-gate 			segadvstat.MADV_FREE_miss.value.ul++;
16870Sstevel@tonic-gate 			pgcnt = 1;
16880Sstevel@tonic-gate 			anon_array_exit(&cookie);
16890Sstevel@tonic-gate 			continue;
16900Sstevel@tonic-gate 		}
16910Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(pp->p_szc);
16920Sstevel@tonic-gate 
16930Sstevel@tonic-gate 		/*
16940Sstevel@tonic-gate 		 * We cannot free a page which is permanently locked.
16950Sstevel@tonic-gate 		 * The page_struct_lock need not be acquired to examine
16960Sstevel@tonic-gate 		 * these fields since the page has an "exclusive" lock.
16970Sstevel@tonic-gate 		 */
16980Sstevel@tonic-gate 		if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
16990Sstevel@tonic-gate 			page_unlock(pp);
17000Sstevel@tonic-gate 			segadvstat.MADV_FREE_miss.value.ul++;
17010Sstevel@tonic-gate 			anon_array_exit(&cookie);
17020Sstevel@tonic-gate 			continue;
17030Sstevel@tonic-gate 		}
17040Sstevel@tonic-gate 
1705*12173SMichael.Corcoran@Sun.COM 		ahm = AH_MUTEX(vp, off);
17060Sstevel@tonic-gate 		mutex_enter(ahm);
17070Sstevel@tonic-gate 		ASSERT(ap->an_refcnt != 0);
17080Sstevel@tonic-gate 		/*
17090Sstevel@tonic-gate 		 * skip this one if copy-on-write is not yet broken.
17100Sstevel@tonic-gate 		 */
17110Sstevel@tonic-gate 		if (ap->an_refcnt > 1) {
17120Sstevel@tonic-gate 			mutex_exit(ahm);
17130Sstevel@tonic-gate 			page_unlock(pp);
17140Sstevel@tonic-gate 			segadvstat.MADV_FREE_miss.value.ul++;
17150Sstevel@tonic-gate 			anon_array_exit(&cookie);
17160Sstevel@tonic-gate 			continue;
17170Sstevel@tonic-gate 		}
17180Sstevel@tonic-gate 
17190Sstevel@tonic-gate 		if (pp->p_szc == 0) {
17200Sstevel@tonic-gate 			pgcnt = 1;
17210Sstevel@tonic-gate 
17220Sstevel@tonic-gate 			/*
17230Sstevel@tonic-gate 			 * Free the swap slot.
17240Sstevel@tonic-gate 			 */
17250Sstevel@tonic-gate 			if (ap->an_pvp) {
17260Sstevel@tonic-gate 				swap_phys_free(ap->an_pvp, ap->an_poff,
17270Sstevel@tonic-gate 				    PAGESIZE);
17280Sstevel@tonic-gate 				ap->an_pvp = NULL;
17290Sstevel@tonic-gate 				ap->an_poff = 0;
17300Sstevel@tonic-gate 			}
17310Sstevel@tonic-gate 			mutex_exit(ahm);
17320Sstevel@tonic-gate 			segadvstat.MADV_FREE_hit.value.ul++;
17330Sstevel@tonic-gate 
17340Sstevel@tonic-gate 			/*
17350Sstevel@tonic-gate 			 * while we are at it, unload all the translations
17360Sstevel@tonic-gate 			 * and attempt to free the page.
17370Sstevel@tonic-gate 			 */
17380Sstevel@tonic-gate 			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
17390Sstevel@tonic-gate 			/*LINTED: constant in conditional context */
17400Sstevel@tonic-gate 			VN_DISPOSE(pp, B_FREE, 0, kcred);
17410Sstevel@tonic-gate 			anon_array_exit(&cookie);
17420Sstevel@tonic-gate 			continue;
17430Sstevel@tonic-gate 		}
17440Sstevel@tonic-gate 
17450Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(pp->p_szc);
17462414Saguzovsk 		if (!IS_P2ALIGNED(index, pgcnt) || npages < pgcnt) {
17470Sstevel@tonic-gate 			if (!page_try_demote_pages(pp)) {
17480Sstevel@tonic-gate 				mutex_exit(ahm);
17490Sstevel@tonic-gate 				page_unlock(pp);
17500Sstevel@tonic-gate 				segadvstat.MADV_FREE_miss.value.ul++;
17510Sstevel@tonic-gate 				anon_array_exit(&cookie);
17520Sstevel@tonic-gate 				continue;
17530Sstevel@tonic-gate 			} else {
17540Sstevel@tonic-gate 				pgcnt = 1;
17550Sstevel@tonic-gate 				if (ap->an_pvp) {
17560Sstevel@tonic-gate 					swap_phys_free(ap->an_pvp,
17570Sstevel@tonic-gate 					    ap->an_poff, PAGESIZE);
17585224Smec 					ap->an_pvp = NULL;
17595224Smec 					ap->an_poff = 0;
17600Sstevel@tonic-gate 				}
17610Sstevel@tonic-gate 				mutex_exit(ahm);
17620Sstevel@tonic-gate 				(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
17630Sstevel@tonic-gate 				/*LINTED*/
17640Sstevel@tonic-gate 				VN_DISPOSE(pp, B_FREE, 0, kcred);
17650Sstevel@tonic-gate 				segadvstat.MADV_FREE_hit.value.ul++;
17660Sstevel@tonic-gate 				anon_array_exit(&cookie);
17670Sstevel@tonic-gate 				continue;
17680Sstevel@tonic-gate 			}
17690Sstevel@tonic-gate 		}
17700Sstevel@tonic-gate 		mutex_exit(ahm);
17710Sstevel@tonic-gate 		root_pp = pp;
17720Sstevel@tonic-gate 
17730Sstevel@tonic-gate 		/*
17740Sstevel@tonic-gate 		 * try to lock remaining pages
17750Sstevel@tonic-gate 		 */
17760Sstevel@tonic-gate 		for (idx = 1; idx < pgcnt; idx++) {
1777414Skchow 			pp++;
17780Sstevel@tonic-gate 			if (!page_trylock(pp, SE_EXCL))
17790Sstevel@tonic-gate 				break;
17800Sstevel@tonic-gate 			if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
17810Sstevel@tonic-gate 				page_unlock(pp);
17820Sstevel@tonic-gate 				break;
17830Sstevel@tonic-gate 			}
17840Sstevel@tonic-gate 		}
17850Sstevel@tonic-gate 
17860Sstevel@tonic-gate 		if (idx == pgcnt) {
17870Sstevel@tonic-gate 			for (i = 0; i < pgcnt; i++) {
17880Sstevel@tonic-gate 				ap = anon_get_ptr(ahp, index + i);
17890Sstevel@tonic-gate 				if (ap == NULL)
17900Sstevel@tonic-gate 					break;
17910Sstevel@tonic-gate 				swap_xlate(ap, &vp, &off);
1792*12173SMichael.Corcoran@Sun.COM 				ahm = AH_MUTEX(vp, off);
17930Sstevel@tonic-gate 				mutex_enter(ahm);
17940Sstevel@tonic-gate 				ASSERT(ap->an_refcnt != 0);
17950Sstevel@tonic-gate 
17960Sstevel@tonic-gate 				/*
17970Sstevel@tonic-gate 				 * skip this one if copy-on-write
17980Sstevel@tonic-gate 				 * is not yet broken.
17990Sstevel@tonic-gate 				 */
18000Sstevel@tonic-gate 				if (ap->an_refcnt > 1) {
18010Sstevel@tonic-gate 					mutex_exit(ahm);
18020Sstevel@tonic-gate 					goto skiplp;
18030Sstevel@tonic-gate 				}
18040Sstevel@tonic-gate 				if (ap->an_pvp) {
18050Sstevel@tonic-gate 					swap_phys_free(ap->an_pvp,
18060Sstevel@tonic-gate 					    ap->an_poff, PAGESIZE);
18075224Smec 					ap->an_pvp = NULL;
18085224Smec 					ap->an_poff = 0;
18090Sstevel@tonic-gate 				}
18100Sstevel@tonic-gate 				mutex_exit(ahm);
18110Sstevel@tonic-gate 			}
18120Sstevel@tonic-gate 			page_destroy_pages(root_pp);
18130Sstevel@tonic-gate 			segadvstat.MADV_FREE_hit.value.ul += pgcnt;
18140Sstevel@tonic-gate 			anon_array_exit(&cookie);
18150Sstevel@tonic-gate 			continue;
18160Sstevel@tonic-gate 		}
18170Sstevel@tonic-gate skiplp:
18180Sstevel@tonic-gate 		segadvstat.MADV_FREE_miss.value.ul += pgcnt;
1819414Skchow 		for (i = 0, pp = root_pp; i < idx; pp++, i++)
18200Sstevel@tonic-gate 			page_unlock(pp);
18210Sstevel@tonic-gate 		anon_array_exit(&cookie);
18220Sstevel@tonic-gate 	}
18230Sstevel@tonic-gate }
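
/*
 * Calling sketch (hypothetical caller state, illustrative only):
 * anon_disclaim() is the MADV_FREE back end; a segment driver advising
 * a range calls it with the amp read-locked, as the ASSERT above
 * requires:
 *
 *	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
 *	anon_disclaim(amp, svd->anon_index + seg_page(seg, addr), len);
 *	ANON_LOCK_EXIT(&amp->a_rwlock);
 *
 * Pages that cannot be locked or that are still COW-shared are skipped
 * and counted in segadvstat.
 */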
18240Sstevel@tonic-gate 
18250Sstevel@tonic-gate /*
18260Sstevel@tonic-gate  * Return the kept page(s) and protections to the segment driver.
18270Sstevel@tonic-gate  */
18280Sstevel@tonic-gate int
18290Sstevel@tonic-gate anon_getpage(
18300Sstevel@tonic-gate 	struct anon **app,
18310Sstevel@tonic-gate 	uint_t *protp,
18320Sstevel@tonic-gate 	page_t *pl[],
18330Sstevel@tonic-gate 	size_t plsz,
18340Sstevel@tonic-gate 	struct seg *seg,
18350Sstevel@tonic-gate 	caddr_t addr,
18360Sstevel@tonic-gate 	enum seg_rw rw,
18370Sstevel@tonic-gate 	struct cred *cred)
18380Sstevel@tonic-gate {
18390Sstevel@tonic-gate 	page_t *pp;
18400Sstevel@tonic-gate 	struct anon *ap = *app;
18410Sstevel@tonic-gate 	struct vnode *vp;
18420Sstevel@tonic-gate 	anoff_t off;
18430Sstevel@tonic-gate 	int err;
18440Sstevel@tonic-gate 	kmutex_t *ahm;
18450Sstevel@tonic-gate 
18460Sstevel@tonic-gate 	swap_xlate(ap, &vp, &off);
18470Sstevel@tonic-gate 
18480Sstevel@tonic-gate 	/*
18490Sstevel@tonic-gate 	 * Look up the page. If the page is being paged in,
18500Sstevel@tonic-gate 	 * wait for it to finish as we must return a list of
18510Sstevel@tonic-gate 	 * pages since this routine acts like the VOP_GETPAGE
18520Sstevel@tonic-gate 	 * routine does.
18530Sstevel@tonic-gate 	 */
18540Sstevel@tonic-gate 	if (pl != NULL && (pp = page_lookup(vp, (u_offset_t)off, SE_SHARED))) {
1855*12173SMichael.Corcoran@Sun.COM 		ahm = AH_MUTEX(ap->an_vp, ap->an_off);
18560Sstevel@tonic-gate 		mutex_enter(ahm);
18570Sstevel@tonic-gate 		if (ap->an_refcnt == 1)
18580Sstevel@tonic-gate 			*protp = PROT_ALL;
18590Sstevel@tonic-gate 		else
18600Sstevel@tonic-gate 			*protp = PROT_ALL & ~PROT_WRITE;
18610Sstevel@tonic-gate 		mutex_exit(ahm);
18620Sstevel@tonic-gate 		pl[0] = pp;
18630Sstevel@tonic-gate 		pl[1] = NULL;
18640Sstevel@tonic-gate 		return (0);
18650Sstevel@tonic-gate 	}
18660Sstevel@tonic-gate 
18670Sstevel@tonic-gate 	/*
18680Sstevel@tonic-gate 	 * Simply treat it as a vnode fault on the anon vp.
18690Sstevel@tonic-gate 	 */
18700Sstevel@tonic-gate 
18710Sstevel@tonic-gate 	TRACE_3(TR_FAC_VM, TR_ANON_GETPAGE,
18725466Skchow 	    "anon_getpage:seg %x addr %x vp %x",
18735466Skchow 	    seg, addr, vp);
18740Sstevel@tonic-gate 
18750Sstevel@tonic-gate 	err = VOP_GETPAGE(vp, (u_offset_t)off, PAGESIZE, protp, pl, plsz,
18765331Samw 	    seg, addr, rw, cred, NULL);
18770Sstevel@tonic-gate 
18780Sstevel@tonic-gate 	if (err == 0 && pl != NULL) {
1879*12173SMichael.Corcoran@Sun.COM 		ahm = AH_MUTEX(ap->an_vp, ap->an_off);
18800Sstevel@tonic-gate 		mutex_enter(ahm);
18810Sstevel@tonic-gate 		if (ap->an_refcnt != 1)
18820Sstevel@tonic-gate 			*protp &= ~PROT_WRITE;	/* make read-only */
18830Sstevel@tonic-gate 		mutex_exit(ahm);
18840Sstevel@tonic-gate 	}
18850Sstevel@tonic-gate 	return (err);
18860Sstevel@tonic-gate }
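
/*
 * A small sketch of the *protp contract above (illustrative only;
 * hat_flag is hypothetical caller state): because *protp loses
 * PROT_WRITE whenever the slot is still shared, a caller can load the
 * translation directly and rely on the first store to fault and break
 * COW:
 *
 *	err = anon_getpage(&ap, &vpprot, pl, PAGESIZE, seg, addr,
 *	    S_READ, cred);
 *	if (err == 0)
 *		hat_memload(seg->s_as->a_hat, addr, pl[0],
 *		    prot & vpprot, hat_flag);
 */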
18870Sstevel@tonic-gate 
18880Sstevel@tonic-gate /*
18890Sstevel@tonic-gate  * Creates or returns kept pages to the segment driver.  Returns -1 if a large
18900Sstevel@tonic-gate  * page cannot be allocated and -2 if some other process has allocated a
18910Sstevel@tonic-gate  * larger page.
18920Sstevel@tonic-gate  *
18935331Samw  * For cowfault it will allocate any size pages to fill the requested area to
18945331Samw  * avoid partially overwriting anon slots (i.e. sharing only some of the anon
18950Sstevel@tonic-gate  * slots within a large page with other processes). This policy greatly
18960Sstevel@tonic-gate  * simplifies large page freeing (a large page is only freed when all of its
18970Sstevel@tonic-gate  * anon slot refcnts are 0).
18980Sstevel@tonic-gate  */
18990Sstevel@tonic-gate int
19000Sstevel@tonic-gate anon_map_getpages(
19010Sstevel@tonic-gate 	struct anon_map *amp,
19020Sstevel@tonic-gate 	ulong_t	start_idx,
19030Sstevel@tonic-gate 	uint_t	szc,
19040Sstevel@tonic-gate 	struct seg *seg,
19050Sstevel@tonic-gate 	caddr_t	addr,
19060Sstevel@tonic-gate 	uint_t prot,
19070Sstevel@tonic-gate 	uint_t *protp,
19080Sstevel@tonic-gate 	page_t	*ppa[],
19090Sstevel@tonic-gate 	uint_t	*ppa_szc,
19100Sstevel@tonic-gate 	struct vpage vpage[],
19110Sstevel@tonic-gate 	enum seg_rw rw,
19120Sstevel@tonic-gate 	int brkcow,
19130Sstevel@tonic-gate 	int anypgsz,
19144426Saguzovsk 	int pgflags,
19150Sstevel@tonic-gate 	struct cred *cred)
19160Sstevel@tonic-gate {
19170Sstevel@tonic-gate 	pgcnt_t		pgcnt;
19180Sstevel@tonic-gate 	struct anon	*ap;
19190Sstevel@tonic-gate 	struct vnode	*vp;
19200Sstevel@tonic-gate 	anoff_t		off;
19210Sstevel@tonic-gate 	page_t		*pp, *pl[2], *conpp = NULL;
19220Sstevel@tonic-gate 	caddr_t		vaddr;
19230Sstevel@tonic-gate 	ulong_t		pg_idx, an_idx, i;
19240Sstevel@tonic-gate 	spgcnt_t	nreloc = 0;
19250Sstevel@tonic-gate 	int		prealloc = 1;
19260Sstevel@tonic-gate 	int		err, slotcreate;
19270Sstevel@tonic-gate 	uint_t		vpprot;
19282414Saguzovsk 	int		upsize = (szc < seg->s_szc);
19290Sstevel@tonic-gate 
19300Sstevel@tonic-gate #if !defined(__i386) && !defined(__amd64)
19310Sstevel@tonic-gate 	ASSERT(seg->s_szc != 0);
19320Sstevel@tonic-gate #endif
19330Sstevel@tonic-gate 	ASSERT(szc <= seg->s_szc);
19340Sstevel@tonic-gate 	ASSERT(ppa_szc != NULL);
19350Sstevel@tonic-gate 	ASSERT(rw != S_CREATE);
19360Sstevel@tonic-gate 
19370Sstevel@tonic-gate 	*protp = PROT_ALL;
19380Sstevel@tonic-gate 
19390Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.getpages[0]);
19400Sstevel@tonic-gate 
19410Sstevel@tonic-gate 	if (szc == 0) {
19420Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.getpages[1]);
19430Sstevel@tonic-gate 		if ((ap = anon_get_ptr(amp->ahp, start_idx)) != NULL) {
19440Sstevel@tonic-gate 			err = anon_getpage(&ap, protp, pl, PAGESIZE, seg,
19450Sstevel@tonic-gate 			    addr, rw, cred);
19460Sstevel@tonic-gate 			if (err)
19470Sstevel@tonic-gate 				return (err);
19480Sstevel@tonic-gate 			ppa[0] = pl[0];
19490Sstevel@tonic-gate 			if (brkcow == 0 || (*protp & PROT_WRITE)) {
19500Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.getpages[2]);
19512414Saguzovsk 				if (ppa[0]->p_szc != 0 && upsize) {
19520Sstevel@tonic-gate 					VM_STAT_ADD(anonvmstats.getpages[3]);
19532414Saguzovsk 					*ppa_szc = MIN(ppa[0]->p_szc,
19542414Saguzovsk 					    seg->s_szc);
19550Sstevel@tonic-gate 					page_unlock(ppa[0]);
19560Sstevel@tonic-gate 					return (-2);
19570Sstevel@tonic-gate 				}
19580Sstevel@tonic-gate 				return (0);
19590Sstevel@tonic-gate 			}
19600Sstevel@tonic-gate 			panic("anon_map_getpages: cowfault for szc 0");
19610Sstevel@tonic-gate 		} else {
19620Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[4]);
19630Sstevel@tonic-gate 			ppa[0] = anon_zero(seg, addr, &ap, cred);
19640Sstevel@tonic-gate 			if (ppa[0] == NULL)
19650Sstevel@tonic-gate 				return (ENOMEM);
19660Sstevel@tonic-gate 			(void) anon_set_ptr(amp->ahp, start_idx, ap,
19670Sstevel@tonic-gate 			    ANON_SLEEP);
19680Sstevel@tonic-gate 			return (0);
19690Sstevel@tonic-gate 		}
19700Sstevel@tonic-gate 	}
19710Sstevel@tonic-gate 
19720Sstevel@tonic-gate 	pgcnt = page_get_pagecnt(szc);
19730Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
19740Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(start_idx, pgcnt));
19750Sstevel@tonic-gate 
19760Sstevel@tonic-gate 	/*
19770Sstevel@tonic-gate 	 * First we check for the case that the requested large
19780Sstevel@tonic-gate 	 * page or a larger page already exists in the system.
19790Sstevel@tonic-gate 	 * Actually we only check if the first constituent page
19800Sstevel@tonic-gate 	 * exists and only preallocate if it's not found.
19810Sstevel@tonic-gate 	 */
19820Sstevel@tonic-gate 	ap = anon_get_ptr(amp->ahp, start_idx);
19830Sstevel@tonic-gate 	if (ap) {
19840Sstevel@tonic-gate 		uint_t pszc;
19850Sstevel@tonic-gate 		swap_xlate(ap, &vp, &off);
19860Sstevel@tonic-gate 		if (page_exists_forreal(vp, (u_offset_t)off, &pszc)) {
19872414Saguzovsk 			if (pszc > szc && upsize) {
19882414Saguzovsk 				*ppa_szc = MIN(pszc, seg->s_szc);
19890Sstevel@tonic-gate 				return (-2);
19900Sstevel@tonic-gate 			}
19912414Saguzovsk 			if (pszc >= szc) {
19920Sstevel@tonic-gate 				prealloc = 0;
19930Sstevel@tonic-gate 			}
19940Sstevel@tonic-gate 		}
19950Sstevel@tonic-gate 	}
19960Sstevel@tonic-gate 
19970Sstevel@tonic-gate 	VM_STAT_COND_ADD(prealloc == 0, anonvmstats.getpages[5]);
19980Sstevel@tonic-gate 	VM_STAT_COND_ADD(prealloc != 0, anonvmstats.getpages[6]);
19990Sstevel@tonic-gate 
20000Sstevel@tonic-gate top:
20010Sstevel@tonic-gate 	/*
20020Sstevel@tonic-gate 	 * If a smaller page or no page at all was found,
20030Sstevel@tonic-gate 	 * grab a large page off the freelist.
20040Sstevel@tonic-gate 	 */
20050Sstevel@tonic-gate 	if (prealloc) {
20060Sstevel@tonic-gate 		ASSERT(conpp == NULL);
2007749Ssusans 		if (page_alloc_pages(anon_vp, seg, addr, NULL, ppa,
20084426Saguzovsk 		    szc, 0, pgflags) != 0) {
20090Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[7]);
20105466Skchow 			if (brkcow == 0 || szc < seg->s_szc ||
20115466Skchow 			    !anon_szcshare(amp->ahp, start_idx)) {
20120Sstevel@tonic-gate 				/*
20130Sstevel@tonic-gate 				 * If the refcnts of all anon slots are <= 1
20140Sstevel@tonic-gate 				 * they can't increase since we are holding
20150Sstevel@tonic-gate 				 * the address space's lock. So segvn can
20160Sstevel@tonic-gate 				 * safely decrease szc without risking
20170Sstevel@tonic-gate 				 * generating a cow fault for a region
20180Sstevel@tonic-gate 				 * smaller than the segment's largest page
20190Sstevel@tonic-gate 				 * size.
20200Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.getpages[8]);
20210Sstevel@tonic-gate 				return (-1);
20220Sstevel@tonic-gate 			}
20230Sstevel@tonic-gate 		docow:
20240Sstevel@tonic-gate 			/*
20250Sstevel@tonic-gate 			 * This is a cow fault. Copy away the entire large
20260Sstevel@tonic-gate 			 * page region of this segment.
20270Sstevel@tonic-gate 			 */
20280Sstevel@tonic-gate 			if (szc != seg->s_szc)
20290Sstevel@tonic-gate 				panic("anon_map_getpages: cowfault for szc %d",
20300Sstevel@tonic-gate 				    szc);
20310Sstevel@tonic-gate 			vaddr = addr;
20320Sstevel@tonic-gate 			for (pg_idx = 0, an_idx = start_idx; pg_idx < pgcnt;
20330Sstevel@tonic-gate 			    pg_idx++, an_idx++, vaddr += PAGESIZE) {
20340Sstevel@tonic-gate 				if ((ap = anon_get_ptr(amp->ahp, an_idx)) !=
20350Sstevel@tonic-gate 				    NULL) {
20360Sstevel@tonic-gate 					err = anon_getpage(&ap, &vpprot, pl,
20370Sstevel@tonic-gate 					    PAGESIZE, seg, vaddr, rw, cred);
20380Sstevel@tonic-gate 					if (err) {
20390Sstevel@tonic-gate 						for (i = 0; i < pg_idx; i++) {
20400Sstevel@tonic-gate 							if ((pp = ppa[i]) !=
20410Sstevel@tonic-gate 							    NULL)
20420Sstevel@tonic-gate 								page_unlock(pp);
20430Sstevel@tonic-gate 						}
20440Sstevel@tonic-gate 						return (err);
20450Sstevel@tonic-gate 					}
20460Sstevel@tonic-gate 					ppa[pg_idx] = pl[0];
20470Sstevel@tonic-gate 				} else {
20480Sstevel@tonic-gate 					/*
20490Sstevel@tonic-gate 					 * Since this is a cowfault we know
20500Sstevel@tonic-gate 					 * that this address space has a
20510Sstevel@tonic-gate 					 * parent or children which means
20520Sstevel@tonic-gate 					 * anon_dup_fill_holes() has initialized
20530Sstevel@tonic-gate 					 * all anon slots within a large page
20540Sstevel@tonic-gate 					 * region that had at least one anon
20550Sstevel@tonic-gate 					 * slot at the time of fork().
20560Sstevel@tonic-gate 					 */
20570Sstevel@tonic-gate 					panic("anon_map_getpages: "
20580Sstevel@tonic-gate 					    "cowfault but anon slot is empty");
20590Sstevel@tonic-gate 				}
20600Sstevel@tonic-gate 			}
20610Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[9]);
20620Sstevel@tonic-gate 			*protp = PROT_ALL;
20630Sstevel@tonic-gate 			return (anon_map_privatepages(amp, start_idx, szc, seg,
20644426Saguzovsk 			    addr, prot, ppa, vpage, anypgsz, pgflags, cred));
20650Sstevel@tonic-gate 		}
20660Sstevel@tonic-gate 	}
20670Sstevel@tonic-gate 
20680Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.getpages[10]);
20690Sstevel@tonic-gate 
20700Sstevel@tonic-gate 	an_idx = start_idx;
20710Sstevel@tonic-gate 	pg_idx = 0;
20720Sstevel@tonic-gate 	vaddr = addr;
20730Sstevel@tonic-gate 	while (pg_idx < pgcnt) {
20740Sstevel@tonic-gate 		slotcreate = 0;
20750Sstevel@tonic-gate 		if ((ap = anon_get_ptr(amp->ahp, an_idx)) == NULL) {
20760Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[11]);
20770Sstevel@tonic-gate 			/*
20780Sstevel@tonic-gate 			 * For us to have decided not to preallocate
20790Sstevel@tonic-gate 			 * would have meant that a large page was
20800Sstevel@tonic-gate 			 * found, which also means that all of the
20810Sstevel@tonic-gate 			 * anon slots for that page would have already
20820Sstevel@tonic-gate 			 * been created for us.
20830Sstevel@tonic-gate 			 */
20840Sstevel@tonic-gate 			if (prealloc == 0)
20850Sstevel@tonic-gate 				panic("anon_map_getpages: prealloc = 0");
20860Sstevel@tonic-gate 
20870Sstevel@tonic-gate 			slotcreate = 1;
20880Sstevel@tonic-gate 			ap = anon_alloc(NULL, 0);
20890Sstevel@tonic-gate 		}
20900Sstevel@tonic-gate 		swap_xlate(ap, &vp, &off);
20910Sstevel@tonic-gate 
20920Sstevel@tonic-gate 		/*
20930Sstevel@tonic-gate 		 * Now setup our preallocated page to pass down
20940Sstevel@tonic-gate 		 * to swap_getpage().
20950Sstevel@tonic-gate 		 */
20960Sstevel@tonic-gate 		if (prealloc) {
20970Sstevel@tonic-gate 			ASSERT(ppa[pg_idx]->p_szc == szc);
20980Sstevel@tonic-gate 			conpp = ppa[pg_idx];
20990Sstevel@tonic-gate 		}
21000Sstevel@tonic-gate 		ASSERT(prealloc || conpp == NULL);
21010Sstevel@tonic-gate 
21020Sstevel@tonic-gate 		/*
21030Sstevel@tonic-gate 		 * If we just created this anon slot then call
21040Sstevel@tonic-gate 		 * with S_CREATE to prevent doing IO on the page.
21050Sstevel@tonic-gate 		 * Similar to the anon_zero case.
21060Sstevel@tonic-gate 		 */
21070Sstevel@tonic-gate 		err = swap_getconpage(vp, (u_offset_t)off, PAGESIZE,
21082414Saguzovsk 		    NULL, pl, PAGESIZE, conpp, ppa_szc, &nreloc, seg, vaddr,
21090Sstevel@tonic-gate 		    slotcreate == 1 ? S_CREATE : rw, cred);
21100Sstevel@tonic-gate 
21110Sstevel@tonic-gate 		if (err) {
21122414Saguzovsk 			ASSERT(err != -2 || upsize);
21130Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[12]);
21140Sstevel@tonic-gate 			ASSERT(slotcreate == 0);
21150Sstevel@tonic-gate 			goto io_err;
21160Sstevel@tonic-gate 		}
21170Sstevel@tonic-gate 
21180Sstevel@tonic-gate 		pp = pl[0];
21190Sstevel@tonic-gate 
21202414Saguzovsk 		if (pp->p_szc < szc || (pp->p_szc > szc && upsize)) {
21210Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[13]);
21220Sstevel@tonic-gate 			ASSERT(slotcreate == 0);
21230Sstevel@tonic-gate 			ASSERT(prealloc == 0);
21240Sstevel@tonic-gate 			ASSERT(pg_idx == 0);
21250Sstevel@tonic-gate 			if (pp->p_szc > szc) {
21262414Saguzovsk 				ASSERT(upsize);
21272414Saguzovsk 				*ppa_szc = MIN(pp->p_szc, seg->s_szc);
21280Sstevel@tonic-gate 				page_unlock(pp);
21290Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.getpages[14]);
21300Sstevel@tonic-gate 				return (-2);
21310Sstevel@tonic-gate 			}
21320Sstevel@tonic-gate 			page_unlock(pp);
21330Sstevel@tonic-gate 			prealloc = 1;
21340Sstevel@tonic-gate 			goto top;
21350Sstevel@tonic-gate 		}
21360Sstevel@tonic-gate 
21370Sstevel@tonic-gate 		/*
21380Sstevel@tonic-gate 		 * If we decided to preallocate but VOP_GETPAGE
21390Sstevel@tonic-gate 		 * found a page in the system that satisfies our
21400Sstevel@tonic-gate 		 * request then free up our preallocated large page
21410Sstevel@tonic-gate 		 * and continue looping across the existing large
21420Sstevel@tonic-gate 		 * page via VOP_GETPAGE.
21430Sstevel@tonic-gate 		 */
21440Sstevel@tonic-gate 		if (prealloc && pp != ppa[pg_idx]) {
21450Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[15]);
21460Sstevel@tonic-gate 			ASSERT(slotcreate == 0);
21470Sstevel@tonic-gate 			ASSERT(pg_idx == 0);
21480Sstevel@tonic-gate 			conpp = NULL;
21490Sstevel@tonic-gate 			prealloc = 0;
21500Sstevel@tonic-gate 			page_free_pages(ppa[0]);
21510Sstevel@tonic-gate 		}
21520Sstevel@tonic-gate 
21530Sstevel@tonic-gate 		if (prealloc && nreloc > 1) {
21540Sstevel@tonic-gate 			/*
21550Sstevel@tonic-gate 			 * we have relocated out of a smaller large page.
21560Sstevel@tonic-gate 			 * skip npgs - 1 iterations and continue which will
21570Sstevel@tonic-gate 			 * increment by one the loop indices.
21580Sstevel@tonic-gate 			 */
21590Sstevel@tonic-gate 			spgcnt_t npgs = nreloc;
21600Sstevel@tonic-gate 
21610Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[16]);
21620Sstevel@tonic-gate 
21630Sstevel@tonic-gate 			ASSERT(pp == ppa[pg_idx]);
21640Sstevel@tonic-gate 			ASSERT(slotcreate == 0);
21650Sstevel@tonic-gate 			ASSERT(pg_idx + npgs <= pgcnt);
21660Sstevel@tonic-gate 			if ((*protp & PROT_WRITE) &&
21670Sstevel@tonic-gate 			    anon_share(amp->ahp, an_idx, npgs)) {
21685466Skchow 				*protp &= ~PROT_WRITE;
21690Sstevel@tonic-gate 			}
21700Sstevel@tonic-gate 			pg_idx += npgs;
21710Sstevel@tonic-gate 			an_idx += npgs;
21720Sstevel@tonic-gate 			vaddr += PAGESIZE * npgs;
21730Sstevel@tonic-gate 			continue;
21740Sstevel@tonic-gate 		}
21750Sstevel@tonic-gate 
21760Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.getpages[17]);
21770Sstevel@tonic-gate 
21780Sstevel@tonic-gate 		/*
21790Sstevel@tonic-gate 		 * Anon_zero case.
21800Sstevel@tonic-gate 		 */
21810Sstevel@tonic-gate 		if (slotcreate) {
21820Sstevel@tonic-gate 			ASSERT(prealloc);
21830Sstevel@tonic-gate 			pagezero(pp, 0, PAGESIZE);
21840Sstevel@tonic-gate 			CPU_STATS_ADD_K(vm, zfod, 1);
21850Sstevel@tonic-gate 			hat_setrefmod(pp);
21860Sstevel@tonic-gate 		}
21870Sstevel@tonic-gate 
21880Sstevel@tonic-gate 		ASSERT(prealloc == 0 || ppa[pg_idx] == pp);
21890Sstevel@tonic-gate 		ASSERT(prealloc != 0 || PAGE_SHARED(pp));
21900Sstevel@tonic-gate 		ASSERT(prealloc == 0 || PAGE_EXCL(pp));
21910Sstevel@tonic-gate 
21920Sstevel@tonic-gate 		if (pg_idx > 0 &&
21930Sstevel@tonic-gate 		    ((page_pptonum(pp) != page_pptonum(ppa[pg_idx - 1]) + 1) ||
21942414Saguzovsk 		    (pp->p_szc != ppa[pg_idx - 1]->p_szc))) {
21950Sstevel@tonic-gate 			panic("anon_map_getpages: unexpected page");
21962414Saguzovsk 		} else if (pg_idx == 0 && (page_pptonum(pp) & (pgcnt - 1))) {
21972414Saguzovsk 			panic("anon_map_getpages: unaligned page");
21982414Saguzovsk 		}
21990Sstevel@tonic-gate 
22000Sstevel@tonic-gate 		if (prealloc == 0) {
22010Sstevel@tonic-gate 			ppa[pg_idx] = pp;
22020Sstevel@tonic-gate 		}
22030Sstevel@tonic-gate 
22040Sstevel@tonic-gate 		if (ap->an_refcnt > 1) {
22050Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[18]);
22060Sstevel@tonic-gate 			*protp &= ~PROT_WRITE;
22070Sstevel@tonic-gate 		}
22080Sstevel@tonic-gate 
22090Sstevel@tonic-gate 		/*
22100Sstevel@tonic-gate 		 * If this is a new anon slot then initialize
22110Sstevel@tonic-gate 		 * the anon array entry.
22120Sstevel@tonic-gate 		 */
22130Sstevel@tonic-gate 		if (slotcreate) {
22140Sstevel@tonic-gate 			(void) anon_set_ptr(amp->ahp, an_idx, ap, ANON_SLEEP);
22150Sstevel@tonic-gate 		}
22160Sstevel@tonic-gate 		pg_idx++;
22170Sstevel@tonic-gate 		an_idx++;
22180Sstevel@tonic-gate 		vaddr += PAGESIZE;
22190Sstevel@tonic-gate 	}
22200Sstevel@tonic-gate 
22210Sstevel@tonic-gate 	/*
22220Sstevel@tonic-gate 	 * Since preallocated pages come off the freelist
22230Sstevel@tonic-gate 	 * they are locked SE_EXCL. Simply downgrade and return.
22240Sstevel@tonic-gate 	 */
22250Sstevel@tonic-gate 	if (prealloc) {
22260Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.getpages[19]);
22270Sstevel@tonic-gate 		conpp = NULL;
22280Sstevel@tonic-gate 		for (pg_idx = 0; pg_idx < pgcnt; pg_idx++) {
22290Sstevel@tonic-gate 			page_downgrade(ppa[pg_idx]);
22300Sstevel@tonic-gate 		}
22310Sstevel@tonic-gate 	}
22320Sstevel@tonic-gate 	ASSERT(conpp == NULL);
22330Sstevel@tonic-gate 
22340Sstevel@tonic-gate 	if (brkcow == 0 || (*protp & PROT_WRITE)) {
22350Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.getpages[20]);
22360Sstevel@tonic-gate 		return (0);
22370Sstevel@tonic-gate 	}
22380Sstevel@tonic-gate 
22390Sstevel@tonic-gate 	if (szc < seg->s_szc)
22400Sstevel@tonic-gate 		panic("anon_map_getpages: cowfault for szc %d", szc);
22410Sstevel@tonic-gate 
22420Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.getpages[21]);
22430Sstevel@tonic-gate 
22440Sstevel@tonic-gate 	*protp = PROT_ALL;
22450Sstevel@tonic-gate 	return (anon_map_privatepages(amp, start_idx, szc, seg, addr, prot,
22464426Saguzovsk 	    ppa, vpage, anypgsz, pgflags, cred));
22470Sstevel@tonic-gate io_err:
22480Sstevel@tonic-gate 	/*
22490Sstevel@tonic-gate 	 * We got an IO error somewhere in our large page.
22500Sstevel@tonic-gate 	 * If we were using a preallocated page then just demote
22500Sstevel@tonic-gate 	 * all the constituent pages that we've succeeded with so far
22520Sstevel@tonic-gate 	 * to PAGESIZE pages and leave them in the system
22530Sstevel@tonic-gate 	 * unlocked.
22540Sstevel@tonic-gate 	 */
22550Sstevel@tonic-gate 
22562414Saguzovsk 	ASSERT(err != -2 || ((pg_idx == 0) && upsize));
22570Sstevel@tonic-gate 
22580Sstevel@tonic-gate 	VM_STAT_COND_ADD(err > 0, anonvmstats.getpages[22]);
22590Sstevel@tonic-gate 	VM_STAT_COND_ADD(err == -1, anonvmstats.getpages[23]);
22600Sstevel@tonic-gate 	VM_STAT_COND_ADD(err == -2, anonvmstats.getpages[24]);
22610Sstevel@tonic-gate 
22620Sstevel@tonic-gate 	if (prealloc) {
22630Sstevel@tonic-gate 		conpp = NULL;
22640Sstevel@tonic-gate 		if (pg_idx > 0) {
22650Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[25]);
22660Sstevel@tonic-gate 			for (i = 0; i < pgcnt; i++) {
22670Sstevel@tonic-gate 				pp = ppa[i];
22680Sstevel@tonic-gate 				ASSERT(PAGE_EXCL(pp));
22690Sstevel@tonic-gate 				ASSERT(pp->p_szc == szc);
22700Sstevel@tonic-gate 				pp->p_szc = 0;
22710Sstevel@tonic-gate 			}
22720Sstevel@tonic-gate 			for (i = 0; i < pg_idx; i++) {
22730Sstevel@tonic-gate 				ASSERT(!hat_page_is_mapped(ppa[i]));
22740Sstevel@tonic-gate 				page_unlock(ppa[i]);
22750Sstevel@tonic-gate 			}
22760Sstevel@tonic-gate 			/*
22770Sstevel@tonic-gate 			 * Now free up the remaining unused constituent
22780Sstevel@tonic-gate 			 * pages.
22790Sstevel@tonic-gate 			 */
22800Sstevel@tonic-gate 			while (pg_idx < pgcnt) {
22810Sstevel@tonic-gate 				ASSERT(!hat_page_is_mapped(ppa[pg_idx]));
22820Sstevel@tonic-gate 				page_free(ppa[pg_idx], 0);
22830Sstevel@tonic-gate 				pg_idx++;
22840Sstevel@tonic-gate 			}
22850Sstevel@tonic-gate 		} else {
22860Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[26]);
22870Sstevel@tonic-gate 			page_free_pages(ppa[0]);
22880Sstevel@tonic-gate 		}
22890Sstevel@tonic-gate 	} else {
22900Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.getpages[27]);
22910Sstevel@tonic-gate 		ASSERT(err > 0);
22920Sstevel@tonic-gate 		for (i = 0; i < pg_idx; i++)
22930Sstevel@tonic-gate 			page_unlock(ppa[i]);
22940Sstevel@tonic-gate 	}
22950Sstevel@tonic-gate 	ASSERT(conpp == NULL);
22960Sstevel@tonic-gate 	if (err != -1)
22970Sstevel@tonic-gate 		return (err);
22980Sstevel@tonic-gate 	/*
22990Sstevel@tonic-gate 	 * We are here because we failed to relocate.
23000Sstevel@tonic-gate 	 */
23010Sstevel@tonic-gate 	ASSERT(prealloc);
23025466Skchow 	if (brkcow == 0 || szc < seg->s_szc ||
23035466Skchow 	    !anon_szcshare(amp->ahp, start_idx)) {
23040Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.getpages[28]);
23050Sstevel@tonic-gate 		return (-1);
23060Sstevel@tonic-gate 	}
23070Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.getpages[29]);
23080Sstevel@tonic-gate 	goto docow;
23090Sstevel@tonic-gate }
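
/*
 * Return contract of anon_map_getpages(), sketched as a hypothetical
 * segvn-style caller might consume it (illustrative only):
 *
 *	err = anon_map_getpages(amp, aidx, szc, seg, a, prot, &vpprot,
 *	    ppa, &ppa_szc, vpage, rw, brkcow, anypgsz, 0, cred);
 *
 *	err > 0:   an errno from the I/O path; fail the fault.
 *	err == -1: no large page could be allocated; retry the fault
 *		   with a smaller szc.
 *	err == -2: a larger page already exists; retry with ppa_szc.
 *	err == 0:  ppa[] holds the constituent pages, locked SE_SHARED.
 */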
23100Sstevel@tonic-gate 
23110Sstevel@tonic-gate 
23120Sstevel@tonic-gate /*
23130Sstevel@tonic-gate  * Turn a reference to an object or shared anon page
23140Sstevel@tonic-gate  * into a private page with a copy of the data from the
23150Sstevel@tonic-gate  * original page which is always locked by the caller.
23160Sstevel@tonic-gate  * This routine unloads the translation and unlocks the
23170Sstevel@tonic-gate  * original page, if it isn't being stolen, before returning
23180Sstevel@tonic-gate  * to the caller.
23190Sstevel@tonic-gate  *
23200Sstevel@tonic-gate  * NOTE:  The original anon slot is not freed by this routine.
23210Sstevel@tonic-gate  *	  It must be freed by the caller while holding the
23220Sstevel@tonic-gate  *	  "anon_map" lock to prevent races which can occur if
23230Sstevel@tonic-gate  *	  a process has multiple lwps in its address space.
23240Sstevel@tonic-gate  */
23250Sstevel@tonic-gate page_t *
23260Sstevel@tonic-gate anon_private(
23270Sstevel@tonic-gate 	struct anon **app,
23280Sstevel@tonic-gate 	struct seg *seg,
23290Sstevel@tonic-gate 	caddr_t addr,
23300Sstevel@tonic-gate 	uint_t	prot,
23310Sstevel@tonic-gate 	page_t *opp,
23320Sstevel@tonic-gate 	int oppflags,
23330Sstevel@tonic-gate 	struct cred *cred)
23340Sstevel@tonic-gate {
23350Sstevel@tonic-gate 	struct anon *old = *app;
23360Sstevel@tonic-gate 	struct anon *new;
23370Sstevel@tonic-gate 	page_t *pp = NULL;
23380Sstevel@tonic-gate 	struct vnode *vp;
23390Sstevel@tonic-gate 	anoff_t off;
23400Sstevel@tonic-gate 	page_t *anon_pl[1 + 1];
23410Sstevel@tonic-gate 	int err;
23420Sstevel@tonic-gate 
23430Sstevel@tonic-gate 	if (oppflags & STEAL_PAGE)
23440Sstevel@tonic-gate 		ASSERT(PAGE_EXCL(opp));
23450Sstevel@tonic-gate 	else
23460Sstevel@tonic-gate 		ASSERT(PAGE_LOCKED(opp));
23470Sstevel@tonic-gate 
23480Sstevel@tonic-gate 	CPU_STATS_ADD_K(vm, cow_fault, 1);
23490Sstevel@tonic-gate 
23500Sstevel@tonic-gate 	/* Kernel probe */
23510Sstevel@tonic-gate 	TNF_PROBE_1(anon_private, "vm pagefault", /* CSTYLED */,
23520Sstevel@tonic-gate 		tnf_opaque,	address,	addr);
23530Sstevel@tonic-gate 
23540Sstevel@tonic-gate 	*app = new = anon_alloc(NULL, 0);
23550Sstevel@tonic-gate 	swap_xlate(new, &vp, &off);
23560Sstevel@tonic-gate 
23570Sstevel@tonic-gate 	if (oppflags & STEAL_PAGE) {
23580Sstevel@tonic-gate 		page_rename(opp, vp, (u_offset_t)off);
23590Sstevel@tonic-gate 		pp = opp;
23600Sstevel@tonic-gate 		TRACE_5(TR_FAC_VM, TR_ANON_PRIVATE,
23615466Skchow 		    "anon_private:seg %p addr %x pp %p vp %p off %lx",
23625466Skchow 		    seg, addr, pp, vp, off);
23630Sstevel@tonic-gate 		hat_setmod(pp);
23640Sstevel@tonic-gate 
23650Sstevel@tonic-gate 		/* bug 4026339 */
23660Sstevel@tonic-gate 		page_downgrade(pp);
23670Sstevel@tonic-gate 		return (pp);
23680Sstevel@tonic-gate 	}
23690Sstevel@tonic-gate 
23700Sstevel@tonic-gate 	/*
23710Sstevel@tonic-gate 	 * Call the VOP_GETPAGE routine to create the page, thereby
23720Sstevel@tonic-gate 	 * enabling the vnode driver to allocate any filesystem
23730Sstevel@tonic-gate 	 * space (e.g., disk block allocation for UFS).  This also
23740Sstevel@tonic-gate 	 * prevents more than one page from being added to the
23750Sstevel@tonic-gate 	 * vnode at the same time.
23760Sstevel@tonic-gate 	 */
23770Sstevel@tonic-gate 	err = VOP_GETPAGE(vp, (u_offset_t)off, PAGESIZE, NULL,
23785331Samw 	    anon_pl, PAGESIZE, seg, addr, S_CREATE, cred, NULL);
23790Sstevel@tonic-gate 	if (err)
23800Sstevel@tonic-gate 		goto out;
23810Sstevel@tonic-gate 
23820Sstevel@tonic-gate 	pp = anon_pl[0];
23830Sstevel@tonic-gate 
23840Sstevel@tonic-gate 	/*
23850Sstevel@tonic-gate 	 * If the original page was locked, we need to move the lock
23860Sstevel@tonic-gate 	 * to the new page by transferring 'cowcnt/lckcnt' of the original
23870Sstevel@tonic-gate 	 * page to 'cowcnt/lckcnt' of the new page.
23880Sstevel@tonic-gate 	 *
23890Sstevel@tonic-gate 	 * See the statement at the beginning of segvn_lockop() and
23900Sstevel@tonic-gate 	 * comments in page_pp_useclaim() regarding the way
23910Sstevel@tonic-gate 	 * cowcnts/lckcnts are handled.
23920Sstevel@tonic-gate 	 *
23930Sstevel@tonic-gate 	 * Also availrmem must be decremented up front for a read-only mapping
23940Sstevel@tonic-gate 	 * before calling page_pp_useclaim. page_pp_useclaim will bump it back
23950Sstevel@tonic-gate 	 * if availrmem did not need to be decremented after all.
23960Sstevel@tonic-gate 	 */
23970Sstevel@tonic-gate 	if (oppflags & LOCK_PAGE) {
23980Sstevel@tonic-gate 		if ((prot & PROT_WRITE) == 0) {
23990Sstevel@tonic-gate 			mutex_enter(&freemem_lock);
24000Sstevel@tonic-gate 			if (availrmem > pages_pp_maximum) {
24010Sstevel@tonic-gate 				availrmem--;
24020Sstevel@tonic-gate 				pages_useclaim++;
24030Sstevel@tonic-gate 			} else {
24040Sstevel@tonic-gate 				mutex_exit(&freemem_lock);
24050Sstevel@tonic-gate 				goto out;
24060Sstevel@tonic-gate 			}
24070Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
24080Sstevel@tonic-gate 		}
24090Sstevel@tonic-gate 		page_pp_useclaim(opp, pp, prot & PROT_WRITE);
24100Sstevel@tonic-gate 	}
24110Sstevel@tonic-gate 
24120Sstevel@tonic-gate 	/*
24130Sstevel@tonic-gate 	 * Now copy the contents from the original page,
24140Sstevel@tonic-gate 	 * which is locked and loaded in the MMU by
24150Sstevel@tonic-gate 	 * the caller to prevent yet another page fault.
24160Sstevel@tonic-gate 	 */
24173253Smec 	/* XXX - should set mod bit in here */
24183253Smec 	if (ppcopy(opp, pp) == 0) {
24193253Smec 		/*
24203253Smec 		 * Before ppcopy could handle UE or other faults, we
24213253Smec 		 * would have panicked here, and still have no option
24223253Smec 		 * but to do so now.
24233253Smec 		 */
24243253Smec 		panic("anon_private, ppcopy failed, opp = 0x%p, pp = 0x%p",
24257632SNick.Todd@Sun.COM 		    (void *)opp, (void *)pp);
24263253Smec 	}
24270Sstevel@tonic-gate 
24280Sstevel@tonic-gate 	hat_setrefmod(pp);		/* mark as modified */
24290Sstevel@tonic-gate 
24300Sstevel@tonic-gate 	/*
24310Sstevel@tonic-gate 	 * Unload the old translation.
24320Sstevel@tonic-gate 	 */
24330Sstevel@tonic-gate 	hat_unload(seg->s_as->a_hat, addr, PAGESIZE, HAT_UNLOAD);
24340Sstevel@tonic-gate 
24350Sstevel@tonic-gate 	/*
24360Sstevel@tonic-gate 	 * Free the unmapped, unmodified original page,
24370Sstevel@tonic-gate 	 * or else release the lock on the original page;
24380Sstevel@tonic-gate 	 * otherwise the process will sleep forever in
24390Sstevel@tonic-gate 	 * anon_decref() waiting for the "exclusive" lock
24400Sstevel@tonic-gate 	 * on the page.
24410Sstevel@tonic-gate 	 */
24420Sstevel@tonic-gate 	(void) page_release(opp, 1);
24430Sstevel@tonic-gate 
24440Sstevel@tonic-gate 	/*
24450Sstevel@tonic-gate 	 * We are done with page creation, so downgrade the new
24460Sstevel@tonic-gate 	 * page's selock to shared; this helps when multiple
24470Sstevel@tonic-gate 	 * as_fault(...SOFTLOCK...) calls are made against the
24480Sstevel@tonic-gate 	 * same page (e.g., by aio).
24490Sstevel@tonic-gate 	 */
24500Sstevel@tonic-gate 	page_downgrade(pp);
24510Sstevel@tonic-gate 
24520Sstevel@tonic-gate 	/*
24530Sstevel@tonic-gate 	 * NOTE:  The original anon slot must be freed by the
24540Sstevel@tonic-gate 	 * caller while holding the "anon_map" lock, if we
24550Sstevel@tonic-gate 	 * copied away from an anonymous page.
24560Sstevel@tonic-gate 	 */
24570Sstevel@tonic-gate 	return (pp);
24580Sstevel@tonic-gate 
24590Sstevel@tonic-gate out:
24600Sstevel@tonic-gate 	*app = old;
24610Sstevel@tonic-gate 	if (pp)
24620Sstevel@tonic-gate 		page_unlock(pp);
24630Sstevel@tonic-gate 	anon_decref(new);
24640Sstevel@tonic-gate 	page_unlock(opp);
24650Sstevel@tonic-gate 	return ((page_t *)NULL);
24660Sstevel@tonic-gate }
24670Sstevel@tonic-gate 
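/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): one way a segment driver might drive anon_private() on a
 * copy-on-write fault.  The helper name cow_fault_example() and the
 * ANON_EXAMPLES guard are hypothetical; only the anon_private()
 * contract described in the comments is taken from the routine above.
 */
#ifdef ANON_EXAMPLES
static int
cow_fault_example(struct anon **app, struct seg *seg, caddr_t addr,
    page_t *opp, struct cred *cred)
{
	page_t *pp;

	/* The original page must be held at least SE_SHARED. */
	ASSERT(PAGE_LOCKED(opp));

	/* Copy the shared page into a fresh private anon page. */
	pp = anon_private(app, seg, addr, PROT_READ | PROT_WRITE,
	    opp, 0, cred);
	if (pp == NULL)
		return (ENOMEM);	/* opp was unlocked for us */

	/*
	 * pp comes back share-locked.  If we copied away from an
	 * anonymous page, the old anon slot must still be freed by
	 * the caller while holding the anon_map lock (see the NOTE
	 * in the header comment of anon_private()).
	 */
	page_unlock(pp);
	return (0);
}
#endif	/* ANON_EXAMPLES */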
24680Sstevel@tonic-gate int
24690Sstevel@tonic-gate anon_map_privatepages(
24700Sstevel@tonic-gate 	struct anon_map *amp,
24710Sstevel@tonic-gate 	ulong_t	start_idx,
24720Sstevel@tonic-gate 	uint_t	szc,
24730Sstevel@tonic-gate 	struct seg *seg,
24740Sstevel@tonic-gate 	caddr_t addr,
24750Sstevel@tonic-gate 	uint_t	prot,
24760Sstevel@tonic-gate 	page_t	*ppa[],
24770Sstevel@tonic-gate 	struct vpage vpage[],
24780Sstevel@tonic-gate 	int anypgsz,
24794426Saguzovsk 	int pgflags,
24800Sstevel@tonic-gate 	struct cred *cred)
24810Sstevel@tonic-gate {
24820Sstevel@tonic-gate 	pgcnt_t		pgcnt;
24830Sstevel@tonic-gate 	struct vnode	*vp;
24840Sstevel@tonic-gate 	anoff_t		off;
24850Sstevel@tonic-gate 	page_t		*pl[2], *conpp = NULL;
24860Sstevel@tonic-gate 	int		err;
24870Sstevel@tonic-gate 	int		prealloc = 1;
24880Sstevel@tonic-gate 	struct anon	*ap, *oldap;
24890Sstevel@tonic-gate 	caddr_t		vaddr;
24900Sstevel@tonic-gate 	page_t		*pplist, *pp;
24910Sstevel@tonic-gate 	ulong_t		pg_idx, an_idx;
24920Sstevel@tonic-gate 	spgcnt_t	nreloc = 0;
24930Sstevel@tonic-gate 	int		pagelock = 0;
24940Sstevel@tonic-gate 	kmutex_t	*ahmpages = NULL;
24950Sstevel@tonic-gate #ifdef DEBUG
24960Sstevel@tonic-gate 	int		refcnt;
24970Sstevel@tonic-gate #endif
24980Sstevel@tonic-gate 
24990Sstevel@tonic-gate 	ASSERT(szc != 0);
25000Sstevel@tonic-gate 	ASSERT(szc == seg->s_szc);
25010Sstevel@tonic-gate 
25020Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.privatepages[0]);
25030Sstevel@tonic-gate 
25040Sstevel@tonic-gate 	pgcnt = page_get_pagecnt(szc);
25050Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
25060Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(start_idx, pgcnt));
25070Sstevel@tonic-gate 
25080Sstevel@tonic-gate 	ASSERT(amp != NULL);
25090Sstevel@tonic-gate 	ap = anon_get_ptr(amp->ahp, start_idx);
25100Sstevel@tonic-gate 	ASSERT(ap == NULL || ap->an_refcnt >= 1);
25110Sstevel@tonic-gate 
25120Sstevel@tonic-gate 	VM_STAT_COND_ADD(ap == NULL, anonvmstats.privatepages[1]);
25130Sstevel@tonic-gate 
25140Sstevel@tonic-gate 	/*
25150Sstevel@tonic-gate 	 * Now try to allocate the large page. If we fail, then just
25160Sstevel@tonic-gate 	 * let VOP_GETPAGE give us PAGESIZE pages. Normally we let
25170Sstevel@tonic-gate 	 * the caller make this decision but to avoid added complexity
25180Sstevel@tonic-gate 	 * it's simpler to handle that case here.
25190Sstevel@tonic-gate 	 */
25200Sstevel@tonic-gate 	if (anypgsz == -1) {
25210Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.privatepages[2]);
25220Sstevel@tonic-gate 		prealloc = 0;
2523749Ssusans 	} else if (page_alloc_pages(anon_vp, seg, addr, &pplist, NULL, szc,
25244426Saguzovsk 	    anypgsz, pgflags) != 0) {
25250Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.privatepages[3]);
25260Sstevel@tonic-gate 		prealloc = 0;
25270Sstevel@tonic-gate 	}
25280Sstevel@tonic-gate 
25290Sstevel@tonic-gate 	/*
25300Sstevel@tonic-gate 	 * make the decrement of all refcnts of all
25310Sstevel@tonic-gate 	 * anon slots of a large page appear atomic by
25320Sstevel@tonic-gate 	 * getting an anonpages_hash_lock for the
25330Sstevel@tonic-gate 	 * first anon slot of a large page.
25340Sstevel@tonic-gate 	 */
25350Sstevel@tonic-gate 	if (ap != NULL) {
2536*12173SMichael.Corcoran@Sun.COM 		ahmpages = APH_MUTEX(ap->an_vp, ap->an_off);
25370Sstevel@tonic-gate 		mutex_enter(ahmpages);
25380Sstevel@tonic-gate 		if (ap->an_refcnt == 1) {
25390Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.privatepages[4]);
25400Sstevel@tonic-gate 			ASSERT(!anon_share(amp->ahp, start_idx, pgcnt));
25410Sstevel@tonic-gate 			mutex_exit(ahmpages);
25420Sstevel@tonic-gate 
25430Sstevel@tonic-gate 			if (prealloc) {
25440Sstevel@tonic-gate 				page_free_replacement_page(pplist);
25450Sstevel@tonic-gate 				page_create_putback(pgcnt);
25460Sstevel@tonic-gate 			}
25470Sstevel@tonic-gate 			ASSERT(ppa[0]->p_szc <= szc);
25480Sstevel@tonic-gate 			if (ppa[0]->p_szc == szc) {
25490Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.privatepages[5]);
25500Sstevel@tonic-gate 				return (0);
25510Sstevel@tonic-gate 			}
25520Sstevel@tonic-gate 			for (pg_idx = 0; pg_idx < pgcnt; pg_idx++) {
25530Sstevel@tonic-gate 				ASSERT(ppa[pg_idx] != NULL);
25540Sstevel@tonic-gate 				page_unlock(ppa[pg_idx]);
25550Sstevel@tonic-gate 			}
25560Sstevel@tonic-gate 			return (-1);
25570Sstevel@tonic-gate 		}
25580Sstevel@tonic-gate 	}
25590Sstevel@tonic-gate 
25600Sstevel@tonic-gate 	/*
25610Sstevel@tonic-gate 	 * If we are passed in the vpage array and this is
25620Sstevel@tonic-gate 	 * not PROT_WRITE then we need to decrement availrmem
25630Sstevel@tonic-gate 	 * up front before we try anything. If we need to and
25640Sstevel@tonic-gate 	 * can't decrement availrmem then it's better to fail now
25650Sstevel@tonic-gate 	 * than in the middle of processing the new large page.
25660Sstevel@tonic-gate 	 * page_pp_useclaim() on behalf of each constituent page
25670Sstevel@tonic-gate 	 * below will adjust availrmem back for the cases not needed.
25680Sstevel@tonic-gate 	 */
25690Sstevel@tonic-gate 	if (vpage != NULL && (prot & PROT_WRITE) == 0) {
25700Sstevel@tonic-gate 		for (pg_idx = 0; pg_idx < pgcnt; pg_idx++) {
25710Sstevel@tonic-gate 			if (VPP_ISPPLOCK(&vpage[pg_idx])) {
25720Sstevel@tonic-gate 				pagelock = 1;
25730Sstevel@tonic-gate 				break;
25740Sstevel@tonic-gate 			}
25750Sstevel@tonic-gate 		}
25760Sstevel@tonic-gate 		if (pagelock) {
25770Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.privatepages[6]);
25780Sstevel@tonic-gate 			mutex_enter(&freemem_lock);
25790Sstevel@tonic-gate 			if (availrmem >= pages_pp_maximum + pgcnt) {
25800Sstevel@tonic-gate 				availrmem -= pgcnt;
25810Sstevel@tonic-gate 				pages_useclaim += pgcnt;
25820Sstevel@tonic-gate 			} else {
25830Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.privatepages[7]);
25840Sstevel@tonic-gate 				mutex_exit(&freemem_lock);
25850Sstevel@tonic-gate 				if (ahmpages != NULL) {
25860Sstevel@tonic-gate 					mutex_exit(ahmpages);
25870Sstevel@tonic-gate 				}
25880Sstevel@tonic-gate 				if (prealloc) {
25890Sstevel@tonic-gate 					page_free_replacement_page(pplist);
25900Sstevel@tonic-gate 					page_create_putback(pgcnt);
25910Sstevel@tonic-gate 				}
25920Sstevel@tonic-gate 				for (pg_idx = 0; pg_idx < pgcnt; pg_idx++)
25930Sstevel@tonic-gate 					if (ppa[pg_idx] != NULL)
25940Sstevel@tonic-gate 						page_unlock(ppa[pg_idx]);
25950Sstevel@tonic-gate 				return (ENOMEM);
25960Sstevel@tonic-gate 			}
25970Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
25980Sstevel@tonic-gate 		}
25990Sstevel@tonic-gate 	}
26000Sstevel@tonic-gate 
26010Sstevel@tonic-gate 	CPU_STATS_ADD_K(vm, cow_fault, pgcnt);
26020Sstevel@tonic-gate 
26030Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.privatepages[8]);
26040Sstevel@tonic-gate 
26050Sstevel@tonic-gate 	an_idx = start_idx;
26060Sstevel@tonic-gate 	pg_idx = 0;
26070Sstevel@tonic-gate 	vaddr = addr;
26080Sstevel@tonic-gate 	for (; pg_idx < pgcnt; pg_idx++, an_idx++, vaddr += PAGESIZE) {
26090Sstevel@tonic-gate 		ASSERT(ppa[pg_idx] != NULL);
26100Sstevel@tonic-gate 		oldap = anon_get_ptr(amp->ahp, an_idx);
26110Sstevel@tonic-gate 		ASSERT(ahmpages != NULL || oldap == NULL);
26120Sstevel@tonic-gate 		ASSERT(ahmpages == NULL || oldap != NULL);
26130Sstevel@tonic-gate 		ASSERT(ahmpages == NULL || oldap->an_refcnt > 1);
26140Sstevel@tonic-gate 		ASSERT(ahmpages == NULL || pg_idx != 0 ||
26150Sstevel@tonic-gate 		    (refcnt = oldap->an_refcnt));
26160Sstevel@tonic-gate 		ASSERT(ahmpages == NULL || pg_idx == 0 ||
26170Sstevel@tonic-gate 		    refcnt == oldap->an_refcnt);
26180Sstevel@tonic-gate 
26190Sstevel@tonic-gate 		ap = anon_alloc(NULL, 0);
26200Sstevel@tonic-gate 
26210Sstevel@tonic-gate 		swap_xlate(ap, &vp, &off);
26220Sstevel@tonic-gate 
26230Sstevel@tonic-gate 		/*
26240Sstevel@tonic-gate 		 * Now set up our preallocated page to pass down to
26250Sstevel@tonic-gate 		 * swap_getpage().
26260Sstevel@tonic-gate 		 */
26270Sstevel@tonic-gate 		if (prealloc) {
26280Sstevel@tonic-gate 			pp = pplist;
26290Sstevel@tonic-gate 			page_sub(&pplist, pp);
26300Sstevel@tonic-gate 			conpp = pp;
26310Sstevel@tonic-gate 		}
26320Sstevel@tonic-gate 
26330Sstevel@tonic-gate 		err = swap_getconpage(vp, (u_offset_t)off, PAGESIZE, NULL, pl,
26345466Skchow 		    PAGESIZE, conpp, NULL, &nreloc, seg, vaddr,
26355466Skchow 		    S_CREATE, cred);
26360Sstevel@tonic-gate 
26370Sstevel@tonic-gate 		/*
26380Sstevel@tonic-gate 		 * Impossible to fail since this is S_CREATE.
26390Sstevel@tonic-gate 		 */
26400Sstevel@tonic-gate 		if (err)
26410Sstevel@tonic-gate 			panic("anon_map_privatepages: VOP_GETPAGE failed");
26420Sstevel@tonic-gate 
26430Sstevel@tonic-gate 		ASSERT(prealloc ? pp == pl[0] : pl[0]->p_szc == 0);
26440Sstevel@tonic-gate 		ASSERT(prealloc == 0 || nreloc == 1);
26450Sstevel@tonic-gate 
26460Sstevel@tonic-gate 		pp = pl[0];
26470Sstevel@tonic-gate 
26480Sstevel@tonic-gate 		/*
26490Sstevel@tonic-gate 		 * If the original page was locked, we need to move
26500Sstevel@tonic-gate 		 * the lock to the new page by transferring
26510Sstevel@tonic-gate 		 * 'cowcnt/lckcnt' of the original page to 'cowcnt/lckcnt'
26520Sstevel@tonic-gate 		 * of the new page. pg_idx can be used to index
26530Sstevel@tonic-gate 		 * into the vpage array since the caller will guarantee
26540Sstevel@tonic-gate 		 * that the vpage struct passed in corresponds to addr
26550Sstevel@tonic-gate 		 * and forward.
26560Sstevel@tonic-gate 		 */
26570Sstevel@tonic-gate 		if (vpage != NULL && VPP_ISPPLOCK(&vpage[pg_idx])) {
26580Sstevel@tonic-gate 			page_pp_useclaim(ppa[pg_idx], pp, prot & PROT_WRITE);
26590Sstevel@tonic-gate 		} else if (pagelock) {
26600Sstevel@tonic-gate 			mutex_enter(&freemem_lock);
26610Sstevel@tonic-gate 			availrmem++;
26620Sstevel@tonic-gate 			pages_useclaim--;
26630Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
26640Sstevel@tonic-gate 		}
26650Sstevel@tonic-gate 
26660Sstevel@tonic-gate 		/*
26670Sstevel@tonic-gate 		 * Now copy the contents from the original page.
26680Sstevel@tonic-gate 		 */
26693253Smec 		if (ppcopy(ppa[pg_idx], pp) == 0) {
26703253Smec 			/*
26713253Smec 			 * Before ppcopy could handle UE or other faults, we
26723253Smec 			 * would have panicked here, and still have no option
26733253Smec 			 * but to do so now.
26743253Smec 			 */
26753253Smec 			panic("anon_map_privatepages, ppcopy failed");
26763253Smec 		}
26770Sstevel@tonic-gate 
26780Sstevel@tonic-gate 		hat_setrefmod(pp);		/* mark as modified */
26790Sstevel@tonic-gate 
26800Sstevel@tonic-gate 		/*
26810Sstevel@tonic-gate 		 * Release the lock on the original page,
26820Sstevel@tonic-gate 		 * decrement the old slot, and downgrade the lock
26830Sstevel@tonic-gate 		 * on the new copy.
26840Sstevel@tonic-gate 		 */
26850Sstevel@tonic-gate 		page_unlock(ppa[pg_idx]);
26860Sstevel@tonic-gate 
26870Sstevel@tonic-gate 		if (!prealloc)
26880Sstevel@tonic-gate 			page_downgrade(pp);
26890Sstevel@tonic-gate 
26900Sstevel@tonic-gate 		ppa[pg_idx] = pp;
26910Sstevel@tonic-gate 
26920Sstevel@tonic-gate 		/*
26930Sstevel@tonic-gate 		 * Now reflect the copy in the new anon array.
26940Sstevel@tonic-gate 		 */
26950Sstevel@tonic-gate 		ASSERT(ahmpages == NULL || oldap->an_refcnt > 1);
26960Sstevel@tonic-gate 		if (oldap != NULL)
26970Sstevel@tonic-gate 			anon_decref(oldap);
26980Sstevel@tonic-gate 		(void) anon_set_ptr(amp->ahp, an_idx, ap, ANON_SLEEP);
26990Sstevel@tonic-gate 	}
27006285Speterte 
27016285Speterte 	/*
27026285Speterte 	 * Unload the old large page translation.
27036285Speterte 	 */
27046285Speterte 	hat_unload(seg->s_as->a_hat, addr, pgcnt << PAGESHIFT, HAT_UNLOAD);
27056285Speterte 
27060Sstevel@tonic-gate 	if (ahmpages != NULL) {
27070Sstevel@tonic-gate 		mutex_exit(ahmpages);
27080Sstevel@tonic-gate 	}
27090Sstevel@tonic-gate 	ASSERT(prealloc == 0 || pplist == NULL);
27100Sstevel@tonic-gate 	if (prealloc) {
27110Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.privatepages[9]);
27120Sstevel@tonic-gate 		for (pg_idx = 0; pg_idx < pgcnt; pg_idx++) {
27130Sstevel@tonic-gate 			page_downgrade(ppa[pg_idx]);
27140Sstevel@tonic-gate 		}
27150Sstevel@tonic-gate 	}
27160Sstevel@tonic-gate 
27170Sstevel@tonic-gate 	return (0);
27180Sstevel@tonic-gate }
27190Sstevel@tonic-gate 
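/*
 * Editorial note (added): as anon_map_demotepages() below illustrates,
 * callers treat a positive return from anon_map_privatepages() as an
 * errno (fatal), 0 as "the whole large page was privatized", and -1 as
 * "the pages are private but could not be kept as one large page", in
 * which case the caller retries with a fresh demotion.
 */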
27200Sstevel@tonic-gate /*
27210Sstevel@tonic-gate  * Allocate a private zero-filled anon page.
27220Sstevel@tonic-gate  */
27230Sstevel@tonic-gate page_t *
27240Sstevel@tonic-gate anon_zero(struct seg *seg, caddr_t addr, struct anon **app, struct cred *cred)
27250Sstevel@tonic-gate {
27260Sstevel@tonic-gate 	struct anon *ap;
27270Sstevel@tonic-gate 	page_t *pp;
27280Sstevel@tonic-gate 	struct vnode *vp;
27290Sstevel@tonic-gate 	anoff_t off;
27300Sstevel@tonic-gate 	page_t *anon_pl[1 + 1];
27310Sstevel@tonic-gate 	int err;
27320Sstevel@tonic-gate 
27330Sstevel@tonic-gate 	/* Kernel probe */
27340Sstevel@tonic-gate 	TNF_PROBE_1(anon_zero, "vm pagefault", /* CSTYLED */,
27350Sstevel@tonic-gate 		tnf_opaque,	address,	addr);
27360Sstevel@tonic-gate 
27370Sstevel@tonic-gate 	*app = ap = anon_alloc(NULL, 0);
27380Sstevel@tonic-gate 	swap_xlate(ap, &vp, &off);
27390Sstevel@tonic-gate 
27400Sstevel@tonic-gate 	/*
27410Sstevel@tonic-gate 	 * Call the VOP_GETPAGE routine to create the page, thereby
27420Sstevel@tonic-gate 	 * enabling the vnode driver to allocate any filesystem
27430Sstevel@tonic-gate 	 * dependent structures (e.g., disk block allocation for UFS).
27440Sstevel@tonic-gate 	 * This also prevents more than one page from being added to
27450Sstevel@tonic-gate 	 * the vnode at the same time since it is locked.
27460Sstevel@tonic-gate 	 */
27470Sstevel@tonic-gate 	err = VOP_GETPAGE(vp, off, PAGESIZE, NULL,
27485331Samw 	    anon_pl, PAGESIZE, seg, addr, S_CREATE, cred, NULL);
27490Sstevel@tonic-gate 	if (err) {
27500Sstevel@tonic-gate 		*app = NULL;
27510Sstevel@tonic-gate 		anon_decref(ap);
27520Sstevel@tonic-gate 		return (NULL);
27530Sstevel@tonic-gate 	}
27540Sstevel@tonic-gate 	pp = anon_pl[0];
27550Sstevel@tonic-gate 
27560Sstevel@tonic-gate 	pagezero(pp, 0, PAGESIZE);	/* XXX - should set mod bit */
27570Sstevel@tonic-gate 	page_downgrade(pp);
27580Sstevel@tonic-gate 	CPU_STATS_ADD_K(vm, zfod, 1);
27590Sstevel@tonic-gate 	hat_setrefmod(pp);	/* mark as modified so pageout writes back */
27600Sstevel@tonic-gate 	return (pp);
27610Sstevel@tonic-gate }
27620Sstevel@tonic-gate 
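/*
 * Illustrative sketch (editorial addition): installing a zero-filled
 * anon page for a ZFOD-style fault.  The helper name, index handling
 * and error policy are assumptions for the example; the anon_zero()
 * and anon_set_ptr() calls follow the routine above.
 */
#ifdef ANON_EXAMPLES
static int
zfod_example(struct anon_map *amp, ulong_t an_idx, struct seg *seg,
    caddr_t addr, struct cred *cred)
{
	struct anon *ap;
	page_t *pp;

	pp = anon_zero(seg, addr, &ap, cred);
	if (pp == NULL)
		return (ENOMEM);	/* anon slot already cleaned up */

	/* Publish the new slot; the page comes back share-locked. */
	(void) anon_set_ptr(amp->ahp, an_idx, ap, ANON_SLEEP);
	page_unlock(pp);
	return (0);
}
#endif	/* ANON_EXAMPLES */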
27630Sstevel@tonic-gate 
27640Sstevel@tonic-gate /*
27650Sstevel@tonic-gate  * Allocate an array of private zero-filled anon pages for empty slots,
27660Sstevel@tonic-gate  * and keep the existing pages for non-empty slots, within the given range.
27670Sstevel@tonic-gate  *
27680Sstevel@tonic-gate  * NOTE: This routine will try to use large pages
27690Sstevel@tonic-gate  *	if available and supported by the underlying platform.
27700Sstevel@tonic-gate  */
27710Sstevel@tonic-gate int
27720Sstevel@tonic-gate anon_map_createpages(
27730Sstevel@tonic-gate 	struct anon_map *amp,
27740Sstevel@tonic-gate 	ulong_t start_index,
27750Sstevel@tonic-gate 	size_t len,
27760Sstevel@tonic-gate 	page_t *ppa[],
27770Sstevel@tonic-gate 	struct seg *seg,
27780Sstevel@tonic-gate 	caddr_t addr,
27790Sstevel@tonic-gate 	enum seg_rw rw,
27800Sstevel@tonic-gate 	struct cred *cred)
27810Sstevel@tonic-gate {
27820Sstevel@tonic-gate 
27830Sstevel@tonic-gate 	struct anon	*ap;
27840Sstevel@tonic-gate 	struct vnode	*ap_vp;
27850Sstevel@tonic-gate 	page_t		*pp, *pplist, *anon_pl[1 + 1], *conpp = NULL;
27860Sstevel@tonic-gate 	int		err = 0;
27870Sstevel@tonic-gate 	ulong_t		p_index, index;
27880Sstevel@tonic-gate 	pgcnt_t		npgs, pg_cnt;
27890Sstevel@tonic-gate 	spgcnt_t	nreloc = 0;
27900Sstevel@tonic-gate 	uint_t		l_szc, szc, prot;
27910Sstevel@tonic-gate 	anoff_t		ap_off;
27920Sstevel@tonic-gate 	size_t		pgsz;
27930Sstevel@tonic-gate 	lgrp_t		*lgrp;
27944270Ssusans 	kmutex_t	*ahm;
27950Sstevel@tonic-gate 
27960Sstevel@tonic-gate 	/*
27970Sstevel@tonic-gate 	 * XXX For now only handle S_CREATE.
27980Sstevel@tonic-gate 	 */
27990Sstevel@tonic-gate 	ASSERT(rw == S_CREATE);
28000Sstevel@tonic-gate 
28010Sstevel@tonic-gate 	index	= start_index;
28020Sstevel@tonic-gate 	p_index	= 0;
28030Sstevel@tonic-gate 	npgs = btopr(len);
28040Sstevel@tonic-gate 
28050Sstevel@tonic-gate 	/*
28060Sstevel@tonic-gate 	 * If this platform supports multiple page sizes
28070Sstevel@tonic-gate 	 * then try to allocate directly from the free
28080Sstevel@tonic-gate 	 * list for pages larger than PAGESIZE.
28090Sstevel@tonic-gate 	 *
28100Sstevel@tonic-gate 	 * NOTE: When we have page_create_ru we can stop
28110Sstevel@tonic-gate 	 *	directly allocating from the freelist.
28120Sstevel@tonic-gate 	 */
28130Sstevel@tonic-gate 	l_szc  = seg->s_szc;
28140Sstevel@tonic-gate 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
28150Sstevel@tonic-gate 	while (npgs) {
28160Sstevel@tonic-gate 
28170Sstevel@tonic-gate 		/*
28180Sstevel@tonic-gate 		 * If the anon slot already exists
28190Sstevel@tonic-gate 		 *   (meaning the page has been created),
28200Sstevel@tonic-gate 		 * then 1) look up the page,
28210Sstevel@tonic-gate 		 *      2) if the page is still in memory, get it,
28220Sstevel@tonic-gate 		 *      3) if not, create a page and
28230Sstevel@tonic-gate 		 *	   page it in from the physical swap device.
28240Sstevel@tonic-gate 		 * All of this is done in anon_getpage().
28250Sstevel@tonic-gate 		 */
28260Sstevel@tonic-gate 		ap = anon_get_ptr(amp->ahp, index);
28270Sstevel@tonic-gate 		if (ap) {
28280Sstevel@tonic-gate 			err = anon_getpage(&ap, &prot, anon_pl, PAGESIZE,
28290Sstevel@tonic-gate 			    seg, addr, S_READ, cred);
28300Sstevel@tonic-gate 			if (err) {
28310Sstevel@tonic-gate 				ANON_LOCK_EXIT(&amp->a_rwlock);
28320Sstevel@tonic-gate 				panic("anon_map_createpages: anon_getpage");
28330Sstevel@tonic-gate 			}
28340Sstevel@tonic-gate 			pp = anon_pl[0];
28350Sstevel@tonic-gate 			ppa[p_index++] = pp;
28360Sstevel@tonic-gate 
28374270Ssusans 			/*
28384270Ssusans 			 * an_pvp can become non-NULL if the SysV page was
28394270Ssusans 			 * paged out before ISM was attached to this SysV
28404270Ssusans 			 * shared memory segment, so free the swap slot if needed.
28414270Ssusans 			 */
28424270Ssusans 			if (ap->an_pvp != NULL) {
28434270Ssusans 				page_io_lock(pp);
2844*12173SMichael.Corcoran@Sun.COM 				ahm = AH_MUTEX(ap->an_vp, ap->an_off);
28454270Ssusans 				mutex_enter(ahm);
28464270Ssusans 				if (ap->an_pvp != NULL) {
28474270Ssusans 					swap_phys_free(ap->an_pvp,
28484270Ssusans 					    ap->an_poff, PAGESIZE);
28494270Ssusans 					ap->an_pvp = NULL;
28504270Ssusans 					ap->an_poff = 0;
28514270Ssusans 					mutex_exit(ahm);
28524270Ssusans 					hat_setmod(pp);
28534270Ssusans 				} else {
28544270Ssusans 					mutex_exit(ahm);
28554270Ssusans 				}
28564270Ssusans 				page_io_unlock(pp);
28574270Ssusans 			}
28584270Ssusans 
28590Sstevel@tonic-gate 			addr += PAGESIZE;
28600Sstevel@tonic-gate 			index++;
28610Sstevel@tonic-gate 			npgs--;
28620Sstevel@tonic-gate 			continue;
28630Sstevel@tonic-gate 		}
28640Sstevel@tonic-gate 		/*
28650Sstevel@tonic-gate 		 * Now try to allocate the largest page possible
28660Sstevel@tonic-gate 		 * for the current address and range.
28670Sstevel@tonic-gate 		 * Keep dropping down in page size until:
28680Sstevel@tonic-gate 		 *
28690Sstevel@tonic-gate 		 *	1) Properly aligned.
28700Sstevel@tonic-gate 		 *	2) Does not overlap existing anon pages.
28710Sstevel@tonic-gate 		 *	3) Fits in the remaining range.
28720Sstevel@tonic-gate 		 *	4) Able to allocate one.
28730Sstevel@tonic-gate 		 *
28740Sstevel@tonic-gate 		 * NOTE: XXX When page_create_ru is completed this code
28750Sstevel@tonic-gate 		 *	 will change.
28760Sstevel@tonic-gate 		 */
28770Sstevel@tonic-gate 		szc    = l_szc;
28780Sstevel@tonic-gate 		pplist = NULL;
28790Sstevel@tonic-gate 		pg_cnt = 0;
28800Sstevel@tonic-gate 		while (szc) {
28810Sstevel@tonic-gate 			pgsz	= page_get_pagesize(szc);
28820Sstevel@tonic-gate 			pg_cnt	= pgsz >> PAGESHIFT;
28830Sstevel@tonic-gate 			if (IS_P2ALIGNED(addr, pgsz) && pg_cnt <= npgs &&
28845466Skchow 			    anon_pages(amp->ahp, index, pg_cnt) == 0) {
28850Sstevel@tonic-gate 				/*
28860Sstevel@tonic-gate 				 * XXX
28870Sstevel@tonic-gate 				 * Since we are faking page_create()
28880Sstevel@tonic-gate 				 * we also need to do the freemem and
28890Sstevel@tonic-gate 				 * pcf accounting.
28900Sstevel@tonic-gate 				 */
28910Sstevel@tonic-gate 				(void) page_create_wait(pg_cnt, PG_WAIT);
28920Sstevel@tonic-gate 
28930Sstevel@tonic-gate 				/*
28940Sstevel@tonic-gate 				 * Get lgroup to allocate next page of shared
28950Sstevel@tonic-gate 				 * memory from and use it to specify where to
28960Sstevel@tonic-gate 				 * allocate the physical memory
28970Sstevel@tonic-gate 				 */
28980Sstevel@tonic-gate 				lgrp = lgrp_mem_choose(seg, addr, pgsz);
28990Sstevel@tonic-gate 
29000Sstevel@tonic-gate 				pplist = page_get_freelist(
2901749Ssusans 				    anon_vp, (u_offset_t)0, seg,
29020Sstevel@tonic-gate 				    addr, pgsz, 0, lgrp);
29030Sstevel@tonic-gate 
29040Sstevel@tonic-gate 				if (pplist == NULL) {
29050Sstevel@tonic-gate 					page_create_putback(pg_cnt);
29060Sstevel@tonic-gate 				}
29070Sstevel@tonic-gate 
29080Sstevel@tonic-gate 				/*
29090Sstevel@tonic-gate 				 * If a request for a page of size
29100Sstevel@tonic-gate 				 * larger than PAGESIZE failed
29110Sstevel@tonic-gate 				 * then don't try that size anymore.
29120Sstevel@tonic-gate 				 */
29130Sstevel@tonic-gate 				if (pplist == NULL) {
29140Sstevel@tonic-gate 					l_szc = szc - 1;
29150Sstevel@tonic-gate 				} else {
29160Sstevel@tonic-gate 					break;
29170Sstevel@tonic-gate 				}
29180Sstevel@tonic-gate 			}
29190Sstevel@tonic-gate 			szc--;
29200Sstevel@tonic-gate 		}
29210Sstevel@tonic-gate 
29220Sstevel@tonic-gate 		/*
29230Sstevel@tonic-gate 		 * If just using PAGESIZE pages then don't
29240Sstevel@tonic-gate 		 * directly allocate from the free list.
29250Sstevel@tonic-gate 		 */
29260Sstevel@tonic-gate 		if (pplist == NULL) {
29270Sstevel@tonic-gate 			ASSERT(szc == 0);
29280Sstevel@tonic-gate 			pp = anon_zero(seg, addr, &ap, cred);
29290Sstevel@tonic-gate 			if (pp == NULL) {
29300Sstevel@tonic-gate 				ANON_LOCK_EXIT(&amp->a_rwlock);
29310Sstevel@tonic-gate 				panic("anon_map_createpages: anon_zero");
29320Sstevel@tonic-gate 			}
29330Sstevel@tonic-gate 			ppa[p_index++] = pp;
29340Sstevel@tonic-gate 
29350Sstevel@tonic-gate 			ASSERT(anon_get_ptr(amp->ahp, index) == NULL);
29360Sstevel@tonic-gate 			(void) anon_set_ptr(amp->ahp, index, ap, ANON_SLEEP);
29370Sstevel@tonic-gate 
29380Sstevel@tonic-gate 			addr += PAGESIZE;
29390Sstevel@tonic-gate 			index++;
29400Sstevel@tonic-gate 			npgs--;
29410Sstevel@tonic-gate 			continue;
29420Sstevel@tonic-gate 		}
29430Sstevel@tonic-gate 
29440Sstevel@tonic-gate 		/*
29450Sstevel@tonic-gate 		 * pplist is a list of pg_cnt PAGESIZE pages.
29460Sstevel@tonic-gate 		 * These pages are locked SE_EXCL since they
29470Sstevel@tonic-gate 		 * came directly off the free list.
29480Sstevel@tonic-gate 		 */
29490Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(pg_cnt, pg_cnt));
29500Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(index, pg_cnt));
29510Sstevel@tonic-gate 		ASSERT(conpp == NULL);
29520Sstevel@tonic-gate 		while (pg_cnt--) {
29530Sstevel@tonic-gate 
29540Sstevel@tonic-gate 			ap = anon_alloc(NULL, 0);
29550Sstevel@tonic-gate 			swap_xlate(ap, &ap_vp, &ap_off);
29560Sstevel@tonic-gate 
29570Sstevel@tonic-gate 			ASSERT(pplist != NULL);
29580Sstevel@tonic-gate 			pp = pplist;
29590Sstevel@tonic-gate 			page_sub(&pplist, pp);
29600Sstevel@tonic-gate 			PP_CLRFREE(pp);
29610Sstevel@tonic-gate 			PP_CLRAGED(pp);
29620Sstevel@tonic-gate 			conpp = pp;
29630Sstevel@tonic-gate 
29640Sstevel@tonic-gate 			err = swap_getconpage(ap_vp, ap_off, PAGESIZE,
29652414Saguzovsk 			    (uint_t *)NULL, anon_pl, PAGESIZE, conpp, NULL,
29662414Saguzovsk 			    &nreloc, seg, addr, S_CREATE, cred);
29670Sstevel@tonic-gate 
29680Sstevel@tonic-gate 			if (err) {
29690Sstevel@tonic-gate 				ANON_LOCK_EXIT(&amp->a_rwlock);
29700Sstevel@tonic-gate 				panic("anon_map_createpages: S_CREATE");
29710Sstevel@tonic-gate 			}
29720Sstevel@tonic-gate 
29730Sstevel@tonic-gate 			ASSERT(anon_pl[0] == pp);
29740Sstevel@tonic-gate 			ASSERT(nreloc == 1);
29750Sstevel@tonic-gate 			pagezero(pp, 0, PAGESIZE);
29760Sstevel@tonic-gate 			CPU_STATS_ADD_K(vm, zfod, 1);
29770Sstevel@tonic-gate 			hat_setrefmod(pp);
29780Sstevel@tonic-gate 
29790Sstevel@tonic-gate 			ASSERT(anon_get_ptr(amp->ahp, index) == NULL);
29800Sstevel@tonic-gate 			(void) anon_set_ptr(amp->ahp, index, ap, ANON_SLEEP);
29810Sstevel@tonic-gate 
29820Sstevel@tonic-gate 			ppa[p_index++] = pp;
29830Sstevel@tonic-gate 
29840Sstevel@tonic-gate 			addr += PAGESIZE;
29850Sstevel@tonic-gate 			index++;
29860Sstevel@tonic-gate 			npgs--;
29870Sstevel@tonic-gate 		}
29880Sstevel@tonic-gate 		conpp = NULL;
29890Sstevel@tonic-gate 		pg_cnt	= pgsz >> PAGESHIFT;
29900Sstevel@tonic-gate 		p_index = p_index - pg_cnt;
29910Sstevel@tonic-gate 		while (pg_cnt--) {
29920Sstevel@tonic-gate 			page_downgrade(ppa[p_index++]);
29930Sstevel@tonic-gate 		}
29940Sstevel@tonic-gate 	}
29950Sstevel@tonic-gate 	ANON_LOCK_EXIT(&amp->a_rwlock);
29960Sstevel@tonic-gate 	return (0);
29970Sstevel@tonic-gate }
29980Sstevel@tonic-gate 
29992414Saguzovsk static int
30002414Saguzovsk anon_try_demote_pages(
30012414Saguzovsk 	struct anon_hdr *ahp,
30022414Saguzovsk 	ulong_t sidx,
30032414Saguzovsk 	uint_t szc,
30042414Saguzovsk 	page_t **ppa,
30052414Saguzovsk 	int private)
30062414Saguzovsk {
30072414Saguzovsk 	struct anon	*ap;
30082414Saguzovsk 	pgcnt_t		pgcnt = page_get_pagecnt(szc);
30092414Saguzovsk 	page_t		*pp;
30102414Saguzovsk 	pgcnt_t		i;
30112414Saguzovsk 	kmutex_t	*ahmpages = NULL;
30122414Saguzovsk 	int		root = 0;
30132414Saguzovsk 	pgcnt_t		npgs;
30142414Saguzovsk 	pgcnt_t		curnpgs = 0;
30152414Saguzovsk 	size_t		ppasize = 0;
30162414Saguzovsk 
30172414Saguzovsk 	ASSERT(szc != 0);
30182414Saguzovsk 	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
30192414Saguzovsk 	ASSERT(IS_P2ALIGNED(sidx, pgcnt));
30202414Saguzovsk 	ASSERT(sidx < ahp->size);
30212414Saguzovsk 
30222414Saguzovsk 	if (ppa == NULL) {
30232414Saguzovsk 		ppasize = pgcnt * sizeof (page_t *);
30242414Saguzovsk 		ppa = kmem_alloc(ppasize, KM_SLEEP);
30252414Saguzovsk 	}
30262414Saguzovsk 
30272414Saguzovsk 	ap = anon_get_ptr(ahp, sidx);
30282414Saguzovsk 	if (ap != NULL && private) {
30292414Saguzovsk 		VM_STAT_ADD(anonvmstats.demotepages[1]);
3030*12173SMichael.Corcoran@Sun.COM 		ahmpages = APH_MUTEX(ap->an_vp, ap->an_off);
30312414Saguzovsk 		mutex_enter(ahmpages);
30322414Saguzovsk 	}
30332414Saguzovsk 
30342414Saguzovsk 	if (ap != NULL && ap->an_refcnt > 1) {
30352414Saguzovsk 		if (ahmpages != NULL) {
30362414Saguzovsk 			VM_STAT_ADD(anonvmstats.demotepages[2]);
30372414Saguzovsk 			mutex_exit(ahmpages);
30382414Saguzovsk 		}
30392414Saguzovsk 		if (ppasize != 0) {
30402414Saguzovsk 			kmem_free(ppa, ppasize);
30412414Saguzovsk 		}
30422414Saguzovsk 		return (0);
30432414Saguzovsk 	}
30442414Saguzovsk 	if (ahmpages != NULL) {
30452414Saguzovsk 		mutex_exit(ahmpages);
30462414Saguzovsk 	}
30472414Saguzovsk 	if (ahp->size - sidx < pgcnt) {
30482414Saguzovsk 		ASSERT(private == 0);
30492414Saguzovsk 		pgcnt = ahp->size - sidx;
30502414Saguzovsk 	}
30512414Saguzovsk 	for (i = 0; i < pgcnt; i++, sidx++) {
30522414Saguzovsk 		ap = anon_get_ptr(ahp, sidx);
30532414Saguzovsk 		if (ap != NULL) {
30542414Saguzovsk 			if (ap->an_refcnt != 1) {
30552414Saguzovsk 				panic("anon_try_demote_pages: an_refcnt != 1");
30562414Saguzovsk 			}
30572414Saguzovsk 			pp = ppa[i] = page_lookup(ap->an_vp, ap->an_off,
30585466Skchow 			    SE_EXCL);
30592414Saguzovsk 			if (pp != NULL) {
30602414Saguzovsk 				(void) hat_pageunload(pp,
30615466Skchow 				    HAT_FORCE_PGUNLOAD);
30622414Saguzovsk 			}
30632414Saguzovsk 		} else {
30642414Saguzovsk 			ppa[i] = NULL;
30652414Saguzovsk 		}
30662414Saguzovsk 	}
30672414Saguzovsk 	for (i = 0; i < pgcnt; i++) {
30682414Saguzovsk 		if ((pp = ppa[i]) != NULL && pp->p_szc != 0) {
30692414Saguzovsk 			ASSERT(pp->p_szc <= szc);
30702414Saguzovsk 			if (!root) {
30712414Saguzovsk 				VM_STAT_ADD(anonvmstats.demotepages[3]);
30722414Saguzovsk 				if (curnpgs != 0)
30732414Saguzovsk 					panic("anon_try_demote_pages: "
30745466Skchow 					    "bad large page");
30752414Saguzovsk 
30762414Saguzovsk 				root = 1;
30772414Saguzovsk 				curnpgs = npgs =
30785466Skchow 				    page_get_pagecnt(pp->p_szc);
30792414Saguzovsk 
30802414Saguzovsk 				ASSERT(npgs <= pgcnt);
30812414Saguzovsk 				ASSERT(IS_P2ALIGNED(npgs, npgs));
30825466Skchow 				ASSERT(!(page_pptonum(pp) & (npgs - 1)));
30832414Saguzovsk 			} else {
30842414Saguzovsk 				ASSERT(i > 0);
30852414Saguzovsk 				ASSERT(page_pptonum(pp) - 1 ==
30865466Skchow 				    page_pptonum(ppa[i - 1]));
30872414Saguzovsk 				if ((page_pptonum(pp) & (npgs - 1)) ==
30885466Skchow 				    npgs - 1)
30892414Saguzovsk 					root = 0;
30902414Saguzovsk 			}
30912414Saguzovsk 			ASSERT(PAGE_EXCL(pp));
30922414Saguzovsk 			pp->p_szc = 0;
30932414Saguzovsk 			ASSERT(curnpgs > 0);
30942414Saguzovsk 			curnpgs--;
30952414Saguzovsk 		}
30962414Saguzovsk 	}
30972414Saguzovsk 	if (root != 0 || curnpgs != 0)
30982414Saguzovsk 		panic("anon_try_demote_pages: bad large page");
30992414Saguzovsk 
31002414Saguzovsk 	for (i = 0; i < pgcnt; i++) {
31012414Saguzovsk 		if ((pp = ppa[i]) != NULL) {
31022414Saguzovsk 			ASSERT(!hat_page_is_mapped(pp));
31032414Saguzovsk 			ASSERT(pp->p_szc == 0);
31042414Saguzovsk 			page_unlock(pp);
31052414Saguzovsk 		}
31062414Saguzovsk 	}
31072414Saguzovsk 	if (ppasize != 0) {
31082414Saguzovsk 		kmem_free(ppa, ppasize);
31092414Saguzovsk 	}
31102414Saguzovsk 	return (1);
31112414Saguzovsk }
31122414Saguzovsk 
31132414Saguzovsk /*
31142414Saguzovsk  * anon_map_demotepages() can only be called by MAP_PRIVATE segments.
31152414Saguzovsk  */
31160Sstevel@tonic-gate int
31170Sstevel@tonic-gate anon_map_demotepages(
31180Sstevel@tonic-gate 	struct anon_map *amp,
31190Sstevel@tonic-gate 	ulong_t	start_idx,
31200Sstevel@tonic-gate 	struct seg *seg,
31210Sstevel@tonic-gate 	caddr_t addr,
31220Sstevel@tonic-gate 	uint_t prot,
31230Sstevel@tonic-gate 	struct vpage vpage[],
31240Sstevel@tonic-gate 	struct cred *cred)
31250Sstevel@tonic-gate {
31260Sstevel@tonic-gate 	struct anon	*ap;
31270Sstevel@tonic-gate 	uint_t		szc = seg->s_szc;
31280Sstevel@tonic-gate 	pgcnt_t		pgcnt = page_get_pagecnt(szc);
31290Sstevel@tonic-gate 	size_t		ppasize = pgcnt * sizeof (page_t *);
31300Sstevel@tonic-gate 	page_t		**ppa = kmem_alloc(ppasize, KM_SLEEP);
31310Sstevel@tonic-gate 	page_t		*pp;
31320Sstevel@tonic-gate 	page_t		*pl[2];
31330Sstevel@tonic-gate 	pgcnt_t		i, pg_idx;
31340Sstevel@tonic-gate 	ulong_t		an_idx;
31350Sstevel@tonic-gate 	caddr_t		vaddr;
31360Sstevel@tonic-gate 	int 		err;
31370Sstevel@tonic-gate 	int		retry = 0;
31380Sstevel@tonic-gate 	uint_t		vpprot;
31390Sstevel@tonic-gate 
31400Sstevel@tonic-gate 	ASSERT(RW_WRITE_HELD(&amp->a_rwlock));
31410Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
31420Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(start_idx, pgcnt));
31430Sstevel@tonic-gate 	ASSERT(ppa != NULL);
31442414Saguzovsk 	ASSERT(szc != 0);
31452414Saguzovsk 	ASSERT(szc == amp->a_szc);
31460Sstevel@tonic-gate 
31470Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.demotepages[0]);
31480Sstevel@tonic-gate 
31490Sstevel@tonic-gate top:
31502414Saguzovsk 	if (anon_try_demote_pages(amp->ahp, start_idx, szc, ppa, 1)) {
31512482Saguzovsk 		kmem_free(ppa, ppasize);
31520Sstevel@tonic-gate 		return (0);
31530Sstevel@tonic-gate 	}
31540Sstevel@tonic-gate 
31550Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.demotepages[4]);
31560Sstevel@tonic-gate 
31570Sstevel@tonic-gate 	ASSERT(retry == 0); /* we can be here only once */
31580Sstevel@tonic-gate 
31590Sstevel@tonic-gate 	vaddr = addr;
31600Sstevel@tonic-gate 	for (pg_idx = 0, an_idx = start_idx; pg_idx < pgcnt;
31610Sstevel@tonic-gate 	    pg_idx++, an_idx++, vaddr += PAGESIZE) {
31620Sstevel@tonic-gate 		ap = anon_get_ptr(amp->ahp, an_idx);
31630Sstevel@tonic-gate 		if (ap == NULL)
31640Sstevel@tonic-gate 			panic("anon_map_demotepages: no anon slot");
31650Sstevel@tonic-gate 		err = anon_getpage(&ap, &vpprot, pl, PAGESIZE, seg, vaddr,
31660Sstevel@tonic-gate 		    S_READ, cred);
31670Sstevel@tonic-gate 		if (err) {
31680Sstevel@tonic-gate 			for (i = 0; i < pg_idx; i++) {
31690Sstevel@tonic-gate 				if ((pp = ppa[i]) != NULL)
31700Sstevel@tonic-gate 					page_unlock(pp);
31710Sstevel@tonic-gate 			}
31720Sstevel@tonic-gate 			kmem_free(ppa, ppasize);
31730Sstevel@tonic-gate 			return (err);
31740Sstevel@tonic-gate 		}
31750Sstevel@tonic-gate 		ppa[pg_idx] = pl[0];
31760Sstevel@tonic-gate 	}
31770Sstevel@tonic-gate 
31780Sstevel@tonic-gate 	err = anon_map_privatepages(amp, start_idx, szc, seg, addr, prot, ppa,
31794426Saguzovsk 	    vpage, -1, 0, cred);
31800Sstevel@tonic-gate 	if (err > 0) {
31810Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.demotepages[5]);
31820Sstevel@tonic-gate 		kmem_free(ppa, ppasize);
31830Sstevel@tonic-gate 		return (err);
31840Sstevel@tonic-gate 	}
31850Sstevel@tonic-gate 	ASSERT(err == 0 || err == -1);
31860Sstevel@tonic-gate 	if (err == -1) {
31870Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.demotepages[6]);
31880Sstevel@tonic-gate 		retry = 1;
31890Sstevel@tonic-gate 		goto top;
31900Sstevel@tonic-gate 	}
31910Sstevel@tonic-gate 	for (i = 0; i < pgcnt; i++) {
31920Sstevel@tonic-gate 		ASSERT(ppa[i] != NULL);
31930Sstevel@tonic-gate 		if (ppa[i]->p_szc != 0)
31940Sstevel@tonic-gate 			retry = 1;
31950Sstevel@tonic-gate 		page_unlock(ppa[i]);
31960Sstevel@tonic-gate 	}
31970Sstevel@tonic-gate 	if (retry) {
31980Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.demotepages[7]);
31990Sstevel@tonic-gate 		goto top;
32000Sstevel@tonic-gate 	}
32010Sstevel@tonic-gate 
32020Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.demotepages[8]);
32030Sstevel@tonic-gate 
32040Sstevel@tonic-gate 	kmem_free(ppa, ppasize);
32050Sstevel@tonic-gate 
32060Sstevel@tonic-gate 	return (0);
32070Sstevel@tonic-gate }
32080Sstevel@tonic-gate 
32090Sstevel@tonic-gate /*
32102414Saguzovsk  * Free pages of shared anon map. It's assumed that anon maps don't share anon
32112414Saguzovsk  * structures with private anon maps. Therefore all anon structures should
32122414Saguzovsk  * have at most one reference at this point. This means underlying pages can
32132414Saguzovsk  * be exclusively locked and demoted or freed.  If we are not freeing entire
32142414Saguzovsk  * large pages, demote the ends of the region we free so that subpages can be
32155331Samw  * freed. Page roots correspond to aligned index positions in the anon map.
32162414Saguzovsk  */
32172414Saguzovsk void
32182414Saguzovsk anon_shmap_free_pages(struct anon_map *amp, ulong_t sidx, size_t len)
32192414Saguzovsk {
32202414Saguzovsk 	ulong_t eidx = sidx + btopr(len);
32212414Saguzovsk 	pgcnt_t pages = page_get_pagecnt(amp->a_szc);
32222414Saguzovsk 	struct anon_hdr *ahp = amp->ahp;
32232414Saguzovsk 	ulong_t tidx;
32242414Saguzovsk 	size_t size;
32252414Saguzovsk 	ulong_t sidx_aligned;
32262414Saguzovsk 	ulong_t eidx_aligned;
32272414Saguzovsk 
32286695Saguzovsk 	ASSERT(ANON_WRITE_HELD(&amp->a_rwlock));
32292414Saguzovsk 	ASSERT(amp->refcnt <= 1);
32302414Saguzovsk 	ASSERT(amp->a_szc > 0);
32312414Saguzovsk 	ASSERT(eidx <= ahp->size);
32322414Saguzovsk 	ASSERT(!anon_share(ahp, sidx, btopr(len)));
32332414Saguzovsk 
32342414Saguzovsk 	if (len == 0) {	/* XXX */
32352414Saguzovsk 		return;
32362414Saguzovsk 	}
32372414Saguzovsk 
32382414Saguzovsk 	sidx_aligned = P2ALIGN(sidx, pages);
32392414Saguzovsk 	if (sidx_aligned != sidx ||
32402414Saguzovsk 	    (eidx < sidx_aligned + pages && eidx < ahp->size)) {
32412414Saguzovsk 		if (!anon_try_demote_pages(ahp, sidx_aligned,
32422414Saguzovsk 		    amp->a_szc, NULL, 0)) {
32432414Saguzovsk 			panic("anon_shmap_free_pages: demote failed");
32442414Saguzovsk 		}
32452414Saguzovsk 		size = (eidx <= sidx_aligned + pages) ? (eidx - sidx) :
32462414Saguzovsk 		    P2NPHASE(sidx, pages);
32472414Saguzovsk 		size <<= PAGESHIFT;
32482414Saguzovsk 		anon_free(ahp, sidx, size);
32492414Saguzovsk 		sidx = sidx_aligned + pages;
32502414Saguzovsk 		if (eidx <= sidx) {
32512414Saguzovsk 			return;
32522414Saguzovsk 		}
32532414Saguzovsk 	}
32542414Saguzovsk 	eidx_aligned = P2ALIGN(eidx, pages);
32552414Saguzovsk 	if (sidx < eidx_aligned) {
32562414Saguzovsk 		anon_free_pages(ahp, sidx,
32572414Saguzovsk 		    (eidx_aligned - sidx) << PAGESHIFT,
32582414Saguzovsk 		    amp->a_szc);
32592414Saguzovsk 		sidx = eidx_aligned;
32602414Saguzovsk 	}
32612414Saguzovsk 	ASSERT(sidx == eidx_aligned);
32622414Saguzovsk 	if (eidx == eidx_aligned) {
32632414Saguzovsk 		return;
32642414Saguzovsk 	}
32652414Saguzovsk 	tidx = eidx;
32662414Saguzovsk 	if (eidx != ahp->size && anon_get_next_ptr(ahp, &tidx) != NULL &&
32672414Saguzovsk 	    tidx - sidx < pages) {
32682414Saguzovsk 		if (!anon_try_demote_pages(ahp, sidx, amp->a_szc, NULL, 0)) {
32692414Saguzovsk 			panic("anon_shmap_free_pages: demote failed");
32702414Saguzovsk 		}
32712414Saguzovsk 		size = (eidx - sidx) << PAGESHIFT;
32722414Saguzovsk 		anon_free(ahp, sidx, size);
32732414Saguzovsk 	} else {
32742414Saguzovsk 		anon_free_pages(ahp, sidx, pages << PAGESHIFT, amp->a_szc);
32752414Saguzovsk 	}
32762414Saguzovsk }
32772414Saguzovsk 
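/*
 * Worked example (editorial addition): with amp->a_szc giving
 * pages == 8, a call covering indices [10, 29) proceeds as follows:
 *  - sidx (10) is not aligned, so the large page rooted at index 8
 *    is demoted and slots [10, 16) are freed as small pages;
 *  - slots [16, 24) form a whole large page and are freed by
 *    anon_free_pages();
 *  - eidx (29) is not aligned, so if any slot in [29, 32) is still
 *    in use the tail large page at 24 is demoted and [24, 29) is
 *    freed as small pages; otherwise the whole large page at 24 is
 *    freed.
 */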
32782414Saguzovsk /*
32796695Saguzovsk  * This routine should be called with amp's writer lock when there are no
32806695Saguzovsk  * other users of amp.  All pcache entries of this amp must have been already
32816695Saguzovsk  * inactivated. We must not drop a_rwlock here to prevent new users from
32826695Saguzovsk  * attaching to this amp.
32836695Saguzovsk  */
32846695Saguzovsk void
32856695Saguzovsk anonmap_purge(struct anon_map *amp)
32866695Saguzovsk {
32876695Saguzovsk 	ASSERT(ANON_WRITE_HELD(&amp->a_rwlock));
32886695Saguzovsk 	ASSERT(amp->refcnt <= 1);
32896695Saguzovsk 
32906695Saguzovsk 	if (amp->a_softlockcnt != 0) {
32916695Saguzovsk 		seg_ppurge(NULL, amp, 0);
32926695Saguzovsk 	}
32936695Saguzovsk 
32946695Saguzovsk 	/*
32956695Saguzovsk 	 * Since all pcache entries were already inactive before this routine
32966695Saguzovsk 	 * was called, seg_ppurge() couldn't have returned while there were
32976695Saguzovsk 	 * still entries reachable via the list anchored at a_phead, so we can
32986695Saguzovsk 	 * assert this list is empty now. a_softlockcnt may still be non-zero
32996695Saguzovsk 	 * if the asynchronous thread that manages pcache has already removed
33006695Saguzovsk 	 * pcache entries but hasn't unlocked the pages yet. If a_softlockcnt
33016695Saguzovsk 	 * is non-zero we just wait on a_purgecv for shamp_reclaim() to finish.
33026695Saguzovsk 	 * Even if a_softlockcnt is 0 we grab a_purgemtx to avoid freeing the
33036695Saguzovsk 	 * anon map before shamp_reclaim() is done with it. a_purgemtx, taken
33046695Saguzovsk 	 * by shamp_reclaim() while a_softlockcnt was still non-zero, also
33056695Saguzovsk 	 * acts as a barrier that prevents anonmap_purge() from completing
33066695Saguzovsk 	 * while shamp_reclaim() may still be referencing this amp.
33076695Saguzovsk 	 */
33086695Saguzovsk 	ASSERT(amp->a_phead.p_lnext == &amp->a_phead);
33096695Saguzovsk 	ASSERT(amp->a_phead.p_lprev == &amp->a_phead);
33106695Saguzovsk 
33116695Saguzovsk 	mutex_enter(&amp->a_purgemtx);
33126695Saguzovsk 	while (amp->a_softlockcnt != 0) {
33136695Saguzovsk 		ASSERT(amp->a_phead.p_lnext == &amp->a_phead);
33146695Saguzovsk 		ASSERT(amp->a_phead.p_lprev == &amp->a_phead);
33156695Saguzovsk 		amp->a_purgewait = 1;
33166695Saguzovsk 		cv_wait(&amp->a_purgecv, &amp->a_purgemtx);
33176695Saguzovsk 	}
33186695Saguzovsk 	mutex_exit(&amp->a_purgemtx);
33196695Saguzovsk 
33206695Saguzovsk 	ASSERT(amp->a_phead.p_lnext == &amp->a_phead);
33216695Saguzovsk 	ASSERT(amp->a_phead.p_lprev == &amp->a_phead);
33226695Saguzovsk 	ASSERT(amp->a_softlockcnt == 0);
33236695Saguzovsk }
33246695Saguzovsk 
33256695Saguzovsk /*
33260Sstevel@tonic-gate  * Allocate and initialize an anon_map structure for seg
33270Sstevel@tonic-gate  * associating the given swap reservation with the new anon_map.
33280Sstevel@tonic-gate  */
33290Sstevel@tonic-gate struct anon_map *
33304426Saguzovsk anonmap_alloc(size_t size, size_t swresv, int flags)
33310Sstevel@tonic-gate {
33320Sstevel@tonic-gate 	struct anon_map *amp;
33334426Saguzovsk 	int kmflags = (flags & ANON_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
33344426Saguzovsk 
33354426Saguzovsk 	amp = kmem_cache_alloc(anonmap_cache, kmflags);
33364426Saguzovsk 	if (amp == NULL) {
33374426Saguzovsk 		ASSERT(kmflags == KM_NOSLEEP);
33384426Saguzovsk 		return (NULL);
33394426Saguzovsk 	}
33404426Saguzovsk 
33414426Saguzovsk 	amp->ahp = anon_create(btopr(size), flags);
33424426Saguzovsk 	if (amp->ahp == NULL) {
33434426Saguzovsk 		ASSERT(flags == ANON_NOSLEEP);
33444426Saguzovsk 		kmem_cache_free(anonmap_cache, amp);
33454426Saguzovsk 		return (NULL);
33464426Saguzovsk 	}
33470Sstevel@tonic-gate 	amp->refcnt = 1;
33480Sstevel@tonic-gate 	amp->size = size;
33490Sstevel@tonic-gate 	amp->swresv = swresv;
33500Sstevel@tonic-gate 	amp->locality = 0;
33510Sstevel@tonic-gate 	amp->a_szc = 0;
33522768Ssl108498 	amp->a_sp = NULL;
33536695Saguzovsk 	amp->a_softlockcnt = 0;
33546695Saguzovsk 	amp->a_purgewait = 0;
33556695Saguzovsk 	amp->a_phead.p_lnext = &amp->a_phead;
33566695Saguzovsk 	amp->a_phead.p_lprev = &amp->a_phead;
33576695Saguzovsk 
33580Sstevel@tonic-gate 	return (amp);
33590Sstevel@tonic-gate }
33600Sstevel@tonic-gate 
33610Sstevel@tonic-gate void
33620Sstevel@tonic-gate anonmap_free(struct anon_map *amp)
33630Sstevel@tonic-gate {
33646695Saguzovsk 	ASSERT(amp->ahp != NULL);
33650Sstevel@tonic-gate 	ASSERT(amp->refcnt == 0);
33666695Saguzovsk 	ASSERT(amp->a_softlockcnt == 0);
33676695Saguzovsk 	ASSERT(amp->a_phead.p_lnext == &amp->a_phead);
33686695Saguzovsk 	ASSERT(amp->a_phead.p_lprev == &amp->a_phead);
33690Sstevel@tonic-gate 
33700Sstevel@tonic-gate 	lgrp_shm_policy_fini(amp, NULL);
33710Sstevel@tonic-gate 	anon_release(amp->ahp, btopr(amp->size));
33720Sstevel@tonic-gate 	kmem_cache_free(anonmap_cache, amp);
33730Sstevel@tonic-gate }
33740Sstevel@tonic-gate 
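/*
 * Illustrative sketch (editorial addition): the basic lifetime of an
 * anon_map.  The reservation size and the direct refcnt manipulation
 * are example-only simplifications; real callers adjust refcnt under
 * the appropriate lock.
 */
#ifdef ANON_EXAMPLES
static int
anonmap_lifetime_example(size_t size)
{
	struct anon_map *amp;

	/* May return NULL only when ANON_NOSLEEP is passed. */
	amp = anonmap_alloc(size, size, ANON_NOSLEEP);
	if (amp == NULL)
		return (ENOMEM);

	/* ... attach the amp to a segment, fault pages in, etc ... */

	amp->refcnt = 0;		/* last reference dropped */
	anonmap_free(amp);
	return (0);
}
#endif	/* ANON_EXAMPLES */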
33750Sstevel@tonic-gate /*
33760Sstevel@tonic-gate  * Returns true if the anon array has some empty slots.
33775331Samw  * The offp and lenp parameters are in/out parameters.  On entry
33780Sstevel@tonic-gate  * these values represent the starting offset and length of the
33790Sstevel@tonic-gate  * mapping.  When true is returned, these values may be modified
33800Sstevel@tonic-gate  * to be the largest range which includes empty slots.
33810Sstevel@tonic-gate  */
33820Sstevel@tonic-gate int
33830Sstevel@tonic-gate non_anon(struct anon_hdr *ahp, ulong_t anon_idx, u_offset_t *offp,
33840Sstevel@tonic-gate 				size_t *lenp)
33850Sstevel@tonic-gate {
33860Sstevel@tonic-gate 	ulong_t i, el;
33870Sstevel@tonic-gate 	ssize_t low, high;
33880Sstevel@tonic-gate 	struct anon *ap;
33890Sstevel@tonic-gate 
33900Sstevel@tonic-gate 	low = -1;
33910Sstevel@tonic-gate 	for (i = 0, el = *lenp; i < el; i += PAGESIZE, anon_idx++) {
33920Sstevel@tonic-gate 		ap = anon_get_ptr(ahp, anon_idx);
33930Sstevel@tonic-gate 		if (ap == NULL) {
33940Sstevel@tonic-gate 			if (low == -1)
33950Sstevel@tonic-gate 				low = i;
33960Sstevel@tonic-gate 			high = i;
33970Sstevel@tonic-gate 		}
33980Sstevel@tonic-gate 	}
33990Sstevel@tonic-gate 	if (low != -1) {
34000Sstevel@tonic-gate 		/*
34010Sstevel@tonic-gate 		 * Found at least one non-anon page.
34020Sstevel@tonic-gate 		 * Set up the off and len return values.
34030Sstevel@tonic-gate 		 */
34040Sstevel@tonic-gate 		if (low != 0)
34050Sstevel@tonic-gate 			*offp += low;
34060Sstevel@tonic-gate 		*lenp = high - low + PAGESIZE;
34070Sstevel@tonic-gate 		return (1);
34080Sstevel@tonic-gate 	}
34090Sstevel@tonic-gate 	return (0);
34100Sstevel@tonic-gate }
34110Sstevel@tonic-gate 
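/*
 * Worked example (editorial addition): if the range covers four pages
 * and only the second and third anon slots are empty, non_anon()
 * returns 1 with *offp advanced by one PAGESIZE and *lenp shrunk to
 * two PAGESIZEs -- the bounding range from the first empty slot to
 * the last one.
 */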
34120Sstevel@tonic-gate /*
34130Sstevel@tonic-gate  * Return a count of the number of existing anon pages in the anon array
34140Sstevel@tonic-gate  * ahp in the range (anon_index, anon_index + nslots). The array and slots
34150Sstevel@tonic-gate  * must be guaranteed stable by the caller.
34160Sstevel@tonic-gate  */
34170Sstevel@tonic-gate pgcnt_t
34180Sstevel@tonic-gate anon_pages(struct anon_hdr *ahp, ulong_t anon_index, pgcnt_t nslots)
34190Sstevel@tonic-gate {
34200Sstevel@tonic-gate 	pgcnt_t cnt = 0;
34210Sstevel@tonic-gate 
34220Sstevel@tonic-gate 	while (nslots-- > 0) {
34230Sstevel@tonic-gate 		if ((anon_get_ptr(ahp, anon_index)) != NULL)
34240Sstevel@tonic-gate 			cnt++;
34250Sstevel@tonic-gate 		anon_index++;
34260Sstevel@tonic-gate 	}
34270Sstevel@tonic-gate 	return (cnt);
34280Sstevel@tonic-gate }
34290Sstevel@tonic-gate 
34300Sstevel@tonic-gate /*
34310Sstevel@tonic-gate  * Move reserved phys swap into memory swap (unreserve phys swap
34320Sstevel@tonic-gate  * and reserve mem swap by the same amount).
34335331Samw  * Used by segspt when it needs to lock npages of reserved swap in memory.
34340Sstevel@tonic-gate  */
34350Sstevel@tonic-gate int
34360Sstevel@tonic-gate anon_swap_adjust(pgcnt_t npages)
34370Sstevel@tonic-gate {
34380Sstevel@tonic-gate 	pgcnt_t unlocked_mem_swap;
34390Sstevel@tonic-gate 
34400Sstevel@tonic-gate 	mutex_enter(&anoninfo_lock);
34410Sstevel@tonic-gate 
34420Sstevel@tonic-gate 	ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
34430Sstevel@tonic-gate 	ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
34440Sstevel@tonic-gate 
34450Sstevel@tonic-gate 	unlocked_mem_swap = k_anoninfo.ani_mem_resv
34465466Skchow 	    - k_anoninfo.ani_locked_swap;
34470Sstevel@tonic-gate 	if (npages > unlocked_mem_swap) {
34480Sstevel@tonic-gate 		spgcnt_t adjusted_swap = npages - unlocked_mem_swap;
34490Sstevel@tonic-gate 
34500Sstevel@tonic-gate 		/*
34510Sstevel@tonic-gate 		 * if there is not enough unlocked mem swap, we take the missing
34520Sstevel@tonic-gate 		 * amount from phys swap and give it to mem swap
34530Sstevel@tonic-gate 		 */
34542048Sstans 		if (!page_reclaim_mem(adjusted_swap, segspt_minfree, 1)) {
34550Sstevel@tonic-gate 			mutex_exit(&anoninfo_lock);
34560Sstevel@tonic-gate 			return (ENOMEM);
34570Sstevel@tonic-gate 		}
34580Sstevel@tonic-gate 
34590Sstevel@tonic-gate 		k_anoninfo.ani_mem_resv += adjusted_swap;
34600Sstevel@tonic-gate 		ASSERT(k_anoninfo.ani_phys_resv >= adjusted_swap);
34610Sstevel@tonic-gate 		k_anoninfo.ani_phys_resv -= adjusted_swap;
34620Sstevel@tonic-gate 
34630Sstevel@tonic-gate 		ANI_ADD(adjusted_swap);
34640Sstevel@tonic-gate 	}
34650Sstevel@tonic-gate 	k_anoninfo.ani_locked_swap += npages;
34660Sstevel@tonic-gate 
34670Sstevel@tonic-gate 	ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
34680Sstevel@tonic-gate 	ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
34690Sstevel@tonic-gate 
34700Sstevel@tonic-gate 	mutex_exit(&anoninfo_lock);
34710Sstevel@tonic-gate 
34720Sstevel@tonic-gate 	return (0);
34730Sstevel@tonic-gate }
34740Sstevel@tonic-gate 
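/*
 * Worked example (editorial addition): with ani_mem_resv == 100 pages
 * and ani_locked_swap == 90, only 10 pages of mem swap are unlocked.
 * A call with npages == 25 must move 25 - 10 == 15 pages from the
 * phys swap reservation to the mem swap reservation (once
 * page_reclaim_mem() succeeds), after which ani_mem_resv == 115 and
 * ani_locked_swap == 115.
 */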
34750Sstevel@tonic-gate /*
34760Sstevel@tonic-gate  * 'Unlock' reserved mem swap so that when it is unreserved it
34770Sstevel@tonic-gate  * can be moved back to phys (disk) swap.
34780Sstevel@tonic-gate  */
34790Sstevel@tonic-gate void
34800Sstevel@tonic-gate anon_swap_restore(pgcnt_t npages)
34810Sstevel@tonic-gate {
34820Sstevel@tonic-gate 	mutex_enter(&anoninfo_lock);
34830Sstevel@tonic-gate 
34840Sstevel@tonic-gate 	ASSERT(k_anoninfo.ani_locked_swap <= k_anoninfo.ani_mem_resv);
34850Sstevel@tonic-gate 
34860Sstevel@tonic-gate 	ASSERT(k_anoninfo.ani_locked_swap >= npages);
34870Sstevel@tonic-gate 	k_anoninfo.ani_locked_swap -= npages;
34880Sstevel@tonic-gate 
34890Sstevel@tonic-gate 	ASSERT(k_anoninfo.ani_locked_swap <= k_anoninfo.ani_mem_resv);
34900Sstevel@tonic-gate 
34910Sstevel@tonic-gate 	mutex_exit(&anoninfo_lock);
34920Sstevel@tonic-gate }
34930Sstevel@tonic-gate 
34940Sstevel@tonic-gate /*
34950Sstevel@tonic-gate  * Return a pointer to the slot in the anon array for a
34960Sstevel@tonic-gate  * specified anon index.
34970Sstevel@tonic-gate  */
34980Sstevel@tonic-gate ulong_t *
34990Sstevel@tonic-gate anon_get_slot(struct anon_hdr *ahp, ulong_t an_idx)
35000Sstevel@tonic-gate {
35010Sstevel@tonic-gate 	struct anon	**app;
35020Sstevel@tonic-gate 	void 		**ppp;
35030Sstevel@tonic-gate 
35040Sstevel@tonic-gate 	ASSERT(an_idx < ahp->size);
35050Sstevel@tonic-gate 
35060Sstevel@tonic-gate 	/*
35070Sstevel@tonic-gate 	 * Single level case.
35080Sstevel@tonic-gate 	 */
35090Sstevel@tonic-gate 	if ((ahp->size <= ANON_CHUNK_SIZE) || (ahp->flags & ANON_ALLOC_FORCE)) {
35100Sstevel@tonic-gate 		return ((ulong_t *)&ahp->array_chunk[an_idx]);
35110Sstevel@tonic-gate 	} else {
35120Sstevel@tonic-gate 
35130Sstevel@tonic-gate 		/*
35140Sstevel@tonic-gate 		 * 2 level case.
35150Sstevel@tonic-gate 		 */
35160Sstevel@tonic-gate 		ppp = &ahp->array_chunk[an_idx >> ANON_CHUNK_SHIFT];
35170Sstevel@tonic-gate 		if (*ppp == NULL) {
35180Sstevel@tonic-gate 			mutex_enter(&ahp->serial_lock);
35190Sstevel@tonic-gate 			ppp = &ahp->array_chunk[an_idx >> ANON_CHUNK_SHIFT];
35200Sstevel@tonic-gate 			if (*ppp == NULL)
35210Sstevel@tonic-gate 				*ppp = kmem_zalloc(PAGESIZE, KM_SLEEP);
35220Sstevel@tonic-gate 			mutex_exit(&ahp->serial_lock);
35230Sstevel@tonic-gate 		}
35240Sstevel@tonic-gate 		app = *ppp;
35250Sstevel@tonic-gate 		return ((ulong_t *)&app[an_idx & ANON_CHUNK_OFF]);
35260Sstevel@tonic-gate 	}
35270Sstevel@tonic-gate }
35280Sstevel@tonic-gate 
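/*
 * Illustrative sketch (editorial addition): the two-level index split
 * used by anon_get_slot() above.  Assumes, as the code implies, that
 * ANON_CHUNK_SIZE is a power of two with
 * ANON_CHUNK_OFF == ANON_CHUNK_SIZE - 1.
 */
#ifdef ANON_EXAMPLES
static void
anon_index_split_example(ulong_t an_idx, ulong_t *chunkp, ulong_t *slotp)
{
	*chunkp = an_idx >> ANON_CHUNK_SHIFT;	/* which chunk page */
	*slotp = an_idx & ANON_CHUNK_OFF;	/* slot within that chunk */
}
#endif	/* ANON_EXAMPLES */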
35290Sstevel@tonic-gate void
35300Sstevel@tonic-gate anon_array_enter(struct anon_map *amp, ulong_t an_idx, anon_sync_obj_t *sobj)
35310Sstevel@tonic-gate {
35320Sstevel@tonic-gate 	ulong_t		*ap_slot;
35330Sstevel@tonic-gate 	kmutex_t	*mtx;
35340Sstevel@tonic-gate 	kcondvar_t	*cv;
35350Sstevel@tonic-gate 	int		hash;
35360Sstevel@tonic-gate 
35370Sstevel@tonic-gate 	/*
35380Sstevel@tonic-gate 	 * Use szc to determine anon slot(s) to appear atomic.
35390Sstevel@tonic-gate 	 * Use szc to determine which anon slot(s) should appear atomic.
35400Sstevel@tonic-gate 	 * If szc > 0, then lock the range of slots by getting the
35410Sstevel@tonic-gate 	 * anon_array_lock for the first anon slot, and mark only the
35420Sstevel@tonic-gate 	 * first anon slot busy to represent whole range being busy.
35430Sstevel@tonic-gate 	 */
35440Sstevel@tonic-gate 
35450Sstevel@tonic-gate 	ASSERT(RW_READ_HELD(&amp->a_rwlock));
35460Sstevel@tonic-gate 	an_idx = P2ALIGN(an_idx, page_get_pagecnt(amp->a_szc));
35470Sstevel@tonic-gate 	hash = ANON_ARRAY_HASH(amp, an_idx);
35480Sstevel@tonic-gate 	sobj->sync_mutex = mtx = &anon_array_lock[hash].pad_mutex;
35490Sstevel@tonic-gate 	sobj->sync_cv = cv = &anon_array_cv[hash];
35500Sstevel@tonic-gate 	mutex_enter(mtx);
35510Sstevel@tonic-gate 	ap_slot = anon_get_slot(amp->ahp, an_idx);
35520Sstevel@tonic-gate 	while (ANON_ISBUSY(ap_slot))
35530Sstevel@tonic-gate 		cv_wait(cv, mtx);
35540Sstevel@tonic-gate 	ANON_SETBUSY(ap_slot);
35550Sstevel@tonic-gate 	sobj->sync_data = ap_slot;
35560Sstevel@tonic-gate 	mutex_exit(mtx);
35570Sstevel@tonic-gate }
35580Sstevel@tonic-gate 
3559888Scwb int
3560888Scwb anon_array_try_enter(struct anon_map *amp, ulong_t an_idx,
3561888Scwb 			anon_sync_obj_t *sobj)
3562888Scwb {
3563888Scwb 	ulong_t		*ap_slot;
3564888Scwb 	kmutex_t	*mtx;
3565888Scwb 	int		hash;
3566888Scwb 
3567888Scwb 	/*
3568888Scwb 	 * Try to lock a range of anon slots.
3569888Scwb 	 * Use szc to determine which anon slot(s) should appear atomic.
3570888Scwb 	 * If szc = 0, then lock the anon slot and mark it busy.
3571888Scwb 	 * If szc > 0, then lock the range of slots by getting the
3572888Scwb 	 * anon_array_lock for the first anon slot, and mark only the
3573888Scwb 	 * first anon slot busy to represent whole range being busy.
3574888Scwb 	 * Fail if the mutex or the anon_array are busy.
3575888Scwb 	 */
3576888Scwb 
3577888Scwb 	ASSERT(RW_READ_HELD(&amp->a_rwlock));
3578888Scwb 	an_idx = P2ALIGN(an_idx, page_get_pagecnt(amp->a_szc));
3579888Scwb 	hash = ANON_ARRAY_HASH(amp, an_idx);
3580888Scwb 	sobj->sync_mutex = mtx = &anon_array_lock[hash].pad_mutex;
3581925Scwb 	sobj->sync_cv = &anon_array_cv[hash];
3582888Scwb 	if (!mutex_tryenter(mtx)) {
3583888Scwb 		return (EWOULDBLOCK);
3584888Scwb 	}
3585888Scwb 	ap_slot = anon_get_slot(amp->ahp, an_idx);
3586888Scwb 	if (ANON_ISBUSY(ap_slot)) {
3587888Scwb 		mutex_exit(mtx);
3588888Scwb 		return (EWOULDBLOCK);
3589888Scwb 	}
3590888Scwb 	ANON_SETBUSY(ap_slot);
3591888Scwb 	sobj->sync_data = ap_slot;
3592888Scwb 	mutex_exit(mtx);
3593888Scwb 	return (0);
3594888Scwb }
3595888Scwb 
35960Sstevel@tonic-gate void
35970Sstevel@tonic-gate anon_array_exit(anon_sync_obj_t *sobj)
35980Sstevel@tonic-gate {
35990Sstevel@tonic-gate 	mutex_enter(sobj->sync_mutex);
36000Sstevel@tonic-gate 	ASSERT(ANON_ISBUSY(sobj->sync_data));
36010Sstevel@tonic-gate 	ANON_CLRBUSY(sobj->sync_data);
36020Sstevel@tonic-gate 	if (CV_HAS_WAITERS(sobj->sync_cv))
36030Sstevel@tonic-gate 		cv_broadcast(sobj->sync_cv);
36040Sstevel@tonic-gate 	mutex_exit(sobj->sync_mutex);
36050Sstevel@tonic-gate }
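/*
 * Illustrative sketch (editorial addition): the enter/exit pairing
 * expected by the anon array synchronization routines above.  The
 * helper name is hypothetical; note that the slot is only stable
 * while it is marked busy.
 */
#ifdef ANON_EXAMPLES
static struct anon *
anon_slot_lookup_example(struct anon_map *amp, ulong_t an_idx)
{
	anon_sync_obj_t sobj;
	struct anon *ap;

	ASSERT(RW_READ_HELD(&amp->a_rwlock));

	anon_array_enter(amp, an_idx, &sobj);	/* mark slot range busy */
	ap = anon_get_ptr(amp->ahp, an_idx);	/* stable while busy */
	anon_array_exit(&sobj);			/* clear busy, wake waiters */

	return (ap);
}
#endif	/* ANON_EXAMPLES */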
3606