/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - anonymous pages.
 *
 * This layer sits immediately above the vm_swap layer.  It manages
 * physical pages that have no permanent identity in the file system
 * name space, using the services of the vm_swap layer to allocate
 * backing storage for these pages.  Since these pages have no external
 * identity, they are discarded when the last reference is removed.
 *
 * An important function of this layer is to manage low-level sharing
 * of pages that are logically distinct but that happen to be
 * physically identical (e.g., the corresponding pages of the processes
 * resulting from a fork before one process or the other changes their
 * contents).  This pseudo-sharing is present only as an optimization
 * and is not to be confused with true sharing in which multiple
 * address spaces deliberately contain references to the same object;
 * such sharing is managed at a higher level.
 *
 * The key data structure here is the anon struct, which contains a
 * reference count for its associated physical page and a hint about
 * the identity of that page.  Anon structs typically live in arrays,
 * with an instance's position in its array determining where the
 * corresponding backing storage is allocated; however, the swap_xlate()
 * routine abstracts away this representation information so that the
 * rest of the anon layer need not know it.  (See the swap layer for
 * more details on anon struct layout.)
 *
 * In future versions of the system, the association between an
 * anon struct and its position on backing store will change so that
 * we don't require backing store for all anonymous pages in the system.
 * This is an important consideration for large memory systems.
 * We can also use this technique to delay binding physical locations
 * to anonymous pages until pageout/swapout time, when we can make
 * smarter allocation decisions to improve anonymous klustering.
 *
 * Many of the routines defined here take a (struct anon **) argument,
 * which allows the code at this level to manage anon pages directly,
 * so that callers can regard anon structs as opaque objects and not be
 * concerned with assigning or inspecting their contents.
 *
 * Clients of this layer refer to anon pages indirectly.  That is, they
 * maintain arrays of pointers to anon structs rather than maintaining
 * anon structs themselves.  The (struct anon **) arguments mentioned
 * above are pointers to entries in these arrays.  It is these arrays
 * that capture the mapping between offsets within a given segment and
 * the corresponding anonymous backing storage address.
 */

#ifdef DEBUG
#define	ANON_DEBUG
#endif

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/cred.h>
#include <sys/thread.h>
#include <sys/vnode.h>
#include <sys/cpuvar.h>
#include <sys/swap.h>
#include <sys/cmn_err.h>
#include <sys/vtrace.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/bitmap.h>
#include <sys/vmsystm.h>
#include <sys/debug.h>
#include <sys/tnf_probe.h>
#include <sys/lgrp.h>
#include <sys/policy.h>
#include <sys/condvar_impl.h>
#include <sys/mutex_impl.h>

#include <vm/as.h>
#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/vpage.h>
#include <vm/seg.h>
#include <vm/rm.h>

#include <fs/fs_subr.h>

int anon_debug;

kmutex_t	anoninfo_lock;
struct		k_anoninfo k_anoninfo;
ani_free_t	ani_free_pool[ANI_MAX_POOL];
pad_mutex_t	anon_array_lock[ANON_LOCKSIZE];
kcondvar_t	anon_array_cv[ANON_LOCKSIZE];

/*
 * Global hash table for (vp, off) -> anon slot
 */
extern	int swap_maxcontig;
size_t	anon_hash_size;
struct anon **anon_hash;

static struct kmem_cache *anon_cache;
static struct kmem_cache *anonmap_cache;

#ifdef VM_STATS
static struct anonvmstats_str {
	ulong_t getpages[30];
	ulong_t privatepages[10];
	ulong_t demotepages[9];
	ulong_t decrefpages[9];
	ulong_t	dupfillholes[4];
	ulong_t freepages[1];
} anonvmstats;
#endif /* VM_STATS */


/*ARGSUSED*/
static int
anonmap_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct anon_map *amp = buf;

	rw_init(&amp->a_rwlock, NULL, RW_DEFAULT, NULL);
	return (0);
}

/*ARGSUSED1*/
static void
anonmap_cache_destructor(void *buf, void *cdrarg)
{
	struct anon_map *amp = buf;

	rw_destroy(&amp->a_rwlock);
}

kmutex_t	anonhash_lock[AH_LOCK_SIZE];
kmutex_t	anonpages_hash_lock[AH_LOCK_SIZE];

void
anon_init(void)
{
	int i;

	anon_hash_size = 1L << highbit(physmem / ANON_HASHAVELEN);

	for (i = 0; i < AH_LOCK_SIZE; i++) {
		mutex_init(&anonhash_lock[i], NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&anonpages_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
	}

	for (i = 0; i < ANON_LOCKSIZE; i++) {
		mutex_init(&anon_array_lock[i].pad_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
		cv_init(&anon_array_cv[i], NULL, CV_DEFAULT, NULL);
	}

	anon_hash = (struct anon **)
	    kmem_zalloc(sizeof (struct anon *) * anon_hash_size, KM_SLEEP);
	anon_cache = kmem_cache_create("anon_cache", sizeof (struct anon),
	    AN_CACHE_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);
	anonmap_cache = kmem_cache_create("anonmap_cache",
	    sizeof (struct anon_map), 0,
	    anonmap_cache_constructor, anonmap_cache_destructor, NULL,
	    NULL, NULL, 0);
	swap_maxcontig = (1024 * 1024) >> PAGESHIFT;	/* 1MB of pages */
}

/*
 * Global anon slot hash table manipulation.
 */

static void
anon_addhash(struct anon *ap)
{
	int index;

	ASSERT(MUTEX_HELD(&anonhash_lock[AH_LOCK(ap->an_vp, ap->an_off)]));
	index = ANON_HASH(ap->an_vp, ap->an_off);
	ap->an_hash = anon_hash[index];
	anon_hash[index] = ap;
}

static void
anon_rmhash(struct anon *ap)
{
	struct anon **app;

	ASSERT(MUTEX_HELD(&anonhash_lock[AH_LOCK(ap->an_vp, ap->an_off)]));

	for (app = &anon_hash[ANON_HASH(ap->an_vp, ap->an_off)];
	    *app; app = &((*app)->an_hash)) {
		if (*app == ap) {
			*app = ap->an_hash;
			break;
		}
	}
}

/*
 * The anon array interfaces: functions for allocating and freeing
 * arrays of pointers, and for returning/setting entries in an array
 * of pointers for a given offset.  (An illustrative usage sketch
 * follows anon_release() below.)
 *
 * Create the list of pointers.
 */
struct anon_hdr *
anon_create(pgcnt_t npages, int flags)
{
	struct anon_hdr *ahp;
	ulong_t nchunks;
	int kmemflags = (flags & ANON_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;

	if ((ahp = kmem_zalloc(sizeof (struct anon_hdr), kmemflags)) == NULL) {
		return (NULL);
	}

	mutex_init(&ahp->serial_lock, NULL, MUTEX_DEFAULT, NULL);
	/*
	 * Single level case.
	 */
	ahp->size = npages;
	if (npages <= ANON_CHUNK_SIZE || (flags & ANON_ALLOC_FORCE)) {

		if (flags & ANON_ALLOC_FORCE)
			ahp->flags |= ANON_ALLOC_FORCE;

		ahp->array_chunk = kmem_zalloc(
		    ahp->size * sizeof (struct anon *), kmemflags);

		if (ahp->array_chunk == NULL) {
			kmem_free(ahp, sizeof (struct anon_hdr));
			return (NULL);
		}
	} else {
		/*
		 * 2 Level case.
		 */
		nchunks = (ahp->size + ANON_CHUNK_OFF) >> ANON_CHUNK_SHIFT;

		ahp->array_chunk = kmem_zalloc(nchunks * sizeof (ulong_t *),
		    kmemflags);

		if (ahp->array_chunk == NULL) {
			kmem_free(ahp, sizeof (struct anon_hdr));
			return (NULL);
		}
	}
	return (ahp);
}

/*
 * Free the array of pointers
 */
void
anon_release(struct anon_hdr *ahp, pgcnt_t npages)
{
	ulong_t i;
	void **ppp;
	ulong_t nchunks;

	ASSERT(npages == ahp->size);

	/*
	 * Single level case.
	 */
	if (npages <= ANON_CHUNK_SIZE || (ahp->flags & ANON_ALLOC_FORCE)) {
		kmem_free(ahp->array_chunk, ahp->size * sizeof (struct anon *));
	} else {
		/*
		 * 2 level case.
		 */
		nchunks = (ahp->size + ANON_CHUNK_OFF) >> ANON_CHUNK_SHIFT;
		for (i = 0; i < nchunks; i++) {
			ppp = &ahp->array_chunk[i];
			if (*ppp != NULL)
				kmem_free(*ppp, PAGESIZE);
		}
		kmem_free(ahp->array_chunk, nchunks * sizeof (ulong_t *));
	}
	mutex_destroy(&ahp->serial_lock);
	kmem_free(ahp, sizeof (struct anon_hdr));
}
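
/*
 * Illustrative sketch (not part of the original source): how a client
 * of this layer might create an anon array, install a slot, look it up
 * again and tear everything down.  Real callers (e.g. segment drivers)
 * also handle locking and swap reservations, which are omitted here.
 */
#if 0
static void
anon_array_example(pgcnt_t npages)
{
	struct anon_hdr *ahp;
	struct anon *ap;

	ahp = anon_create(npages, ANON_SLEEP);	/* 1 or 2 level array */
	ap = anon_alloc(NULL, 0);		/* new slot, refcnt == 1 */
	(void) anon_set_ptr(ahp, 0, ap, ANON_SLEEP);
	ASSERT(anon_get_ptr(ahp, 0) == ap);
	(void) anon_set_ptr(ahp, 0, NULL, ANON_SLEEP);
	anon_decref(ap);			/* frees slot and swap */
	anon_release(ahp, npages);
}
#endif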

/*
 * Return the pointer from the list for a
 * specified anon index.
 */
struct anon *
anon_get_ptr(struct anon_hdr *ahp, ulong_t an_idx)
{
	struct anon **app;

	ASSERT(an_idx < ahp->size);

	/*
	 * Single level case.
	 */
	if ((ahp->size <= ANON_CHUNK_SIZE) || (ahp->flags & ANON_ALLOC_FORCE)) {
		return ((struct anon *)
		    ((uintptr_t)ahp->array_chunk[an_idx] & ANON_PTRMASK));
	} else {

		/*
		 * 2 level case.
		 */
		app = ahp->array_chunk[an_idx >> ANON_CHUNK_SHIFT];
		if (app) {
			return ((struct anon *)
			    ((uintptr_t)app[an_idx & ANON_CHUNK_OFF] &
			    ANON_PTRMASK));
		} else {
			return (NULL);
		}
	}
}
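
/*
 * Worked example for the 2-level lookup above (illustrative, assuming
 * ANON_CHUNK_SHIFT == 10, so ANON_CHUNK_SIZE == 1024 and
 * ANON_CHUNK_OFF == 1023): for an_idx == 5000 the level-one chunk is
 * 5000 >> 10 == 4 and the offset within it is 5000 & 1023 == 904, so
 * the slot lives at ahp->array_chunk[4][904], masked with ANON_PTRMASK
 * to strip the per-slot flag bits.
 */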

/*
 * Return the anon pointer for the first valid entry in the anon list,
 * starting from the given index.
 */
struct anon *
anon_get_next_ptr(struct anon_hdr *ahp, ulong_t *index)
{
	struct anon *ap;
	struct anon **app;
	ulong_t chunkoff;
	ulong_t i;
	ulong_t j;
	pgcnt_t size;

	i = *index;
	size = ahp->size;

	ASSERT(i < size);

	if ((size <= ANON_CHUNK_SIZE) || (ahp->flags & ANON_ALLOC_FORCE)) {
		/*
		 * 1 level case
		 */
		while (i < size) {
			ap = (struct anon *)
			    ((uintptr_t)ahp->array_chunk[i] & ANON_PTRMASK);
			if (ap) {
				*index = i;
				return (ap);
			}
			i++;
		}
	} else {
		/*
		 * 2 level case
		 */
		chunkoff = i & ANON_CHUNK_OFF;
		while (i < size) {
			app = ahp->array_chunk[i >> ANON_CHUNK_SHIFT];
			if (app)
				for (j = chunkoff; j < ANON_CHUNK_SIZE; j++) {
					ap = (struct anon *)
					    ((uintptr_t)app[j] & ANON_PTRMASK);
					if (ap) {
						*index = i + (j - chunkoff);
						return (ap);
					}
				}
			chunkoff = 0;
			i = (i + ANON_CHUNK_SIZE) & ~ANON_CHUNK_OFF;
		}
	}
	*index = size;
	return (NULL);
}

/*
 * Set list entry with a given pointer for a specified offset
 */
int
anon_set_ptr(struct anon_hdr *ahp, ulong_t an_idx, struct anon *ap, int flags)
{
	void		**ppp;
	struct anon	**app;
	int kmemflags = (flags & ANON_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
	uintptr_t	*ap_addr;

	ASSERT(an_idx < ahp->size);

	/*
	 * Single level case.
	 */
	if (ahp->size <= ANON_CHUNK_SIZE || (ahp->flags & ANON_ALLOC_FORCE)) {
		ap_addr = (uintptr_t *)&ahp->array_chunk[an_idx];
	} else {

		/*
		 * 2 level case.
		 */
		ppp = &ahp->array_chunk[an_idx >> ANON_CHUNK_SHIFT];

		ASSERT(ppp != NULL);
		if (*ppp == NULL) {
			mutex_enter(&ahp->serial_lock);
			ppp = &ahp->array_chunk[an_idx >> ANON_CHUNK_SHIFT];
			if (*ppp == NULL) {
				*ppp = kmem_zalloc(PAGESIZE, kmemflags);
				if (*ppp == NULL) {
					mutex_exit(&ahp->serial_lock);
					return (ENOMEM);
				}
			}
			mutex_exit(&ahp->serial_lock);
		}
		app = *ppp;
		ap_addr = (uintptr_t *)&app[an_idx & ANON_CHUNK_OFF];
	}
	*ap_addr = (*ap_addr & ~ANON_PTRMASK) | (uintptr_t)ap;
	return (0);
}

/*
 * Copy anon array into a given new anon array
 */
int
anon_copy_ptr(struct anon_hdr *sahp, ulong_t s_idx,
	struct anon_hdr *dahp, ulong_t d_idx,
	pgcnt_t npages, int flags)
{
	void **sapp, **dapp;
	void *ap;
	int kmemflags = (flags & ANON_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;

	ASSERT((s_idx < sahp->size) && (d_idx < dahp->size));
	ASSERT((npages <= sahp->size) && (npages <= dahp->size));

	/*
	 * Both arrays are 1 level.
	 */
	if (((sahp->size <= ANON_CHUNK_SIZE) &&
	    (dahp->size <= ANON_CHUNK_SIZE)) ||
	    ((sahp->flags & ANON_ALLOC_FORCE) &&
	    (dahp->flags & ANON_ALLOC_FORCE))) {

		bcopy(&sahp->array_chunk[s_idx], &dahp->array_chunk[d_idx],
		    npages * sizeof (struct anon *));
		return (0);
	}

	/*
	 * Both arrays are 2 levels.
	 */
	if (sahp->size > ANON_CHUNK_SIZE &&
	    dahp->size > ANON_CHUNK_SIZE &&
	    ((sahp->flags & ANON_ALLOC_FORCE) == 0) &&
	    ((dahp->flags & ANON_ALLOC_FORCE) == 0)) {

		ulong_t sapidx, dapidx;
		ulong_t *sap, *dap;
		ulong_t chknp;

		while (npages != 0) {

			sapidx = s_idx & ANON_CHUNK_OFF;
			dapidx = d_idx & ANON_CHUNK_OFF;
			chknp = ANON_CHUNK_SIZE - MAX(sapidx, dapidx);
			if (chknp > npages)
				chknp = npages;

			sapp = &sahp->array_chunk[s_idx >> ANON_CHUNK_SHIFT];
			if ((sap = *sapp) != NULL) {
				dapp = &dahp->array_chunk[d_idx >>
				    ANON_CHUNK_SHIFT];
				if ((dap = *dapp) == NULL) {
					*dapp = kmem_zalloc(PAGESIZE,
					    kmemflags);
					if ((dap = *dapp) == NULL)
						return (ENOMEM);
				}
				bcopy((sap + sapidx), (dap + dapidx),
				    chknp << ANON_PTRSHIFT);
			}
			s_idx += chknp;
			d_idx += chknp;
			npages -= chknp;
		}
		return (0);
	}

	/*
	 * At least one of the arrays is 2 level.
	 */
	while (npages--) {
		if ((ap = anon_get_ptr(sahp, s_idx)) != NULL) {
			ASSERT(!ANON_ISBUSY(anon_get_slot(sahp, s_idx)));
			if (anon_set_ptr(dahp, d_idx, ap, flags) == ENOMEM)
				return (ENOMEM);
		}
		s_idx++;
		d_idx++;
	}
	return (0);
}
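
/*
 * Worked example for the 2-level copy loop above (illustrative,
 * assuming ANON_CHUNK_SIZE == 1024): with s_idx == 1000, d_idx == 2060
 * and npages == 200, sapidx == 1000, dapidx == 12 and
 * chknp == 1024 - MAX(1000, 12) == 24, so the first pass copies 24
 * slots (up to the source chunk boundary); the loop then continues
 * with s_idx == 1024, d_idx == 2084 and npages == 176.
 */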

/*
 * ANON_INITBUF is a convenience macro for anon_grow() below. It
 * takes a buffer dst, which is at least as large as buffer src. It
 * does a bcopy from src into dst, and then bzeros the extra bytes
 * of dst. If tail is set, the data in src is tail aligned within
 * dst instead of head aligned.
 */

#define	ANON_INITBUF(src, srclen, dst, dstsize, tail)			      \
	if (tail) {							      \
		bzero((dst), (dstsize) - (srclen));			      \
		bcopy((src), (char *)(dst) + (dstsize) - (srclen), (srclen)); \
	} else {							      \
		bcopy((src), (dst), (srclen));				      \
		bzero((char *)(dst) + (srclen), (dstsize) - (srclen));	      \
	}
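
/*
 * Example (illustrative): with srclen == 3, dstsize == 8 and src
 * holding "abc", head alignment (tail == 0) leaves dst as "abc"
 * followed by five zero bytes, while tail alignment (tail != 0)
 * leaves five zero bytes followed by "abc".  anon_grow() uses tail
 * alignment when the array grows downward.
 */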

#define	ANON_1_LEVEL_INC	(ANON_CHUNK_SIZE / 8)
#define	ANON_2_LEVEL_INC	(ANON_1_LEVEL_INC * ANON_CHUNK_SIZE)

/*
 * anon_grow() is used to efficiently extend an existing anon array.
 * startidx_p points to the index into the anon array of the first page
 * that is in use. curpages is the number of pages in use, starting at
 * *startidx_p. newpages is the number of additional pages desired.
 *
 * If startidx_p == NULL, startidx is taken to be 0 and cannot be changed.
 *
 * The growth is done by creating a new top level of the anon array,
 * and (if the array is 2-level) reusing the existing second level arrays.
 *
 * flags can be used to specify ANON_NOSLEEP and ANON_GROWDOWN.
 *
 * Returns the new number of pages in the anon array.
 */

pgcnt_t
anon_grow(struct anon_hdr *ahp, ulong_t *startidx_p, pgcnt_t curpages,
    pgcnt_t newpages, int flags)
{
	ulong_t startidx = startidx_p ? *startidx_p : 0;
	pgcnt_t osz = ahp->size, nsz;
	pgcnt_t oelems, nelems, totpages;
	void **level1;
	int kmemflags = (flags & ANON_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
	int growdown = (flags & ANON_GROWDOWN);
	size_t newarrsz, oldarrsz;
	void *level2;

	ASSERT(!(startidx_p == NULL && growdown));
	ASSERT(startidx + curpages <= ahp->size);

	/*
	 * Determine the total number of pages needed in the new
	 * anon array. If growing down, totpages is all pages from
	 * startidx through the end of the array, plus <newpages>
	 * pages. If growing up, keep all pages from page 0 through
	 * the last page currently in use, plus <newpages> pages.
	 */

	if (growdown)
		totpages = osz - startidx + newpages;
	else
		totpages = startidx + curpages + newpages;

	/* If the array is already large enough, just return. */

	if (osz >= totpages) {
		nsz = osz;
		goto out;
	}

	/*
	 * osz/nsz are the total numbers of pages represented by the array.
	 * oelems/nelems are the number of pointers in the top level array.
	 *
	 * Will the new anon array be one level or two levels?
	 */

	if (totpages <= ANON_CHUNK_SIZE || (ahp->flags & ANON_ALLOC_FORCE)) {
		nsz = P2ROUNDUP(totpages, ANON_1_LEVEL_INC);
		oelems = osz;
		nelems = nsz;
	} else {
		nsz = P2ROUNDUP(totpages, ANON_2_LEVEL_INC);
		oelems = (osz + ANON_CHUNK_OFF) >> ANON_CHUNK_SHIFT;
		nelems = nsz >> ANON_CHUNK_SHIFT;
	}

	newarrsz = nelems * sizeof (void *);
	level1 = kmem_alloc(newarrsz, kmemflags);
	if (level1 == NULL)
		return (0);

	/* Are we converting from a one level to a two level anon array? */

	if (nsz > ANON_CHUNK_SIZE && osz <= ANON_CHUNK_SIZE &&
	    !(ahp->flags & ANON_ALLOC_FORCE)) {
		/*
		 * Yes, we're converting to a two level. Reuse old level 1
		 * as new level 2 if it is exactly PAGESIZE. Otherwise
		 * alloc a new level 2 and copy the old level 1 data into it.
		 */

		if (osz == ANON_CHUNK_SIZE) {
			level2 = (void *)ahp->array_chunk;
		} else {
			level2 = kmem_alloc(PAGESIZE, kmemflags);
			if (level2 == NULL) {
				kmem_free(level1, newarrsz);
				return (0);
			}
			oldarrsz = osz * sizeof (void *);

			ANON_INITBUF(ahp->array_chunk, oldarrsz,
			    level2, PAGESIZE, growdown);
			kmem_free(ahp->array_chunk, oldarrsz);
		}
		bzero(level1, newarrsz);
		if (growdown)
			level1[nelems - 1] = level2;
		else
			level1[0] = level2;
	} else {
		oldarrsz = oelems * sizeof (void *);

		ANON_INITBUF(ahp->array_chunk, oldarrsz,
		    level1, newarrsz, growdown);
		kmem_free(ahp->array_chunk, oldarrsz);
	}

	ahp->array_chunk = level1;
	ahp->size = nsz;
out:
	if (growdown)
		*startidx_p = nsz - totpages;
	return (nsz);
}
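
/*
 * Illustrative sketch (not part of the original source): extending an
 * anon array downward, as a stack-like segment might, without blocking
 * for memory.  anon_grow() updates *startidx to the new index of the
 * first in-use page.
 */
#if 0
static int
grow_down_example(struct anon_hdr *ahp, ulong_t *startidx, pgcnt_t curpages,
    pgcnt_t newpages)
{
	if (anon_grow(ahp, startidx, curpages, newpages,
	    ANON_GROWDOWN | ANON_NOSLEEP) == 0)
		return (ENOMEM);	/* could not extend the array */
	return (0);
}
#endif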

/*
 * Called from clock handler to sync ani_free value.
 */

void
set_anoninfo(void)
{
	int	ix;
	pgcnt_t	total = 0;

	for (ix = 0; ix < ANI_MAX_POOL; ix++) {
		total += ani_free_pool[ix].ani_count;
	}
	k_anoninfo.ani_free = total;
}

/*
 * Reserve anon space.
 *
 * It's no longer simply a matter of incrementing ani_resv to
 * reserve swap space, we need to check memory-based as well
 * as disk-backed (physical) swap.  The following algorithm
 * is used:
 *	Check the space on physical swap
 *		i.e. amount needed < ani_max - ani_phys_resv
 *	If we are swapping on swapfs check
 *		amount needed < (availrmem - swapfs_minfree)
 * Since the algorithm to check for the quantity of swap space is
 * almost the same as that for reserving it, we'll just use anon_resvmem
 * with a flag to decrement availrmem.
 *
 * Return non-zero on success.
 */
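/*
 * Worked example (illustrative numbers): with ani_max == 1000 pages,
 * ani_phys_resv == 900 and a request of npages == 300, pswap_pages is
 * 1000 - 900 == 100; the remaining 100 physical slots are consumed and
 * the other mswap_pages == 200 pages must come from availrmem, subject
 * to the swapfs_minfree/swapfs_reserve checks below.
 */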
int
anon_resvmem(size_t size, uint_t takemem)
{
	pgcnt_t npages = btopr(size);
	pgcnt_t mswap_pages = 0;
	pgcnt_t pswap_pages = 0;

	mutex_enter(&anoninfo_lock);

	/*
	 * pswap_pages is the number of pages we can take from
	 * physical (i.e. disk-backed) swap.
	 */
	ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
	pswap_pages = k_anoninfo.ani_max - k_anoninfo.ani_phys_resv;

	ANON_PRINT(A_RESV,
	    ("anon_resvmem: npages %lu takemem %u pswap %lu caller %p\n",
	    npages, takemem, pswap_pages, (void *)caller()));

	if (npages <= pswap_pages) {
		/*
		 * we have enough space on a physical swap
		 */
		if (takemem)
			k_anoninfo.ani_phys_resv += npages;
		mutex_exit(&anoninfo_lock);
		return (1);
	} else if (pswap_pages != 0) {
		/*
		 * we have some space on a physical swap
		 */
		if (takemem) {
			/*
			 * use up remainder of phys swap
			 */
			k_anoninfo.ani_phys_resv += pswap_pages;
			ASSERT(k_anoninfo.ani_phys_resv == k_anoninfo.ani_max);
		}
	}
	/*
	 * since (npages > pswap_pages) we need mem swap
	 * mswap_pages is the number of pages needed from availrmem
	 */
	ASSERT(npages > pswap_pages);
	mswap_pages = npages - pswap_pages;

	ANON_PRINT(A_RESV, ("anon_resvmem: need %ld pages from memory\n",
	    mswap_pages));

	/*
	 * Privileged processes can reserve memory as swap as long as
	 * availrmem remains greater than swapfs_minfree; in the case of
	 * non-privileged processes, memory can be reserved as swap only
	 * if availrmem doesn't fall below (swapfs_minfree +
	 * swapfs_reserve).  Thus, a swapfs_reserve amount of memory-based
	 * swap is not available to non-privileged processes.  This
	 * protects daemons such as the automounter from dying as a result
	 * of application processes eating away almost the entire
	 * memory-based swap.  This safeguard becomes useless if apps are
	 * run with root access.
	 *
	 * swapfs_reserve is the minimum of 4MB or 1/16 of physmem.
	 */
	mutex_enter(&freemem_lock);
	if (availrmem > (swapfs_minfree + swapfs_reserve + mswap_pages) ||
	    (availrmem > (swapfs_minfree + mswap_pages) &&
	    secpolicy_resource(CRED()) == 0)) {

		if (takemem) {
			/*
			 * Take the memory from the rest of the system.
			 */
			availrmem -= mswap_pages;
			mutex_exit(&freemem_lock);
			k_anoninfo.ani_mem_resv += mswap_pages;
			ANI_ADD(mswap_pages);
			ANON_PRINT((A_RESV | A_MRESV),
			    ("anon_resvmem: took %ld pages of availrmem\n",
			    mswap_pages));
		} else {
			mutex_exit(&freemem_lock);
		}

		ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
		mutex_exit(&anoninfo_lock);
		return (1);

	} else {
		/*
		 * Fail if not enough memory
		 */

		if (takemem) {
			k_anoninfo.ani_phys_resv -= pswap_pages;
		}

		mutex_exit(&freemem_lock);
		mutex_exit(&anoninfo_lock);
		ANON_PRINT(A_RESV,
		    ("anon_resvmem: not enough space from swapfs\n"));
		return (0);
	}
}


/*
 * Give back an anon reservation.
 */
void
anon_unresv(size_t size)
{
	pgcnt_t npages = btopr(size);
	spgcnt_t mem_free_pages = 0;
	pgcnt_t phys_free_slots;
#ifdef	ANON_DEBUG
	pgcnt_t mem_resv;
#endif

	mutex_enter(&anoninfo_lock);

	ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
	/*
	 * If some of this reservation belonged to swapfs
	 * give it back to availrmem.
	 * ani_mem_resv is the amount of availrmem swapfs has reserved,
	 * but some of that memory could be locked by segspt, so we can
	 * only return the non-locked portion of ani_mem_resv to availrmem.
	 */
	if (k_anoninfo.ani_mem_resv > k_anoninfo.ani_locked_swap) {
		ANON_PRINT((A_RESV | A_MRESV),
		    ("anon_unresv: growing availrmem by %ld pages\n",
		    MIN(k_anoninfo.ani_mem_resv, npages)));

		mem_free_pages = MIN((spgcnt_t)(k_anoninfo.ani_mem_resv -
		    k_anoninfo.ani_locked_swap), npages);
		mutex_enter(&freemem_lock);
		availrmem += mem_free_pages;
		mutex_exit(&freemem_lock);
		k_anoninfo.ani_mem_resv -= mem_free_pages;

		ANI_ADD(-mem_free_pages);
	}
	/*
	 * The remainder of the pages is returned to phys swap
	 */
	ASSERT(npages >= mem_free_pages);
	phys_free_slots = npages - mem_free_pages;

	if (phys_free_slots) {
		k_anoninfo.ani_phys_resv -= phys_free_slots;
	}

#ifdef	ANON_DEBUG
	mem_resv = k_anoninfo.ani_mem_resv;
#endif

	ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
	ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);

	mutex_exit(&anoninfo_lock);

	ANON_PRINT(A_RESV, ("anon_unresv: %lu, tot %lu, caller %p\n",
	    npages, mem_resv, (void *)caller()));
}

/*
 * Allocate an anon slot, add it to the global hash, and return it.
 */
struct anon *
anon_alloc(struct vnode *vp, anoff_t off)
{
	struct anon	*ap;
	kmutex_t	*ahm;

	ap = kmem_cache_alloc(anon_cache, KM_SLEEP);
	if (vp == NULL) {
		swap_alloc(ap);
	} else {
		ap->an_vp = vp;
		ap->an_off = off;
	}
	ap->an_refcnt = 1;
	ap->an_pvp = NULL;
	ap->an_poff = 0;
	ahm = &anonhash_lock[AH_LOCK(ap->an_vp, ap->an_off)];
	mutex_enter(ahm);
	anon_addhash(ap);
	mutex_exit(ahm);
	ANI_ADD(-1);
	ANON_PRINT(A_ANON, ("anon_alloc: returning ap %p, vp %p\n",
	    (void *)ap, (ap ? (void *)ap->an_vp : NULL)));
	return (ap);
}

/*
 * Decrement the reference count of an anon page.
 * If reference count goes to zero, free it and
 * its associated page (if any).
 */
void
anon_decref(struct anon *ap)
{
	page_t *pp;
	struct vnode *vp;
	anoff_t off;
	kmutex_t *ahm;

	ahm = &anonhash_lock[AH_LOCK(ap->an_vp, ap->an_off)];
	mutex_enter(ahm);
	ASSERT(ap->an_refcnt != 0);
	if (ap->an_refcnt == 0)
		panic("anon_decref: slot count 0");
	if (--ap->an_refcnt == 0) {
		swap_xlate(ap, &vp, &off);
		mutex_exit(ahm);

		/*
		 * If there is a page for this anon slot we will need to
		 * call VN_DISPOSE to get rid of the vp association and
		 * put the page back on the free list as really free.
		 * Acquire the "exclusive" lock to ensure that any
		 * pending i/o always completes before the swap slot
		 * is freed.
		 */
		pp = page_lookup(vp, (u_offset_t)off, SE_EXCL);

		/*
		 * If there was a page, we've synchronized on it (getting
		 * the exclusive lock is as good as getting the iolock)
		 * so now we can free the physical backing store. Also, this
		 * is where we would free the name of the anonymous page
		 * (swap_free(ap)), a no-op in the current implementation.
		 */
949*0Sstevel@tonic-gate 		mutex_enter(ahm);
950*0Sstevel@tonic-gate 		ASSERT(ap->an_refcnt == 0);
951*0Sstevel@tonic-gate 		anon_rmhash(ap);
952*0Sstevel@tonic-gate 		if (ap->an_pvp)
953*0Sstevel@tonic-gate 			swap_phys_free(ap->an_pvp, ap->an_poff, PAGESIZE);
954*0Sstevel@tonic-gate 		mutex_exit(ahm);
955*0Sstevel@tonic-gate 
956*0Sstevel@tonic-gate 		if (pp != NULL) {
957*0Sstevel@tonic-gate 			/*LINTED: constant in conditional context */
958*0Sstevel@tonic-gate 			VN_DISPOSE(pp, B_INVAL, 0, kcred);
959*0Sstevel@tonic-gate 		}
960*0Sstevel@tonic-gate 		ANON_PRINT(A_ANON, ("anon_decref: free ap %p, vp %p\n",
961*0Sstevel@tonic-gate 		    (void *)ap, (void *)ap->an_vp));
962*0Sstevel@tonic-gate 		kmem_cache_free(anon_cache, ap);
963*0Sstevel@tonic-gate 
964*0Sstevel@tonic-gate 		ANI_ADD(1);
965*0Sstevel@tonic-gate 	} else {
966*0Sstevel@tonic-gate 		mutex_exit(ahm);
967*0Sstevel@tonic-gate 	}
968*0Sstevel@tonic-gate }
969*0Sstevel@tonic-gate 
970*0Sstevel@tonic-gate static int
971*0Sstevel@tonic-gate anon_share(struct anon_hdr *ahp, ulong_t anon_index, pgcnt_t nslots)
972*0Sstevel@tonic-gate {
973*0Sstevel@tonic-gate 	struct anon *ap;
974*0Sstevel@tonic-gate 
975*0Sstevel@tonic-gate 	while (nslots-- > 0) {
976*0Sstevel@tonic-gate 		if ((ap = anon_get_ptr(ahp, anon_index)) != NULL &&
977*0Sstevel@tonic-gate 		    ap->an_refcnt > 1)
978*0Sstevel@tonic-gate 			return (1);
979*0Sstevel@tonic-gate 		anon_index++;
980*0Sstevel@tonic-gate 	}
981*0Sstevel@tonic-gate 
982*0Sstevel@tonic-gate 	return (0);
983*0Sstevel@tonic-gate }
984*0Sstevel@tonic-gate 
985*0Sstevel@tonic-gate static void
986*0Sstevel@tonic-gate anon_decref_pages(
987*0Sstevel@tonic-gate 	struct anon_hdr *ahp,
988*0Sstevel@tonic-gate 	ulong_t an_idx,
989*0Sstevel@tonic-gate 	uint_t szc)
990*0Sstevel@tonic-gate {
991*0Sstevel@tonic-gate 	struct anon *ap = anon_get_ptr(ahp, an_idx);
992*0Sstevel@tonic-gate 	kmutex_t *ahmpages = NULL;
993*0Sstevel@tonic-gate 	page_t *pp;
994*0Sstevel@tonic-gate 	pgcnt_t pgcnt = page_get_pagecnt(szc);
995*0Sstevel@tonic-gate 	pgcnt_t i;
996*0Sstevel@tonic-gate 	struct vnode *vp;
997*0Sstevel@tonic-gate 	anoff_t   off;
998*0Sstevel@tonic-gate 	kmutex_t *ahm;
999*0Sstevel@tonic-gate #ifdef DEBUG
1000*0Sstevel@tonic-gate 	int refcnt = 1;
1001*0Sstevel@tonic-gate #endif
1002*0Sstevel@tonic-gate 
1003*0Sstevel@tonic-gate 	ASSERT(szc != 0);
1004*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
1005*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(an_idx, pgcnt));
1006*0Sstevel@tonic-gate 
1007*0Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.decrefpages[0]);
1008*0Sstevel@tonic-gate 
1009*0Sstevel@tonic-gate 	if (ap != NULL) {
1010*0Sstevel@tonic-gate 		ahmpages = &anonpages_hash_lock[AH_LOCK(ap->an_vp, ap->an_off)];
1011*0Sstevel@tonic-gate 		mutex_enter(ahmpages);
1012*0Sstevel@tonic-gate 		ASSERT((refcnt = ap->an_refcnt) != 0);
1013*0Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.decrefpages[1]);
1014*0Sstevel@tonic-gate 		if (ap->an_refcnt == 1) {
1015*0Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.decrefpages[2]);
1016*0Sstevel@tonic-gate 			ASSERT(!anon_share(ahp, an_idx, pgcnt));
1017*0Sstevel@tonic-gate 			mutex_exit(ahmpages);
1018*0Sstevel@tonic-gate 			ahmpages = NULL;
1019*0Sstevel@tonic-gate 		}
1020*0Sstevel@tonic-gate 	}
1021*0Sstevel@tonic-gate 
1022*0Sstevel@tonic-gate 	i = 0;
1023*0Sstevel@tonic-gate 	while (i < pgcnt) {
1024*0Sstevel@tonic-gate 		if ((ap = anon_get_ptr(ahp, an_idx + i)) == NULL) {
1025*0Sstevel@tonic-gate 			ASSERT(refcnt == 1 && ahmpages == NULL);
1026*0Sstevel@tonic-gate 			i++;
1027*0Sstevel@tonic-gate 			continue;
1028*0Sstevel@tonic-gate 		}
1029*0Sstevel@tonic-gate 		ASSERT(ap->an_refcnt == refcnt);
1030*0Sstevel@tonic-gate 		ASSERT(ahmpages != NULL || ap->an_refcnt == 1);
1031*0Sstevel@tonic-gate 		ASSERT(ahmpages == NULL || ap->an_refcnt > 1);
1032*0Sstevel@tonic-gate 
1033*0Sstevel@tonic-gate 		if (ahmpages == NULL) {
1034*0Sstevel@tonic-gate 			swap_xlate(ap, &vp, &off);
1035*0Sstevel@tonic-gate 			pp = page_lookup(vp, (u_offset_t)off, SE_EXCL);
1036*0Sstevel@tonic-gate 			if (pp == NULL || pp->p_szc == 0) {
1037*0Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.decrefpages[3]);
1038*0Sstevel@tonic-gate 				ahm = &anonhash_lock[AH_LOCK(ap->an_vp,
1039*0Sstevel@tonic-gate 				    ap->an_off)];
1040*0Sstevel@tonic-gate 				(void) anon_set_ptr(ahp, an_idx + i, NULL,
1041*0Sstevel@tonic-gate 				    ANON_SLEEP);
1042*0Sstevel@tonic-gate 				mutex_enter(ahm);
1043*0Sstevel@tonic-gate 				ap->an_refcnt--;
1044*0Sstevel@tonic-gate 				ASSERT(ap->an_refcnt == 0);
1045*0Sstevel@tonic-gate 				anon_rmhash(ap);
1046*0Sstevel@tonic-gate 				if (ap->an_pvp)
1047*0Sstevel@tonic-gate 					swap_phys_free(ap->an_pvp, ap->an_poff,
1048*0Sstevel@tonic-gate 					    PAGESIZE);
1049*0Sstevel@tonic-gate 				mutex_exit(ahm);
1050*0Sstevel@tonic-gate 				if (pp != NULL) {
1051*0Sstevel@tonic-gate 					VM_STAT_ADD(anonvmstats.decrefpages[4]);
1052*0Sstevel@tonic-gate 					/*LINTED*/
1053*0Sstevel@tonic-gate 					VN_DISPOSE(pp, B_INVAL, 0, kcred);
1054*0Sstevel@tonic-gate 				}
1055*0Sstevel@tonic-gate 				kmem_cache_free(anon_cache, ap);
1056*0Sstevel@tonic-gate 				ANI_ADD(1);
1057*0Sstevel@tonic-gate 				i++;
1058*0Sstevel@tonic-gate 			} else {
1059*0Sstevel@tonic-gate 				pgcnt_t j;
1060*0Sstevel@tonic-gate 				pgcnt_t curpgcnt =
1061*0Sstevel@tonic-gate 				    page_get_pagecnt(pp->p_szc);
1062*0Sstevel@tonic-gate 				size_t ppasize = curpgcnt * sizeof (page_t *);
1063*0Sstevel@tonic-gate 				page_t **ppa = kmem_alloc(ppasize, KM_SLEEP);
1064*0Sstevel@tonic-gate 				int dispose = 0;
1065*0Sstevel@tonic-gate 
1066*0Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.decrefpages[5]);
1067*0Sstevel@tonic-gate 
1068*0Sstevel@tonic-gate 				ASSERT(pp->p_szc <= szc);
1069*0Sstevel@tonic-gate 				ASSERT(IS_P2ALIGNED(curpgcnt, curpgcnt));
1070*0Sstevel@tonic-gate 				ASSERT(IS_P2ALIGNED(i, curpgcnt));
1071*0Sstevel@tonic-gate 				ASSERT(i + curpgcnt <= pgcnt);
1072*0Sstevel@tonic-gate 				ASSERT(!(page_pptonum(pp) & (curpgcnt - 1)));
1073*0Sstevel@tonic-gate 				ppa[0] = pp;
1074*0Sstevel@tonic-gate 				for (j = i + 1; j < i + curpgcnt; j++) {
1075*0Sstevel@tonic-gate 					ap = anon_get_ptr(ahp, an_idx + j);
1076*0Sstevel@tonic-gate 					ASSERT(ap != NULL &&
1077*0Sstevel@tonic-gate 					    ap->an_refcnt == 1);
1078*0Sstevel@tonic-gate 					swap_xlate(ap, &vp, &off);
1079*0Sstevel@tonic-gate 					pp = page_lookup(vp, (u_offset_t)off,
1080*0Sstevel@tonic-gate 					    SE_EXCL);
1081*0Sstevel@tonic-gate 					if (pp == NULL)
1082*0Sstevel@tonic-gate 						panic("anon_decref_pages: "
1083*0Sstevel@tonic-gate 						    "no page");
1084*0Sstevel@tonic-gate 
1085*0Sstevel@tonic-gate 					(void) hat_pageunload(pp,
1086*0Sstevel@tonic-gate 					    HAT_FORCE_PGUNLOAD);
1087*0Sstevel@tonic-gate 					ASSERT(pp->p_szc == ppa[0]->p_szc);
1088*0Sstevel@tonic-gate 					ASSERT(page_pptonum(pp) - 1 ==
1089*0Sstevel@tonic-gate 					    page_pptonum(ppa[j - i - 1]));
1090*0Sstevel@tonic-gate 					ppa[j - i] = pp;
1091*0Sstevel@tonic-gate 					if (ap->an_pvp != NULL &&
1092*0Sstevel@tonic-gate 					    !vn_matchopval(ap->an_pvp,
1093*0Sstevel@tonic-gate 						VOPNAME_DISPOSE,
1094*0Sstevel@tonic-gate 						(fs_generic_func_p)fs_dispose))
1095*0Sstevel@tonic-gate 						dispose = 1;
1096*0Sstevel@tonic-gate 				}
1097*0Sstevel@tonic-gate 				if (!dispose) {
1098*0Sstevel@tonic-gate 					VM_STAT_ADD(anonvmstats.decrefpages[6]);
1099*0Sstevel@tonic-gate 					page_destroy_pages(ppa[0]);
1100*0Sstevel@tonic-gate 				} else {
1101*0Sstevel@tonic-gate 					VM_STAT_ADD(anonvmstats.decrefpages[7]);
1102*0Sstevel@tonic-gate 					for (j = 0; j < curpgcnt; j++) {
1103*0Sstevel@tonic-gate 						ASSERT(PAGE_EXCL(ppa[j]));
1104*0Sstevel@tonic-gate 						ppa[j]->p_szc = 0;
1105*0Sstevel@tonic-gate 					}
1106*0Sstevel@tonic-gate 					for (j = 0; j < curpgcnt; j++) {
1107*0Sstevel@tonic-gate 						ASSERT(!hat_page_is_mapped(
1108*0Sstevel@tonic-gate 						    ppa[j]));
1109*0Sstevel@tonic-gate 						/*LINTED*/
1110*0Sstevel@tonic-gate 						VN_DISPOSE(ppa[j], B_INVAL, 0,
1111*0Sstevel@tonic-gate 						    kcred);
1112*0Sstevel@tonic-gate 					}
1113*0Sstevel@tonic-gate 				}
1114*0Sstevel@tonic-gate 				kmem_free(ppa, ppasize);
1115*0Sstevel@tonic-gate 				for (j = i; j < i + curpgcnt; j++) {
1116*0Sstevel@tonic-gate 					ap = anon_get_ptr(ahp, an_idx + j);
1117*0Sstevel@tonic-gate 					ASSERT(ap != NULL &&
1118*0Sstevel@tonic-gate 					    ap->an_refcnt == 1);
1119*0Sstevel@tonic-gate 					ahm = &anonhash_lock[AH_LOCK(ap->an_vp,
1120*0Sstevel@tonic-gate 					    ap->an_off)];
1121*0Sstevel@tonic-gate 					(void) anon_set_ptr(ahp, an_idx + j,
1122*0Sstevel@tonic-gate 					    NULL, ANON_SLEEP);
1123*0Sstevel@tonic-gate 					mutex_enter(ahm);
1124*0Sstevel@tonic-gate 					ap->an_refcnt--;
1125*0Sstevel@tonic-gate 					ASSERT(ap->an_refcnt == 0);
1126*0Sstevel@tonic-gate 					anon_rmhash(ap);
1127*0Sstevel@tonic-gate 					if (ap->an_pvp)
1128*0Sstevel@tonic-gate 						swap_phys_free(ap->an_pvp,
1129*0Sstevel@tonic-gate 							ap->an_poff, PAGESIZE);
1130*0Sstevel@tonic-gate 					mutex_exit(ahm);
1131*0Sstevel@tonic-gate 					kmem_cache_free(anon_cache, ap);
1132*0Sstevel@tonic-gate 					ANI_ADD(1);
1133*0Sstevel@tonic-gate 				}
1134*0Sstevel@tonic-gate 				i += curpgcnt;
1135*0Sstevel@tonic-gate 			}
1136*0Sstevel@tonic-gate 		} else {
1137*0Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.decrefpages[8]);
1138*0Sstevel@tonic-gate 			(void) anon_set_ptr(ahp, an_idx + i, NULL, ANON_SLEEP);
1139*0Sstevel@tonic-gate 			ahm = &anonhash_lock[AH_LOCK(ap->an_vp, ap->an_off)];
1140*0Sstevel@tonic-gate 			mutex_enter(ahm);
1141*0Sstevel@tonic-gate 			ap->an_refcnt--;
1142*0Sstevel@tonic-gate 			mutex_exit(ahm);
1143*0Sstevel@tonic-gate 			i++;
1144*0Sstevel@tonic-gate 		}
1145*0Sstevel@tonic-gate 	}
1146*0Sstevel@tonic-gate 
1147*0Sstevel@tonic-gate 	if (ahmpages != NULL) {
1148*0Sstevel@tonic-gate 		mutex_exit(ahmpages);
1149*0Sstevel@tonic-gate 	}
1150*0Sstevel@tonic-gate }
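/*
 * A note on the ASSERT(IS_P2ALIGNED(x, x)) idiom used above and throughout
 * this file: since IS_P2ALIGNED(v, a) tests ((v) & ((a) - 1)) == 0, aligning
 * a value to itself succeeds exactly when the value is a power of two.
 * A worked example (editor's illustration, not part of the original file):
 *
 *	IS_P2ALIGNED(8, 8)	-> (8 & 7) == 0		-> true  (8 == 2^3)
 *	IS_P2ALIGNED(6, 6)	-> (6 & 5) == 4 != 0	-> false
 */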
1151*0Sstevel@tonic-gate 
1152*0Sstevel@tonic-gate /*
1153*0Sstevel@tonic-gate  * Duplicate references to size bytes worth of anon pages.
1154*0Sstevel@tonic-gate  * Used when duplicating a segment that contains private anon pages.
1155*0Sstevel@tonic-gate  * This code assumes that the procedure calling this one has already
1156*0Sstevel@tonic-gate  * used hat_chgprot() to disable write access to the range of addresses
1157*0Sstevel@tonic-gate  * that *old actually refers to.
1158*0Sstevel@tonic-gate  */
1159*0Sstevel@tonic-gate void
1160*0Sstevel@tonic-gate anon_dup(struct anon_hdr *old, ulong_t old_idx, struct anon_hdr *new,
1161*0Sstevel@tonic-gate 			ulong_t new_idx, size_t size)
1162*0Sstevel@tonic-gate {
1163*0Sstevel@tonic-gate 	spgcnt_t npages;
1164*0Sstevel@tonic-gate 	kmutex_t *ahm;
1165*0Sstevel@tonic-gate 	struct anon *ap;
1166*0Sstevel@tonic-gate 	ulong_t off;
1167*0Sstevel@tonic-gate 	ulong_t index;
1168*0Sstevel@tonic-gate 
1169*0Sstevel@tonic-gate 	npages = btopr(size);
1170*0Sstevel@tonic-gate 	while (npages > 0) {
1171*0Sstevel@tonic-gate 		index = old_idx;
1172*0Sstevel@tonic-gate 		if ((ap = anon_get_next_ptr(old, &index)) == NULL)
1173*0Sstevel@tonic-gate 			break;
1174*0Sstevel@tonic-gate 
1175*0Sstevel@tonic-gate 		ASSERT(!ANON_ISBUSY(anon_get_slot(old, index)));
1176*0Sstevel@tonic-gate 		off = index - old_idx;
1177*0Sstevel@tonic-gate 		npages -= off;
1178*0Sstevel@tonic-gate 		if (npages <= 0)
1179*0Sstevel@tonic-gate 			break;
1180*0Sstevel@tonic-gate 
1181*0Sstevel@tonic-gate 		(void) anon_set_ptr(new, new_idx + off, ap, ANON_SLEEP);
1182*0Sstevel@tonic-gate 		ahm = &anonhash_lock[AH_LOCK(ap->an_vp, ap->an_off)];
1183*0Sstevel@tonic-gate 
1184*0Sstevel@tonic-gate 		mutex_enter(ahm);
1185*0Sstevel@tonic-gate 		ap->an_refcnt++;
1186*0Sstevel@tonic-gate 		mutex_exit(ahm);
1187*0Sstevel@tonic-gate 
1188*0Sstevel@tonic-gate 		off++;
1189*0Sstevel@tonic-gate 		new_idx += off;
1190*0Sstevel@tonic-gate 		old_idx += off;
1191*0Sstevel@tonic-gate 		npages--;
1192*0Sstevel@tonic-gate 	}
1193*0Sstevel@tonic-gate }
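/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the shape of a fork-time call to anon_dup().  The helper name, the
 * ANON_EXAMPLES guard, and the exact protections are hypothetical; the
 * hat_chgprot() step reflects the precondition documented above.
 */
#ifdef ANON_EXAMPLES
static void
anon_dup_fork_sketch(struct seg *seg, struct anon_hdr *oldahp,
    struct anon_hdr *newahp, size_t size)
{
	/*
	 * Write-protect the parent's mappings first, so a store by
	 * either process after the fork takes a copy-on-write fault
	 * instead of silently modifying the now-shared anon pages.
	 */
	hat_chgprot(seg->s_as->a_hat, seg->s_base, size,
	    PROT_READ | PROT_EXEC);

	/* Share the slots; each slot's an_refcnt is bumped by one. */
	anon_dup(oldahp, 0, newahp, 0, size);
}
#endif	/* ANON_EXAMPLES */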
1194*0Sstevel@tonic-gate 
1195*0Sstevel@tonic-gate /*
1196*0Sstevel@tonic-gate  * Just like anon_dup(), but also guarantees that there are no holes
1197*0Sstevel@tonic-gate  * (unallocated anon slots) within any large page region.  If a large page
1198*0Sstevel@tonic-gate  * region is empty in the old array it is skipped.  If a large page region
1199*0Sstevel@tonic-gate  * of the old array has one or more valid slots, any unallocated slots in
1200*0Sstevel@tonic-gate  * that region are filled in and all of them are copied to the new array.
1201*0Sstevel@tonic-gate  * If noalloc is 1, each large page region must either have no valid anon
1202*0Sstevel@tonic-gate  * slots or have all of its slots valid.
1203*0Sstevel@tonic-gate  */
1204*0Sstevel@tonic-gate void
1205*0Sstevel@tonic-gate anon_dup_fill_holes(
1206*0Sstevel@tonic-gate 	struct anon_hdr *old,
1207*0Sstevel@tonic-gate 	ulong_t old_idx,
1208*0Sstevel@tonic-gate 	struct anon_hdr *new,
1209*0Sstevel@tonic-gate 	ulong_t new_idx,
1210*0Sstevel@tonic-gate 	size_t size,
1211*0Sstevel@tonic-gate 	uint_t szc,
1212*0Sstevel@tonic-gate 	int noalloc)
1213*0Sstevel@tonic-gate {
1214*0Sstevel@tonic-gate 	struct anon	*ap;
1215*0Sstevel@tonic-gate 	spgcnt_t	npages;
1216*0Sstevel@tonic-gate 	kmutex_t	*ahm, *ahmpages = NULL;
1217*0Sstevel@tonic-gate 	pgcnt_t		pgcnt, i;
1218*0Sstevel@tonic-gate 	ulong_t		index, off;
1219*0Sstevel@tonic-gate #ifdef DEBUG
1220*0Sstevel@tonic-gate 	int		refcnt;
1221*0Sstevel@tonic-gate #endif
1222*0Sstevel@tonic-gate 
1223*0Sstevel@tonic-gate 	ASSERT(szc != 0);
1224*0Sstevel@tonic-gate 	pgcnt = page_get_pagecnt(szc);
1225*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
1226*0Sstevel@tonic-gate 	npages = btopr(size);
1227*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(npages, pgcnt));
1228*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(old_idx, pgcnt));
1229*0Sstevel@tonic-gate 
1230*0Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.dupfillholes[0]);
1231*0Sstevel@tonic-gate 
1232*0Sstevel@tonic-gate 	while (npages > 0) {
1233*0Sstevel@tonic-gate 		index = old_idx;
1234*0Sstevel@tonic-gate 
1235*0Sstevel@tonic-gate 		/*
1236*0Sstevel@tonic-gate 		 * Find the next valid slot.
1237*0Sstevel@tonic-gate 		 */
1238*0Sstevel@tonic-gate 		if (anon_get_next_ptr(old, &index) == NULL)
1239*0Sstevel@tonic-gate 			break;
1240*0Sstevel@tonic-gate 
1241*0Sstevel@tonic-gate 		ASSERT(!ANON_ISBUSY(anon_get_slot(old, index)));
1242*0Sstevel@tonic-gate 		/*
1243*0Sstevel@tonic-gate 		 * Now back up index to the beginning of the
1244*0Sstevel@tonic-gate 		 * current large page region of the old array.
1245*0Sstevel@tonic-gate 		 */
1246*0Sstevel@tonic-gate 		index = P2ALIGN(index, pgcnt);
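		/*
		 * Worked example (editor's illustration): if szc gives
		 * pgcnt == 8 and the first valid slot found above is at
		 * index 13, P2ALIGN(13, 8) backs index up to 8, the start
		 * of that large page region.
		 */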
1247*0Sstevel@tonic-gate 		off = index - old_idx;
1248*0Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(off, pgcnt));
1249*0Sstevel@tonic-gate 		npages -= off;
1250*0Sstevel@tonic-gate 		if (npages <= 0)
1251*0Sstevel@tonic-gate 			break;
1252*0Sstevel@tonic-gate 
1253*0Sstevel@tonic-gate 		/*
1254*0Sstevel@tonic-gate 		 * Fill and copy a large page region's worth
1255*0Sstevel@tonic-gate 		 * of anon slots.
1256*0Sstevel@tonic-gate 		 */
1257*0Sstevel@tonic-gate 		for (i = 0; i < pgcnt; i++) {
1258*0Sstevel@tonic-gate 			if ((ap = anon_get_ptr(old, index + i)) == NULL) {
1259*0Sstevel@tonic-gate 				if (noalloc) {
1260*0Sstevel@tonic-gate 					panic("anon_dup_fill_holes: "
1261*0Sstevel@tonic-gate 					    "empty anon slot\n");
1262*0Sstevel@tonic-gate 				}
1263*0Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.dupfillholes[1]);
1264*0Sstevel@tonic-gate 				ap = anon_alloc(NULL, 0);
1265*0Sstevel@tonic-gate 				(void) anon_set_ptr(old, index + i, ap,
1266*0Sstevel@tonic-gate 				    ANON_SLEEP);
1267*0Sstevel@tonic-gate 			} else if (i == 0) {
1268*0Sstevel@tonic-gate 				/*
1269*0Sstevel@tonic-gate 				 * make the increment of all refcnts of all
1270*0Sstevel@tonic-gate 				 * anon slots of a large page appear atomic by
1271*0Sstevel@tonic-gate 				 * getting an anonpages_hash_lock for the
1272*0Sstevel@tonic-gate 				 * first anon slot of a large page.
1273*0Sstevel@tonic-gate 				 */
1274*0Sstevel@tonic-gate 				int hash = AH_LOCK(ap->an_vp, ap->an_off);
1275*0Sstevel@tonic-gate 
1276*0Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.dupfillholes[2]);
1277*0Sstevel@tonic-gate 
1278*0Sstevel@tonic-gate 				ahmpages = &anonpages_hash_lock[hash];
1279*0Sstevel@tonic-gate 				mutex_enter(ahmpages);
1280*0Sstevel@tonic-gate 				/*LINTED*/
1281*0Sstevel@tonic-gate 				ASSERT(refcnt = ap->an_refcnt);
1282*0Sstevel@tonic-gate 
1283*0Sstevel@tonic-gate 				VM_STAT_COND_ADD(ap->an_refcnt > 1,
1284*0Sstevel@tonic-gate 				    anonvmstats.dupfillholes[3]);
1285*0Sstevel@tonic-gate 			}
1286*0Sstevel@tonic-gate 			(void) anon_set_ptr(new, new_idx + off + i, ap,
1287*0Sstevel@tonic-gate 			    ANON_SLEEP);
1288*0Sstevel@tonic-gate 			ahm = &anonhash_lock[AH_LOCK(ap->an_vp, ap->an_off)];
1289*0Sstevel@tonic-gate 			mutex_enter(ahm);
1290*0Sstevel@tonic-gate 			ASSERT(ahmpages != NULL || ap->an_refcnt == 1);
1291*0Sstevel@tonic-gate 			ASSERT(i == 0 || ahmpages == NULL ||
1292*0Sstevel@tonic-gate 			    refcnt == ap->an_refcnt);
1293*0Sstevel@tonic-gate 			ap->an_refcnt++;
1294*0Sstevel@tonic-gate 			mutex_exit(ahm);
1295*0Sstevel@tonic-gate 		}
1296*0Sstevel@tonic-gate 		if (ahmpages != NULL) {
1297*0Sstevel@tonic-gate 			mutex_exit(ahmpages);
1298*0Sstevel@tonic-gate 			ahmpages = NULL;
1299*0Sstevel@tonic-gate 		}
1300*0Sstevel@tonic-gate 		off += pgcnt;
1301*0Sstevel@tonic-gate 		new_idx += off;
1302*0Sstevel@tonic-gate 		old_idx += off;
1303*0Sstevel@tonic-gate 		npages -= pgcnt;
1304*0Sstevel@tonic-gate 	}
1305*0Sstevel@tonic-gate }
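/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the two-level locking pattern anon_dup_fill_holes() uses so that the
 * refcnt updates of all anon slots of one large page appear atomic.  The
 * helper name and the ANON_EXAMPLES guard are hypothetical; the lock
 * arrays and macros are the ones used above.
 */
#ifdef ANON_EXAMPLES
static void
large_page_refcnt_sketch(struct anon_hdr *ahp, ulong_t idx, pgcnt_t pgcnt)
{
	struct anon *ap = anon_get_ptr(ahp, idx);
	kmutex_t *ahmpages, *ahm;
	pgcnt_t i;

	/* Serialize whole-large-page refcnt updates on the first slot. */
	ahmpages = &anonpages_hash_lock[AH_LOCK(ap->an_vp, ap->an_off)];
	mutex_enter(ahmpages);

	/* Then adjust each slot under its own per-slot hash lock. */
	for (i = 0; i < pgcnt; i++) {
		ap = anon_get_ptr(ahp, idx + i);
		ahm = &anonhash_lock[AH_LOCK(ap->an_vp, ap->an_off)];
		mutex_enter(ahm);
		ap->an_refcnt++;
		mutex_exit(ahm);
	}
	mutex_exit(ahmpages);
}
#endif	/* ANON_EXAMPLES */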
1306*0Sstevel@tonic-gate 
1307*0Sstevel@tonic-gate /*
1308*0Sstevel@tonic-gate  * Used when a segment with a vnode changes szc.  Similarly to
1309*0Sstevel@tonic-gate  * anon_dup_fill_holes() it makes sure each large page region either has no
1310*0Sstevel@tonic-gate  * anon slots or all of them, but here new slots are created by COWing the
1311*0Sstevel@tonic-gate  * file pages.  On entry no anon slots should be shared.
1312*0Sstevel@tonic-gate  */
1313*0Sstevel@tonic-gate int
1314*0Sstevel@tonic-gate anon_fill_cow_holes(
1315*0Sstevel@tonic-gate 	struct seg *seg,
1316*0Sstevel@tonic-gate 	caddr_t addr,
1317*0Sstevel@tonic-gate 	struct anon_hdr *ahp,
1318*0Sstevel@tonic-gate 	ulong_t an_idx,
1319*0Sstevel@tonic-gate 	struct vnode *vp,
1320*0Sstevel@tonic-gate 	u_offset_t vp_off,
1321*0Sstevel@tonic-gate 	size_t size,
1322*0Sstevel@tonic-gate 	uint_t szc,
1323*0Sstevel@tonic-gate 	uint_t prot,
1324*0Sstevel@tonic-gate 	struct vpage vpage[],
1325*0Sstevel@tonic-gate 	struct cred *cred)
1326*0Sstevel@tonic-gate {
1327*0Sstevel@tonic-gate 	struct anon	*ap;
1328*0Sstevel@tonic-gate 	spgcnt_t	npages;
1329*0Sstevel@tonic-gate 	pgcnt_t		pgcnt, i;
1330*0Sstevel@tonic-gate 	ulong_t		index, off;
1331*0Sstevel@tonic-gate 	int		err = 0;
1332*0Sstevel@tonic-gate 	int		pageflags = 0;
1333*0Sstevel@tonic-gate 
1334*0Sstevel@tonic-gate 	ASSERT(szc != 0);
1335*0Sstevel@tonic-gate 	pgcnt = page_get_pagecnt(szc);
1336*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
1337*0Sstevel@tonic-gate 	npages = btopr(size);
1338*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(npages, pgcnt));
1339*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(an_idx, pgcnt));
1340*0Sstevel@tonic-gate 
1341*0Sstevel@tonic-gate 	while (npages > 0) {
1342*0Sstevel@tonic-gate 		index = an_idx;
1343*0Sstevel@tonic-gate 
1344*0Sstevel@tonic-gate 		/*
1345*0Sstevel@tonic-gate 		 * Find the next valid slot.
1346*0Sstevel@tonic-gate 		 */
1347*0Sstevel@tonic-gate 		if (anon_get_next_ptr(ahp, &index) == NULL) {
1348*0Sstevel@tonic-gate 			break;
1349*0Sstevel@tonic-gate 		}
1350*0Sstevel@tonic-gate 
1351*0Sstevel@tonic-gate 		ASSERT(!ANON_ISBUSY(anon_get_slot(ahp, index)));
1352*0Sstevel@tonic-gate 		/*
1353*0Sstevel@tonic-gate 		 * Now back up index to the beginning of the
1354*0Sstevel@tonic-gate 		 * current large page region of the anon array.
1355*0Sstevel@tonic-gate 		 */
1356*0Sstevel@tonic-gate 		index = P2ALIGN(index, pgcnt);
1357*0Sstevel@tonic-gate 		off = index - an_idx;
1358*0Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(off, pgcnt));
1359*0Sstevel@tonic-gate 		npages -= off;
1360*0Sstevel@tonic-gate 		if (npages <= 0)
1361*0Sstevel@tonic-gate 			break;
1362*0Sstevel@tonic-gate 		an_idx += off;
1363*0Sstevel@tonic-gate 		vp_off += ptob(off);
1364*0Sstevel@tonic-gate 		addr += ptob(off);
1365*0Sstevel@tonic-gate 		if (vpage != NULL) {
1366*0Sstevel@tonic-gate 			vpage += off;
1367*0Sstevel@tonic-gate 		}
1368*0Sstevel@tonic-gate 
1369*0Sstevel@tonic-gate 		for (i = 0; i < pgcnt; i++, an_idx++, vp_off += PAGESIZE) {
1370*0Sstevel@tonic-gate 			if ((ap = anon_get_ptr(ahp, an_idx)) == NULL) {
1371*0Sstevel@tonic-gate 				page_t *pl[1 + 1];
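				/*
				 * pl[] needs one slot for the returned
				 * page plus one for VOP_GETPAGE()'s
				 * terminating NULL entry, hence "1 + 1".
				 */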
1372*0Sstevel@tonic-gate 				page_t *pp;
1373*0Sstevel@tonic-gate 
1374*0Sstevel@tonic-gate 				err = VOP_GETPAGE(vp, vp_off, PAGESIZE, NULL,
1375*0Sstevel@tonic-gate 				    pl, PAGESIZE, seg, addr, S_READ, cred);
1376*0Sstevel@tonic-gate 				if (err) {
1377*0Sstevel@tonic-gate 					break;
1378*0Sstevel@tonic-gate 				}
1379*0Sstevel@tonic-gate 				if (vpage != NULL) {
1380*0Sstevel@tonic-gate 					prot = VPP_PROT(vpage);
1381*0Sstevel@tonic-gate 					pageflags = VPP_ISPPLOCK(vpage) ?
1382*0Sstevel@tonic-gate 					    LOCK_PAGE : 0;
1383*0Sstevel@tonic-gate 				}
1384*0Sstevel@tonic-gate 				pp = anon_private(&ap, seg, addr, prot, pl[0],
1385*0Sstevel@tonic-gate 					pageflags, cred);
1386*0Sstevel@tonic-gate 				if (pp == NULL) {
1387*0Sstevel@tonic-gate 					err = ENOMEM;
1388*0Sstevel@tonic-gate 					break;
1389*0Sstevel@tonic-gate 				}
1390*0Sstevel@tonic-gate 				(void) anon_set_ptr(ahp, an_idx, ap,
1391*0Sstevel@tonic-gate 				    ANON_SLEEP);
1392*0Sstevel@tonic-gate 				page_unlock(pp);
1393*0Sstevel@tonic-gate 			}
1394*0Sstevel@tonic-gate 			ASSERT(ap->an_refcnt == 1);
1395*0Sstevel@tonic-gate 			addr += PAGESIZE;
1396*0Sstevel@tonic-gate 			if (vpage != NULL) {
1397*0Sstevel@tonic-gate 				vpage++;
1398*0Sstevel@tonic-gate 			}
1399*0Sstevel@tonic-gate 		}
1400*0Sstevel@tonic-gate 		npages -= pgcnt;
1401*0Sstevel@tonic-gate 	}
1402*0Sstevel@tonic-gate 
1403*0Sstevel@tonic-gate 	return (err);
1404*0Sstevel@tonic-gate }
1405*0Sstevel@tonic-gate 
1406*0Sstevel@tonic-gate /*
1407*0Sstevel@tonic-gate  * Free a group of "size" anon pages, size in bytes,
1408*0Sstevel@tonic-gate  * and clear out the pointers to the anon entries.
1409*0Sstevel@tonic-gate  */
1410*0Sstevel@tonic-gate void
1411*0Sstevel@tonic-gate anon_free(struct anon_hdr *ahp, ulong_t index, size_t size)
1412*0Sstevel@tonic-gate {
1413*0Sstevel@tonic-gate 	spgcnt_t npages;
1414*0Sstevel@tonic-gate 	struct anon *ap;
1415*0Sstevel@tonic-gate 	ulong_t old;
1416*0Sstevel@tonic-gate 
1417*0Sstevel@tonic-gate 	npages = btopr(size);
1418*0Sstevel@tonic-gate 
1419*0Sstevel@tonic-gate 	while (npages > 0) {
1420*0Sstevel@tonic-gate 		old = index;
1421*0Sstevel@tonic-gate 		if ((ap = anon_get_next_ptr(ahp, &index)) == NULL)
1422*0Sstevel@tonic-gate 			break;
1423*0Sstevel@tonic-gate 
1424*0Sstevel@tonic-gate 		ASSERT(!ANON_ISBUSY(anon_get_slot(ahp, index)));
1425*0Sstevel@tonic-gate 		npages -= index - old;
1426*0Sstevel@tonic-gate 		if (npages <= 0)
1427*0Sstevel@tonic-gate 			break;
1428*0Sstevel@tonic-gate 
1429*0Sstevel@tonic-gate 		(void) anon_set_ptr(ahp, index, NULL, ANON_SLEEP);
1430*0Sstevel@tonic-gate 		anon_decref(ap);
1431*0Sstevel@tonic-gate 		/*
1432*0Sstevel@tonic-gate 		 * Bump index and decrement page count
1433*0Sstevel@tonic-gate 		 */
1434*0Sstevel@tonic-gate 		index++;
1435*0Sstevel@tonic-gate 		npages--;
1436*0Sstevel@tonic-gate 	}
1437*0Sstevel@tonic-gate }
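/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a caller might dispatch between anon_free() and anon_free_pages()
 * based on the region's page size code.  The helper name and the
 * ANON_EXAMPLES guard are hypothetical.
 */
#ifdef ANON_EXAMPLES
static void
anon_free_region_sketch(struct anon_hdr *ahp, ulong_t idx, size_t size,
    uint_t szc)
{
	if (szc == 0)
		anon_free(ahp, idx, size);		/* PAGESIZE slots */
	else
		anon_free_pages(ahp, idx, size, szc);	/* large page regions */
}
#endif	/* ANON_EXAMPLES */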
1438*0Sstevel@tonic-gate 
1439*0Sstevel@tonic-gate void
1440*0Sstevel@tonic-gate anon_free_pages(
1441*0Sstevel@tonic-gate 	struct anon_hdr *ahp,
1442*0Sstevel@tonic-gate 	ulong_t an_idx,
1443*0Sstevel@tonic-gate 	size_t size,
1444*0Sstevel@tonic-gate 	uint_t szc)
1445*0Sstevel@tonic-gate {
1446*0Sstevel@tonic-gate 	spgcnt_t	npages;
1447*0Sstevel@tonic-gate 	pgcnt_t		pgcnt;
1448*0Sstevel@tonic-gate 	ulong_t		index, off;
1449*0Sstevel@tonic-gate 
1450*0Sstevel@tonic-gate 	ASSERT(szc != 0);
1451*0Sstevel@tonic-gate 	pgcnt = page_get_pagecnt(szc);
1452*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
1453*0Sstevel@tonic-gate 	npages = btopr(size);
1454*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(npages, pgcnt));
1455*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(an_idx, pgcnt));
1456*0Sstevel@tonic-gate 
1457*0Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.freepages[0]);
1458*0Sstevel@tonic-gate 
1459*0Sstevel@tonic-gate 	while (npages > 0) {
1460*0Sstevel@tonic-gate 		index = an_idx;
1461*0Sstevel@tonic-gate 
1462*0Sstevel@tonic-gate 		/*
1463*0Sstevel@tonic-gate 		 * Find the next valid slot.
1464*0Sstevel@tonic-gate 		 */
1465*0Sstevel@tonic-gate 		if (anon_get_next_ptr(ahp, &index) == NULL)
1466*0Sstevel@tonic-gate 			break;
1467*0Sstevel@tonic-gate 
1468*0Sstevel@tonic-gate 		ASSERT(!ANON_ISBUSY(anon_get_slot(ahp, index)));
1469*0Sstevel@tonic-gate 		/*
1470*0Sstevel@tonic-gate 		 * Now back up index to the beginning of the
1471*0Sstevel@tonic-gate 		 * current large page region of the old array.
1472*0Sstevel@tonic-gate 		 */
1473*0Sstevel@tonic-gate 		index = P2ALIGN(index, pgcnt);
1474*0Sstevel@tonic-gate 		off = index - an_idx;
1475*0Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(off, pgcnt));
1476*0Sstevel@tonic-gate 		npages -= off;
1477*0Sstevel@tonic-gate 		if (npages <= 0)
1478*0Sstevel@tonic-gate 			break;
1479*0Sstevel@tonic-gate 
1480*0Sstevel@tonic-gate 		anon_decref_pages(ahp, index, szc);
1481*0Sstevel@tonic-gate 
1482*0Sstevel@tonic-gate 		off += pgcnt;
1483*0Sstevel@tonic-gate 		an_idx += off;
1484*0Sstevel@tonic-gate 		npages -= pgcnt;
1485*0Sstevel@tonic-gate 	}
1486*0Sstevel@tonic-gate }
1487*0Sstevel@tonic-gate 
1488*0Sstevel@tonic-gate /*
1489*0Sstevel@tonic-gate  * Make anonymous pages discardable
1490*0Sstevel@tonic-gate  */
1491*0Sstevel@tonic-gate void
1492*0Sstevel@tonic-gate anon_disclaim(struct anon_map *amp, ulong_t index, size_t size, int flags)
1493*0Sstevel@tonic-gate {
1494*0Sstevel@tonic-gate 	spgcnt_t npages = btopr(size);
1495*0Sstevel@tonic-gate 	struct anon *ap;
1496*0Sstevel@tonic-gate 	struct vnode *vp;
1497*0Sstevel@tonic-gate 	anoff_t off;
1498*0Sstevel@tonic-gate 	page_t *pp, *root_pp;
1499*0Sstevel@tonic-gate 	kmutex_t *ahm;
1500*0Sstevel@tonic-gate 	pgcnt_t pgcnt;
1501*0Sstevel@tonic-gate 	ulong_t old_idx, idx, i;
1502*0Sstevel@tonic-gate 	struct anon_hdr *ahp = amp->ahp;
1503*0Sstevel@tonic-gate 	anon_sync_obj_t cookie;
1504*0Sstevel@tonic-gate 
1505*0Sstevel@tonic-gate 	ASSERT(RW_READ_HELD(&amp->a_rwlock));
1506*0Sstevel@tonic-gate 	pgcnt = 1;
1507*0Sstevel@tonic-gate 	for (; npages > 0; index = (pgcnt == 1) ? index + 1 :
1508*0Sstevel@tonic-gate 		P2ROUNDUP(index + 1, pgcnt), npages -= pgcnt) {
1509*0Sstevel@tonic-gate 
1510*0Sstevel@tonic-gate 		/*
1511*0Sstevel@tonic-gate 		 * get anon pointer and index for the first valid entry
1512*0Sstevel@tonic-gate 		 * in the anon list, starting from "index"
1513*0Sstevel@tonic-gate 		 */
1514*0Sstevel@tonic-gate 		old_idx = index;
1515*0Sstevel@tonic-gate 		if ((ap = anon_get_next_ptr(ahp, &index)) == NULL)
1516*0Sstevel@tonic-gate 			break;
1517*0Sstevel@tonic-gate 
1518*0Sstevel@tonic-gate 		/*
1519*0Sstevel@tonic-gate 		 * decrement npages by the number of NULL anon slots we skipped
1520*0Sstevel@tonic-gate 		 */
1521*0Sstevel@tonic-gate 		npages -= index - old_idx;
1522*0Sstevel@tonic-gate 		if (npages <= 0)
1523*0Sstevel@tonic-gate 			break;
1524*0Sstevel@tonic-gate 
1525*0Sstevel@tonic-gate 		anon_array_enter(amp, index, &cookie);
1526*0Sstevel@tonic-gate 		ap = anon_get_ptr(ahp, index);
1527*0Sstevel@tonic-gate 		ASSERT(ap != NULL);
1528*0Sstevel@tonic-gate 
1529*0Sstevel@tonic-gate 		/*
1530*0Sstevel@tonic-gate 		 * Get the anonymous page and try to lock it SE_EXCL.
1531*0Sstevel@tonic-gate 		 * In the non-blocking case, if we can't grab the lock
1532*0Sstevel@tonic-gate 		 * we skip to the next page.
1533*0Sstevel@tonic-gate 		 * In the blocking case (ANON_PGLOOKUP_BLK) we block
1534*0Sstevel@tonic-gate 		 * until we grab the SE_EXCL lock.
1535*0Sstevel@tonic-gate 		 */
1536*0Sstevel@tonic-gate 		swap_xlate(ap, &vp, &off);
1537*0Sstevel@tonic-gate 		if (flags & ANON_PGLOOKUP_BLK)
1538*0Sstevel@tonic-gate 			pp = page_lookup_create(vp, (u_offset_t)off,
1539*0Sstevel@tonic-gate 			    SE_EXCL, NULL, NULL, SE_EXCL_WANTED);
1540*0Sstevel@tonic-gate 		else
1541*0Sstevel@tonic-gate 			pp = page_lookup_nowait(vp, (u_offset_t)off, SE_EXCL);
1542*0Sstevel@tonic-gate 		if (pp == NULL) {
1543*0Sstevel@tonic-gate 			segadvstat.MADV_FREE_miss.value.ul++;
1544*0Sstevel@tonic-gate 			pgcnt = 1;
1545*0Sstevel@tonic-gate 			anon_array_exit(&cookie);
1546*0Sstevel@tonic-gate 			continue;
1547*0Sstevel@tonic-gate 		}
1548*0Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(pp->p_szc);
1549*0Sstevel@tonic-gate 
1550*0Sstevel@tonic-gate 		/*
1551*0Sstevel@tonic-gate 		 * We cannot free a page that is permanently locked.
1552*0Sstevel@tonic-gate 		 * The page_struct_lock need not be acquired to examine
1553*0Sstevel@tonic-gate 		 * these fields since the page has an "exclusive" lock.
1554*0Sstevel@tonic-gate 		 */
1555*0Sstevel@tonic-gate 		if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
1556*0Sstevel@tonic-gate 			page_unlock(pp);
1557*0Sstevel@tonic-gate 			segadvstat.MADV_FREE_miss.value.ul++;
1558*0Sstevel@tonic-gate 			anon_array_exit(&cookie);
1559*0Sstevel@tonic-gate 			continue;
1560*0Sstevel@tonic-gate 		}
1561*0Sstevel@tonic-gate 
1562*0Sstevel@tonic-gate 		ahm = &anonhash_lock[AH_LOCK(vp, off)];
1563*0Sstevel@tonic-gate 		mutex_enter(ahm);
1564*0Sstevel@tonic-gate 		ASSERT(ap->an_refcnt != 0);
1565*0Sstevel@tonic-gate 		/*
1566*0Sstevel@tonic-gate 		 * skip this one if copy-on-write is not yet broken.
1567*0Sstevel@tonic-gate 		 */
1568*0Sstevel@tonic-gate 		if (ap->an_refcnt > 1) {
1569*0Sstevel@tonic-gate 			mutex_exit(ahm);
1570*0Sstevel@tonic-gate 			page_unlock(pp);
1571*0Sstevel@tonic-gate 			segadvstat.MADV_FREE_miss.value.ul++;
1572*0Sstevel@tonic-gate 			anon_array_exit(&cookie);
1573*0Sstevel@tonic-gate 			continue;
1574*0Sstevel@tonic-gate 		}
1575*0Sstevel@tonic-gate 
1576*0Sstevel@tonic-gate 		if (pp->p_szc == 0) {
1577*0Sstevel@tonic-gate 			pgcnt = 1;
1578*0Sstevel@tonic-gate 
1579*0Sstevel@tonic-gate 			/*
1580*0Sstevel@tonic-gate 			 * free the swap slot.
1581*0Sstevel@tonic-gate 			 */
1582*0Sstevel@tonic-gate 			if (ap->an_pvp) {
1583*0Sstevel@tonic-gate 				swap_phys_free(ap->an_pvp, ap->an_poff,
1584*0Sstevel@tonic-gate 				    PAGESIZE);
1585*0Sstevel@tonic-gate 				ap->an_pvp = NULL;
1586*0Sstevel@tonic-gate 				ap->an_poff = 0;
1587*0Sstevel@tonic-gate 			}
1588*0Sstevel@tonic-gate 			mutex_exit(ahm);
1589*0Sstevel@tonic-gate 			segadvstat.MADV_FREE_hit.value.ul++;
1590*0Sstevel@tonic-gate 
1591*0Sstevel@tonic-gate 			/*
1592*0Sstevel@tonic-gate 			 * while we are at it, unload all the translations
1593*0Sstevel@tonic-gate 			 * and attempt to free the page.
1594*0Sstevel@tonic-gate 			 */
1595*0Sstevel@tonic-gate 			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
1596*0Sstevel@tonic-gate 			/*LINTED: constant in conditional context */
1597*0Sstevel@tonic-gate 			VN_DISPOSE(pp, B_FREE, 0, kcred);
1598*0Sstevel@tonic-gate 			anon_array_exit(&cookie);
1599*0Sstevel@tonic-gate 			continue;
1600*0Sstevel@tonic-gate 		}
1601*0Sstevel@tonic-gate 
1602*0Sstevel@tonic-gate 		pgcnt = page_get_pagecnt(pp->p_szc);
1603*0Sstevel@tonic-gate 		if (!IS_P2ALIGNED(index, pgcnt)) {
1604*0Sstevel@tonic-gate 			if (!page_try_demote_pages(pp)) {
1605*0Sstevel@tonic-gate 				mutex_exit(ahm);
1606*0Sstevel@tonic-gate 				page_unlock(pp);
1607*0Sstevel@tonic-gate 				segadvstat.MADV_FREE_miss.value.ul++;
1608*0Sstevel@tonic-gate 				anon_array_exit(&cookie);
1609*0Sstevel@tonic-gate 				continue;
1610*0Sstevel@tonic-gate 			} else {
1611*0Sstevel@tonic-gate 				pgcnt = 1;
1612*0Sstevel@tonic-gate 				if (ap->an_pvp) {
1613*0Sstevel@tonic-gate 					swap_phys_free(ap->an_pvp,
1614*0Sstevel@tonic-gate 					    ap->an_poff, PAGESIZE);
1615*0Sstevel@tonic-gate 					ap->an_pvp = NULL;
1616*0Sstevel@tonic-gate 					ap->an_poff = 0;
1617*0Sstevel@tonic-gate 				}
1618*0Sstevel@tonic-gate 				mutex_exit(ahm);
1619*0Sstevel@tonic-gate 				(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
1620*0Sstevel@tonic-gate 				/*LINTED*/
1621*0Sstevel@tonic-gate 				VN_DISPOSE(pp, B_FREE, 0, kcred);
1622*0Sstevel@tonic-gate 				segadvstat.MADV_FREE_hit.value.ul++;
1623*0Sstevel@tonic-gate 				anon_array_exit(&cookie);
1624*0Sstevel@tonic-gate 				continue;
1625*0Sstevel@tonic-gate 			}
1626*0Sstevel@tonic-gate 		}
1627*0Sstevel@tonic-gate 		mutex_exit(ahm);
1628*0Sstevel@tonic-gate 		root_pp = pp;
1629*0Sstevel@tonic-gate 
1630*0Sstevel@tonic-gate 		/*
1631*0Sstevel@tonic-gate 		 * try to lock remaining pages
1632*0Sstevel@tonic-gate 		 */
1633*0Sstevel@tonic-gate 		for (idx = 1; idx < pgcnt; idx++) {
1634*0Sstevel@tonic-gate 			pp = page_next(pp);
1635*0Sstevel@tonic-gate 			if (!page_trylock(pp, SE_EXCL))
1636*0Sstevel@tonic-gate 				break;
1637*0Sstevel@tonic-gate 			if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
1638*0Sstevel@tonic-gate 				page_unlock(pp);
1639*0Sstevel@tonic-gate 				break;
1640*0Sstevel@tonic-gate 			}
1641*0Sstevel@tonic-gate 		}
1642*0Sstevel@tonic-gate 
1643*0Sstevel@tonic-gate 		if (idx == pgcnt) {
1644*0Sstevel@tonic-gate 			for (i = 0; i < pgcnt; i++) {
1645*0Sstevel@tonic-gate 				ap = anon_get_ptr(ahp, index + i);
1646*0Sstevel@tonic-gate 				if (ap == NULL)
1647*0Sstevel@tonic-gate 					break;
1648*0Sstevel@tonic-gate 				swap_xlate(ap, &vp, &off);
1649*0Sstevel@tonic-gate 				ahm = &anonhash_lock[AH_LOCK(vp, off)];
1650*0Sstevel@tonic-gate 				mutex_enter(ahm);
1651*0Sstevel@tonic-gate 				ASSERT(ap->an_refcnt != 0);
1652*0Sstevel@tonic-gate 
1653*0Sstevel@tonic-gate 				/*
1654*0Sstevel@tonic-gate 				 * skip this one if copy-on-write
1655*0Sstevel@tonic-gate 				 * is not yet broken.
1656*0Sstevel@tonic-gate 				 */
1657*0Sstevel@tonic-gate 				if (ap->an_refcnt > 1) {
1658*0Sstevel@tonic-gate 					mutex_exit(ahm);
1659*0Sstevel@tonic-gate 					goto skiplp;
1660*0Sstevel@tonic-gate 				}
1661*0Sstevel@tonic-gate 				if (ap->an_pvp) {
1662*0Sstevel@tonic-gate 					swap_phys_free(ap->an_pvp,
1663*0Sstevel@tonic-gate 					    ap->an_poff, PAGESIZE);
1664*0Sstevel@tonic-gate 					ap->an_pvp = NULL;
1665*0Sstevel@tonic-gate 					ap->an_poff = 0;
1666*0Sstevel@tonic-gate 				}
1667*0Sstevel@tonic-gate 				mutex_exit(ahm);
1668*0Sstevel@tonic-gate 			}
1669*0Sstevel@tonic-gate 			page_destroy_pages(root_pp);
1670*0Sstevel@tonic-gate 			segadvstat.MADV_FREE_hit.value.ul += pgcnt;
1671*0Sstevel@tonic-gate 			anon_array_exit(&cookie);
1672*0Sstevel@tonic-gate 			continue;
1673*0Sstevel@tonic-gate 		}
1674*0Sstevel@tonic-gate skiplp:
1675*0Sstevel@tonic-gate 		segadvstat.MADV_FREE_miss.value.ul += pgcnt;
1676*0Sstevel@tonic-gate 		for (i = 0, pp = root_pp; i < idx; pp = page_next(pp), i++)
1677*0Sstevel@tonic-gate 			page_unlock(pp);
1678*0Sstevel@tonic-gate 		anon_array_exit(&cookie);
1679*0Sstevel@tonic-gate 	}
1680*0Sstevel@tonic-gate }
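/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the shape of an MADV_FREE-style caller of anon_disclaim().  The helper
 * name and the ANON_EXAMPLES guard are hypothetical; the reader lock
 * matches the RW_READ_HELD() assertion on entry.
 */
#ifdef ANON_EXAMPLES
static void
anon_disclaim_sketch(struct anon_map *amp, ulong_t idx, size_t size)
{
	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	/* flags == 0: use the non-blocking page_lookup_nowait() path. */
	anon_disclaim(amp, idx, size, 0);
	ANON_LOCK_EXIT(&amp->a_rwlock);
}
#endif	/* ANON_EXAMPLES */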
1681*0Sstevel@tonic-gate 
1682*0Sstevel@tonic-gate /*
1683*0Sstevel@tonic-gate  * Return the kept page(s) and protections back to the segment driver.
1684*0Sstevel@tonic-gate  */
1685*0Sstevel@tonic-gate int
1686*0Sstevel@tonic-gate anon_getpage(
1687*0Sstevel@tonic-gate 	struct anon **app,
1688*0Sstevel@tonic-gate 	uint_t *protp,
1689*0Sstevel@tonic-gate 	page_t *pl[],
1690*0Sstevel@tonic-gate 	size_t plsz,
1691*0Sstevel@tonic-gate 	struct seg *seg,
1692*0Sstevel@tonic-gate 	caddr_t addr,
1693*0Sstevel@tonic-gate 	enum seg_rw rw,
1694*0Sstevel@tonic-gate 	struct cred *cred)
1695*0Sstevel@tonic-gate {
1696*0Sstevel@tonic-gate 	page_t *pp;
1697*0Sstevel@tonic-gate 	struct anon *ap = *app;
1698*0Sstevel@tonic-gate 	struct vnode *vp;
1699*0Sstevel@tonic-gate 	anoff_t off;
1700*0Sstevel@tonic-gate 	int err;
1701*0Sstevel@tonic-gate 	kmutex_t *ahm;
1702*0Sstevel@tonic-gate 
1703*0Sstevel@tonic-gate 	swap_xlate(ap, &vp, &off);
1704*0Sstevel@tonic-gate 
1705*0Sstevel@tonic-gate 	/*
1706*0Sstevel@tonic-gate 	 * Lookup the page. If page is being paged in,
1707*0Sstevel@tonic-gate 	 * wait for it to finish as we must return a list of
1708*0Sstevel@tonic-gate 	 * pages since this routine acts like the VOP_GETPAGE
1709*0Sstevel@tonic-gate 	 * routine does.
1710*0Sstevel@tonic-gate 	 */
1711*0Sstevel@tonic-gate 	if (pl != NULL && (pp = page_lookup(vp, (u_offset_t)off, SE_SHARED))) {
1712*0Sstevel@tonic-gate 		ahm = &anonhash_lock[AH_LOCK(ap->an_vp, ap->an_off)];
1713*0Sstevel@tonic-gate 		mutex_enter(ahm);
1714*0Sstevel@tonic-gate 		if (ap->an_refcnt == 1)
1715*0Sstevel@tonic-gate 			*protp = PROT_ALL;
1716*0Sstevel@tonic-gate 		else
1717*0Sstevel@tonic-gate 			*protp = PROT_ALL & ~PROT_WRITE;
1718*0Sstevel@tonic-gate 		mutex_exit(ahm);
1719*0Sstevel@tonic-gate 		pl[0] = pp;
1720*0Sstevel@tonic-gate 		pl[1] = NULL;
1721*0Sstevel@tonic-gate 		return (0);
1722*0Sstevel@tonic-gate 	}
1723*0Sstevel@tonic-gate 
1724*0Sstevel@tonic-gate 	/*
1725*0Sstevel@tonic-gate 	 * Simply treat it as a vnode fault on the anon vp.
1726*0Sstevel@tonic-gate 	 */
1727*0Sstevel@tonic-gate 
1728*0Sstevel@tonic-gate 	TRACE_3(TR_FAC_VM, TR_ANON_GETPAGE,
1729*0Sstevel@tonic-gate 		"anon_getpage:seg %x addr %x vp %x",
1730*0Sstevel@tonic-gate 		seg, addr, vp);
1731*0Sstevel@tonic-gate 
1732*0Sstevel@tonic-gate 	err = VOP_GETPAGE(vp, (u_offset_t)off, PAGESIZE, protp, pl, plsz,
1733*0Sstevel@tonic-gate 	    seg, addr, rw, cred);
1734*0Sstevel@tonic-gate 
1735*0Sstevel@tonic-gate 	if (err == 0 && pl != NULL) {
1736*0Sstevel@tonic-gate 		ahm = &anonhash_lock[AH_LOCK(ap->an_vp, ap->an_off)];
1737*0Sstevel@tonic-gate 		mutex_enter(ahm);
1738*0Sstevel@tonic-gate 		if (ap->an_refcnt != 1)
1739*0Sstevel@tonic-gate 			*protp &= ~PROT_WRITE;	/* make read-only */
1740*0Sstevel@tonic-gate 		mutex_exit(ahm);
1741*0Sstevel@tonic-gate 	}
1742*0Sstevel@tonic-gate 	return (err);
1743*0Sstevel@tonic-gate }
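/*
 * A worked example of the *protp convention above (editor's illustration):
 * if an anon slot's an_refcnt is 2 (an unbroken copy-on-write share), the
 * page comes back with *protp == PROT_ALL & ~PROT_WRITE, so the first
 * store through either mapping faults and the COW is broken; a slot with
 * an_refcnt of 1 comes back writable (PROT_ALL).
 */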
1744*0Sstevel@tonic-gate 
1745*0Sstevel@tonic-gate /*
1746*0Sstevel@tonic-gate  * Creates or returns kept pages to the segment driver.  Returns -1 if a
1747*0Sstevel@tonic-gate  * large page cannot be allocated.  Returns -2 if some other process has
1748*0Sstevel@tonic-gate  * allocated a larger page.
1749*0Sstevel@tonic-gate  *
1750*0Sstevel@tonic-gate  * For a cowfault it will allocate pages of any size to fill the requested
1751*0Sstevel@tonic-gate  * area, to avoid partially overwriting anon slots (i.e. sharing only some
1752*0Sstevel@tonic-gate  * of the anon slots within a large page with other processes).  This policy
1753*0Sstevel@tonic-gate  * greatly simplifies large page freeing (a large page is only freed when
1754*0Sstevel@tonic-gate  * all of its anon slot refcnts are 0).
1755*0Sstevel@tonic-gate  */
1756*0Sstevel@tonic-gate int
1757*0Sstevel@tonic-gate anon_map_getpages(
1758*0Sstevel@tonic-gate 	struct anon_map *amp,
1759*0Sstevel@tonic-gate 	ulong_t	start_idx,
1760*0Sstevel@tonic-gate 	uint_t	szc,
1761*0Sstevel@tonic-gate 	struct seg *seg,
1762*0Sstevel@tonic-gate 	caddr_t	addr,
1763*0Sstevel@tonic-gate 	uint_t prot,
1764*0Sstevel@tonic-gate 	uint_t *protp,
1765*0Sstevel@tonic-gate 	page_t	*ppa[],
1766*0Sstevel@tonic-gate 	uint_t	*ppa_szc,
1767*0Sstevel@tonic-gate 	struct vpage vpage[],
1768*0Sstevel@tonic-gate 	enum seg_rw rw,
1769*0Sstevel@tonic-gate 	int brkcow,
1770*0Sstevel@tonic-gate 	int anypgsz,
1771*0Sstevel@tonic-gate 	struct cred *cred)
1772*0Sstevel@tonic-gate {
1773*0Sstevel@tonic-gate 	pgcnt_t		pgcnt;
1774*0Sstevel@tonic-gate 	struct anon	*ap;
1775*0Sstevel@tonic-gate 	struct vnode	*vp;
1776*0Sstevel@tonic-gate 	anoff_t		off;
1777*0Sstevel@tonic-gate 	page_t		*pp, *pl[2], *conpp = NULL;
1778*0Sstevel@tonic-gate 	caddr_t		vaddr;
1779*0Sstevel@tonic-gate 	ulong_t		pg_idx, an_idx, i;
1780*0Sstevel@tonic-gate 	spgcnt_t	nreloc = 0;
1781*0Sstevel@tonic-gate 	int		prealloc = 1;
1782*0Sstevel@tonic-gate 	int		err, slotcreate;
1783*0Sstevel@tonic-gate 	uint_t		vpprot;
1784*0Sstevel@tonic-gate 
1785*0Sstevel@tonic-gate #if !defined(__i386) && !defined(__amd64)
1786*0Sstevel@tonic-gate 	ASSERT(seg->s_szc != 0);
1787*0Sstevel@tonic-gate #endif
1788*0Sstevel@tonic-gate 	ASSERT(szc <= seg->s_szc);
1789*0Sstevel@tonic-gate 	ASSERT(ppa_szc != NULL);
1790*0Sstevel@tonic-gate 	ASSERT(rw != S_CREATE);
1791*0Sstevel@tonic-gate 
1792*0Sstevel@tonic-gate 	*protp = PROT_ALL;
1793*0Sstevel@tonic-gate 
1794*0Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.getpages[0]);
1795*0Sstevel@tonic-gate 
1796*0Sstevel@tonic-gate 	if (szc == 0) {
1797*0Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.getpages[1]);
1798*0Sstevel@tonic-gate 		if ((ap = anon_get_ptr(amp->ahp, start_idx)) != NULL) {
1799*0Sstevel@tonic-gate 			err = anon_getpage(&ap, protp, pl, PAGESIZE, seg,
1800*0Sstevel@tonic-gate 			    addr, rw, cred);
1801*0Sstevel@tonic-gate 			if (err)
1802*0Sstevel@tonic-gate 				return (err);
1803*0Sstevel@tonic-gate 			ppa[0] = pl[0];
1804*0Sstevel@tonic-gate 			if (brkcow == 0 || (*protp & PROT_WRITE)) {
1805*0Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.getpages[2]);
1806*0Sstevel@tonic-gate 				if (ppa[0]->p_szc != 0) {
1807*0Sstevel@tonic-gate 					VM_STAT_ADD(anonvmstats.getpages[3]);
1808*0Sstevel@tonic-gate 					*ppa_szc = ppa[0]->p_szc;
1809*0Sstevel@tonic-gate 					page_unlock(ppa[0]);
1810*0Sstevel@tonic-gate 					return (-2);
1811*0Sstevel@tonic-gate 				}
1812*0Sstevel@tonic-gate 				return (0);
1813*0Sstevel@tonic-gate 			}
1814*0Sstevel@tonic-gate 			panic("anon_map_getpages: cowfault for szc 0");
1815*0Sstevel@tonic-gate 		} else {
1816*0Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[4]);
1817*0Sstevel@tonic-gate 			ppa[0] = anon_zero(seg, addr, &ap, cred);
1818*0Sstevel@tonic-gate 			if (ppa[0] == NULL)
1819*0Sstevel@tonic-gate 				return (ENOMEM);
1820*0Sstevel@tonic-gate 			(void) anon_set_ptr(amp->ahp, start_idx, ap,
1821*0Sstevel@tonic-gate 			    ANON_SLEEP);
1822*0Sstevel@tonic-gate 			return (0);
1823*0Sstevel@tonic-gate 		}
1824*0Sstevel@tonic-gate 	}
1825*0Sstevel@tonic-gate 
1826*0Sstevel@tonic-gate 	pgcnt = page_get_pagecnt(szc);
1827*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
1828*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(start_idx, pgcnt));
1829*0Sstevel@tonic-gate 
1830*0Sstevel@tonic-gate 	/*
1831*0Sstevel@tonic-gate 	 * First we check for the case where the requested large
1832*0Sstevel@tonic-gate 	 * page or a larger page already exists in the system.
1833*0Sstevel@tonic-gate 	 * Actually we only check if the first constituent page
1834*0Sstevel@tonic-gate 	 * exists and only preallocate if it's not found.
1835*0Sstevel@tonic-gate 	 */
1836*0Sstevel@tonic-gate 	ap = anon_get_ptr(amp->ahp, start_idx);
1837*0Sstevel@tonic-gate 	if (ap) {
1838*0Sstevel@tonic-gate 		uint_t pszc;
1839*0Sstevel@tonic-gate 		swap_xlate(ap, &vp, &off);
1840*0Sstevel@tonic-gate 		if (page_exists_forreal(vp, (u_offset_t)off, &pszc)) {
1841*0Sstevel@tonic-gate 			if (pszc > szc) {
1842*0Sstevel@tonic-gate 				*ppa_szc = pszc;
1843*0Sstevel@tonic-gate 				return (-2);
1844*0Sstevel@tonic-gate 			}
1845*0Sstevel@tonic-gate 			if (pszc == szc) {
1846*0Sstevel@tonic-gate 				prealloc = 0;
1847*0Sstevel@tonic-gate 			}
1848*0Sstevel@tonic-gate 		}
1849*0Sstevel@tonic-gate 	}
1850*0Sstevel@tonic-gate 
1851*0Sstevel@tonic-gate 	VM_STAT_COND_ADD(prealloc == 0, anonvmstats.getpages[5]);
1852*0Sstevel@tonic-gate 	VM_STAT_COND_ADD(prealloc != 0, anonvmstats.getpages[6]);
1853*0Sstevel@tonic-gate 
1854*0Sstevel@tonic-gate top:
1855*0Sstevel@tonic-gate 	/*
1856*0Sstevel@tonic-gate 	 * If a smaller page or no page at all was found,
1857*0Sstevel@tonic-gate 	 * grab a large page off the freelist.
1858*0Sstevel@tonic-gate 	 */
1859*0Sstevel@tonic-gate 	if (prealloc) {
1860*0Sstevel@tonic-gate 		ASSERT(conpp == NULL);
1861*0Sstevel@tonic-gate 		if (page_alloc_pages(seg, addr, NULL, ppa, szc, 0) != 0) {
1862*0Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[7]);
1863*0Sstevel@tonic-gate 			if (brkcow == 0 ||
1864*0Sstevel@tonic-gate 			    !anon_share(amp->ahp, start_idx, pgcnt)) {
1865*0Sstevel@tonic-gate 				/*
1866*0Sstevel@tonic-gate 				 * If the refcnts of all anon slots are <= 1
1867*0Sstevel@tonic-gate 				 * they can't increase since we are holding
1868*0Sstevel@tonic-gate 				 * the address space's lock. So segvn can
1869*0Sstevel@tonic-gate 				 * safely decrease szc without risk of
1870*0Sstevel@tonic-gate 				 * generating a cow fault for a region smaller
1871*0Sstevel@tonic-gate 				 * than the segment's largest page size.
1872*0Sstevel@tonic-gate 				 */
1873*0Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.getpages[8]);
1874*0Sstevel@tonic-gate 				return (-1);
1875*0Sstevel@tonic-gate 			}
1876*0Sstevel@tonic-gate 		docow:
1877*0Sstevel@tonic-gate 			/*
1878*0Sstevel@tonic-gate 			 * This is a cow fault. Copy away the entire 1 large
1879*0Sstevel@tonic-gate 			 * page region of this segment.
1880*0Sstevel@tonic-gate 			 */
1881*0Sstevel@tonic-gate 			if (szc != seg->s_szc)
1882*0Sstevel@tonic-gate 				panic("anon_map_getpages: cowfault for szc %d",
1883*0Sstevel@tonic-gate 				    szc);
1884*0Sstevel@tonic-gate 			vaddr = addr;
1885*0Sstevel@tonic-gate 			for (pg_idx = 0, an_idx = start_idx; pg_idx < pgcnt;
1886*0Sstevel@tonic-gate 			    pg_idx++, an_idx++, vaddr += PAGESIZE) {
1887*0Sstevel@tonic-gate 				if ((ap = anon_get_ptr(amp->ahp, an_idx)) !=
1888*0Sstevel@tonic-gate 				    NULL) {
1889*0Sstevel@tonic-gate 					err = anon_getpage(&ap, &vpprot, pl,
1890*0Sstevel@tonic-gate 					    PAGESIZE, seg, vaddr, rw, cred);
1891*0Sstevel@tonic-gate 					if (err) {
1892*0Sstevel@tonic-gate 						for (i = 0; i < pg_idx; i++) {
1893*0Sstevel@tonic-gate 							if ((pp = ppa[i]) !=
1894*0Sstevel@tonic-gate 							    NULL)
1895*0Sstevel@tonic-gate 								page_unlock(pp);
1896*0Sstevel@tonic-gate 						}
1897*0Sstevel@tonic-gate 						return (err);
1898*0Sstevel@tonic-gate 					}
1899*0Sstevel@tonic-gate 					ppa[pg_idx] = pl[0];
1900*0Sstevel@tonic-gate 				} else {
1901*0Sstevel@tonic-gate 					/*
1902*0Sstevel@tonic-gate 					 * Since this is a cowfault we know
1903*0Sstevel@tonic-gate 					 * that this address space has a
1904*0Sstevel@tonic-gate 					 * parent or children which means
1905*0Sstevel@tonic-gate 					 * anon_dup_fill_holes() has initialized
1906*0Sstevel@tonic-gate 					 * all anon slots within a large page
1907*0Sstevel@tonic-gate 					 * region that had at least one anon
1908*0Sstevel@tonic-gate 					 * slot at the time of fork().
1909*0Sstevel@tonic-gate 					 */
1910*0Sstevel@tonic-gate 					panic("anon_map_getpages: "
1911*0Sstevel@tonic-gate 					    "cowfault but anon slot is empty");
1912*0Sstevel@tonic-gate 				}
1913*0Sstevel@tonic-gate 			}
1914*0Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[9]);
1915*0Sstevel@tonic-gate 			*protp = PROT_ALL;
1916*0Sstevel@tonic-gate 			return (anon_map_privatepages(amp, start_idx, szc, seg,
1917*0Sstevel@tonic-gate 			    addr, prot, ppa, vpage, anypgsz, cred));
1918*0Sstevel@tonic-gate 		}
1919*0Sstevel@tonic-gate 	}
1920*0Sstevel@tonic-gate 
1921*0Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.getpages[10]);
1922*0Sstevel@tonic-gate 
1923*0Sstevel@tonic-gate 	an_idx = start_idx;
1924*0Sstevel@tonic-gate 	pg_idx = 0;
1925*0Sstevel@tonic-gate 	vaddr = addr;
1926*0Sstevel@tonic-gate 	while (pg_idx < pgcnt) {
1927*0Sstevel@tonic-gate 		slotcreate = 0;
1928*0Sstevel@tonic-gate 		if ((ap = anon_get_ptr(amp->ahp, an_idx)) == NULL) {
1929*0Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[11]);
1930*0Sstevel@tonic-gate 			/*
1931*0Sstevel@tonic-gate 			 * For us to have decided not to preallocate
1932*0Sstevel@tonic-gate 			 * would have meant that a large page was
1933*0Sstevel@tonic-gate 			 * found, which also means that all of the
1934*0Sstevel@tonic-gate 			 * anon slots for that page would have
1935*0Sstevel@tonic-gate 			 * already been created for us.
1936*0Sstevel@tonic-gate 			 */
1937*0Sstevel@tonic-gate 			if (prealloc == 0)
1938*0Sstevel@tonic-gate 				panic("anon_map_getpages: prealloc = 0");
1939*0Sstevel@tonic-gate 
1940*0Sstevel@tonic-gate 			slotcreate = 1;
1941*0Sstevel@tonic-gate 			ap = anon_alloc(NULL, 0);
1942*0Sstevel@tonic-gate 		}
1943*0Sstevel@tonic-gate 		swap_xlate(ap, &vp, &off);
1944*0Sstevel@tonic-gate 
1945*0Sstevel@tonic-gate 		/*
1946*0Sstevel@tonic-gate 		 * Now setup our preallocated page to pass down
1947*0Sstevel@tonic-gate 		 * to swap_getpage().
1948*0Sstevel@tonic-gate 		 */
1949*0Sstevel@tonic-gate 		if (prealloc) {
1950*0Sstevel@tonic-gate 			ASSERT(ppa[pg_idx]->p_szc == szc);
1951*0Sstevel@tonic-gate 			conpp = ppa[pg_idx];
1952*0Sstevel@tonic-gate 		}
1953*0Sstevel@tonic-gate 		ASSERT(prealloc || conpp == NULL);
1954*0Sstevel@tonic-gate 
1955*0Sstevel@tonic-gate 		/*
1956*0Sstevel@tonic-gate 		 * If we just created this anon slot then call
1957*0Sstevel@tonic-gate 		 * with S_CREATE to prevent doing IO on the page.
1958*0Sstevel@tonic-gate 		 * Similar to the anon_zero case.
1959*0Sstevel@tonic-gate 		 */
1960*0Sstevel@tonic-gate 		err = swap_getconpage(vp, (u_offset_t)off, PAGESIZE,
1961*0Sstevel@tonic-gate 		    NULL, pl, PAGESIZE, conpp, &nreloc, seg, vaddr,
1962*0Sstevel@tonic-gate 		    slotcreate == 1 ? S_CREATE : rw, cred);
1963*0Sstevel@tonic-gate 
1964*0Sstevel@tonic-gate 		if (err) {
1965*0Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[12]);
1966*0Sstevel@tonic-gate 			ASSERT(slotcreate == 0);
1967*0Sstevel@tonic-gate 			goto io_err;
1968*0Sstevel@tonic-gate 		}
1969*0Sstevel@tonic-gate 
1970*0Sstevel@tonic-gate 		pp = pl[0];
1971*0Sstevel@tonic-gate 
1972*0Sstevel@tonic-gate 		if (pp->p_szc != szc) {
1973*0Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[13]);
1974*0Sstevel@tonic-gate 			ASSERT(slotcreate == 0);
1975*0Sstevel@tonic-gate 			ASSERT(prealloc == 0);
1976*0Sstevel@tonic-gate 			ASSERT(pg_idx == 0);
1977*0Sstevel@tonic-gate 			if (pp->p_szc > szc) {
1978*0Sstevel@tonic-gate 				page_unlock(pp);
1979*0Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.getpages[14]);
1980*0Sstevel@tonic-gate 				return (-2);
1981*0Sstevel@tonic-gate 			}
1982*0Sstevel@tonic-gate 			page_unlock(pp);
1983*0Sstevel@tonic-gate 			prealloc = 1;
1984*0Sstevel@tonic-gate 			goto top;
1985*0Sstevel@tonic-gate 		}
1986*0Sstevel@tonic-gate 
1987*0Sstevel@tonic-gate 		/*
1988*0Sstevel@tonic-gate 		 * If we decided to preallocate but VOP_GETPAGE
1989*0Sstevel@tonic-gate 		 * found a page in the system that satisfies our
1990*0Sstevel@tonic-gate 		 * request then free up our preallocated large page
1991*0Sstevel@tonic-gate 		 * and continue looping across the existing large
1992*0Sstevel@tonic-gate 		 * page via VOP_GETPAGE.
1993*0Sstevel@tonic-gate 		 */
1994*0Sstevel@tonic-gate 		if (prealloc && pp != ppa[pg_idx]) {
1995*0Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[15]);
1996*0Sstevel@tonic-gate 			ASSERT(slotcreate == 0);
1997*0Sstevel@tonic-gate 			ASSERT(pg_idx == 0);
1998*0Sstevel@tonic-gate 			conpp = NULL;
1999*0Sstevel@tonic-gate 			prealloc = 0;
2000*0Sstevel@tonic-gate 			page_free_pages(ppa[0]);
2001*0Sstevel@tonic-gate 		}
2002*0Sstevel@tonic-gate 
2003*0Sstevel@tonic-gate 		if (prealloc && nreloc > 1) {
2004*0Sstevel@tonic-gate 			/*
2005*0Sstevel@tonic-gate 			 * We have relocated out of a smaller large page.
2006*0Sstevel@tonic-gate 			 * Skip npgs - 1 iterations and continue, which
2007*0Sstevel@tonic-gate 			 * will increment the loop indices by one.
2008*0Sstevel@tonic-gate 			 */
2009*0Sstevel@tonic-gate 			spgcnt_t npgs = nreloc;
2010*0Sstevel@tonic-gate 
2011*0Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[16]);
2012*0Sstevel@tonic-gate 
2013*0Sstevel@tonic-gate 			ASSERT(pp == ppa[pg_idx]);
2014*0Sstevel@tonic-gate 			ASSERT(slotcreate == 0);
2015*0Sstevel@tonic-gate 			ASSERT(pg_idx + npgs <= pgcnt);
2016*0Sstevel@tonic-gate 			if ((*protp & PROT_WRITE) &&
2017*0Sstevel@tonic-gate 			    anon_share(amp->ahp, an_idx, npgs)) {
2018*0Sstevel@tonic-gate 				*protp &= ~PROT_WRITE;
2019*0Sstevel@tonic-gate 			}
2020*0Sstevel@tonic-gate 			pg_idx += npgs;
2021*0Sstevel@tonic-gate 			an_idx += npgs;
2022*0Sstevel@tonic-gate 			vaddr += PAGESIZE * npgs;
2023*0Sstevel@tonic-gate 			continue;
2024*0Sstevel@tonic-gate 		}
2025*0Sstevel@tonic-gate 
2026*0Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.getpages[17]);
2027*0Sstevel@tonic-gate 
2028*0Sstevel@tonic-gate 		/*
2029*0Sstevel@tonic-gate 		 * Anon_zero case.
2030*0Sstevel@tonic-gate 		 */
2031*0Sstevel@tonic-gate 		if (slotcreate) {
2032*0Sstevel@tonic-gate 			ASSERT(prealloc);
2033*0Sstevel@tonic-gate 			pagezero(pp, 0, PAGESIZE);
2034*0Sstevel@tonic-gate 			CPU_STATS_ADD_K(vm, zfod, 1);
2035*0Sstevel@tonic-gate 			hat_setrefmod(pp);
2036*0Sstevel@tonic-gate 		}
2037*0Sstevel@tonic-gate 
2038*0Sstevel@tonic-gate 		ASSERT(prealloc == 0 || ppa[pg_idx] == pp);
2039*0Sstevel@tonic-gate 		ASSERT(prealloc != 0 || PAGE_SHARED(pp));
2040*0Sstevel@tonic-gate 		ASSERT(prealloc == 0 || PAGE_EXCL(pp));
2041*0Sstevel@tonic-gate 
2042*0Sstevel@tonic-gate 		if (pg_idx > 0 &&
2043*0Sstevel@tonic-gate 		    ((page_pptonum(pp) != page_pptonum(ppa[pg_idx - 1]) + 1) ||
2044*0Sstevel@tonic-gate 		    (pp->p_szc != ppa[pg_idx - 1]->p_szc)))
2045*0Sstevel@tonic-gate 			panic("anon_map_getpages: unexpected page");
2046*0Sstevel@tonic-gate 
2047*0Sstevel@tonic-gate 		if (prealloc == 0) {
2048*0Sstevel@tonic-gate 			ppa[pg_idx] = pp;
2049*0Sstevel@tonic-gate 		}
2050*0Sstevel@tonic-gate 
2051*0Sstevel@tonic-gate 		if (ap->an_refcnt > 1) {
2052*0Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[18]);
2053*0Sstevel@tonic-gate 			*protp &= ~PROT_WRITE;
2054*0Sstevel@tonic-gate 		}
2055*0Sstevel@tonic-gate 
2056*0Sstevel@tonic-gate 		/*
2057*0Sstevel@tonic-gate 		 * If this is a new anon slot then initialize
2058*0Sstevel@tonic-gate 		 * the anon array entry.
2059*0Sstevel@tonic-gate 		 */
2060*0Sstevel@tonic-gate 		if (slotcreate) {
2061*0Sstevel@tonic-gate 			(void) anon_set_ptr(amp->ahp, an_idx, ap, ANON_SLEEP);
2062*0Sstevel@tonic-gate 		}
2063*0Sstevel@tonic-gate 		pg_idx++;
2064*0Sstevel@tonic-gate 		an_idx++;
2065*0Sstevel@tonic-gate 		vaddr += PAGESIZE;
2066*0Sstevel@tonic-gate 	}
2067*0Sstevel@tonic-gate 
2068*0Sstevel@tonic-gate 	/*
2069*0Sstevel@tonic-gate 	 * Since preallocated pages come off the freelist
2070*0Sstevel@tonic-gate 	 * they are locked SE_EXCL. Simply downgrade and return.
2071*0Sstevel@tonic-gate 	 */
2072*0Sstevel@tonic-gate 	if (prealloc) {
2073*0Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.getpages[19]);
2074*0Sstevel@tonic-gate 		conpp = NULL;
2075*0Sstevel@tonic-gate 		for (pg_idx = 0; pg_idx < pgcnt; pg_idx++) {
2076*0Sstevel@tonic-gate 			page_downgrade(ppa[pg_idx]);
2077*0Sstevel@tonic-gate 		}
2078*0Sstevel@tonic-gate 	}
2079*0Sstevel@tonic-gate 	ASSERT(conpp == NULL);
2080*0Sstevel@tonic-gate 
2081*0Sstevel@tonic-gate 	if (brkcow == 0 || (*protp & PROT_WRITE)) {
2082*0Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.getpages[20]);
2083*0Sstevel@tonic-gate 		return (0);
2084*0Sstevel@tonic-gate 	}
2085*0Sstevel@tonic-gate 
2086*0Sstevel@tonic-gate 	if (szc < seg->s_szc)
2087*0Sstevel@tonic-gate 		panic("anon_map_getpages: cowfault for szc %d", szc);
2088*0Sstevel@tonic-gate 
2089*0Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.getpages[21]);
2090*0Sstevel@tonic-gate 
2091*0Sstevel@tonic-gate 	*protp = PROT_ALL;
2092*0Sstevel@tonic-gate 	return (anon_map_privatepages(amp, start_idx, szc, seg, addr, prot,
2093*0Sstevel@tonic-gate 	    ppa, vpage, anypgsz, cred));
2094*0Sstevel@tonic-gate io_err:
2095*0Sstevel@tonic-gate 	/*
2096*0Sstevel@tonic-gate 	 * We got an IO error somewhere in our large page.
2097*0Sstevel@tonic-gate 	 * If we were using a preallocated page then just demote
2098*0Sstevel@tonic-gate 	 * all the constituent pages that we've succeeded with so far
2099*0Sstevel@tonic-gate 	 * to PAGESIZE pages and leave them in the system
2100*0Sstevel@tonic-gate 	 * unlocked.
2101*0Sstevel@tonic-gate 	 */
2102*0Sstevel@tonic-gate 
2103*0Sstevel@tonic-gate 	ASSERT(err != -2 || pg_idx == 0);
2104*0Sstevel@tonic-gate 
2105*0Sstevel@tonic-gate 	VM_STAT_COND_ADD(err > 0, anonvmstats.getpages[22]);
2106*0Sstevel@tonic-gate 	VM_STAT_COND_ADD(err == -1, anonvmstats.getpages[23]);
2107*0Sstevel@tonic-gate 	VM_STAT_COND_ADD(err == -2, anonvmstats.getpages[24]);
2108*0Sstevel@tonic-gate 
2109*0Sstevel@tonic-gate 	if (prealloc) {
2110*0Sstevel@tonic-gate 		conpp = NULL;
2111*0Sstevel@tonic-gate 		if (pg_idx > 0) {
2112*0Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[25]);
2113*0Sstevel@tonic-gate 			for (i = 0; i < pgcnt; i++) {
2114*0Sstevel@tonic-gate 				pp = ppa[i];
2115*0Sstevel@tonic-gate 				ASSERT(PAGE_EXCL(pp));
2116*0Sstevel@tonic-gate 				ASSERT(pp->p_szc == szc);
2117*0Sstevel@tonic-gate 				pp->p_szc = 0;
2118*0Sstevel@tonic-gate 			}
2119*0Sstevel@tonic-gate 			for (i = 0; i < pg_idx; i++) {
2120*0Sstevel@tonic-gate 				ASSERT(!hat_page_is_mapped(ppa[i]));
2121*0Sstevel@tonic-gate 				page_unlock(ppa[i]);
2122*0Sstevel@tonic-gate 			}
2123*0Sstevel@tonic-gate 			/*
2124*0Sstevel@tonic-gate 			 * Now free up the remaining unused constituent
2125*0Sstevel@tonic-gate 			 * pages.
2126*0Sstevel@tonic-gate 			 */
2127*0Sstevel@tonic-gate 			while (pg_idx < pgcnt) {
2128*0Sstevel@tonic-gate 				ASSERT(!hat_page_is_mapped(ppa[pg_idx]));
2129*0Sstevel@tonic-gate 				page_free(ppa[pg_idx], 0);
2130*0Sstevel@tonic-gate 				pg_idx++;
2131*0Sstevel@tonic-gate 			}
2132*0Sstevel@tonic-gate 		} else {
2133*0Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.getpages[26]);
2134*0Sstevel@tonic-gate 			page_free_pages(ppa[0]);
2135*0Sstevel@tonic-gate 		}
2136*0Sstevel@tonic-gate 	} else {
2137*0Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.getpages[27]);
2138*0Sstevel@tonic-gate 		ASSERT(err > 0);
2139*0Sstevel@tonic-gate 		for (i = 0; i < pg_idx; i++)
2140*0Sstevel@tonic-gate 			page_unlock(ppa[i]);
2141*0Sstevel@tonic-gate 	}
2142*0Sstevel@tonic-gate 	ASSERT(conpp == NULL);
2143*0Sstevel@tonic-gate 	if (err != -1)
2144*0Sstevel@tonic-gate 		return (err);
2145*0Sstevel@tonic-gate 	/*
2146*0Sstevel@tonic-gate 	 * We are here because we failed to relocate.
2147*0Sstevel@tonic-gate 	 */
2148*0Sstevel@tonic-gate 	ASSERT(prealloc);
2149*0Sstevel@tonic-gate 	if (brkcow == 0 || !anon_share(amp->ahp, start_idx, pgcnt)) {
2150*0Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.getpages[28]);
2151*0Sstevel@tonic-gate 		return (-1);
2152*0Sstevel@tonic-gate 	}
2153*0Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.getpages[29]);
2154*0Sstevel@tonic-gate 	goto docow;
2155*0Sstevel@tonic-gate }
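/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a fault handler might interpret the -1/-2 return protocol described
 * in the comment above anon_map_getpages().  The helper name and the
 * ANON_EXAMPLES guard are hypothetical; vpage is omitted for brevity.
 */
#ifdef ANON_EXAMPLES
static int
anon_map_getpages_sketch(struct anon_map *amp, ulong_t idx, uint_t szc,
    struct seg *seg, caddr_t addr, uint_t prot, uint_t *protp,
    page_t *ppa[], uint_t *ppa_szc, enum seg_rw rw, int brkcow,
    int anypgsz, struct cred *cred)
{
	int err;

	err = anon_map_getpages(amp, idx, szc, seg, addr, prot, protp,
	    ppa, ppa_szc, NULL, rw, brkcow, anypgsz, cred);
	if (err == -2)
		return (-2);	/* someone has a larger page: retry there */
	if (err == -1)
		return (-1);	/* no large page available: retry smaller */
	return (err);		/* 0 on success, or an errno from page-in */
}
#endif	/* ANON_EXAMPLES */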
2156*0Sstevel@tonic-gate 
2157*0Sstevel@tonic-gate 
2158*0Sstevel@tonic-gate /*
2159*0Sstevel@tonic-gate  * Turn a reference to an object or shared anon page
2160*0Sstevel@tonic-gate  * into a private page with a copy of the data from the
2161*0Sstevel@tonic-gate  * original page, which is always locked by the caller.
2162*0Sstevel@tonic-gate  * This routine unloads the translation and unlocks the
2163*0Sstevel@tonic-gate  * original page, if it isn't being stolen, before returning
2164*0Sstevel@tonic-gate  * to the caller.
2165*0Sstevel@tonic-gate  *
2166*0Sstevel@tonic-gate  * NOTE:  The original anon slot is not freed by this routine.
2167*0Sstevel@tonic-gate  *	  It must be freed by the caller while holding the
2168*0Sstevel@tonic-gate  *	  "anon_map" lock to prevent races which can occur if
2169*0Sstevel@tonic-gate  *	  a process has multiple lwps in its address space.
2170*0Sstevel@tonic-gate  */
2171*0Sstevel@tonic-gate page_t *
2172*0Sstevel@tonic-gate anon_private(
2173*0Sstevel@tonic-gate 	struct anon **app,
2174*0Sstevel@tonic-gate 	struct seg *seg,
2175*0Sstevel@tonic-gate 	caddr_t addr,
2176*0Sstevel@tonic-gate 	uint_t	prot,
2177*0Sstevel@tonic-gate 	page_t *opp,
2178*0Sstevel@tonic-gate 	int oppflags,
2179*0Sstevel@tonic-gate 	struct cred *cred)
2180*0Sstevel@tonic-gate {
2181*0Sstevel@tonic-gate 	struct anon *old = *app;
2182*0Sstevel@tonic-gate 	struct anon *new;
2183*0Sstevel@tonic-gate 	page_t *pp = NULL;
2184*0Sstevel@tonic-gate 	struct vnode *vp;
2185*0Sstevel@tonic-gate 	anoff_t off;
2186*0Sstevel@tonic-gate 	page_t *anon_pl[1 + 1];
2187*0Sstevel@tonic-gate 	int err;
2188*0Sstevel@tonic-gate 
2189*0Sstevel@tonic-gate 	if (oppflags & STEAL_PAGE)
2190*0Sstevel@tonic-gate 		ASSERT(PAGE_EXCL(opp));
2191*0Sstevel@tonic-gate 	else
2192*0Sstevel@tonic-gate 		ASSERT(PAGE_LOCKED(opp));
2193*0Sstevel@tonic-gate 
2194*0Sstevel@tonic-gate 	CPU_STATS_ADD_K(vm, cow_fault, 1);
2195*0Sstevel@tonic-gate 
2196*0Sstevel@tonic-gate 	/* Kernel probe */
2197*0Sstevel@tonic-gate 	TNF_PROBE_1(anon_private, "vm pagefault", /* CSTYLED */,
2198*0Sstevel@tonic-gate 		tnf_opaque,	address,	addr);
2199*0Sstevel@tonic-gate 
2200*0Sstevel@tonic-gate 	*app = new = anon_alloc(NULL, 0);
2201*0Sstevel@tonic-gate 	swap_xlate(new, &vp, &off);
2202*0Sstevel@tonic-gate 
2203*0Sstevel@tonic-gate 	if (oppflags & STEAL_PAGE) {
2204*0Sstevel@tonic-gate 		page_rename(opp, vp, (u_offset_t)off);
2205*0Sstevel@tonic-gate 		pp = opp;
2206*0Sstevel@tonic-gate 		TRACE_5(TR_FAC_VM, TR_ANON_PRIVATE,
2207*0Sstevel@tonic-gate 			"anon_private:seg %p addr %x pp %p vp %p off %lx",
2208*0Sstevel@tonic-gate 			seg, addr, pp, vp, off);
2209*0Sstevel@tonic-gate 		hat_setmod(pp);
2210*0Sstevel@tonic-gate 
2211*0Sstevel@tonic-gate 		/* bug 4026339 */
2212*0Sstevel@tonic-gate 		page_downgrade(pp);
2213*0Sstevel@tonic-gate 		return (pp);
2214*0Sstevel@tonic-gate 	}
2215*0Sstevel@tonic-gate 
2216*0Sstevel@tonic-gate 	/*
2217*0Sstevel@tonic-gate 	 * Call the VOP_GETPAGE routine to create the page, thereby
2218*0Sstevel@tonic-gate 	 * enabling the vnode driver to allocate any filesystem
2219*0Sstevel@tonic-gate 	 * space (e.g., disk block allocation for UFS).  This also
2220*0Sstevel@tonic-gate 	 * prevents more than one page from being added to the
2221*0Sstevel@tonic-gate 	 * vnode at the same time.
2222*0Sstevel@tonic-gate 	 */
2223*0Sstevel@tonic-gate 	err = VOP_GETPAGE(vp, (u_offset_t)off, PAGESIZE, NULL,
2224*0Sstevel@tonic-gate 	    anon_pl, PAGESIZE, seg, addr, S_CREATE, cred);
2225*0Sstevel@tonic-gate 	if (err)
2226*0Sstevel@tonic-gate 		goto out;
2227*0Sstevel@tonic-gate 
2228*0Sstevel@tonic-gate 	pp = anon_pl[0];
2229*0Sstevel@tonic-gate 
2230*0Sstevel@tonic-gate 	/*
2231*0Sstevel@tonic-gate 	 * If the original page was locked, we need to move the lock
2232*0Sstevel@tonic-gate 	 * to the new page by transferring 'cowcnt/lckcnt' of the original
2233*0Sstevel@tonic-gate 	 * page to 'cowcnt/lckcnt' of the new page.
2234*0Sstevel@tonic-gate 	 *
2235*0Sstevel@tonic-gate 	 * See Statement at the beginning of segvn_lockop() and
2236*0Sstevel@tonic-gate 	 * comments in page_pp_useclaim() regarding the way
2237*0Sstevel@tonic-gate 	 * cowcnts/lckcnts are handled.
2238*0Sstevel@tonic-gate 	 *
2239*0Sstevel@tonic-gate 	 * Also availrmem must be decremented up front for a read-only mapping
2240*0Sstevel@tonic-gate 	 * before calling page_pp_useclaim. page_pp_useclaim will bump it back
2241*0Sstevel@tonic-gate 	 * if availrmem did not need to be decremented after all.
2242*0Sstevel@tonic-gate 	 */
2243*0Sstevel@tonic-gate 	if (oppflags & LOCK_PAGE) {
2244*0Sstevel@tonic-gate 		if ((prot & PROT_WRITE) == 0) {
2245*0Sstevel@tonic-gate 			mutex_enter(&freemem_lock);
2246*0Sstevel@tonic-gate 			if (availrmem > pages_pp_maximum) {
2247*0Sstevel@tonic-gate 				availrmem--;
2248*0Sstevel@tonic-gate 				pages_useclaim++;
2249*0Sstevel@tonic-gate 			} else {
2250*0Sstevel@tonic-gate 				mutex_exit(&freemem_lock);
2251*0Sstevel@tonic-gate 				goto out;
2252*0Sstevel@tonic-gate 			}
2253*0Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
2254*0Sstevel@tonic-gate 		}
2255*0Sstevel@tonic-gate 		page_pp_useclaim(opp, pp, prot & PROT_WRITE);
2256*0Sstevel@tonic-gate 	}
2257*0Sstevel@tonic-gate 
2258*0Sstevel@tonic-gate 	/*
2259*0Sstevel@tonic-gate 	 * Now copy the contents from the original page,
2260*0Sstevel@tonic-gate 	 * which is locked and loaded in the MMU by
2261*0Sstevel@tonic-gate 	 * the caller to prevent yet another page fault.
2262*0Sstevel@tonic-gate 	 */
2263*0Sstevel@tonic-gate 	ppcopy(opp, pp);		/* XXX - should set mod bit in here */
2264*0Sstevel@tonic-gate 
2265*0Sstevel@tonic-gate 	hat_setrefmod(pp);		/* mark as modified */
2266*0Sstevel@tonic-gate 
2267*0Sstevel@tonic-gate 	/*
2268*0Sstevel@tonic-gate 	 * Unload the old translation.
2269*0Sstevel@tonic-gate 	 */
2270*0Sstevel@tonic-gate 	hat_unload(seg->s_as->a_hat, addr, PAGESIZE, HAT_UNLOAD);
2271*0Sstevel@tonic-gate 
2272*0Sstevel@tonic-gate 	/*
2273*0Sstevel@tonic-gate 	 * Free the unmapped, unmodified original page,
2274*0Sstevel@tonic-gate 	 * or release the lock on the original page;
2275*0Sstevel@tonic-gate 	 * otherwise the process will sleep forever in
2276*0Sstevel@tonic-gate 	 * anon_decref() waiting for the "exclusive" lock
2277*0Sstevel@tonic-gate 	 * on the page.
2278*0Sstevel@tonic-gate 	 */
2279*0Sstevel@tonic-gate 	(void) page_release(opp, 1);
2280*0Sstevel@tonic-gate 
2281*0Sstevel@tonic-gate 	/*
2282*0Sstevel@tonic-gate 	 * We are done with page creation, so downgrade the new
2283*0Sstevel@tonic-gate 	 * page's selock to shared.  This helps when multiple
2284*0Sstevel@tonic-gate 	 * as_fault(...SOFTLOCK...) calls are done to the same
2285*0Sstevel@tonic-gate 	 * page (aio).
2286*0Sstevel@tonic-gate 	 */
2287*0Sstevel@tonic-gate 	page_downgrade(pp);
2288*0Sstevel@tonic-gate 
2289*0Sstevel@tonic-gate 	/*
2290*0Sstevel@tonic-gate 	 * NOTE:  The original anon slot must be freed by the
2291*0Sstevel@tonic-gate 	 * caller while holding the "anon_map" lock, if we
2292*0Sstevel@tonic-gate 	 * copied away from an anonymous page.
2293*0Sstevel@tonic-gate 	 */
2294*0Sstevel@tonic-gate 	return (pp);
2295*0Sstevel@tonic-gate 
2296*0Sstevel@tonic-gate out:
2297*0Sstevel@tonic-gate 	*app = old;
2298*0Sstevel@tonic-gate 	if (pp)
2299*0Sstevel@tonic-gate 		page_unlock(pp);
2300*0Sstevel@tonic-gate 	anon_decref(new);
2301*0Sstevel@tonic-gate 	page_unlock(opp);
2302*0Sstevel@tonic-gate 	return ((page_t *)NULL);
2303*0Sstevel@tonic-gate }
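/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the classic copy-on-write step a fault handler performs with
 * anon_private().  The helper name and the ANON_EXAMPLES guard are
 * hypothetical; per the NOTE above, freeing the old anon slot is left
 * to the caller holding the anon_map lock.
 */
#ifdef ANON_EXAMPLES
static page_t *
anon_private_cow_sketch(struct anon **app, struct seg *seg, caddr_t addr,
    uint_t prot, page_t *opp, struct cred *cred)
{
	/*
	 * opp is the shared original page, locked by the caller and
	 * loaded in the MMU.  On success *app points to a new anon
	 * slot and the returned private copy is locked SE_SHARED.
	 */
	return (anon_private(app, seg, addr, prot, opp, 0, cred));
}
#endif	/* ANON_EXAMPLES */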
2304*0Sstevel@tonic-gate 
2305*0Sstevel@tonic-gate int
2306*0Sstevel@tonic-gate anon_map_privatepages(
2307*0Sstevel@tonic-gate 	struct anon_map *amp,
2308*0Sstevel@tonic-gate 	ulong_t	start_idx,
2309*0Sstevel@tonic-gate 	uint_t	szc,
2310*0Sstevel@tonic-gate 	struct seg *seg,
2311*0Sstevel@tonic-gate 	caddr_t addr,
2312*0Sstevel@tonic-gate 	uint_t	prot,
2313*0Sstevel@tonic-gate 	page_t	*ppa[],
2314*0Sstevel@tonic-gate 	struct vpage vpage[],
2315*0Sstevel@tonic-gate 	int anypgsz,
2316*0Sstevel@tonic-gate 	struct cred *cred)
2317*0Sstevel@tonic-gate {
2318*0Sstevel@tonic-gate 	pgcnt_t		pgcnt;
2319*0Sstevel@tonic-gate 	struct vnode	*vp;
2320*0Sstevel@tonic-gate 	anoff_t		off;
2321*0Sstevel@tonic-gate 	page_t		*pl[2], *conpp = NULL;
2322*0Sstevel@tonic-gate 	int		err;
2323*0Sstevel@tonic-gate 	int		prealloc = 1;
2324*0Sstevel@tonic-gate 	struct anon	*ap, *oldap;
2325*0Sstevel@tonic-gate 	caddr_t		vaddr;
2326*0Sstevel@tonic-gate 	page_t		*pplist, *pp;
2327*0Sstevel@tonic-gate 	ulong_t		pg_idx, an_idx;
2328*0Sstevel@tonic-gate 	spgcnt_t	nreloc = 0;
2329*0Sstevel@tonic-gate 	int		pagelock = 0;
2330*0Sstevel@tonic-gate 	kmutex_t	*ahmpages = NULL;
2331*0Sstevel@tonic-gate #ifdef DEBUG
2332*0Sstevel@tonic-gate 	int		refcnt;
2333*0Sstevel@tonic-gate #endif
2334*0Sstevel@tonic-gate 
2335*0Sstevel@tonic-gate 	ASSERT(szc != 0);
2336*0Sstevel@tonic-gate 	ASSERT(szc == seg->s_szc);
2337*0Sstevel@tonic-gate 
2338*0Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.privatepages[0]);
2339*0Sstevel@tonic-gate 
2340*0Sstevel@tonic-gate 	pgcnt = page_get_pagecnt(szc);
2341*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
2342*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(start_idx, pgcnt));
2343*0Sstevel@tonic-gate 
2344*0Sstevel@tonic-gate 	ASSERT(amp != NULL);
2345*0Sstevel@tonic-gate 	ap = anon_get_ptr(amp->ahp, start_idx);
2346*0Sstevel@tonic-gate 	ASSERT(ap == NULL || ap->an_refcnt >= 1);
2347*0Sstevel@tonic-gate 
2348*0Sstevel@tonic-gate 	VM_STAT_COND_ADD(ap == NULL, anonvmstats.privatepages[1]);
2349*0Sstevel@tonic-gate 
2350*0Sstevel@tonic-gate 	/*
2351*0Sstevel@tonic-gate 	 * Now try to allocate the large page. If we fail, then just
2352*0Sstevel@tonic-gate 	 * let VOP_GETPAGE give us PAGESIZE pages. Normally we let
2353*0Sstevel@tonic-gate 	 * the caller make this decision, but to avoid added complexity
2354*0Sstevel@tonic-gate 	 * it's simpler to handle that case here.
2355*0Sstevel@tonic-gate 	 */
2356*0Sstevel@tonic-gate 	if (anypgsz == -1) {
2357*0Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.privatepages[2]);
2358*0Sstevel@tonic-gate 		prealloc = 0;
2359*0Sstevel@tonic-gate 	} else if (page_alloc_pages(seg, addr, &pplist, NULL, szc,
2360*0Sstevel@tonic-gate 	    anypgsz) != 0) {
2361*0Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.privatepages[3]);
2362*0Sstevel@tonic-gate 		prealloc = 0;
2363*0Sstevel@tonic-gate 	}
2364*0Sstevel@tonic-gate 
2365*0Sstevel@tonic-gate 	/*
2366*0Sstevel@tonic-gate 	 * Make the decrement of the refcnts of all
2367*0Sstevel@tonic-gate 	 * anon slots of a large page appear atomic by
2368*0Sstevel@tonic-gate 	 * acquiring the anonpages_hash_lock for the
2369*0Sstevel@tonic-gate 	 * first anon slot of the large page.
2370*0Sstevel@tonic-gate 	 */
2371*0Sstevel@tonic-gate 	if (ap != NULL) {
2372*0Sstevel@tonic-gate 		ahmpages = &anonpages_hash_lock[AH_LOCK(ap->an_vp,
2373*0Sstevel@tonic-gate 		    ap->an_off)];
2374*0Sstevel@tonic-gate 		mutex_enter(ahmpages);
2375*0Sstevel@tonic-gate 		if (ap->an_refcnt == 1) {
2376*0Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.privatepages[4]);
2377*0Sstevel@tonic-gate 			ASSERT(!anon_share(amp->ahp, start_idx, pgcnt));
2378*0Sstevel@tonic-gate 			mutex_exit(ahmpages);
2379*0Sstevel@tonic-gate 
2380*0Sstevel@tonic-gate 			if (prealloc) {
2381*0Sstevel@tonic-gate 				page_free_replacement_page(pplist);
2382*0Sstevel@tonic-gate 				page_create_putback(pgcnt);
2383*0Sstevel@tonic-gate 			}
2384*0Sstevel@tonic-gate 			ASSERT(ppa[0]->p_szc <= szc);
2385*0Sstevel@tonic-gate 			if (ppa[0]->p_szc == szc) {
2386*0Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.privatepages[5]);
2387*0Sstevel@tonic-gate 				return (0);
2388*0Sstevel@tonic-gate 			}
2389*0Sstevel@tonic-gate 			for (pg_idx = 0; pg_idx < pgcnt; pg_idx++) {
2390*0Sstevel@tonic-gate 				ASSERT(ppa[pg_idx] != NULL);
2391*0Sstevel@tonic-gate 				page_unlock(ppa[pg_idx]);
2392*0Sstevel@tonic-gate 			}
2393*0Sstevel@tonic-gate 			return (-1);
2394*0Sstevel@tonic-gate 		}
2395*0Sstevel@tonic-gate 	}
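
	/*
	 * A sketch, for illustration only, of the hash-lock pattern the
	 * code above relies on: refcnt changes for the constituent slots
	 * of a large page are serialized by the anonpages_hash_lock of
	 * the first slot, so a scan made while holding that lock sees
	 * the refcnts of the whole large page atomically.  The helper
	 * name anon_lpage_shared() is hypothetical (cf. the anon_share()
	 * check above):
	 *
	 *	static int
	 *	anon_lpage_shared(struct anon_hdr *ahp, ulong_t idx,
	 *	    pgcnt_t pgcnt)
	 *	{
	 *		struct anon *ap = anon_get_ptr(ahp, idx);
	 *		kmutex_t *ahm;
	 *		pgcnt_t i;
	 *		int shared = 0;
	 *
	 *		if (ap == NULL)
	 *			return (0);
	 *		ahm = &anonpages_hash_lock[AH_LOCK(ap->an_vp,
	 *		    ap->an_off)];
	 *		mutex_enter(ahm);
	 *		for (i = 0; i < pgcnt; i++) {
	 *			ap = anon_get_ptr(ahp, idx + i);
	 *			if (ap != NULL && ap->an_refcnt > 1)
	 *				shared = 1;
	 *		}
	 *		mutex_exit(ahm);
	 *		return (shared);
	 *	}
	 */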
2396*0Sstevel@tonic-gate 
2397*0Sstevel@tonic-gate 	/*
2398*0Sstevel@tonic-gate 	 * If we are passed in the vpage array and this is
2399*0Sstevel@tonic-gate 	 * not PROT_WRITE, then we need to decrement availrmem
2400*0Sstevel@tonic-gate 	 * up front before we try anything. If we need to and
2401*0Sstevel@tonic-gate 	 * can't decrement availrmem, then it's better to fail now
2402*0Sstevel@tonic-gate 	 * than in the middle of processing the new large page.
2403*0Sstevel@tonic-gate 	 * page_pp_useclaim() on behalf of each constituent page
2404*0Sstevel@tonic-gate 	 * below will adjust availrmem back where it was not needed.
2405*0Sstevel@tonic-gate 	 */
2406*0Sstevel@tonic-gate 	if (vpage != NULL && (prot & PROT_WRITE) == 0) {
2407*0Sstevel@tonic-gate 		for (pg_idx = 0; pg_idx < pgcnt; pg_idx++) {
2408*0Sstevel@tonic-gate 			if (VPP_ISPPLOCK(&vpage[pg_idx])) {
2409*0Sstevel@tonic-gate 				pagelock = 1;
2410*0Sstevel@tonic-gate 				break;
2411*0Sstevel@tonic-gate 			}
2412*0Sstevel@tonic-gate 		}
2413*0Sstevel@tonic-gate 		if (pagelock) {
2414*0Sstevel@tonic-gate 			VM_STAT_ADD(anonvmstats.privatepages[6]);
2415*0Sstevel@tonic-gate 			mutex_enter(&freemem_lock);
2416*0Sstevel@tonic-gate 			if (availrmem >= pages_pp_maximum + pgcnt) {
2417*0Sstevel@tonic-gate 				availrmem -= pgcnt;
2418*0Sstevel@tonic-gate 				pages_useclaim += pgcnt;
2419*0Sstevel@tonic-gate 			} else {
2420*0Sstevel@tonic-gate 				VM_STAT_ADD(anonvmstats.privatepages[7]);
2421*0Sstevel@tonic-gate 				mutex_exit(&freemem_lock);
2422*0Sstevel@tonic-gate 				if (ahmpages != NULL) {
2423*0Sstevel@tonic-gate 					mutex_exit(ahmpages);
2424*0Sstevel@tonic-gate 				}
2425*0Sstevel@tonic-gate 				if (prealloc) {
2426*0Sstevel@tonic-gate 					page_free_replacement_page(pplist);
2427*0Sstevel@tonic-gate 					page_create_putback(pgcnt);
2428*0Sstevel@tonic-gate 				}
2429*0Sstevel@tonic-gate 				for (pg_idx = 0; pg_idx < pgcnt; pg_idx++)
2430*0Sstevel@tonic-gate 					if (ppa[pg_idx] != NULL)
2431*0Sstevel@tonic-gate 						page_unlock(ppa[pg_idx]);
2432*0Sstevel@tonic-gate 				return (ENOMEM);
2433*0Sstevel@tonic-gate 			}
2434*0Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
2435*0Sstevel@tonic-gate 		}
2436*0Sstevel@tonic-gate 	}
2437*0Sstevel@tonic-gate 
2438*0Sstevel@tonic-gate 	CPU_STATS_ADD_K(vm, cow_fault, pgcnt);
2439*0Sstevel@tonic-gate 
2440*0Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.privatepages[8]);
2441*0Sstevel@tonic-gate 
2442*0Sstevel@tonic-gate 	an_idx = start_idx;
2443*0Sstevel@tonic-gate 	pg_idx = 0;
2444*0Sstevel@tonic-gate 	vaddr = addr;
2445*0Sstevel@tonic-gate 	for (; pg_idx < pgcnt; pg_idx++, an_idx++, vaddr += PAGESIZE) {
2446*0Sstevel@tonic-gate 		ASSERT(ppa[pg_idx] != NULL);
2447*0Sstevel@tonic-gate 		oldap = anon_get_ptr(amp->ahp, an_idx);
2448*0Sstevel@tonic-gate 		ASSERT(ahmpages != NULL || oldap == NULL);
2449*0Sstevel@tonic-gate 		ASSERT(ahmpages == NULL || oldap != NULL);
2450*0Sstevel@tonic-gate 		ASSERT(ahmpages == NULL || oldap->an_refcnt > 1);
2451*0Sstevel@tonic-gate 		ASSERT(ahmpages == NULL || pg_idx != 0 ||
2452*0Sstevel@tonic-gate 		    (refcnt = oldap->an_refcnt));
2453*0Sstevel@tonic-gate 		ASSERT(ahmpages == NULL || pg_idx == 0 ||
2454*0Sstevel@tonic-gate 		    refcnt == oldap->an_refcnt);
2455*0Sstevel@tonic-gate 
2456*0Sstevel@tonic-gate 		ap = anon_alloc(NULL, 0);
2457*0Sstevel@tonic-gate 
2458*0Sstevel@tonic-gate 		swap_xlate(ap, &vp, &off);
2459*0Sstevel@tonic-gate 
2460*0Sstevel@tonic-gate 		/*
2461*0Sstevel@tonic-gate 		 * Now set up our preallocated page to pass down to
2462*0Sstevel@tonic-gate 		 * swap_getconpage().
2463*0Sstevel@tonic-gate 		 */
2464*0Sstevel@tonic-gate 		if (prealloc) {
2465*0Sstevel@tonic-gate 			pp = pplist;
2466*0Sstevel@tonic-gate 			page_sub(&pplist, pp);
2467*0Sstevel@tonic-gate 			conpp = pp;
2468*0Sstevel@tonic-gate 		}
2469*0Sstevel@tonic-gate 
2470*0Sstevel@tonic-gate 		err = swap_getconpage(vp, (u_offset_t)off, PAGESIZE, NULL, pl,
2471*0Sstevel@tonic-gate 			PAGESIZE, conpp, &nreloc, seg, vaddr, S_CREATE, cred);
2472*0Sstevel@tonic-gate 
2473*0Sstevel@tonic-gate 		/*
2474*0Sstevel@tonic-gate 		 * Impossible to fail since this is S_CREATE.
2475*0Sstevel@tonic-gate 		 */
2476*0Sstevel@tonic-gate 		if (err)
2477*0Sstevel@tonic-gate 			panic("anon_map_privatepages: VOP_GETPAGE failed");
2478*0Sstevel@tonic-gate 
2479*0Sstevel@tonic-gate 		ASSERT(prealloc ? pp == pl[0] : pl[0]->p_szc == 0);
2480*0Sstevel@tonic-gate 		ASSERT(prealloc == 0 || nreloc == 1);
2481*0Sstevel@tonic-gate 
2482*0Sstevel@tonic-gate 		pp = pl[0];
2483*0Sstevel@tonic-gate 
2484*0Sstevel@tonic-gate 		/*
2485*0Sstevel@tonic-gate 		 * If the original page was locked, we need to move
2486*0Sstevel@tonic-gate 		 * the lock to the new page by transferring
2487*0Sstevel@tonic-gate 		 * 'cowcnt/lckcnt' of the original page to 'cowcnt/lckcnt'
2488*0Sstevel@tonic-gate 		 * of the new page. pg_idx can be used to index
2489*0Sstevel@tonic-gate 		 * into the vpage array since the caller will guarantee
2490*0Sstevel@tonic-gate 		 * that the vpage struct passed in corresponds to addr
2491*0Sstevel@tonic-gate 		 * and forward.
2492*0Sstevel@tonic-gate 		 */
2493*0Sstevel@tonic-gate 		if (vpage != NULL && VPP_ISPPLOCK(&vpage[pg_idx])) {
2494*0Sstevel@tonic-gate 			page_pp_useclaim(ppa[pg_idx], pp, prot & PROT_WRITE);
2495*0Sstevel@tonic-gate 		} else if (pagelock) {
2496*0Sstevel@tonic-gate 			mutex_enter(&freemem_lock);
2497*0Sstevel@tonic-gate 			availrmem++;
2498*0Sstevel@tonic-gate 			pages_useclaim--;
2499*0Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
2500*0Sstevel@tonic-gate 		}
2501*0Sstevel@tonic-gate 
2502*0Sstevel@tonic-gate 		/*
2503*0Sstevel@tonic-gate 		 * Now copy the contents from the original page.
2504*0Sstevel@tonic-gate 		 */
2505*0Sstevel@tonic-gate 		ppcopy(ppa[pg_idx], pp);
2506*0Sstevel@tonic-gate 
2507*0Sstevel@tonic-gate 		hat_setrefmod(pp);		/* mark as modified */
2508*0Sstevel@tonic-gate 
2509*0Sstevel@tonic-gate 		/*
2510*0Sstevel@tonic-gate 		 * Release the lock on the original page,
2511*0Sstevel@tonic-gate 		 * decrement the old slot, and downgrade the lock
2512*0Sstevel@tonic-gate 		 * on the new copy.
2513*0Sstevel@tonic-gate 		 */
2514*0Sstevel@tonic-gate 		page_unlock(ppa[pg_idx]);
2515*0Sstevel@tonic-gate 
2516*0Sstevel@tonic-gate 		if (!prealloc)
2517*0Sstevel@tonic-gate 			page_downgrade(pp);
2518*0Sstevel@tonic-gate 
2519*0Sstevel@tonic-gate 		ppa[pg_idx] = pp;
2520*0Sstevel@tonic-gate 
2521*0Sstevel@tonic-gate 		/*
2522*0Sstevel@tonic-gate 		 * Now reflect the copy in the new anon array.
2523*0Sstevel@tonic-gate 		 */
2524*0Sstevel@tonic-gate 		ASSERT(ahmpages == NULL || oldap->an_refcnt > 1);
2525*0Sstevel@tonic-gate 		if (oldap != NULL)
2526*0Sstevel@tonic-gate 			anon_decref(oldap);
2527*0Sstevel@tonic-gate 		(void) anon_set_ptr(amp->ahp, an_idx, ap, ANON_SLEEP);
2528*0Sstevel@tonic-gate 	}
2529*0Sstevel@tonic-gate 	if (ahmpages != NULL) {
2530*0Sstevel@tonic-gate 		mutex_exit(ahmpages);
2531*0Sstevel@tonic-gate 	}
2532*0Sstevel@tonic-gate 	ASSERT(prealloc == 0 || pplist == NULL);
2533*0Sstevel@tonic-gate 	if (prealloc) {
2534*0Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.privatepages[9]);
2535*0Sstevel@tonic-gate 		for (pg_idx = 0; pg_idx < pgcnt; pg_idx++) {
2536*0Sstevel@tonic-gate 			page_downgrade(ppa[pg_idx]);
2537*0Sstevel@tonic-gate 		}
2538*0Sstevel@tonic-gate 	}
2539*0Sstevel@tonic-gate 
2540*0Sstevel@tonic-gate 	/*
2541*0Sstevel@tonic-gate 	 * Unload the old large page translation.
2542*0Sstevel@tonic-gate 	 */
2543*0Sstevel@tonic-gate 	hat_unload(seg->s_as->a_hat, addr, pgcnt << PAGESHIFT, HAT_UNLOAD);
2544*0Sstevel@tonic-gate 	return (0);
2545*0Sstevel@tonic-gate }
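
/*
 * A minimal caller sketch of anon_map_privatepages()'s three-way return
 * contract, modeled on its use in anon_map_demotepages() below (idx and
 * the refault label are illustrative): 0 means ppa[] now holds the
 * private copies, -1 means the pages were unlocked and the caller
 * should fault again, and a positive return value is an errno.
 *
 *	err = anon_map_privatepages(amp, idx, seg->s_szc, seg, addr,
 *	    prot, ppa, vpage, -1, cred);
 *	if (err > 0)
 *		return (err);
 *	if (err == -1)
 *		goto refault;
 */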
2546*0Sstevel@tonic-gate 
2547*0Sstevel@tonic-gate /*
2548*0Sstevel@tonic-gate  * Allocate a private zero-filled anon page.
2549*0Sstevel@tonic-gate  */
2550*0Sstevel@tonic-gate page_t *
2551*0Sstevel@tonic-gate anon_zero(struct seg *seg, caddr_t addr, struct anon **app, struct cred *cred)
2552*0Sstevel@tonic-gate {
2553*0Sstevel@tonic-gate 	struct anon *ap;
2554*0Sstevel@tonic-gate 	page_t *pp;
2555*0Sstevel@tonic-gate 	struct vnode *vp;
2556*0Sstevel@tonic-gate 	anoff_t off;
2557*0Sstevel@tonic-gate 	page_t *anon_pl[1 + 1];
2558*0Sstevel@tonic-gate 	int err;
2559*0Sstevel@tonic-gate 
2560*0Sstevel@tonic-gate 	/* Kernel probe */
2561*0Sstevel@tonic-gate 	TNF_PROBE_1(anon_zero, "vm pagefault", /* CSTYLED */,
2562*0Sstevel@tonic-gate 		tnf_opaque,	address,	addr);
2563*0Sstevel@tonic-gate 
2564*0Sstevel@tonic-gate 	*app = ap = anon_alloc(NULL, 0);
2565*0Sstevel@tonic-gate 	swap_xlate(ap, &vp, &off);
2566*0Sstevel@tonic-gate 
2567*0Sstevel@tonic-gate 	/*
2568*0Sstevel@tonic-gate 	 * Call the VOP_GETPAGE routine to create the page, thereby
2569*0Sstevel@tonic-gate 	 * enabling the vnode driver to allocate any filesystem
2570*0Sstevel@tonic-gate 	 * dependent structures (e.g., disk block allocation for UFS).
2571*0Sstevel@tonic-gate 	 * This also prevents more than one page from being added to
2572*0Sstevel@tonic-gate 	 * the vnode at the same time since it is locked.
2573*0Sstevel@tonic-gate 	 */
2574*0Sstevel@tonic-gate 	err = VOP_GETPAGE(vp, off, PAGESIZE, NULL,
2575*0Sstevel@tonic-gate 	    anon_pl, PAGESIZE, seg, addr, S_CREATE, cred);
2576*0Sstevel@tonic-gate 	if (err) {
2577*0Sstevel@tonic-gate 		*app = NULL;
2578*0Sstevel@tonic-gate 		anon_decref(ap);
2579*0Sstevel@tonic-gate 		return (NULL);
2580*0Sstevel@tonic-gate 	}
2581*0Sstevel@tonic-gate 	pp = anon_pl[0];
2582*0Sstevel@tonic-gate 
2583*0Sstevel@tonic-gate 	pagezero(pp, 0, PAGESIZE);	/* XXX - should set mod bit */
2584*0Sstevel@tonic-gate 	page_downgrade(pp);
2585*0Sstevel@tonic-gate 	CPU_STATS_ADD_K(vm, zfod, 1);
2586*0Sstevel@tonic-gate 	hat_setrefmod(pp);	/* mark as modified so pageout writes back */
2587*0Sstevel@tonic-gate 	return (pp);
2588*0Sstevel@tonic-gate }
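
/*
 * A minimal usage sketch of anon_zero(), modeled on its use in
 * anon_map_createpages() below (amp and idx are illustrative).  The
 * returned page is held with a shared lock, and the new anon slot must
 * still be installed in the caller's anon array:
 *
 *	struct anon *ap;
 *	page_t *pp;
 *
 *	pp = anon_zero(seg, addr, &ap, cred);
 *	if (pp == NULL)
 *		return (ENOMEM);
 *	(void) anon_set_ptr(amp->ahp, idx, ap, ANON_SLEEP);
 *	...load a translation to pp, then page_unlock(pp)...
 */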
2589*0Sstevel@tonic-gate 
2590*0Sstevel@tonic-gate 
2591*0Sstevel@tonic-gate /*
2592*0Sstevel@tonic-gate  * Allocate an array of private zero-filled anon pages for empty slots
2593*0Sstevel@tonic-gate  * and kept pages for non-empty slots within the given range.
2594*0Sstevel@tonic-gate  *
2595*0Sstevel@tonic-gate  * NOTE: This routine will try to use large pages
2596*0Sstevel@tonic-gate  *	if available and supported by the underlying platform.
2597*0Sstevel@tonic-gate  */
2598*0Sstevel@tonic-gate int
2599*0Sstevel@tonic-gate anon_map_createpages(
2600*0Sstevel@tonic-gate 	struct anon_map *amp,
2601*0Sstevel@tonic-gate 	ulong_t start_index,
2602*0Sstevel@tonic-gate 	size_t len,
2603*0Sstevel@tonic-gate 	page_t *ppa[],
2604*0Sstevel@tonic-gate 	struct seg *seg,
2605*0Sstevel@tonic-gate 	caddr_t addr,
2606*0Sstevel@tonic-gate 	enum seg_rw rw,
2607*0Sstevel@tonic-gate 	struct cred *cred)
2608*0Sstevel@tonic-gate {
2609*0Sstevel@tonic-gate 
2610*0Sstevel@tonic-gate 	struct anon	*ap;
2611*0Sstevel@tonic-gate 	struct vnode	*ap_vp;
2612*0Sstevel@tonic-gate 	page_t		*pp, *pplist, *anon_pl[1 + 1], *conpp = NULL;
2613*0Sstevel@tonic-gate 	int		err = 0;
2614*0Sstevel@tonic-gate 	ulong_t		p_index, index;
2615*0Sstevel@tonic-gate 	pgcnt_t		npgs, pg_cnt;
2616*0Sstevel@tonic-gate 	spgcnt_t	nreloc = 0;
2617*0Sstevel@tonic-gate 	uint_t		l_szc, szc, prot;
2618*0Sstevel@tonic-gate 	anoff_t		ap_off;
2619*0Sstevel@tonic-gate 	size_t		pgsz;
2620*0Sstevel@tonic-gate 	lgrp_t		*lgrp;
2621*0Sstevel@tonic-gate 
2622*0Sstevel@tonic-gate 	/*
2623*0Sstevel@tonic-gate 	 * XXX For now only handle S_CREATE.
2624*0Sstevel@tonic-gate 	 */
2625*0Sstevel@tonic-gate 	ASSERT(rw == S_CREATE);
2626*0Sstevel@tonic-gate 
2627*0Sstevel@tonic-gate 	index	= start_index;
2628*0Sstevel@tonic-gate 	p_index	= 0;
2629*0Sstevel@tonic-gate 	npgs = btopr(len);
2630*0Sstevel@tonic-gate 
2631*0Sstevel@tonic-gate 	/*
2632*0Sstevel@tonic-gate 	 * If this platform supports multiple page sizes
2633*0Sstevel@tonic-gate 	 * then try to allocate directly from the free
2634*0Sstevel@tonic-gate 	 * list for pages larger than PAGESIZE.
2635*0Sstevel@tonic-gate 	 *
2636*0Sstevel@tonic-gate 	 * NOTE: When we have page_create_ru we can stop
2637*0Sstevel@tonic-gate 	 *	directly allocating from the freelist.
2638*0Sstevel@tonic-gate 	 */
2639*0Sstevel@tonic-gate 	l_szc  = seg->s_szc;
2640*0Sstevel@tonic-gate 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2641*0Sstevel@tonic-gate 	while (npgs) {
2642*0Sstevel@tonic-gate 
2643*0Sstevel@tonic-gate 		/*
2644*0Sstevel@tonic-gate 		 * If the anon slot already exists
2645*0Sstevel@tonic-gate 		 *   (meaning the page has been created),
2646*0Sstevel@tonic-gate 		 * then 1) look up the page,
2647*0Sstevel@tonic-gate 		 *    2) if the page is still in memory, get it,
2648*0Sstevel@tonic-gate 		 *    3) if not, create a page and
2649*0Sstevel@tonic-gate 		 *	  page it in from the physical swap device.
2650*0Sstevel@tonic-gate 		 * These are all done in anon_getpage().
2651*0Sstevel@tonic-gate 		 */
2652*0Sstevel@tonic-gate 		ap = anon_get_ptr(amp->ahp, index);
2653*0Sstevel@tonic-gate 		if (ap) {
2654*0Sstevel@tonic-gate 			err = anon_getpage(&ap, &prot, anon_pl, PAGESIZE,
2655*0Sstevel@tonic-gate 			    seg, addr, S_READ, cred);
2656*0Sstevel@tonic-gate 			if (err) {
2657*0Sstevel@tonic-gate 				ANON_LOCK_EXIT(&amp->a_rwlock);
2658*0Sstevel@tonic-gate 				panic("anon_map_createpages: anon_getpage");
2659*0Sstevel@tonic-gate 			}
2660*0Sstevel@tonic-gate 			pp = anon_pl[0];
2661*0Sstevel@tonic-gate 			ppa[p_index++] = pp;
2662*0Sstevel@tonic-gate 
2663*0Sstevel@tonic-gate 			addr += PAGESIZE;
2664*0Sstevel@tonic-gate 			index++;
2665*0Sstevel@tonic-gate 			npgs--;
2666*0Sstevel@tonic-gate 			continue;
2667*0Sstevel@tonic-gate 		}
2668*0Sstevel@tonic-gate 		/*
2669*0Sstevel@tonic-gate 		 * Now try to allocate the largest page possible
2670*0Sstevel@tonic-gate 		 * for the current address and range.
2671*0Sstevel@tonic-gate 		 * Keep dropping down in page size until:
2672*0Sstevel@tonic-gate 		 *
2673*0Sstevel@tonic-gate 		 *	1) the address is properly aligned,
2674*0Sstevel@tonic-gate 		 *	2) the page does not overlap existing anon pages,
2675*0Sstevel@tonic-gate 		 *	3) it fits in the remaining range, and
2676*0Sstevel@tonic-gate 		 *	4) we are able to allocate one.
2677*0Sstevel@tonic-gate 		 *
2678*0Sstevel@tonic-gate 		 * NOTE: XXX When page_create_ru is completed this code
2679*0Sstevel@tonic-gate 		 *	 will change.
2680*0Sstevel@tonic-gate 		 */
2681*0Sstevel@tonic-gate 		szc    = l_szc;
2682*0Sstevel@tonic-gate 		pplist = NULL;
2683*0Sstevel@tonic-gate 		pg_cnt = 0;
2684*0Sstevel@tonic-gate 		while (szc) {
2685*0Sstevel@tonic-gate 			pgsz	= page_get_pagesize(szc);
2686*0Sstevel@tonic-gate 			pg_cnt	= pgsz >> PAGESHIFT;
2687*0Sstevel@tonic-gate 			if (IS_P2ALIGNED(addr, pgsz) && pg_cnt <= npgs &&
2688*0Sstevel@tonic-gate 				anon_pages(amp->ahp, index, pg_cnt) == 0) {
2689*0Sstevel@tonic-gate 				/*
2690*0Sstevel@tonic-gate 				 * XXX
2691*0Sstevel@tonic-gate 				 * Since we are faking page_create()
2692*0Sstevel@tonic-gate 				 * we also need to do the freemem and
2693*0Sstevel@tonic-gate 				 * pcf accounting.
2694*0Sstevel@tonic-gate 				 */
2695*0Sstevel@tonic-gate 				(void) page_create_wait(pg_cnt, PG_WAIT);
2696*0Sstevel@tonic-gate 
2697*0Sstevel@tonic-gate 				/*
2698*0Sstevel@tonic-gate 				 * Get lgroup to allocate next page of shared
2699*0Sstevel@tonic-gate 				 * memory from and use it to specify where to
2700*0Sstevel@tonic-gate 				 * allocate the physical memory
2701*0Sstevel@tonic-gate 				 */
2702*0Sstevel@tonic-gate 				lgrp = lgrp_mem_choose(seg, addr, pgsz);
2703*0Sstevel@tonic-gate 
2704*0Sstevel@tonic-gate 				pplist = page_get_freelist(
2705*0Sstevel@tonic-gate 				    (struct vnode *)NULL, (u_offset_t)0, seg,
2706*0Sstevel@tonic-gate 				    addr, pgsz, 0, lgrp);
2707*0Sstevel@tonic-gate 
2708*0Sstevel@tonic-gate 				if (pplist == NULL) {
2709*0Sstevel@tonic-gate 					page_create_putback(pg_cnt);
2710*0Sstevel@tonic-gate 					/*
2711*0Sstevel@tonic-gate 					 * A request for a page of size
2712*0Sstevel@tonic-gate 					 * larger than PAGESIZE failed,
2713*0Sstevel@tonic-gate 					 * so don't try that size anymore.
2714*0Sstevel@tonic-gate 					 */
2715*0Sstevel@tonic-gate 					l_szc = szc - 1;
2716*0Sstevel@tonic-gate 				} else {
2717*0Sstevel@tonic-gate 					break;
2718*0Sstevel@tonic-gate 				}
2722*0Sstevel@tonic-gate 			}
2723*0Sstevel@tonic-gate 			szc--;
2724*0Sstevel@tonic-gate 		}
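
		/*
		 * For example, on a platform with 8K/64K/512K/4M pages
		 * (szc 0 through 3), with l_szc == 3, addr 64K-aligned
		 * but not 512K-aligned, npgs == 32, and no existing
		 * anon pages in the range: szc 3 is rejected because
		 * its 512 constituent pages exceed npgs, szc 2 is
		 * rejected for alignment, and szc 1 (8 pages) is the
		 * first size actually requested from the freelist.
		 */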
2725*0Sstevel@tonic-gate 
2726*0Sstevel@tonic-gate 		/*
2727*0Sstevel@tonic-gate 		 * If just using PAGESIZE pages then don't
2728*0Sstevel@tonic-gate 		 * directly allocate from the free list.
2729*0Sstevel@tonic-gate 		 */
2730*0Sstevel@tonic-gate 		if (pplist == NULL) {
2731*0Sstevel@tonic-gate 			ASSERT(szc == 0);
2732*0Sstevel@tonic-gate 			pp = anon_zero(seg, addr, &ap, cred);
2733*0Sstevel@tonic-gate 			if (pp == NULL) {
2734*0Sstevel@tonic-gate 				ANON_LOCK_EXIT(&amp->a_rwlock);
2735*0Sstevel@tonic-gate 				panic("anon_map_createpages: anon_zero");
2736*0Sstevel@tonic-gate 			}
2737*0Sstevel@tonic-gate 			ppa[p_index++] = pp;
2738*0Sstevel@tonic-gate 
2739*0Sstevel@tonic-gate 			ASSERT(anon_get_ptr(amp->ahp, index) == NULL);
2740*0Sstevel@tonic-gate 			(void) anon_set_ptr(amp->ahp, index, ap, ANON_SLEEP);
2741*0Sstevel@tonic-gate 
2742*0Sstevel@tonic-gate 			addr += PAGESIZE;
2743*0Sstevel@tonic-gate 			index++;
2744*0Sstevel@tonic-gate 			npgs--;
2745*0Sstevel@tonic-gate 			continue;
2746*0Sstevel@tonic-gate 		}
2747*0Sstevel@tonic-gate 
2748*0Sstevel@tonic-gate 		/*
2749*0Sstevel@tonic-gate 		 * pplist is a list of pg_cnt PAGESIZE pages.
2750*0Sstevel@tonic-gate 		 * These pages are locked SE_EXCL since they
2751*0Sstevel@tonic-gate 		 * came directly off the free list.
2752*0Sstevel@tonic-gate 		 */
2753*0Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(pg_cnt, pg_cnt));
2754*0Sstevel@tonic-gate 		ASSERT(IS_P2ALIGNED(index, pg_cnt));
2755*0Sstevel@tonic-gate 		ASSERT(conpp == NULL);
2756*0Sstevel@tonic-gate 		while (pg_cnt--) {
2757*0Sstevel@tonic-gate 
2758*0Sstevel@tonic-gate 			ap = anon_alloc(NULL, 0);
2759*0Sstevel@tonic-gate 			swap_xlate(ap, &ap_vp, &ap_off);
2760*0Sstevel@tonic-gate 
2761*0Sstevel@tonic-gate 			ASSERT(pplist != NULL);
2762*0Sstevel@tonic-gate 			pp = pplist;
2763*0Sstevel@tonic-gate 			page_sub(&pplist, pp);
2764*0Sstevel@tonic-gate 			PP_CLRFREE(pp);
2765*0Sstevel@tonic-gate 			PP_CLRAGED(pp);
2766*0Sstevel@tonic-gate 			conpp = pp;
2767*0Sstevel@tonic-gate 
2768*0Sstevel@tonic-gate 			err = swap_getconpage(ap_vp, ap_off, PAGESIZE,
2769*0Sstevel@tonic-gate 			    (uint_t *)NULL, anon_pl, PAGESIZE, conpp, &nreloc,
2770*0Sstevel@tonic-gate 			    seg, addr, S_CREATE, cred);
2771*0Sstevel@tonic-gate 
2772*0Sstevel@tonic-gate 			if (err) {
2773*0Sstevel@tonic-gate 				ANON_LOCK_EXIT(&amp->a_rwlock);
2774*0Sstevel@tonic-gate 				panic("anon_map_createpages: S_CREATE");
2775*0Sstevel@tonic-gate 			}
2776*0Sstevel@tonic-gate 
2777*0Sstevel@tonic-gate 			ASSERT(anon_pl[0] == pp);
2778*0Sstevel@tonic-gate 			ASSERT(nreloc == 1);
2779*0Sstevel@tonic-gate 			pagezero(pp, 0, PAGESIZE);
2780*0Sstevel@tonic-gate 			CPU_STATS_ADD_K(vm, zfod, 1);
2781*0Sstevel@tonic-gate 			hat_setrefmod(pp);
2782*0Sstevel@tonic-gate 
2783*0Sstevel@tonic-gate 			ASSERT(anon_get_ptr(amp->ahp, index) == NULL);
2784*0Sstevel@tonic-gate 			(void) anon_set_ptr(amp->ahp, index, ap, ANON_SLEEP);
2785*0Sstevel@tonic-gate 
2786*0Sstevel@tonic-gate 			ppa[p_index++] = pp;
2787*0Sstevel@tonic-gate 
2788*0Sstevel@tonic-gate 			addr += PAGESIZE;
2789*0Sstevel@tonic-gate 			index++;
2790*0Sstevel@tonic-gate 			npgs--;
2791*0Sstevel@tonic-gate 		}
2792*0Sstevel@tonic-gate 		conpp = NULL;
2793*0Sstevel@tonic-gate 		pg_cnt	= pgsz >> PAGESHIFT;
2794*0Sstevel@tonic-gate 		p_index = p_index - pg_cnt;
2795*0Sstevel@tonic-gate 		while (pg_cnt--) {
2796*0Sstevel@tonic-gate 			page_downgrade(ppa[p_index++]);
2797*0Sstevel@tonic-gate 		}
2798*0Sstevel@tonic-gate 	}
2799*0Sstevel@tonic-gate 	ANON_LOCK_EXIT(&amp->a_rwlock);
2800*0Sstevel@tonic-gate 	return (0);
2801*0Sstevel@tonic-gate }
2802*0Sstevel@tonic-gate 
2803*0Sstevel@tonic-gate int
2804*0Sstevel@tonic-gate anon_map_demotepages(
2805*0Sstevel@tonic-gate 	struct anon_map *amp,
2806*0Sstevel@tonic-gate 	ulong_t	start_idx,
2807*0Sstevel@tonic-gate 	struct seg *seg,
2808*0Sstevel@tonic-gate 	caddr_t addr,
2809*0Sstevel@tonic-gate 	uint_t prot,
2810*0Sstevel@tonic-gate 	struct vpage vpage[],
2811*0Sstevel@tonic-gate 	struct cred *cred)
2812*0Sstevel@tonic-gate {
2813*0Sstevel@tonic-gate 	struct anon	*ap;
2814*0Sstevel@tonic-gate 	uint_t		szc = seg->s_szc;
2815*0Sstevel@tonic-gate 	pgcnt_t		pgcnt = page_get_pagecnt(szc);
2816*0Sstevel@tonic-gate 	size_t		ppasize = pgcnt * sizeof (page_t *);
2817*0Sstevel@tonic-gate 	page_t		**ppa = kmem_alloc(ppasize, KM_SLEEP);
2818*0Sstevel@tonic-gate 	page_t		*pp;
2819*0Sstevel@tonic-gate 	page_t		*pl[2];
2820*0Sstevel@tonic-gate 	pgcnt_t		i, pg_idx;
2821*0Sstevel@tonic-gate 	ulong_t		an_idx;
2822*0Sstevel@tonic-gate 	caddr_t		vaddr;
2823*0Sstevel@tonic-gate 	kmutex_t	*ahmpages = NULL;
2824*0Sstevel@tonic-gate 	int 		err;
2825*0Sstevel@tonic-gate 	int		retry = 0;
2826*0Sstevel@tonic-gate 	uint_t		vpprot;
2827*0Sstevel@tonic-gate 
2828*0Sstevel@tonic-gate 	ASSERT(RW_WRITE_HELD(&amp->a_rwlock));
2829*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
2830*0Sstevel@tonic-gate 	ASSERT(IS_P2ALIGNED(start_idx, pgcnt));
2831*0Sstevel@tonic-gate 	ASSERT(ppa != NULL);
2832*0Sstevel@tonic-gate 
2833*0Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.demotepages[0]);
2834*0Sstevel@tonic-gate 
2835*0Sstevel@tonic-gate 	ap = anon_get_ptr(amp->ahp, start_idx);
2836*0Sstevel@tonic-gate 	if (ap != NULL) {
2837*0Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.demotepages[1]);
2838*0Sstevel@tonic-gate 		ahmpages = &anonpages_hash_lock[AH_LOCK(ap->an_vp, ap->an_off)];
2839*0Sstevel@tonic-gate 		mutex_enter(ahmpages);
2840*0Sstevel@tonic-gate 	}
2841*0Sstevel@tonic-gate top:
2842*0Sstevel@tonic-gate 	if (ap == NULL || ap->an_refcnt <= 1) {
2843*0Sstevel@tonic-gate 		int root = 0;
2844*0Sstevel@tonic-gate 		pgcnt_t npgs, curnpgs = 0;
2845*0Sstevel@tonic-gate 
2846*0Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.demotepages[2]);
2847*0Sstevel@tonic-gate 
2848*0Sstevel@tonic-gate 		ASSERT(retry == 0 || ap != NULL);
2849*0Sstevel@tonic-gate 
2850*0Sstevel@tonic-gate 		if (ahmpages != NULL)
2851*0Sstevel@tonic-gate 			mutex_exit(ahmpages);
2852*0Sstevel@tonic-gate 		an_idx = start_idx;
2853*0Sstevel@tonic-gate 		for (i = 0; i < pgcnt; i++, an_idx++) {
2854*0Sstevel@tonic-gate 			ap = anon_get_ptr(amp->ahp, an_idx);
2855*0Sstevel@tonic-gate 			if (ap != NULL) {
2856*0Sstevel@tonic-gate 				ASSERT(ap->an_refcnt == 1);
2857*0Sstevel@tonic-gate 				pp = ppa[i] = page_lookup(ap->an_vp, ap->an_off,
2858*0Sstevel@tonic-gate 				    SE_EXCL);
2859*0Sstevel@tonic-gate 				if (pp != NULL) {
2860*0Sstevel@tonic-gate 					(void) hat_pageunload(pp,
2861*0Sstevel@tonic-gate 					    HAT_FORCE_PGUNLOAD);
2862*0Sstevel@tonic-gate 				}
2863*0Sstevel@tonic-gate 			} else {
2864*0Sstevel@tonic-gate 				ppa[i] = NULL;
2865*0Sstevel@tonic-gate 			}
2866*0Sstevel@tonic-gate 		}
2867*0Sstevel@tonic-gate 		for (i = 0; i < pgcnt; i++) {
2868*0Sstevel@tonic-gate 			if ((pp = ppa[i]) != NULL && pp->p_szc != 0) {
2869*0Sstevel@tonic-gate 				ASSERT(pp->p_szc <= szc);
2870*0Sstevel@tonic-gate 				if (!root) {
2871*0Sstevel@tonic-gate 					VM_STAT_ADD(anonvmstats.demotepages[3]);
2872*0Sstevel@tonic-gate 					if (curnpgs != 0)
2873*0Sstevel@tonic-gate 						panic("anon_map_demotepages: "
2874*0Sstevel@tonic-gate 						    "bad large page");
2875*0Sstevel@tonic-gate 
2876*0Sstevel@tonic-gate 					root = 1;
2877*0Sstevel@tonic-gate 					curnpgs = npgs =
2878*0Sstevel@tonic-gate 					    page_get_pagecnt(pp->p_szc);
2879*0Sstevel@tonic-gate 
2880*0Sstevel@tonic-gate 					ASSERT(npgs <= pgcnt);
2881*0Sstevel@tonic-gate 					ASSERT(IS_P2ALIGNED(npgs, npgs));
2882*0Sstevel@tonic-gate 					ASSERT(!(page_pptonum(pp) &
2883*0Sstevel@tonic-gate 					    (npgs - 1)));
2884*0Sstevel@tonic-gate 				} else {
2885*0Sstevel@tonic-gate 					ASSERT(i > 0);
2886*0Sstevel@tonic-gate 					ASSERT(page_pptonum(pp) - 1 ==
2887*0Sstevel@tonic-gate 					    page_pptonum(ppa[i - 1]));
2888*0Sstevel@tonic-gate 					if ((page_pptonum(pp) & (npgs - 1)) ==
2889*0Sstevel@tonic-gate 					    npgs - 1)
2890*0Sstevel@tonic-gate 						root = 0;
2891*0Sstevel@tonic-gate 				}
2892*0Sstevel@tonic-gate 				ASSERT(PAGE_EXCL(pp));
2893*0Sstevel@tonic-gate 				pp->p_szc = 0;
2894*0Sstevel@tonic-gate 				curnpgs--;
2895*0Sstevel@tonic-gate 			}
2896*0Sstevel@tonic-gate 		}
2897*0Sstevel@tonic-gate 		if (root != 0 || curnpgs != 0)
2898*0Sstevel@tonic-gate 			panic("anon_map_demotepages: bad large page");
2899*0Sstevel@tonic-gate 
2900*0Sstevel@tonic-gate 		for (i = 0; i < pgcnt; i++) {
2901*0Sstevel@tonic-gate 			if ((pp = ppa[i]) != NULL) {
2902*0Sstevel@tonic-gate 				ASSERT(!hat_page_is_mapped(pp));
2903*0Sstevel@tonic-gate 				ASSERT(pp->p_szc == 0);
2904*0Sstevel@tonic-gate 				page_unlock(pp);
2905*0Sstevel@tonic-gate 			}
2906*0Sstevel@tonic-gate 		}
2907*0Sstevel@tonic-gate 		kmem_free(ppa, ppasize);
2908*0Sstevel@tonic-gate 		return (0);
2909*0Sstevel@tonic-gate 	}
2910*0Sstevel@tonic-gate 	ASSERT(ahmpages != NULL);
2911*0Sstevel@tonic-gate 	mutex_exit(ahmpages);
2912*0Sstevel@tonic-gate 	ahmpages = NULL;
2913*0Sstevel@tonic-gate 
2914*0Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.demotepages[4]);
2915*0Sstevel@tonic-gate 
2916*0Sstevel@tonic-gate 	ASSERT(retry == 0); /* we can be here only once */
2917*0Sstevel@tonic-gate 
2918*0Sstevel@tonic-gate 	vaddr = addr;
2919*0Sstevel@tonic-gate 	for (pg_idx = 0, an_idx = start_idx; pg_idx < pgcnt;
2920*0Sstevel@tonic-gate 	    pg_idx++, an_idx++, vaddr += PAGESIZE) {
2921*0Sstevel@tonic-gate 		ap = anon_get_ptr(amp->ahp, an_idx);
2922*0Sstevel@tonic-gate 		if (ap == NULL)
2923*0Sstevel@tonic-gate 			panic("anon_map_demotepages: no anon slot");
2924*0Sstevel@tonic-gate 		err = anon_getpage(&ap, &vpprot, pl, PAGESIZE, seg, vaddr,
2925*0Sstevel@tonic-gate 		    S_READ, cred);
2926*0Sstevel@tonic-gate 		if (err) {
2927*0Sstevel@tonic-gate 			for (i = 0; i < pg_idx; i++) {
2928*0Sstevel@tonic-gate 				if ((pp = ppa[i]) != NULL)
2929*0Sstevel@tonic-gate 					page_unlock(pp);
2930*0Sstevel@tonic-gate 			}
2931*0Sstevel@tonic-gate 			kmem_free(ppa, ppasize);
2932*0Sstevel@tonic-gate 			return (err);
2933*0Sstevel@tonic-gate 		}
2934*0Sstevel@tonic-gate 		ppa[pg_idx] = pl[0];
2935*0Sstevel@tonic-gate 	}
2936*0Sstevel@tonic-gate 
2937*0Sstevel@tonic-gate 	err = anon_map_privatepages(amp, start_idx, szc, seg, addr, prot, ppa,
2938*0Sstevel@tonic-gate 	    vpage, -1, cred);
2939*0Sstevel@tonic-gate 	if (err > 0) {
2940*0Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.demotepages[5]);
2941*0Sstevel@tonic-gate 		kmem_free(ppa, ppasize);
2942*0Sstevel@tonic-gate 		return (err);
2943*0Sstevel@tonic-gate 	}
2944*0Sstevel@tonic-gate 	ASSERT(err == 0 || err == -1);
2945*0Sstevel@tonic-gate 	if (err == -1) {
2946*0Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.demotepages[6]);
2947*0Sstevel@tonic-gate 		retry = 1;
2948*0Sstevel@tonic-gate 		goto top;
2949*0Sstevel@tonic-gate 	}
2950*0Sstevel@tonic-gate 	for (i = 0; i < pgcnt; i++) {
2951*0Sstevel@tonic-gate 		ASSERT(ppa[i] != NULL);
2952*0Sstevel@tonic-gate 		if (ppa[i]->p_szc != 0)
2953*0Sstevel@tonic-gate 			retry = 1;
2954*0Sstevel@tonic-gate 		page_unlock(ppa[i]);
2955*0Sstevel@tonic-gate 	}
2956*0Sstevel@tonic-gate 	if (retry) {
2957*0Sstevel@tonic-gate 		VM_STAT_ADD(anonvmstats.demotepages[7]);
2958*0Sstevel@tonic-gate 		goto top;
2959*0Sstevel@tonic-gate 	}
2960*0Sstevel@tonic-gate 
2961*0Sstevel@tonic-gate 	VM_STAT_ADD(anonvmstats.demotepages[8]);
2962*0Sstevel@tonic-gate 
2963*0Sstevel@tonic-gate 	kmem_free(ppa, ppasize);
2964*0Sstevel@tonic-gate 
2965*0Sstevel@tonic-gate 	return (0);
2966*0Sstevel@tonic-gate }
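
/*
 * A minimal caller sketch for anon_map_demotepages(); the ASSERTs above
 * require the anon map's write lock to be held and start_idx to be
 * large-page aligned.  The -1 retry case from anon_map_privatepages()
 * is handled internally, so only 0 or an errno comes back:
 *
 *	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 *	err = anon_map_demotepages(amp, start_idx, seg, addr,
 *	    prot, vpage, cred);
 *	ANON_LOCK_EXIT(&amp->a_rwlock);
 */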
2967*0Sstevel@tonic-gate 
2968*0Sstevel@tonic-gate /*
2969*0Sstevel@tonic-gate  * Allocate and initialize an anon_map structure for seg
2970*0Sstevel@tonic-gate  * associating the given swap reservation with the new anon_map.
2971*0Sstevel@tonic-gate  */
2972*0Sstevel@tonic-gate struct anon_map *
2973*0Sstevel@tonic-gate anonmap_alloc(size_t size, size_t swresv)
2974*0Sstevel@tonic-gate {
2975*0Sstevel@tonic-gate 	struct anon_map *amp;
2976*0Sstevel@tonic-gate 
2977*0Sstevel@tonic-gate 	amp = kmem_cache_alloc(anonmap_cache, KM_SLEEP);
2978*0Sstevel@tonic-gate 
2979*0Sstevel@tonic-gate 	amp->refcnt = 1;
2980*0Sstevel@tonic-gate 	amp->size = size;
2981*0Sstevel@tonic-gate 
2982*0Sstevel@tonic-gate 	amp->ahp = anon_create(btopr(size), ANON_SLEEP);
2983*0Sstevel@tonic-gate 	amp->swresv = swresv;
2984*0Sstevel@tonic-gate 	amp->locality = 0;
2985*0Sstevel@tonic-gate 	amp->a_szc = 0;
2986*0Sstevel@tonic-gate 	return (amp);
2987*0Sstevel@tonic-gate }
2988*0Sstevel@tonic-gate 
2989*0Sstevel@tonic-gate void
2990*0Sstevel@tonic-gate anonmap_free(struct anon_map *amp)
2991*0Sstevel@tonic-gate {
2992*0Sstevel@tonic-gate 	ASSERT(amp->ahp);
2993*0Sstevel@tonic-gate 	ASSERT(amp->refcnt == 0);
2994*0Sstevel@tonic-gate 
2995*0Sstevel@tonic-gate 	lgrp_shm_policy_fini(amp, NULL);
2996*0Sstevel@tonic-gate 	anon_release(amp->ahp, btopr(amp->size));
2997*0Sstevel@tonic-gate 	kmem_cache_free(anonmap_cache, amp);
2998*0Sstevel@tonic-gate }
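
/*
 * A minimal lifecycle sketch for the pair above: anonmap_alloc()
 * returns an anon_map with refcnt initialized to 1, and anonmap_free()
 * may only be called once the last reference is gone (refcnt == 0),
 * with the decrement performed under whatever lock the caller uses to
 * protect refcnt (seg_size and swap_reserved are illustrative):
 *
 *	struct anon_map *amp;
 *
 *	amp = anonmap_alloc(seg_size, swap_reserved);
 *	...use amp->ahp for the segment's anon slots...
 *	if (--amp->refcnt == 0)
 *		anonmap_free(amp);
 */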
2999*0Sstevel@tonic-gate 
3000*0Sstevel@tonic-gate /*
3001*0Sstevel@tonic-gate  * Returns true if the anon array has some empty slots.
3002*0Sstevel@tonic-gate  * The offp and lenp parameters are in/out parameters.  On entry
3003*0Sstevel@tonic-gate  * these values represent the starting offset and length of the
3004*0Sstevel@tonic-gate  * mapping.  When true is returned, these values may be modified
3005*0Sstevel@tonic-gate  * to be the largest range which includes empty slots.
3006*0Sstevel@tonic-gate  */
3007*0Sstevel@tonic-gate int
3008*0Sstevel@tonic-gate non_anon(struct anon_hdr *ahp, ulong_t anon_idx, u_offset_t *offp,
3009*0Sstevel@tonic-gate 				size_t *lenp)
3010*0Sstevel@tonic-gate {
3011*0Sstevel@tonic-gate 	ulong_t i, el;
3012*0Sstevel@tonic-gate 	ssize_t low, high;
3013*0Sstevel@tonic-gate 	struct anon *ap;
3014*0Sstevel@tonic-gate 
3015*0Sstevel@tonic-gate 	low = -1;
3016*0Sstevel@tonic-gate 	for (i = 0, el = *lenp; i < el; i += PAGESIZE, anon_idx++) {
3017*0Sstevel@tonic-gate 		ap = anon_get_ptr(ahp, anon_idx);
3018*0Sstevel@tonic-gate 		if (ap == NULL) {
3019*0Sstevel@tonic-gate 			if (low == -1)
3020*0Sstevel@tonic-gate 				low = i;
3021*0Sstevel@tonic-gate 			high = i;
3022*0Sstevel@tonic-gate 		}
3023*0Sstevel@tonic-gate 	}
3024*0Sstevel@tonic-gate 	if (low != -1) {
3025*0Sstevel@tonic-gate 		/*
3026*0Sstevel@tonic-gate 		 * Found at least one non-anon page.
3027*0Sstevel@tonic-gate 		 * Set up the off and len return values.
3028*0Sstevel@tonic-gate 		 */
3029*0Sstevel@tonic-gate 		if (low != 0)
3030*0Sstevel@tonic-gate 			*offp += low;
3031*0Sstevel@tonic-gate 		*lenp = high - low + PAGESIZE;
3032*0Sstevel@tonic-gate 		return (1);
3033*0Sstevel@tonic-gate 	}
3034*0Sstevel@tonic-gate 	return (0);
3035*0Sstevel@tonic-gate }
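
/*
 * A worked example, assuming 8K pages: with *offp == 0x10000,
 * *lenp == 4 * PAGESIZE, and empty anon slots only at the second and
 * third pages of the range, low ends up at PAGESIZE and high at
 * 2 * PAGESIZE, so non_anon() returns 1 with *offp advanced to 0x12000
 * and *lenp trimmed to 2 * PAGESIZE.
 */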
3036*0Sstevel@tonic-gate 
3037*0Sstevel@tonic-gate /*
3038*0Sstevel@tonic-gate  * Return a count of the number of existing anon pages in the anon array
3039*0Sstevel@tonic-gate  * ahp in the range (anon_index, anon_index + nslots). The array and
3040*0Sstevel@tonic-gate  * slots must be guaranteed stable by the caller.
3041*0Sstevel@tonic-gate  */
3042*0Sstevel@tonic-gate pgcnt_t
3043*0Sstevel@tonic-gate anon_pages(struct anon_hdr *ahp, ulong_t anon_index, pgcnt_t nslots)
3044*0Sstevel@tonic-gate {
3045*0Sstevel@tonic-gate 	pgcnt_t cnt = 0;
3046*0Sstevel@tonic-gate 
3047*0Sstevel@tonic-gate 	while (nslots-- > 0) {
3048*0Sstevel@tonic-gate 		if ((anon_get_ptr(ahp, anon_index)) != NULL)
3049*0Sstevel@tonic-gate 			cnt++;
3050*0Sstevel@tonic-gate 		anon_index++;
3051*0Sstevel@tonic-gate 	}
3052*0Sstevel@tonic-gate 	return (cnt);
3053*0Sstevel@tonic-gate }
3054*0Sstevel@tonic-gate 
3055*0Sstevel@tonic-gate /*
3056*0Sstevel@tonic-gate  * Move reserved phys swap into memory swap (unreserve phys swap
3057*0Sstevel@tonic-gate  * and reserve mem swap by the same amount).
3058*0Sstevel@tonic-gate  * Used by segspt when it needs to lock npages of reserved swap in memory.
3059*0Sstevel@tonic-gate  */
3060*0Sstevel@tonic-gate int
3061*0Sstevel@tonic-gate anon_swap_adjust(pgcnt_t npages)
3062*0Sstevel@tonic-gate {
3063*0Sstevel@tonic-gate 	pgcnt_t unlocked_mem_swap;
3064*0Sstevel@tonic-gate 
3065*0Sstevel@tonic-gate 	mutex_enter(&anoninfo_lock);
3066*0Sstevel@tonic-gate 
3067*0Sstevel@tonic-gate 	ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
3068*0Sstevel@tonic-gate 	ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
3069*0Sstevel@tonic-gate 
3070*0Sstevel@tonic-gate 	unlocked_mem_swap = k_anoninfo.ani_mem_resv
3071*0Sstevel@tonic-gate 					- k_anoninfo.ani_locked_swap;
3072*0Sstevel@tonic-gate 	if (npages > unlocked_mem_swap) {
3073*0Sstevel@tonic-gate 		spgcnt_t adjusted_swap = npages - unlocked_mem_swap;
3074*0Sstevel@tonic-gate 
3075*0Sstevel@tonic-gate 		/*
3076*0Sstevel@tonic-gate 		 * If there is not enough unlocked mem swap, we take the
3077*0Sstevel@tonic-gate 		 * missing amount from phys swap and give it to mem swap.
3078*0Sstevel@tonic-gate 		 */
3079*0Sstevel@tonic-gate 		mutex_enter(&freemem_lock);
3080*0Sstevel@tonic-gate 		if (availrmem < adjusted_swap + segspt_minfree) {
3081*0Sstevel@tonic-gate 			mutex_exit(&freemem_lock);
3082*0Sstevel@tonic-gate 			mutex_exit(&anoninfo_lock);
3083*0Sstevel@tonic-gate 			return (ENOMEM);
3084*0Sstevel@tonic-gate 		}
3085*0Sstevel@tonic-gate 		availrmem -= adjusted_swap;
3086*0Sstevel@tonic-gate 		mutex_exit(&freemem_lock);
3087*0Sstevel@tonic-gate 
3088*0Sstevel@tonic-gate 		k_anoninfo.ani_mem_resv += adjusted_swap;
3089*0Sstevel@tonic-gate 		ASSERT(k_anoninfo.ani_phys_resv >= adjusted_swap);
3090*0Sstevel@tonic-gate 		k_anoninfo.ani_phys_resv -= adjusted_swap;
3091*0Sstevel@tonic-gate 
3092*0Sstevel@tonic-gate 		ANI_ADD(adjusted_swap);
3093*0Sstevel@tonic-gate 	}
3094*0Sstevel@tonic-gate 	k_anoninfo.ani_locked_swap += npages;
3095*0Sstevel@tonic-gate 
3096*0Sstevel@tonic-gate 	ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
3097*0Sstevel@tonic-gate 	ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
3098*0Sstevel@tonic-gate 
3099*0Sstevel@tonic-gate 	mutex_exit(&anoninfo_lock);
3100*0Sstevel@tonic-gate 
3101*0Sstevel@tonic-gate 	return (0);
3102*0Sstevel@tonic-gate }
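
/*
 * A worked example of the accounting above: with ani_mem_resv == 100,
 * ani_locked_swap == 90, and npages == 30, unlocked_mem_swap is 10 and
 * adjusted_swap is 20, so 20 pages move from phys to mem swap:
 * availrmem and ani_phys_resv each drop by 20, while ani_mem_resv and
 * ani_locked_swap both rise to 120, preserving the invariant
 * ani_mem_resv >= ani_locked_swap.
 */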
3103*0Sstevel@tonic-gate 
3104*0Sstevel@tonic-gate /*
3105*0Sstevel@tonic-gate  * 'Unlock' reserved mem swap so that when it is unreserved it
3106*0Sstevel@tonic-gate  * can be moved back to phys (disk) swap.
3107*0Sstevel@tonic-gate  */
3108*0Sstevel@tonic-gate void
3109*0Sstevel@tonic-gate anon_swap_restore(pgcnt_t npages)
3110*0Sstevel@tonic-gate {
3111*0Sstevel@tonic-gate 	mutex_enter(&anoninfo_lock);
3112*0Sstevel@tonic-gate 
3113*0Sstevel@tonic-gate 	ASSERT(k_anoninfo.ani_locked_swap <= k_anoninfo.ani_mem_resv);
3114*0Sstevel@tonic-gate 
3115*0Sstevel@tonic-gate 	ASSERT(k_anoninfo.ani_locked_swap >= npages);
3116*0Sstevel@tonic-gate 	k_anoninfo.ani_locked_swap -= npages;
3117*0Sstevel@tonic-gate 
3118*0Sstevel@tonic-gate 	ASSERT(k_anoninfo.ani_locked_swap <= k_anoninfo.ani_mem_resv);
3119*0Sstevel@tonic-gate 
3120*0Sstevel@tonic-gate 	mutex_exit(&anoninfo_lock);
3121*0Sstevel@tonic-gate }
3122*0Sstevel@tonic-gate 
3123*0Sstevel@tonic-gate /*
3124*0Sstevel@tonic-gate  * Return a pointer to the anon array slot for the
3125*0Sstevel@tonic-gate  * specified anon index.
3126*0Sstevel@tonic-gate  */
3127*0Sstevel@tonic-gate ulong_t *
3128*0Sstevel@tonic-gate anon_get_slot(struct anon_hdr *ahp, ulong_t an_idx)
3129*0Sstevel@tonic-gate {
3130*0Sstevel@tonic-gate 	struct anon	**app;
3131*0Sstevel@tonic-gate 	void 		**ppp;
3132*0Sstevel@tonic-gate 
3133*0Sstevel@tonic-gate 	ASSERT(an_idx < ahp->size);
3134*0Sstevel@tonic-gate 
3135*0Sstevel@tonic-gate 	/*
3136*0Sstevel@tonic-gate 	 * Single level case.
3137*0Sstevel@tonic-gate 	 */
3138*0Sstevel@tonic-gate 	if ((ahp->size <= ANON_CHUNK_SIZE) || (ahp->flags & ANON_ALLOC_FORCE)) {
3139*0Sstevel@tonic-gate 		return ((ulong_t *)&ahp->array_chunk[an_idx]);
3140*0Sstevel@tonic-gate 	} else {
3141*0Sstevel@tonic-gate 
3142*0Sstevel@tonic-gate 		/*
3143*0Sstevel@tonic-gate 		 * 2 level case.
3144*0Sstevel@tonic-gate 		 */
3145*0Sstevel@tonic-gate 		ppp = &ahp->array_chunk[an_idx >> ANON_CHUNK_SHIFT];
3146*0Sstevel@tonic-gate 		if (*ppp == NULL) {
3147*0Sstevel@tonic-gate 			mutex_enter(&ahp->serial_lock);
3148*0Sstevel@tonic-gate 			ppp = &ahp->array_chunk[an_idx >> ANON_CHUNK_SHIFT];
3149*0Sstevel@tonic-gate 			if (*ppp == NULL)
3150*0Sstevel@tonic-gate 				*ppp = kmem_zalloc(PAGESIZE, KM_SLEEP);
3151*0Sstevel@tonic-gate 			mutex_exit(&ahp->serial_lock);
3152*0Sstevel@tonic-gate 		}
3153*0Sstevel@tonic-gate 		app = *ppp;
3154*0Sstevel@tonic-gate 		return ((ulong_t *)&app[an_idx & ANON_CHUNK_OFF]);
3155*0Sstevel@tonic-gate 	}
3156*0Sstevel@tonic-gate }
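
/*
 * A worked example of the two-level lookup above, assuming 8K pages
 * and 64-bit pointers so that a chunk holds 1024 slots
 * (ANON_CHUNK_SHIFT == 10): for an_idx == 2500,
 * an_idx >> ANON_CHUNK_SHIFT == 2 selects the third chunk pointer in
 * array_chunk[], and an_idx & ANON_CHUNK_OFF == 452 selects the slot
 * within that chunk.
 */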
3157*0Sstevel@tonic-gate 
3158*0Sstevel@tonic-gate void
3159*0Sstevel@tonic-gate anon_array_enter(struct anon_map *amp, ulong_t an_idx, anon_sync_obj_t *sobj)
3160*0Sstevel@tonic-gate {
3161*0Sstevel@tonic-gate 	ulong_t		*ap_slot;
3162*0Sstevel@tonic-gate 	kmutex_t	*mtx;
3163*0Sstevel@tonic-gate 	kcondvar_t	*cv;
3164*0Sstevel@tonic-gate 	int		hash;
3165*0Sstevel@tonic-gate 
3166*0Sstevel@tonic-gate 	/*
3167*0Sstevel@tonic-gate 	 * Use szc to determine the range of anon slots that must appear atomic.
3168*0Sstevel@tonic-gate 	 * If szc = 0, then lock the anon slot and mark it busy.
3169*0Sstevel@tonic-gate 	 * If szc > 0, then lock the range of slots by getting the
3170*0Sstevel@tonic-gate 	 * anon_array_lock for the first anon slot, and mark only the
3171*0Sstevel@tonic-gate 	 * first anon slot busy to represent the whole range being busy.
3172*0Sstevel@tonic-gate 	 */
3173*0Sstevel@tonic-gate 
3174*0Sstevel@tonic-gate 	ASSERT(RW_READ_HELD(&amp->a_rwlock));
3175*0Sstevel@tonic-gate 	an_idx = P2ALIGN(an_idx, page_get_pagecnt(amp->a_szc));
3176*0Sstevel@tonic-gate 	hash = ANON_ARRAY_HASH(amp, an_idx);
3177*0Sstevel@tonic-gate 	sobj->sync_mutex = mtx = &anon_array_lock[hash].pad_mutex;
3178*0Sstevel@tonic-gate 	sobj->sync_cv = cv = &anon_array_cv[hash];
3179*0Sstevel@tonic-gate 	mutex_enter(mtx);
3180*0Sstevel@tonic-gate 	ap_slot = anon_get_slot(amp->ahp, an_idx);
3181*0Sstevel@tonic-gate 	while (ANON_ISBUSY(ap_slot))
3182*0Sstevel@tonic-gate 		cv_wait(cv, mtx);
3183*0Sstevel@tonic-gate 	ANON_SETBUSY(ap_slot);
3184*0Sstevel@tonic-gate 	sobj->sync_data = ap_slot;
3185*0Sstevel@tonic-gate 	mutex_exit(mtx);
3186*0Sstevel@tonic-gate }
3187*0Sstevel@tonic-gate 
3188*0Sstevel@tonic-gate void
3189*0Sstevel@tonic-gate anon_array_exit(anon_sync_obj_t *sobj)
3190*0Sstevel@tonic-gate {
3191*0Sstevel@tonic-gate 	mutex_enter(sobj->sync_mutex);
3192*0Sstevel@tonic-gate 	ASSERT(ANON_ISBUSY(sobj->sync_data));
3193*0Sstevel@tonic-gate 	ANON_CLRBUSY(sobj->sync_data);
3194*0Sstevel@tonic-gate 	if (CV_HAS_WAITERS(sobj->sync_cv))
3195*0Sstevel@tonic-gate 		cv_broadcast(sobj->sync_cv);
3196*0Sstevel@tonic-gate 	mutex_exit(sobj->sync_mutex);
3197*0Sstevel@tonic-gate }
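
/*
 * A minimal usage sketch for the enter/exit pair above; the ASSERT in
 * anon_array_enter() requires the anon map's rwlock to be held at
 * least as reader for the duration:
 *
 *	anon_sync_obj_t cookie;
 *
 *	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
 *	anon_array_enter(amp, an_idx, &cookie);
 *	ap = anon_get_ptr(amp->ahp, an_idx);
 *	...create, examine or replace the page for this slot...
 *	anon_array_exit(&cookie);
 *	ANON_LOCK_EXIT(&amp->a_rwlock);
 */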