/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved  	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#ifndef	_VM_PAGE_H
#define	_VM_PAGE_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <vm/seg.h>

#ifdef	__cplusplus
extern "C" {
#endif

#if defined(_KERNEL) || defined(_KMEMUSER)

/*
 * Shared/Exclusive lock.
 */

/*
 * Types of page locking supported by page_lock & friends.
 */
typedef enum {
	SE_SHARED,
	SE_EXCL			/* exclusive lock (value == -1) */
} se_t;

/*
 * For requesting that page_lock reclaim the page from the free list.
 */
typedef enum {
	P_RECLAIM,		/* reclaim page from free list */
	P_NO_RECLAIM		/* DON'T reclaim the page	*/
} reclaim_t;
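
/*
 * Illustrative only (not part of this interface): a caller that wants a
 * shared lock on a page and is willing to have it pulled off the free
 * list might, under the usual conventions, do something like
 *
 *	if (page_lock(pp, SE_SHARED, (kmutex_t *)NULL, P_RECLAIM)) {
 *		... use the page ...
 *		page_unlock(pp);
 *	}
 *
 * page_lock() and page_unlock() are declared later in this header; the
 * NULL third argument (no list lock to drop) is an assumption here.
 */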

/*
 * Callers of page_try_reclaim_lock and page_lock_es can use this flag
 * to get SE_EXCL access before reader/writers are given access.
 */
#define	SE_EXCL_WANTED	0x02
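
/*
 * Illustrative only (assumed usage): a thread that must eventually win
 * exclusive access can pass this flag so that no new shared lockers get
 * in ahead of it, spinning as described in the p_selock comment below:
 *
 *	while (!page_try_reclaim_lock(pp, SE_EXCL, SE_EXCL_WANTED))
 *		continue;
 *
 * (page_try_reclaim_lock() is declared later in this header.)
 */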

#endif	/* _KERNEL | _KMEMUSER */

typedef int	selock_t;

/*
 * Define VM_STATS to turn on all sorts of statistic gathering about
 * the VM layer.  By default, it is only turned on when DEBUG is
 * also defined.
 */
#ifdef DEBUG
#define	VM_STATS
#endif	/* DEBUG */

#ifdef VM_STATS
#define	VM_STAT_ADD(stat)			(stat)++
#define	VM_STAT_COND_ADD(cond, stat)		((void) (!(cond) || (stat)++))
#else
#define	VM_STAT_ADD(stat)
#define	VM_STAT_COND_ADD(cond, stat)
#endif	/* VM_STATS */
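
/*
 * Example usage (the counter struct here is hypothetical, not defined in
 * this header): a subsystem typically declares a statistics structure and
 * bumps its fields with these macros so that the increments compile away
 * when VM_STATS is not defined:
 *
 *	static struct {
 *		uint_t	lookups;
 *		uint_t	misses;
 *	} pagecnt;
 *
 *	VM_STAT_ADD(pagecnt.lookups);
 *	VM_STAT_COND_ADD(pp == NULL, pagecnt.misses);
 */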

#ifdef _KERNEL

/*
 * Macros to acquire and release the page logical lock.
 */
#define	page_struct_lock(pp)	mutex_enter(&page_llock)
#define	page_struct_unlock(pp)	mutex_exit(&page_llock)

#endif	/* _KERNEL */

#include <sys/t_lock.h>

struct as;

/*
 * Each physical page has a page structure, which is used to maintain
 * these pages as a cache.  A page can be found via a hashed lookup
 * based on the [vp, offset].  If a page has a [vp, offset] identity,
 * then it is entered on a doubly linked circular list off the
 * vnode using the vpnext/vpprev pointers.   If the p_free bit
 * is on, then the page is also on a doubly linked circular free
 * list using next/prev pointers.  If the "p_selock" and "p_iolock"
 * are held, then the page is currently being read in (exclusive p_selock)
 * or written back (shared p_selock).  In this case, the next/prev pointers
 * are used to link the pages together for a consecutive i/o request.  If
 * the page is being brought in from its backing store, then other processes
 * will wait for the i/o to complete before attaching to the page since it
 * will have an "exclusive" lock.
 *
 * Each page structure has the locks described below along with
 * the fields they protect:
 *
 *	p_selock	This is a per-page shared/exclusive lock that is
 *			used to implement the logical shared/exclusive
 *			lock for each page.  The "shared" lock is normally
 *			used in most cases while the "exclusive" lock is
 *			required to destroy or retain exclusive access to
 *			a page (e.g., while reading in pages).  The appropriate
 *			lock is always held whenever there is any reference
 *			to a page structure (e.g., during i/o).
 *			(Note that with the addition of the "writer-lock-wanted"
 *			semantics (via SE_EWANTED), threads must not acquire
 *			multiple reader locks or else a deadly embrace will
 *			occur in the following situation: thread 1 obtains a
 *			reader lock; next thread 2 fails to get a writer lock
 *			but specified SE_EWANTED so it will wait by either
 *			blocking (when using page_lock_es) or spinning while
 *			retrying (when using page_try_reclaim_lock) until the
 *			reader lock is released; then thread 1 attempts to
 *			get another reader lock but is denied due to
 *			SE_EWANTED being set, and now both threads are in a
 *			deadly embrace.)
 *
 *				p_hash
 *				p_vnode
 *				p_offset
 *
 *				p_free
 *				p_age
 *
 *	p_iolock	This is a binary semaphore lock that provides
 *			exclusive access to the i/o list links in each
 *			page structure.  It is always held while the page
 *			is on an i/o list (i.e., involved in i/o).  That is,
 *			even though a page may be only `shared' locked
 *			while it is doing a write, the following fields may
 *			change anyway.  Normally, the page must be
 *			`exclusively' locked to change anything in it.
 *
 *				p_next
 *				p_prev
 *
 * The following fields are protected by the global page_llock:
 *
 *				p_lckcnt
 *				p_cowcnt
 *
 * The following lists are protected by the global page_freelock:
 *
 *				page_cachelist
 *				page_freelist
 *
 * The following, for our purposes, are protected by
 * the global freemem_lock:
 *
 *				freemem
 *				freemem_wait
 *				freemem_cv
 *
 * The following fields are protected by hat layer lock(s).  When a page
 * structure is not mapped and is not associated with a vnode (after a call
 * to page_hashout() for example) the p_nrm field may be modified without
 * holding the hat layer lock:
 *
 *				p_nrm
 *				p_mapping
 *				p_share
 *
 * The following field is file system dependent.  How it is used and
 * the locking strategies applied are up to the individual file system
 * implementation.
 *
 *				p_fsdata
 *
 * The page structure is used to represent and control the system's
 * physical pages.  There is one instance of the structure for each
 * page that is not permanently allocated.  For example, the pages that
 * hold the page structures are permanently held by the kernel
 * and hence do not need page structures to track them.  The array
 * of page structures is allocated early on in the kernel's life and
 * is based on the amount of available physical memory.
 *
 * Each page structure may simultaneously appear on several linked lists.
 * The lists are:  hash list, free or in i/o list, and a vnode's page list.
 * Each type of list is protected by a different group of mutexes as described
 * below:
 *
 * The hash list is used to quickly find a page when the page's vnode and
 * offset within the vnode are known.  Each page that is hashed is
 * connected via the `p_hash' field.  The anchor for each hash is in the
 * array `page_hash'.  An array of mutexes, `ph_mutex', protects the
 * lists anchored by page_hash[].  To either search or modify a given hash
 * list, the appropriate mutex in the ph_mutex array must be held.
 *
 * The free list contains pages that are `free to be given away'.  For
 * efficiency reasons, pages on this list are placed in two categories:
 * pages that are still associated with a vnode, and pages that are not
 * associated with a vnode.  Free pages always have their `p_free' bit set,
 * free pages that are still associated with a vnode also have their
 * `p_age' bit set.  Pages on the free list are connected via their
 * `p_next' and `p_prev' fields.  When a page is involved in some sort
 * of i/o, it is not free and these fields may be used to link associated
 * pages together.  At the moment, the free list is protected by a
 * single mutex `page_freelock'.  The list of free pages still associated
 * with a vnode is anchored by `page_cachelist' while other free pages
 * are anchored in architecture dependent ways (to handle page coloring etc.).
 *
 * Pages associated with a given vnode appear on a list anchored in the
 * vnode by the `v_pages' field.  They are linked together with
 * `p_vpnext' and `p_vpprev'.  The field `p_offset' contains a page's
 * offset within the vnode.  The pages on this list are not kept in
 * offset order.  These lists, in a manner similar to the hash lists,
 * are protected by an array of mutexes called `vph_mutex'.  Before
 * searching or modifying this chain the appropriate mutex in the
 * vph_mutex[] array must be held.
 *
 * Again, each of the lists that a page can appear on is protected by a
 * mutex.  Before reading or writing any of the fields comprising the
 * list, the appropriate lock must be held.  These list locks should only
 * be held for very short intervals.
 *
 * In addition to the list locks, each page structure contains a
 * shared/exclusive lock that protects various fields within it.
 * To modify one of these fields, the `p_selock' must be exclusively held.
 * To read a field with a degree of certainty, the lock must be at least
 * held shared.
 *
 * Removing a page structure from one of the lists requires holding
 * the appropriate list lock and the page's p_selock.  A page may be
 * prevented from changing identity, being freed, or otherwise modified
 * by acquiring p_selock shared.
 *
 * To avoid deadlocks, a strict locking protocol must be followed.  Basically
 * there are two cases:  In the first case, the page structure in question
 * is known ahead of time (e.g., when the page is to be added or removed
 * from a list).  In the second case, the page structure is not known and
 * must be found by searching one of the lists.
 *
 * When adding or removing a known page to one of the lists, first the
 * page must be exclusively locked (since at least one of its fields
 * will be modified), second the lock protecting the list must be acquired,
 * third the page inserted or deleted, and finally the list lock dropped.
 *
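 * As an illustrative sketch only (the helpers named here are declared later
 * in this header; the surrounding context is assumed), removing a known
 * page from its vnode's page list would follow that sequence roughly as:
 *
 *	ASSERT(PAGE_EXCL(pp));			(page already locked SE_EXCL)
 *	vphm = page_vnode_mutex(vp);
 *	mutex_enter(vphm);			(lock protecting the list)
 *	page_vpsub(&vp->v_pages, pp);		(delete from the list)
 *	mutex_exit(vphm);			(drop the list lock)
 *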
 * The more interesting case occurs when the particular page structure
 * is not known ahead of time.  For example, when a call is made to
 * page_lookup(), it is not known if a page with the desired (vnode and
 * offset pair) identity exists.  So the appropriate mutex in ph_mutex is
 * acquired, the hash list searched, and if the desired page is found
 * an attempt is made to lock it.  The attempt to acquire p_selock must
 * not block while the hash list lock is held.  A deadlock could occur
 * if some other process was trying to remove the page from the list.
 * The removing process (following the above protocol) would have exclusively
 * locked the page, and be spinning waiting to acquire the lock protecting
 * the hash list.  Since the searching process holds the hash list lock
 * and is waiting to acquire the page lock, a deadlock occurs.
 *
 * The proper scheme to follow is: first, lock the appropriate list,
 * search the list, and if the desired page is found either use
 * page_trylock() (which will not block) or pass the address of the
 * list lock to page_lock().  If page_lock() cannot acquire the page's
 * lock, it will drop the list lock before going to sleep.  page_lock()
 * returns a value to indicate if the list lock was dropped allowing the
 * calling program to react appropriately (i.e., retry the operation).
 *
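 * A rough sketch of that scheme (illustrative only; the hash macros and
 * page_lock() are defined later in this header, the rest of the context
 * is assumed):
 *
 *	index = PAGE_HASH_FUNC(vp, off);
 *	phm = PAGE_HASH_MUTEX(index);
 *	top:
 *	mutex_enter(phm);
 *	for (pp = page_hash[index]; pp != NULL; pp = pp->p_hash)
 *		if (pp->p_vnode == vp && pp->p_offset == off)
 *			break;
 *	if (pp != NULL && !page_lock(pp, se, phm, P_RECLAIM))
 *		goto top;		(page_lock() dropped phm; search again)
 *	else
 *		mutex_exit(phm);
 *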
 * If the list lock was dropped before the attempt at locking the page
 * was made, checks would have to be made to ensure that the page had
 * not changed identity before its lock was obtained.  This is because
 * the interval between dropping the list lock and acquiring the page
 * lock is indeterminate.
 *
 * In addition, when both a hash list lock (ph_mutex[]) and a vnode list
 * lock (vph_mutex[]) are needed, the hash list lock must be acquired first.
 * The routine page_hashin() is a good example of this sequence.
 * This sequence is ASSERTed by checking that the vph_mutex[] is not held
 * just before each acquisition of one of the mutexes in ph_mutex[].
 *
 * So, as a quick summary:
 *
 * 	pse_mutex[]'s protect the p_selock and p_cv fields.
 *
 * 	p_selock protects the p_free, p_age, p_vnode, p_offset and p_hash.
 *
 * 	ph_mutex[]'s protect the page_hash[] array and its chains.
 *
 * 	vph_mutex[]'s protect the v_pages field and the vp page chains.
 *
 *	First lock the page, then the hash chain, then the vnode chain.  When
 *	this is not possible `trylocks' must be used.  Sleeping while holding
 *	any of these mutexes (p_selock is not a mutex) is not allowed.
 *
 *
 *	field		reading		writing		    ordering
 *	=====================================================================
 *	p_vnode		p_selock(E,S)	p_selock(E)
 *	p_offset
 *	p_free
 *	p_age
 *	=====================================================================
 *	p_hash		p_selock(E,S)	p_selock(E) &&	    p_selock, ph_mutex
 *					ph_mutex[]
 *	=====================================================================
 *	p_vpnext	p_selock(E,S)	p_selock(E) &&	    p_selock, vph_mutex
 *	p_vpprev			vph_mutex[]
 *	=====================================================================
 *	When the p_free bit is set:
 *
 *	p_next		p_selock(E,S)	p_selock(E) &&	    p_selock,
 *	p_prev				page_freelock	    page_freelock
 *
 *	When the p_free bit is not set:
 *
 *	p_next		p_selock(E,S)	p_selock(E) &&	    p_selock, p_iolock
 *	p_prev				p_iolock
 *	=====================================================================
 *	p_selock	pse_mutex[]	pse_mutex[]	    can't acquire any
 *	p_cv						    other mutexes or
 *							    sleep while holding
 *							    this lock.
 *	=====================================================================
 *	p_lckcnt	p_selock(E,S)	p_selock(E) &&
 *	p_cowcnt			page_llock
 *	=====================================================================
 *	p_nrm		hat layer lock	hat layer lock
 *	p_mapping
 *	p_pagenum
 *	=====================================================================
 *
 *	where:
 *		E----> exclusive version of p_selock.
 *		S----> shared version of p_selock.
 *
 *
 *	Global data structures and variables:
 *
 *	field		reading		writing		    ordering
 *	=====================================================================
 *	page_hash[]	ph_mutex[]	ph_mutex[]	    can hold this lock
 *							    before acquiring
 *							    a vph_mutex or
 *							    pse_mutex.
 *	=====================================================================
 *	vp->v_pages	vph_mutex[]	vph_mutex[]	    can only acquire
 *							    a pse_mutex while
 *							    holding this lock.
 *	=====================================================================
 *	page_cachelist	page_freelock	page_freelock	    can't acquire any
 *	page_freelist	page_freelock	page_freelock	    other mutexes while
 *							    holding this lock.
 *	=====================================================================
 *	freemem		freemem_lock	freemem_lock	    can't acquire any
 *	freemem_wait					    other mutexes while
 *	freemem_cv					    holding this mutex.
 *	=====================================================================
 *
 * Page relocation, PG_NORELOC and P_NORELOC.
 *
 * Pages may be relocated using the page_relocate() interface. Relocation
 * involves moving the contents and identity of a page to another, free page.
 * To relocate a page, the SE_EXCL lock must be obtained. The way to prevent
 * a page from being relocated is to hold the SE_SHARED lock (the SE_EXCL
 * lock must not be held indefinitely). If the page is going to be held
 * SE_SHARED indefinitely, then the PG_NORELOC hint should be passed
 * to page_create_va so that pages that are prevented from being relocated
 * can be managed differently by the platform specific layer.
 *
 * Pages locked in memory using page_pp_lock (p_lckcnt/p_cowcnt != 0)
 * are guaranteed to be held in memory, but can still be relocated
 * providing the SE_EXCL lock can be obtained.
 *
 * The P_NORELOC bit in the page_t.p_state field is provided for use by
 * the platform specific code in managing pages when the PG_NORELOC
 * hint is used.
 *
 * Memory delete and page locking.
 *
 * The set of all usable pages is managed using the global page list as
 * implemented by the memseg structure defined below. When memory is added
 * or deleted this list changes. Additions to this list guarantee that the
 * list is never corrupt.  In order to avoid the necessity of an additional
 * lock to protect against failed accesses to the memseg being deleted and,
 * more importantly, the page_ts, the memseg structure is never freed and the
 * page_t virtual address space is remapped to a page (or pages) of
 * zeros.  If a page_t is manipulated while it is p_selock'd, or if it is
 * locked indirectly via a hash or freelist lock, it is not possible for
 * memory delete to collect the page and so that part of the page list is
 * prevented from being deleted. If the page is referenced outside of one
 * of these locks, it is possible for the page_t being referenced to be
 * deleted.  Examples of this are page_t pointers returned by
 * page_numtopp_nolock, page_first and page_next.  Providing the page_t
 * is re-checked after taking the p_selock (for p_vnode != NULL), the
 * remapping to the zero pages will be detected.
 *
 *
 * Page size (p_szc field) and page locking.
 *
 * The p_szc field of free pages is changed by the free list manager under
 * freelist locks and is of no concern to the rest of the VM subsystem.
 *
 * p_szc changes of allocated anonymous (swapfs) pages can only be done after
 * exclusively locking all constituent pages and calling hat_pageunload() on
 * each of them. To prevent p_szc changes of non-free anonymous (swapfs) large
 * pages it's enough to either lock SHARED any of the constituent pages or
 * prevent hat_pageunload() by holding the hat level lock that protects
 * mapping lists (this method is for hat code only).
 *
 * To increase (promote) the p_szc of allocated non-anonymous file system
 * pages one has to first lock exclusively all involved constituent pages and
 * call hat_pageunload() on each of them. To prevent a p_szc promotion it's
 * enough to either lock SHARED any of the constituent pages that will be
 * needed to make a large page or prevent hat_pageunload() by holding the hat
 * level lock that protects mapping lists (this method is for hat code only).
 *
 * To decrease (demote) the p_szc of an allocated non-anonymous file system
 * large page one can either use the same method as used for changing the
 * p_szc of anonymous large pages or, if it's not possible to lock all
 * constituent pages exclusively, a different method can be used. In the
 * second method one only has to exclusively lock one of the constituent
 * pages but then one has to acquire further locks by calling page_szc_lock()
 * and hat_page_demote(). hat_page_demote() acquires hat level locks and then
 * demotes the page. This mechanism relies on the fact that any code that
 * needs to prevent the p_szc of a file system large page from changing either
 * locks all constituent large pages at least SHARED or locks some pages at
 * least SHARED and calls page_szc_lock() or uses hat level page locks.
 * Demotion using this method is implemented by page_demote_vp_pages().
 * Please see comments in front of page_demote_vp_pages(), hat_page_demote()
 * and page_szc_lock() for more details.
 *
 * Lock order: p_selock, page_szc_lock, ph_mutex/vph_mutex/freelist,
 * hat level locks.
 */

typedef struct page {
	u_offset_t	p_offset;	/* offset into vnode for this page */
	struct vnode	*p_vnode;	/* vnode that this page is named by */
	selock_t	p_selock;	/* shared/exclusive lock on the page */
#if defined(_LP64)
	int		p_selockpad;	/* pad for growing selock */
#endif
	struct page	*p_hash;	/* hash by [vnode, offset] */
	struct page	*p_vpnext;	/* next page in vnode list */
	struct page	*p_vpprev;	/* prev page in vnode list */
	struct page	*p_next;	/* next page in free/intrans lists */
	struct page	*p_prev;	/* prev page in free/intrans lists */
	ushort_t	p_lckcnt;	/* number of locks on page data */
	ushort_t	p_cowcnt;	/* number of copy-on-write locks */
	kcondvar_t	p_cv;		/* page struct's condition var */
	kcondvar_t	p_io_cv;	/* for iolock */
	uchar_t		p_iolock_state;	/* replaces p_iolock */
	volatile uchar_t p_szc;		/* page size code */
	uchar_t		p_fsdata;	/* file system dependent byte */
	uchar_t		p_state;	/* p_free, p_noreloc */
	uchar_t		p_nrm;		/* non-cache, ref, mod readonly bits */
#if defined(__sparc)
	uchar_t		p_vcolor;	/* virtual color */
#else
	uchar_t		p_embed;	/* x86 - changes p_mapping & p_index */
#endif
	uchar_t		p_index;	/* MPSS mapping info. Not used on x86 */
	uchar_t		p_toxic;	/* page has an unrecoverable error */
	void		*p_mapping;	/* hat specific translation info */
	pfn_t		p_pagenum;	/* physical page number */

	uint_t		p_share;	/* number of translations */
#if defined(_LP64)
	uint_t		p_sharepad;	/* pad for growing p_share */
#endif
	uint_t		p_msresv_1;	/* reserved for future use */
#if defined(__sparc)
	uint_t		p_kpmref;	/* number of kpm mapping sharers */
	struct kpme	*p_kpmelist;	/* kpm specific mapping info */
#else
	/* index of entry in p_map when p_embed is set */
	uint_t		p_mlentry;
#endif
	uint64_t	p_msresv_2;	/* page allocation debugging */
} page_t;


typedef	page_t	devpage_t;
#define	devpage	page


/*
 * Page hash table is a power-of-two in size, externally chained
 * through the hash field.  PAGE_HASHAVELEN is the average length
 * desired for this chain, from which the size of the page_hash
 * table is derived at boot time and stored in the kernel variable
 * page_hashsz.  In the hash function it is given by PAGE_HASHSZ.
 *
 * PAGE_HASH_FUNC returns an index into the page_hash[] array.  This
 * index is also used to derive the mutex that protects the chain.
 *
 * In constructing the hash function, first we dispose of unimportant bits
 * (page offset from "off" and the low 3 bits of "vp" which are zero for
 * struct alignment). Then shift and sum the remaining bits a couple times
 * in order to get as many bits as possible from the two source values into
 * the resulting hashed value.  Note that this will perform quickly, since
 * the shifting/summing are fast register to register operations with no
 * additional memory references.
 */
#if NCPU < 4
#define	PH_TABLE_SIZE	16
#define	VP_SHIFT	7
#else
#define	PH_TABLE_SIZE	128
#define	VP_SHIFT	9
#endif

/*
 * The amount to use for the successive shifts in the hash function below.
 * The actual value is LOG2(PH_TABLE_SIZE), so that as many bits as
 * possible will filter thru PAGE_HASH_FUNC() and PAGE_HASH_MUTEX().
 */
#define	PH_SHIFT_SIZE   (7)

#define	PAGE_HASHSZ	page_hashsz
#define	PAGE_HASHAVELEN		4
#define	PAGE_HASH_FUNC(vp, off) \
	((((uintptr_t)(off) >> PAGESHIFT) + \
		((uintptr_t)(off) >> (PAGESHIFT + PH_SHIFT_SIZE)) + \
		((uintptr_t)(vp) >> 3) + \
		((uintptr_t)(vp) >> (3 + PH_SHIFT_SIZE)) + \
		((uintptr_t)(vp) >> (3 + 2 * PH_SHIFT_SIZE))) & \
		(PAGE_HASHSZ - 1))
#ifdef _KERNEL

/*
 * The page hash value is re-hashed to an index for the ph_mutex array.
 *
 * For 64 bit kernels, the mutex array is padded out to prevent false
 * sharing of cache sub-blocks (64 bytes) of adjacent mutexes.
 *
 * For 32 bit kernels, we don't want to waste kernel address space with
 * padding, so instead we rely on the hash function to introduce skew of
 * adjacent vnode/offset indexes (the left shift part of the hash function).
 * Since sizeof (kmutex_t) is 8, we shift an additional 3 to skew to a
 * different 64 byte sub-block.
 */
typedef struct pad_mutex {
	kmutex_t	pad_mutex;
#ifdef _LP64
	char		pad_pad[64 - sizeof (kmutex_t)];
#endif
} pad_mutex_t;
extern pad_mutex_t ph_mutex[];

#define	PAGE_HASH_MUTEX(x) \
	&(ph_mutex[((x) + ((x) >> VP_SHIFT) + ((x) << 3)) & \
		(PH_TABLE_SIZE - 1)].pad_mutex)

/*
 * Flags used while creating pages.
 */
#define	PG_EXCL		0x0001
#define	PG_WAIT		0x0002
#define	PG_PHYSCONTIG	0x0004		/* NOT SUPPORTED */
#define	PG_MATCH_COLOR	0x0008		/* SUPPORTED by free list routines */
#define	PG_NORELOC	0x0010		/* Non-relocatable alloc hint. */
					/* Page must be PP_ISNORELOC */
#define	PG_PANIC	0x0020		/* system will panic if alloc fails */
#define	PG_PUSHPAGE	0x0040		/* alloc may use reserve */

/*
 * When p_selock has the SE_EWANTED bit set, threads waiting for SE_EXCL
 * access are given priority over all other waiting threads.
 */
#define	SE_EWANTED	0x40000000
#define	PAGE_LOCKED(pp)		(((pp)->p_selock & ~SE_EWANTED) != 0)
#define	PAGE_SHARED(pp)		(((pp)->p_selock & ~SE_EWANTED) > 0)
#define	PAGE_EXCL(pp)		((pp)->p_selock < 0)
#define	PAGE_LOCKED_SE(pp, se)	\
	((se) == SE_EXCL ? PAGE_EXCL(pp) : PAGE_SHARED(pp))
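
/*
 * Illustrative only (assumed usage): per the locking rules described at the
 * top of this file, code typically asserts the lock state these macros
 * report before touching identity fields, e.g.
 *
 *	ASSERT(PAGE_EXCL(pp));		before modifying p_vnode, p_offset, ...
 *	ASSERT(PAGE_LOCKED(pp));	before merely examining such fields
 */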

extern	long page_hashsz;
extern	page_t **page_hash;

extern	kmutex_t page_llock;		/* page logical lock mutex */
extern	kmutex_t freemem_lock;		/* freemem lock */

extern	pgcnt_t	total_pages;		/* total pages in the system */

/*
 * Variables controlling locking of physical memory.
 */
extern	pgcnt_t	pages_pp_maximum;	/* tuning: lock + claim <= max */
extern	void init_pages_pp_maximum(void);

struct lgrp;

/* page_list_{add,sub} flags */

/* which list */
#define	PG_FREE_LIST	0x0001
#define	PG_CACHE_LIST	0x0002

/* where on list */
#define	PG_LIST_TAIL	0x0010
#define	PG_LIST_HEAD	0x0020

/* called from */
#define	PG_LIST_ISINIT	0x1000
#define	PG_LIST_ISCAGE	0x2000

/*
 * Flags for setting the p_toxic flag when a page has errors.
 * These flags may be OR'ed into the p_toxic page flag to
 * indicate that error(s) have occurred on a page
 * (see page_settoxic()). If both PAGE_IS_TOXIC and
 * PAGE_IS_FAILING are set, PAGE_IS_FAILING takes precedence.
 *
 * When an error happens on a page, the trap handler sets
 * PAGE_IS_FAULTY on the page to indicate that an error has been
 * seen on the page. The error could be really a memory error or
 * something else (like a datapath error). When it is determined
 * that it is a memory error, the page is marked as PAGE_IS_TOXIC
 * or PAGE_IS_FAILING depending on the type of error and then
 * retired.
 *
 * We use the page's 'toxic' flag to determine whether the page
 * has just got a single error - PAGE_IS_TOXIC - or is being
 * retired due to multiple soft errors - PAGE_IS_FAILING. In
 * page_free(), a page that has been marked PAGE_IS_FAILING will
 * not be cleaned, it will always be retired. A page marked
 * PAGE_IS_TOXIC is cleaned and is retired only if this attempt at
 * cleaning fails.
 *
 * When a page has been successfully retired, we set PAGE_IS_RETIRED.
 */
#define	PAGE_IS_OK		0x0
#define	PAGE_IS_TOXIC		0x1
#define	PAGE_IS_FAILING		0x2
#define	PAGE_IS_RETIRED		0x4
#define	PAGE_IS_FAULTY		0x8
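
/*
 * Illustrative sketch (assumed flow, not taken from this file): a handler
 * that has decided a page has a memory error might mark and retire it with
 * the routines declared below:
 *
 *	page_settoxic(pp, PAGE_IS_TOXIC);
 *	if (page_istoxic(pp) && !page_isretired(pp))
 *		(void) page_retire(pp, PAGE_IS_TOXIC);
 */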

/*
 * Page frame operations.
 */
page_t	*page_lookup(struct vnode *, u_offset_t, se_t);
page_t	*page_lookup_create(struct vnode *, u_offset_t, se_t, page_t *,
	spgcnt_t *, int);
page_t	*page_lookup_nowait(struct vnode *, u_offset_t, se_t);
page_t	*page_find(struct vnode *, u_offset_t);
page_t	*page_exists(struct vnode *, u_offset_t);
int	page_exists_physcontig(vnode_t *, u_offset_t, uint_t, page_t *[]);
int	page_exists_forreal(struct vnode *, u_offset_t, uint_t *);
void	page_needfree(spgcnt_t);
page_t	*page_create(struct vnode *, u_offset_t, size_t, uint_t);
int	page_alloc_pages(struct seg *, caddr_t, page_t **, page_t **,
		uint_t, int);
page_t  *page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes,
	uint_t flags, struct seg *seg, caddr_t vaddr, void *arg);
page_t	*page_create_va(struct vnode *, u_offset_t, size_t, uint_t,
	struct seg *, caddr_t);
int	page_create_wait(size_t npages, uint_t flags);
void    page_create_putback(ssize_t npages);
void	page_free(page_t *, int);
void	page_free_at_startup(page_t *);
void	page_free_pages(page_t *);
void	free_vp_pages(struct vnode *, u_offset_t, size_t);
int	page_reclaim(page_t *, kmutex_t *);
void	page_destroy(page_t *, int);
void	page_destroy_pages(page_t *);
void	page_destroy_free(page_t *);
void	page_rename(page_t *, struct vnode *, u_offset_t);
int	page_hashin(page_t *, struct vnode *, u_offset_t, kmutex_t *);
void	page_hashout(page_t *, kmutex_t *);
int	page_num_hashin(pfn_t, struct vnode *, u_offset_t);
void	page_add(page_t **, page_t *);
void	page_add_common(page_t **, page_t *);
void	page_sub(page_t **, page_t *);
void	page_sub_common(page_t **, page_t *);
page_t	*page_get_freelist(struct vnode *, u_offset_t, struct seg *,
		caddr_t, size_t, uint_t, struct lgrp *);

page_t	*page_get_cachelist(struct vnode *, u_offset_t, struct seg *,
		caddr_t, uint_t, struct lgrp *);
void	page_list_add(page_t *, int);
void	page_boot_demote(page_t *);
void	page_promote_size(page_t *, uint_t);
void	page_list_add_pages(page_t *, int);
void	page_list_sub(page_t *, int);
void	page_list_break(page_t **, page_t **, size_t);
void	page_list_concat(page_t **, page_t **);
void	page_vpadd(page_t **, page_t *);
void	page_vpsub(page_t **, page_t *);
int	page_lock(page_t *, se_t, kmutex_t *, reclaim_t);
int	page_lock_es(page_t *, se_t, kmutex_t *, reclaim_t, int);
void	page_lock_clr_exclwanted(page_t *);
int	page_trylock(page_t *, se_t);
int	page_try_reclaim_lock(page_t *, se_t, int);
int	page_tryupgrade(page_t *);
void	page_downgrade(page_t *);
void	page_unlock(page_t *);
void	page_lock_delete(page_t *);
int	page_pp_lock(page_t *, int, int);
void	page_pp_unlock(page_t *, int, int);
int	page_resv(pgcnt_t, uint_t);
void	page_unresv(pgcnt_t);
void	page_pp_useclaim(page_t *, page_t *, uint_t);
int	page_addclaim(page_t *);
int	page_subclaim(page_t *);
int	page_addclaim_pages(page_t **);
int	page_subclaim_pages(page_t **);
pfn_t	page_pptonum(page_t *);
page_t	*page_numtopp(pfn_t, se_t);
page_t	*page_numtopp_noreclaim(pfn_t, se_t);
page_t	*page_numtopp_nolock(pfn_t);
page_t	*page_numtopp_nowait(pfn_t, se_t);
page_t  *page_first();
page_t  *page_next(page_t *);
page_t  *page_nextn_raw(page_t *, ulong_t);	/* pp += n */
#define	page_next_raw(PP)	page_nextn_raw((PP), 1)
page_t  *page_list_next(page_t *);
page_t	*page_nextn(page_t *, ulong_t);
page_t	*page_next_scan_init(void **);
page_t	*page_next_scan_large(page_t *, ulong_t *, void **);
void    prefetch_page_r(void *);
void	ppcopy(page_t *, page_t *);
void	page_relocate_hash(page_t *, page_t *);
void	pagezero(page_t *, uint_t, uint_t);
void	pagescrub(page_t *, uint_t, uint_t);
void	page_io_lock(page_t *);
void	page_io_unlock(page_t *);
int	page_io_trylock(page_t *);
int	page_iolock_assert(page_t *);
void	page_iolock_init(page_t *);
pgcnt_t	page_busy(int);
void	page_lock_init(void);
ulong_t	page_share_cnt(page_t *);
int	page_isshared(page_t *);
int	page_isfree(page_t *);
int	page_isref(page_t *);
int	page_ismod(page_t *);
int	page_release(page_t *, int);
int	page_retire(page_t *, uchar_t);
int	page_istoxic(page_t *);
int	page_isfailing(page_t *);
int	page_isretired(page_t *);
int	page_deteriorating(page_t *);
void	page_settoxic(page_t *, uchar_t);
void	page_clrtoxic(page_t *);
void	page_clrtoxic_flag(page_t *, uchar_t);
int	page_isfaulty(page_t *);
int	page_mem_avail(pgcnt_t);
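
/*
 * Illustrative sketch (assumed usage): walking every page in the system with
 * the iterators declared above; see the memory delete discussion earlier in
 * this file for why a page_t found this way must be re-checked once its
 * p_selock is held:
 *
 *	page_t *pp = page_first();
 *	do {
 *		if (page_trylock(pp, SE_SHARED)) {
 *			... re-check identity, then examine pp ...
 *			page_unlock(pp);
 *		}
 *	} while ((pp = page_next(pp)) != page_first());
 */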

void	page_set_props(page_t *, uint_t);
void	page_clr_all_props(page_t *);

kmutex_t	*page_vnode_mutex(struct vnode *);
kmutex_t	*page_se_mutex(struct page *);
kmutex_t	*page_szc_lock(struct page *);
int		page_szc_lock_assert(struct page *pp);

/*
 * Page relocation interfaces. page_relocate() is generic.
 * page_get_replacement_page() is provided by the PSM.
 * page_free_replacement_page() is generic.
 */
int group_page_trylock(page_t *, se_t);
void group_page_unlock(page_t *);
int page_relocate(page_t **, page_t **, int, int, spgcnt_t *, struct lgrp *);
int do_page_relocate(page_t **, page_t **, int, spgcnt_t *, struct lgrp *);
page_t *page_get_replacement_page(page_t *, struct lgrp *, uint_t);
void page_free_replacement_page(page_t *);
int page_relocate_cage(page_t **, page_t **);

int page_try_demote_pages(page_t *);
void page_demote_free_pages(page_t *);

struct anon_map;

void page_mark_migrate(struct seg *, caddr_t, size_t, struct anon_map *,
    ulong_t, vnode_t *, u_offset_t, int);
void page_migrate(struct seg *, caddr_t, page_t **, pgcnt_t);

/*
 * Tell the PIM we are adding physical memory
 */
void add_physmem(page_t *, size_t, pfn_t);
void add_physmem_cb(page_t *, pfn_t);	/* callback for page_t part */

/*
 * hw_page_array[] is configured with hardware supported page sizes by
 * platform specific code.
 */
typedef struct {
	size_t	hp_size;
	uint_t	hp_shift;
	pgcnt_t	hp_pgcnt;	/* base pagesize cnt */
} hw_pagesize_t;

extern hw_pagesize_t	hw_page_array[];
extern uint_t		page_colors, page_colors_mask;
extern uint_t		page_coloring_shift;
extern int		cpu_page_colors;

uint_t	page_num_pagesizes(void);
uint_t	page_num_user_pagesizes(void);
size_t	page_get_pagesize(uint_t);
size_t	page_get_user_pagesize(uint_t n);
pgcnt_t	page_get_pagecnt(uint_t);
uint_t	page_get_shift(uint_t);
int	page_szc(size_t);
int	page_user_szc(size_t);


/* page_get_replacement_page flags */
#define	PGR_SAMESZC	0x1	/* only look for page size same as orig */
#define	PGR_NORELOC	0x2	/* allocate a P_NORELOC page */

#endif	/* _KERNEL */

/*
 * Constants used for the p_iolock_state
 */
#define	PAGE_IO_INUSE	0x1
#define	PAGE_IO_WANTED	0x2
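
/*
 * Illustrative only (assumed usage): the i/o lock is held around i/o list
 * manipulation, using the routines declared above under _KERNEL:
 *
 *	page_io_lock(pp);
 *	... link pp onto an i/o list via p_next/p_prev and start the i/o ...
 *	page_io_unlock(pp);
 */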

/*
 * Constants used for page_release status
 */
#define	PGREL_NOTREL    0x1
#define	PGREL_CLEAN	0x2
#define	PGREL_MOD	0x3

/*
 * The p_state field holds what used to be the p_age and p_free
 * bits.  These fields are protected by p_selock (see above).
 */
#define	P_FREE		0x80		/* Page on free list */
#define	P_NORELOC	0x40		/* Page is non-relocatable */
#define	P_MIGRATE	0x20		/* Migrate page on next touch */
#define	P_SWAP		0x10		/* belongs to vnode that is V_ISSWAP */

#define	PP_ISFREE(pp)		((pp)->p_state & P_FREE)
#define	PP_ISAGED(pp)		(((pp)->p_state & P_FREE) && \
					((pp)->p_vnode == NULL))
#define	PP_ISNORELOC(pp)	((pp)->p_state & P_NORELOC)
#define	PP_ISMIGRATE(pp)	((pp)->p_state & P_MIGRATE)
#define	PP_ISSWAP(pp)		((pp)->p_state & P_SWAP)

#define	PP_SETFREE(pp)		((pp)->p_state = ((pp)->p_state & ~P_MIGRATE) \
				| P_FREE)
#define	PP_SETAGED(pp)		ASSERT(PP_ISAGED(pp))
#define	PP_SETNORELOC(pp)	((pp)->p_state |= P_NORELOC)
#define	PP_SETMIGRATE(pp)	((pp)->p_state |= P_MIGRATE)
#define	PP_SETSWAP(pp)		((pp)->p_state |= P_SWAP)

#define	PP_CLRFREE(pp)		((pp)->p_state &= ~P_FREE)
#define	PP_CLRAGED(pp)		ASSERT(!PP_ISAGED(pp))
#define	PP_CLRNORELOC(pp)	((pp)->p_state &= ~P_NORELOC)
#define	PP_CLRMIGRATE(pp)	((pp)->p_state &= ~P_MIGRATE)
#define	PP_CLRSWAP(pp)		((pp)->p_state &= ~P_SWAP)



/*
 * kpm large page description.
 * The virtual address range of segkpm is divided into chunks of
 * kpm_pgsz. Each chunk is controlled by a kpm_page_t. The ushort
 * is sufficient for 2^15 * PAGESIZE, so e.g. the maximum kpm_pgsz
 * for 8K is 256M and 2G for 64K pages. It is kept as small as
 * possible to save physical memory space.
 *
 * There are 2 segkpm mapping windows within the virtual address
 * space when we have to prevent VAC alias conflicts. The so called
 * Alias window (mappings are always by PAGESIZE) is controlled by
 * kp_refcnta. The regular window is controlled by kp_refcnt for the
 * normal operation, which is to use the largest available pagesize.
 * When VAC alias conflicts are present within a chunk in the regular
 * window the large page mapping is broken up into smaller PAGESIZE
 * mappings. kp_refcntc is used to control the pages that are involved
 * in the conflict and kp_refcnts holds the active mappings done
 * with the small page size. In non vac conflict mode kp_refcntc is
 * also used as "go" indication (-1) for the trap level tsbmiss
 * handler.
 */
typedef struct kpm_page {
	short kp_refcnt;	/* pages mapped large */
	short kp_refcnta;	/* pages mapped in Alias window */
	short kp_refcntc;	/* TL-tsbmiss flag; #vac alias conflict pages */
	short kp_refcnts;	/* vac alias: pages mapped small */
} kpm_page_t;

/*
 * Note: khl_lock offset changes must be reflected in sfmmu_asm.s
 */
typedef struct kpm_hlk {
	kmutex_t khl_mutex;	/* kpm_page mutex */
	uint_t   khl_lock;	/* trap level tsbmiss handling */
} kpm_hlk_t;

/*
 * kpm small page description.
 * When kpm_pgsz is equal to PAGESIZE a smaller representation is used
 * to save memory space. Alias range mappings and regular segkpm
 * mappings are done in units of PAGESIZE and can share the mapping
 * information and the mappings are always distinguishable by their
 * virtual address. Other information needed for VAC conflict prevention
 * is already available on a per page basis. There are basically 3 states
 * a kpm_spage can have: not mapped (0), mapped in Alias range or virtually
 * uncached (1) and mapped in the regular segkpm window (-1). The -1 value
 * is also used as "go" indication for the segkpm trap level tsbmiss
 * handler for small pages (value is kept the same as it is used for large
 * mappings).
 */
typedef struct kpm_spage {
	char	kp_mapped;	/* page mapped small */
} kpm_spage_t;

/*
 * Note: kshl_lock offset changes must be reflected in sfmmu_asm.s
 */
typedef struct kpm_shlk {
	uint_t   kshl_lock;	/* trap level tsbmiss handling */
} kpm_shlk_t;

/*
 * Each segment of physical memory is described by a memseg struct.
 * Within a segment, memory is considered contiguous. The members
 * can be categorized as follows:
 * . Platform independent:
 *         pages, epages, pages_base, pages_end, next, lnext.
 * . 64bit only but platform independent:
 *         kpm_pbase, kpm_nkpmpgs, kpm_pages, kpm_spages.
 * . Really platform or mmu specific:
 *         pagespa, epagespa, nextpa, kpm_pagespa.
 * . Mixed:
 *         msegflags.
 */
struct memseg {
	page_t *pages, *epages;		/* [from, to] in page array */
	pfn_t pages_base, pages_end;	/* [from, to] in page numbers */
	struct memseg *next;		/* next segment in list */
#if defined(__sparc)
	struct memseg *lnext;		/* next segment in deleted list */
	uint64_t pagespa, epagespa;	/* [from, to] page array physical */
	uint64_t nextpa;		/* physical next pointer */
	pfn_t	kpm_pbase;		/* start of kpm range */
	pgcnt_t kpm_nkpmpgs;		/* # of kpm_pgsz pages */
	union _mseg_un {
		kpm_page_t  *kpm_lpgs;	/* ptr to kpm_page array */
		kpm_spage_t *kpm_spgs;	/* ptr to kpm_spage array */
	} mseg_un;
	uint64_t kpm_pagespa;		/* physical ptr to kpm (s)pages array */
	uint_t msegflags;		/* memseg flags */
#endif /* __sparc */
};

/* memseg union aliases */
#define	kpm_pages	mseg_un.kpm_lpgs
#define	kpm_spages	mseg_un.kpm_spgs

/* msegflags */
#define	MEMSEG_DYNAMIC		0x1	/* DR: memory was added dynamically */

/* memseg support macros */
#define	MSEG_NPAGES(SEG)	((SEG)->pages_end - (SEG)->pages_base)
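
/*
 * Illustrative sketch (assumed usage, mirroring what page_numtopp_nolock()
 * and page_numtomemseg_nolock(), declared elsewhere in this header, must
 * do): translating a pfn into its page_t by walking the memseg list:
 *
 *	struct memseg *seg;
 *	page_t *pp = NULL;
 *
 *	for (seg = memsegs; seg != NULL; seg = seg->next) {
 *		if (pfn >= seg->pages_base && pfn < seg->pages_end) {
 *			pp = seg->pages + (pfn - seg->pages_base);
 *			break;
 *		}
 *	}
 */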

/* memseg hash */
#define	MEM_HASH_SHIFT		0x9
#define	N_MEM_SLOTS		0x200		/* must be a power of 2 */
#define	MEMSEG_PFN_HASH(pfn)	(((pfn)/mhash_per_slot) & (N_MEM_SLOTS - 1))

/* memseg externals */
extern struct memseg *memsegs;		/* list of memory segments */
extern ulong_t mhash_per_slot;
extern uint64_t memsegspa;		/* memsegs as physical address */

void build_pfn_hash();
extern struct memseg *page_numtomemseg_nolock(pfn_t pfnum);


#ifdef	__cplusplus
}
#endif

#endif	/* _VM_PAGE_H */