/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/intr.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>

/*
 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#else

#define	TT_TRACE(label)

#endif /* TRAPTRACE */

#ifndef	lint

#if (TTE_SUSPEND_SHIFT > 0)
#define	TTE_SUSPEND_INT_SHIFT(reg)				\
	sllx	reg, TTE_SUSPEND_SHIFT, reg
#else
#define	TTE_SUSPEND_INT_SHIFT(reg)
#endif

#endif /* lint */

#ifndef	lint

/*
 * Assumes TSBE_TAG is 0
 * Assumes TSBE_INTHI is 0
 * Assumes TSBREG.split is 0
 */

#if TSBE_TAG != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
#endif

#if TSBTAG_INTHI != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
#endif

/*
 * The following code assumes the tsb is not split.
 *
 * With TSBs no longer shared between processes, it's no longer
 * necessary to hash the context bits into the tsb index to get
 * tsb coloring; the new implementation treats the TSB as a
 * direct-mapped, virtually-addressed cache.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    tsbbase = base address of TSB (clobbered)
 *    tagacc = tag access register (clobbered)
 *    szc = size code of TSB (ro)
 *    tmp = scratch reg
 * Out:
 *    tsbbase = pointer to entry in TSB
 */
#define	GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp)		\
	mov	TSB_ENTRIES(0), tmp	/* nentries in TSB size 0 */	;\
	srlx	tagacc, vpshift, tagacc 				;\
	sllx	tmp, szc, tmp		/* tmp = nentries in TSB */	;\
	sub	tmp, 1, tmp		/* mask = nentries - 1 */	;\
	and	tagacc, tmp, tmp	/* tsbent = virtpage & mask */	;\
	sllx	tmp, TSB_ENTRY_SHIFT, tmp	/* entry num --> ptr */	;\
	add	tsbbase, tmp, tsbbase	/* add entry offset to TSB base */
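
/*
 * For reference, an illustrative C sketch of the index computation
 * above (not part of the build; nentries0 stands for TSB_ENTRIES(0)):
 *
 *	nentries = nentries0 << szc;
 *	tsbent = (tagacc >> vpshift) & (nentries - 1);
 *	tsbbase += tsbent << TSB_ENTRY_SHIFT;
 */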

/*
 * When the kpm TSB is used it is assumed that it is direct mapped
 * using (vaddr>>vpshift)%tsbsz as the index.
 *
 * Note that, for now, the kpm TSB and kernel TSB are the same for
 * each mapping size.  However that need not always be the case.  If
 * the trap handlers are updated to search a different TSB for kpm
 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    vaddr = virtual address (clobbered)
 *    tsbp, szc, tmp = scratch
 * Out:
 *    tsbp = pointer to entry in TSB
 */
#define	GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)		\
	cmp	vpshift, MMU_PAGESHIFT					;\
	bne,pn	%icc, 1f		/* branch if large case */	;\
	  sethi	%hi(kpmsm_tsbsz), szc					;\
	sethi	%hi(kpmsm_tsbbase), tsbp				;\
	ld	[szc + %lo(kpmsm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpmsm_tsbbase)], tsbp			;\
	ba,pt	%icc, 2f						;\
	  nop								;\
1:	sethi	%hi(kpm_tsbsz), szc					;\
	sethi	%hi(kpm_tsbbase), tsbp					;\
	ld	[szc + %lo(kpm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpm_tsbbase)], tsbp				;\
2:	GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)
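
/*
 * Equivalent C sketch of the TSB selection above (illustrative only):
 *
 *	if (vpshift == MMU_PAGESHIFT) {
 *		szc = kpmsm_tsbsz;
 *		tsbp = kpmsm_tsbbase;		// small-page kpm TSB
 *	} else {
 *		szc = kpm_tsbsz;
 *		tsbp = kpm_tsbbase;		// large-page kpm TSB
 *	}
 *	// then index as in GET_TSBE_POINTER()
 */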

/*
 * Lock the TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to jump to if we fail to lock the tsb entry
 * %asi = ASI to use for TSB access
 *
 * NOTE that we flush the TSB using fast VIS instructions that
 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
 * not be treated as a locked entry or we'll get stuck spinning on
 * an entry that isn't locked but really invalid.
 */

#if defined(UTSB_PHYS)

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]ASI_MEM, tmp1					;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2 						;\
	be,a,pn	%icc, label		/* if locked ignore */		;\
	  nop								;\
	casa	[tsbep]ASI_MEM, tmp1, tmp2				;\
	cmp	tmp1, tmp2 						;\
	bne,a,pn %icc, label		/* didn't lock so ignore */	;\
	  nop								;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#else /* UTSB_PHYS */

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]%asi, tmp1					;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2 						;\
	be,a,pn	%icc, label		/* if locked ignore */		;\
	  nop								;\
	casa	[tsbep]%asi, tmp1, tmp2					;\
	cmp	tmp1, tmp2 						;\
	bne,a,pn %icc, label		/* didn't lock so ignore */	;\
	  nop								;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#endif /* UTSB_PHYS */
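
/*
 * The lock protocol above, as an illustrative C sketch (tagp names the
 * 32-bit tag word at tsbep; atomic_cas_32() stands in for casa):
 *
 *	old = *tagp;
 *	if (old == TSBTAG_LOCKED)
 *		goto fail;			// already locked
 *	if (atomic_cas_32(tagp, old, TSBTAG_LOCKED) != old)
 *		goto fail;			// lost the race
 *	membar_producer();			// lock acquired
 */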

/*
 * Atomically write TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tte = TSBE TTE (ro)
 * tagtarget = TSBE tag (ro)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	add	tsbep, TSBE_TTE, tmp1					;\
	stxa	tte, [tmp1]ASI_MEM		/* write tte data */	;\
	membar #StoreStore						;\
	add	tsbep, TSBE_TAG, tmp1					;\
	stxa	tagtarget, [tmp1]ASI_MEM	/* write tte tag & unlock */

#else /* UTSB_PHYS */

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	stxa	tte, [tsbep + TSBE_TTE]%asi	/* write tte data */	;\
	membar #StoreStore						;\
	stxa	tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */

#endif /* UTSB_PHYS */
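
/*
 * Release ordering, sketched in C (illustrative; tte/tag stand for the
 * words at offsets TSBE_TTE and TSBE_TAG): the TTE data must be
 * globally visible before the tag store that publishes and unlocks
 * the entry.
 *
 *	tsbep->tte = tte;		// write the data word
 *	membar_producer();		// order data before tag
 *	tsbep->tag = tagtarget;		// publish tag, dropping the lock
 */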

/*
 * Load an entry into the TSB at TL > 0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = value of the TTE retrieved and loaded (wo)
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to jump to if we fail to lock the tsb entry
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for a valid	;\
	 * tte; TSB invalidate will spin until the entry is unlocked.	;\
	 * Note that we always invalidate the hash table before we	;\
	 * unload the TSB.						;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for a valid	;\
	 * tte; TSB invalidate will spin until the entry is unlocked.	;\
	 * Note that we always invalidate the hash table before we	;\
	 * unload the TSB.						;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

/*
 * Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
 *   for ITLB synthesis.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = 4M pfn offset (in), value of the TTE retrieved and loaded (out)
 *   with exec_perm turned off and exec_synth turned on
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branch (text)
 * %asi = ASI to use for TSB access
 */

#define	TSB_UPDATE_TL_PN(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for a valid	;\
	 * tte; TSB invalidate will spin until the entry is unlocked.	;\
	 * Note that we always invalidate the hash table before we	;\
	 * unload the TSB.  Or the 4M pfn offset into the TTE, clear	;\
	 * the exec_perm bit, and set the exec_synth bit.		;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	mov	tte, tmp1						;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	or	tte, tmp1, tte						;\
	andn	tte, TTE_EXECPRM_INT, tte				;\
	or	tte, TTE_E_SYNTH_INT, tte				;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

/*
 * Build a 4M pfn offset for a Panther 32M/256M page, for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (ro)
 * tagaccess = tag access register, used to get 4M pfn bits (ro)
 * pfn = 4M pfn bits shifted to offset for tte (out)
 * tmp = scratch register
 * label = label to use for branch (text)
 */

#define	GET_4M_PFN_OFF(tte, tagaccess, pfn, tmp, label)			\
	/*								;\
	 * Get 4M bits from tagaccess for 32M, 256M pagesizes.		;\
	 * Return them, shifted, in pfn.				;\
	 */								;\
	srlx	tagaccess, MMU_PAGESHIFT4M, tagaccess			;\
	srlx	tte, TTE_SZ_SHFT, tmp		/* isolate the */	;\
	andcc	tmp, TTE_SZ_BITS, %g0		/* tte_size bits */	;\
	bz,a,pt %icc, label/**/f		/* if 0, is */		;\
	  and	tagaccess, 0x7, tagaccess	/* 32M page size */	;\
	and	tagaccess, 0x3f, tagaccess /* else 256M page size */	;\
label:									;\
	sllx	tagaccess, MMU_PAGESHIFT4M, pfn

/*
 * Add 4M TTE size code to a tte for a Panther 32M/256M page,
 * for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (rw)
 * tmp = scratch register
 */

#define	SET_TTE4M_PN(tte, tmp)						\
	/*								;\
	 * Set 4M pagesize tte bits. 					;\
	 */								;\
	set	TTE4M, tmp						;\
	sllx	tmp, TTE_SZ_SHFT, tmp					;\
	or	tte, tmp, tte

/*
 * Load an entry into the TSB at TL=0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tteva = pointer to the TTE to load as va (ro)
 * tagtarget = TSBE tag to load (which contains no context), synthesized
 * to match va of MMU tag target register only (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tteva, label						;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tteva, label						;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

/*
 * Invalidate a TSB entry in the TSB.
 *
 * NOTE: TSBE_TAG is assumed to be zero.  There is a compile-time check
 *	 earlier in this file to ensure this is true.  Thus when we are
 *	 directly referencing tsbep below, we are referencing the tte_tag
 *	 field of the TSBE.  If this offset ever changes, the code below
 *	 will need to be modified.
 *
 * tsbep = pointer to TSBE as va (ro)
 * tag = invalidation is done if this matches the TSBE tag (ro)
 * tmp1 - tmp3 = scratch registers (clobbered)
 * label = label name to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]ASI_MEM, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]ASI_MEM, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#else /* UTSB_PHYS */

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]%asi, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]%asi, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]%asi, tmp1, tmp3	/* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#endif /* UTSB_PHYS */
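
/*
 * Illustrative C sketch of the invalidate loop above
 * (atomic_cas_32() stands in for casa; the CAS targets the 32-bit
 * word at the start of the TSBE while the tag compare is 64-bit):
 *
 *	for (;;) {
 *		old32 = *(uint32_t *)tsbep;
 *		if (old32 == TSBTAG_LOCKED)
 *			continue;		// spin while locked
 *		if (*(uint64_t *)tsbep != tag)
 *			break;			// different tag; leave it
 *		if (atomic_cas_32((uint32_t *)tsbep, old32,
 *		    TSBTAG_INVALID) == old32)
 *			break;			// invalidated
 *	}
 */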

#if TSB_SOFTSZ_MASK < TSB_SZ_MASK
#error	- TSB_SOFTSZ_MASK too small
#endif


/*
 * An implementation of setx which will be hot patched at run time.
 * Since it is being hot patched, there is no value passed in.
 * Thus, essentially we are implementing
 *	setx value, tmp, dest
 * where value is RUNTIME_PATCH (aka 0) in this case.
 */
#define	RUNTIME_PATCH_SETX(dest, tmp)					\
	sethi	%hh(RUNTIME_PATCH), tmp					;\
	sethi	%lm(RUNTIME_PATCH), dest				;\
	or	tmp, %hm(RUNTIME_PATCH), tmp				;\
	or	dest, %lo(RUNTIME_PATCH), dest				;\
	sllx	tmp, 32, tmp						;\
	nop				/* for perf reasons */		;\
	or	tmp, dest, dest		/* contents of patched value */
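
/*
 * How the pieces combine, sketched in C (value is the 64-bit constant
 * patched in later; %hh/%hm/%lm/%lo select bits 63:42, 41:32, 31:10,
 * and 9:0 respectively):
 *
 *	tmp  = (value >> 42) << 10;		// sethi %hh
 *	tmp |= (value >> 32) & 0x3ff;		// or %hm
 *	dest = ((value >> 10) & 0x3fffff) << 10; // sethi %lm
 *	dest |= value & 0x3ff;			// or %lo
 *	dest |= tmp << 32;			// sllx + or
 */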

#endif /* lint */


#if defined (lint)

/*
 * sfmmu related subroutines
 */
uint_t
sfmmu_disable_intrs()
{ return(0); }

/* ARGSUSED */
void
sfmmu_enable_intrs(uint_t pstate_save)
{}

/* ARGSUSED */
int
sfmmu_alloc_ctx(sfmmu_t *sfmmup, int allocflag, struct cpu *cp, int shflag)
{ return(0); }

/*
 * Use cas; if the tte has changed underneath us, reread it and try
 * again.  In the case of a retry, sttep is updated with the new
 * original.
 */
/* ARGSUSED */
int
sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/*
 * Use cas; if the tte has changed underneath us, return 1, else return 0.
 */
/* ARGSUSED */
int
sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }
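
/*
 * A C sketch of the retry loop sfmmu_modifytte() implements below
 * (illustrative; atomic_cas_64() stands in for casx):
 *
 *	orig = *sttep;			// caller's snapshot
 *	cur = *dttep;
 *	for (;;) {
 *		mod = *stmodttep;
 *		if (mod == cur) {	// nothing to change
 *			*sttep = cur;
 *			break;
 *		}
 *		if (atomic_cas_64((uint64_t *)dttep, orig, mod) == orig)
 *			break;		// swap succeeded
 *		cur = orig = *dttep;	// tte changed underneath us;
 *		*sttep = orig;		// update the snapshot and retry
 *	}
 */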

/* ARGSUSED */
void
sfmmu_copytte(tte_t *sttep, tte_t *dttep)
{}

/*ARGSUSED*/
struct tsbe *
sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
{ return(0); }

/*ARGSUSED*/
uint64_t
sfmmu_make_tsbtag(caddr_t va)
{ return(0); }

#else	/* lint */

	.seg	".data"
	.global	sfmmu_panic1
sfmmu_panic1:
	.asciz	"sfmmu_asm: interrupts already disabled"

	.global	sfmmu_panic3
sfmmu_panic3:
	.asciz	"sfmmu_asm: sfmmu_vatopfn called for user"

	.global	sfmmu_panic4
sfmmu_panic4:
	.asciz	"sfmmu_asm: 4M tsb pointer mis-match"

	.global	sfmmu_panic5
sfmmu_panic5:
	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"

	.global	sfmmu_panic6
sfmmu_panic6:
	.asciz	"sfmmu_asm: interrupts not disabled"

	.global	sfmmu_panic7
sfmmu_panic7:
	.asciz	"sfmmu_asm: kernel as"

	.global	sfmmu_panic8
sfmmu_panic8:
	.asciz	"sfmmu_asm: gnum is zero"

	.global	sfmmu_panic9
sfmmu_panic9:
	.asciz	"sfmmu_asm: cnum is greater than MAX_SFMMU_CTX_VAL"

	.global	sfmmu_panic10
sfmmu_panic10:
	.asciz	"sfmmu_asm: valid SCD with no 3rd scd TSB"

	.global	sfmmu_panic11
sfmmu_panic11:
	.asciz	"sfmmu_asm: ktsb_phys must not be 0 on a sun4v platform"

	ENTRY(sfmmu_disable_intrs)
	rdpr	%pstate, %o0
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o0, sfmmu_di_l0, %g1)
#endif /* DEBUG */
	retl
	  wrpr	%o0, PSTATE_IE, %pstate
	SET_SIZE(sfmmu_disable_intrs)

	ENTRY(sfmmu_enable_intrs)
	retl
	  wrpr	%g0, %o0, %pstate
	SET_SIZE(sfmmu_enable_intrs)

/*
 * This routine is called both by resume() and sfmmu_get_ctx() to
 * allocate a new context for the process on an MMU.
 * If allocflag == 1, then allocate a ctx when HAT mmu cnum == INVALID.
 * If allocflag == 0, then do not allocate a ctx if HAT mmu cnum == INVALID,
 * which is the case when sfmmu_alloc_ctx is called from resume().
 *
 * The caller must disable interrupts before entering this routine.
 * To reduce ctx switch overhead, the code contains both 'fast path' and
 * 'slow path' code. The fast path code covers the common case, where only
 * a quick check is needed and the real ctx allocation is not required.
 * It can be done without holding the per-process (PP) lock.
 * The 'slow path' code must be protected by the PP lock and performs ctx
 * allocation.
 * The hardware context register and HAT mmu cnum are updated accordingly.
 *
 * %o0 - sfmmup
 * %o1 - allocflag
 * %o2 - CPU
 * %o3 - sfmmu private/shared flag
 *
 * ret - 0: no ctx is allocated
 *       1: a ctx is allocated
 */
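
/*
 * Pseudo-C outline of the fast and slow paths below (illustrative
 * only; helper names are descriptive, not real functions):
 *
 *	(gnum, cnum) = sfmmup->sfmmu_ctxs[mmuid];	// fast path check
 *	if (cnum == INVALID_CONTEXT && allocflag == 0)
 *		goto done;		// resume(): program INVALID, ret = 0
 *	if (cnum != INVALID_CONTEXT && gnum == mmu_ctxp->gnum)
 *		goto done;		// ctx still valid, ret = 1
 *	spin_lock(&sfmmup->sfmmu_ctx_lock);	// slow path
 *	recheck as above; if allocation is still needed:
 *		cnum = mmu_ctxp->cnum++;	// via cas; if cnum hits
 *						// nctxs, bail with INVALID
 *		sfmmup->sfmmu_ctxs[mmuid] = (mmu_ctxp->gnum, cnum);
 *	spin_unlock(&sfmmup->sfmmu_ctx_lock);
 * done:
 *	SET_SECCTX(cnum, ...);		// program secondary ctx register
 */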
	ENTRY_NP(sfmmu_alloc_ctx)

#ifdef DEBUG
	sethi	%hi(ksfmmup), %g1
	ldx	[%g1 + %lo(ksfmmup)], %g1
	cmp	%g1, %o0
	bne,pt	%xcc, 0f
	  nop

	sethi	%hi(panicstr), %g1		! if kernel as, panic
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pn	%icc, 7f
	  nop

	sethi	%hi(sfmmu_panic7), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic7), %o0

7:
	retl
	  mov	%g0, %o0			! %o0 = ret = 0

0:
	PANIC_IF_INTR_ENABLED_PSTR(sfmmu_ei_l1, %g1)
#endif /* DEBUG */

	mov	%o3, %g1			! save sfmmu pri/sh flag in %g1

	! load global mmu_ctxp info
	ldx	[%o2 + CPU_MMU_CTXP], %o3		! %o3 = mmu_ctx_t ptr

#ifdef sun4v
	/* During suspend on sun4v, context domains can be temporarily removed */
	brz,a,pn	%o3, 0f
	  nop
#endif

	lduw	[%o2 + CPU_MMU_IDX], %g2		! %g2 = mmu index

	! load global mmu_ctxp gnum
	ldx	[%o3 + MMU_CTX_GNUM], %o4		! %o4 = mmu_ctxp->gnum

#ifdef DEBUG
	cmp	%o4, %g0		! mmu_ctxp->gnum should never be 0
	bne,pt	%xcc, 3f
	  nop

	sethi	%hi(panicstr), %g1	! test if panicstr is already set
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pn	%icc, 1f
	  nop

	sethi	%hi(sfmmu_panic8), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic8), %o0
1:
	retl
	  mov	%g0, %o0			! %o0 = ret = 0
3:
#endif

	! load HAT sfmmu_ctxs[mmuid] gnum, cnum

	sllx	%g2, SFMMU_MMU_CTX_SHIFT, %g2
	add	%o0, %g2, %g2		! %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS

	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 *
	 * Fast path code, do a quick check.
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	mov	%g0, %g4	! %g4 = ret = 0
	brz,pt	%o1, 8f		! allocflag == 0, skip ctx allocation, bail
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 2f
	  nop
#ifdef sun4v
0:
	set	INVALID_CONTEXT, %o1
	membar	#LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  mov	%g0, %g4		! %g4 = ret = 0
#endif
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	mov	1, %g4				! %g4 = ret = 1
	be,a,pt	%icc, 8f			! gnum unchanged, go to done
	  mov	%g6, %o1

2:
	/*
	 * Grab the per-process (PP) sfmmu_ctx_lock spinlock,
	 * followed by the 'slow path' code.
	 */
	ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = per process (PP) lock
3:
	brz	%g3, 5f
	  nop
4:
	brnz,a,pt	%g3, 4b				! spin if lock is 1
	  ldub	[%o0 + SFMMU_CTX_LOCK], %g3
	ba	%xcc, 3b				! retry the lock
	  ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = PP lock

5:
	membar	#LoadLoad
	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	mov	%g0, %g4	! %g4 = ret = 0
	brz,pt	%o1, 2f		! allocflag == 0, called from resume, set hw
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 6f
	  nop
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	mov	1, %g4				! %g4 = ret = 1
	be,a,pt	%icc, 2f			! gnum unchanged, go to done
	  mov	%g6, %o1

	ba,pt	%icc, 6f
	  nop
2:
	membar	#LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  clrb	[%o0 + SFMMU_CTX_LOCK]
6:
	/*
	 * We get here if we do not have a valid context, or
	 * the HAT gnum does not match the global gnum. We hold the
	 * sfmmu_ctx_lock spinlock. Allocate that context.
	 *
	 * %o3 = mmu_ctxp
	 */
	add	%o3, MMU_CTX_CNUM, %g3
	ld	[%o3 + MMU_CTX_NCTXS], %g4

	/*
	 * %g2 = &sfmmu_ctx_t[mmuid] - SFMMU_CTXS;
	 * %g3 = mmu cnum address
	 * %g4 = mmu nctxs
	 *
	 * %o0 = sfmmup
	 * %o1 = mmu current cnum value (used as new cnum)
	 * %o4 = mmu gnum
	 *
	 * %o5 = scratch
	 */
	ld	[%g3], %o1
0:
	cmp	%o1, %g4
	bl,a,pt %icc, 1f
	  add	%o1, 1, %o5		! %o5 = mmu_ctxp->cnum + 1

	/*
	 * cnum reaches max; bail, so wrap around can be performed later.
	 */
	set	INVALID_CONTEXT, %o1
	mov	%g0, %g4		! %g4 = ret = 0

	membar	#LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  clrb	[%o0 + SFMMU_CTX_LOCK]
1:
	! %g3 = addr of mmu_ctxp->cnum
	! %o5 = mmu_ctxp->cnum + 1
	cas	[%g3], %o1, %o5
	cmp	%o1, %o5
	bne,a,pn %xcc, 0b	! cas failed
	  ld	[%g3], %o1

#ifdef DEBUG
	set	MAX_SFMMU_CTX_VAL, %o5
	cmp	%o1, %o5
	ble,pt %icc, 2f
	  nop

	sethi	%hi(sfmmu_panic9), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic9), %o0
2:
#endif
	! update hat gnum and cnum
	sllx	%o4, SFMMU_MMU_GNUM_RSHIFT, %o4
	or	%o4, %o1, %o4
	stx	%o4, [%g2 + SFMMU_CTXS]

	membar	#LoadStore|#StoreStore
	clrb	[%o0 + SFMMU_CTX_LOCK]

	mov	1, %g4			! %g4 = ret = 1
8:
	/*
	 * program the secondary context register
	 *
	 * %o1 = cnum
	 * %g1 = sfmmu private/shared flag (0:private, 1:shared)
	 */

	/*
	 * When we come here and the context is invalid, we want to set both
	 * the private and shared ctx regs to INVALID. In order to
	 * do so, we set the sfmmu priv/shared flag to 'private' regardless,
	 * so that the private ctx reg will be set to invalid.
	 * Note that on sun4v, values written to the private context register
	 * are automatically written to the corresponding shared context
	 * register as well. On sun4u, SET_SECCTX() will invalidate the shared
	 * context register when it sets a private secondary context register.
	 */

	cmp	%o1, INVALID_CONTEXT
	be,a,pn	%icc, 9f
	  clr	%g1
9:

#ifdef	sun4u
	ldub	[%o0 + SFMMU_CEXT], %o2
	sll	%o2, CTXREG_EXT_SHIFT, %o2
	or	%o1, %o2, %o1
#endif /* sun4u */

	SET_SECCTX(%o1, %g1, %o4, %o5, alloc_ctx_lbl1)

	retl
	  mov	%g4, %o0			! %o0 = ret

	SET_SIZE(sfmmu_alloc_ctx)

	ENTRY_NP(sfmmu_modifytte)
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
2:
	ldx	[%o1], %g2			/* modified */
	cmp	%g2, %g3			/* is modified = current? */
	be,a,pt	%xcc,1f				/* yes, don't write */
	stx	%g3, [%o0]			/* update new original */
	casx	[%o2], %g1, %g2
	cmp	%g1, %g2
	be,pt	%xcc, 1f			/* cas succeeded - return */
	  nop
	ldx	[%o2], %g3			/* new current */
	stx	%g3, [%o0]			/* save as new original */
	ba,pt	%xcc, 2b
	  mov	%g3, %g1
1:	retl
	membar	#StoreLoad
	SET_SIZE(sfmmu_modifytte)

	ENTRY_NP(sfmmu_modifytte_try)
	ldx	[%o1], %g2			/* modified */
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
	cmp	%g3, %g2			/* is modified = current? */
	be,a,pn %xcc,1f				/* yes, don't write */
	mov	0, %o1				/* as if cas failed. */

	casx	[%o2], %g1, %g2
	membar	#StoreLoad
	cmp	%g1, %g2
	movne	%xcc, -1, %o1			/* cas failed. */
	move	%xcc, 1, %o1			/* cas succeeded. */
1:
	stx	%g2, [%o0]			/* report "current" value */
	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_modifytte_try)

	ENTRY_NP(sfmmu_copytte)
	ldx	[%o0], %g1
	retl
	stx	%g1, [%o1]
	SET_SIZE(sfmmu_copytte)


	/*
	 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
	 * %o0 = TSB base address (in), pointer to TSB entry (out)
	 * %o1 = vaddr (in)
	 * %o2 = vpshift (in)
	 * %o3 = tsb size code (in)
	 * %o4 = scratch register
	 */
	ENTRY_NP(sfmmu_get_tsbe)
	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
	retl
	nop
	SET_SIZE(sfmmu_get_tsbe)

	/*
	 * Return a TSB tag for the given va.
	 * %o0 = va (in/clobbered)
	 * %o0 = va shifted to be in tsb tag format (with no context) (out)
	 */
	ENTRY_NP(sfmmu_make_tsbtag)
	retl
	srln	%o0, TTARGET_VA_SHIFT, %o0
	SET_SIZE(sfmmu_make_tsbtag)

#endif /* lint */

/*
 * Other sfmmu primitives
 */


#if defined (lint)
void
sfmmu_patch_ktsb(void)
{
}

void
sfmmu_kpm_patch_tlbm(void)
{
}

void
sfmmu_kpm_patch_tsbm(void)
{
}

void
sfmmu_patch_shctx(void)
{
}

/* ARGSUSED */
void
sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
{
}

/* ARGSUSED */
void
sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
{
}

/* ARGSUSED */
void
sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
{
}

/* ARGSUSED */
void
sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
{
}

#else /* lint */

#define	I_SIZE		4

	ENTRY_NP(sfmmu_fix_ktlb_traptable)
	/*
	 * %o0 = start of patch area
	 * %o1 = size code of TSB to patch
	 * %o3 = scratch
	 */
	/* fix sll */
	ld	[%o0], %o3			/* get sll */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write sll */
	flush	%o0
	/* fix srl */
	add	%o0, I_SIZE, %o0		/* goto next instr. */
	ld	[%o0], %o3			/* get srl */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write srl */
	retl
	flush	%o0
	SET_SIZE(sfmmu_fix_ktlb_traptable)

	ENTRY_NP(sfmmu_fixup_ktsbbase)
	/*
	 * %o0 = start of patch area
	 * %o5 = kernel virtual or physical tsb base address
	 * %o2, %o3 are used as scratch registers.
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srl	%o5, 10, %o2			! offset is bits 32:10
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup offset of lduw/ldx */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o5, 0x3ff, %o2			! set imm13 to bits 9:0
	or	%o3, %o2, %o3
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_ktsbbase)

	ENTRY_NP(sfmmu_fixup_setx)
	/*
	 * %o0 = start of patch area
	 * %o4 = 64 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 *
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 *
	 * Note the implementation of setx which is being patched is as follows:
	 *
	 * sethi   %hh(RUNTIME_PATCH), tmp
	 * sethi   %lm(RUNTIME_PATCH), dest
	 * or      tmp, %hm(RUNTIME_PATCH), tmp
	 * or      dest, %lo(RUNTIME_PATCH), dest
	 * sllx    tmp, 32, tmp
	 * nop
	 * or      tmp, dest, dest
	 *
	 * which differs from the implementation in the
	 * "SPARC Architecture Manual"
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srlx	%o4, 42, %o2			! bits [63:42]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup sethi instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	sllx	%o4, 32, %o2			! clear upper bits
	srlx	%o2, 42, %o2			! bits [31:10]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	srlx	%o4, 32, %o2			! bits [63:32]
	and	%o2, 0x3ff, %o2			! bits [41:32]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_setx)

	ENTRY_NP(sfmmu_fixup_or)
	/*
	 * %o0 = start of patch area
	 * %o4 = 32 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 */
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_or)

	ENTRY_NP(sfmmu_fixup_shiftx)
	/*
	 * %o0 = start of patch area
	 * %o4 = signed int immediate value to add to sllx/srlx imm field
	 * %o2, %o3 are used as scratch registers.
	 *
	 * sllx/srlx store the 6 bit immediate value in the lowest order bits
	 * so we do a simple add.  The caller must be careful to prevent
	 * overflow, which could easily occur if the initial value is nonzero!
	 */
	ld	[%o0], %o3			! %o3 = instruction to patch
	and	%o3, 0x3f, %o2			! %o2 = existing imm value
	add	%o2, %o4, %o2			! %o2 = new imm value
	andn	%o3, 0x3f, %o3			! clear old imm value
	and	%o2, 0x3f, %o2			! truncate new imm value
	or	%o3, %o2, %o3			! set new imm value
	st	%o3, [%o0]			! store updated instruction
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_shiftx)

	ENTRY_NP(sfmmu_fixup_mmu_asi)
	/*
	 * Patch imm_asi of all ldda instructions in the MMU
	 * trap handlers.  We search MMU_PATCH_INSTR instructions
	 * starting from the itlb miss handler (trap 0x64).
	 * %o0 = address of tt[0,1]_itlbmiss
	 * %o1 = imm_asi to setup, shifted by appropriate offset.
	 * %o3 = number of instructions to search
	 * %o4 = reserved by caller: called from leaf routine
	 */
1:	ldsw	[%o0], %o2			! load instruction to %o2
	brgez,pt %o2, 2f
	  srl	%o2, 30, %o5
	btst	1, %o5				! test bit 30; skip if not set
	bz,pt	%icc, 2f
	  sllx	%o2, 39, %o5			! bit 24 -> bit 63
	srlx	%o5, 58, %o5			! isolate op3 part of opcode
	xor	%o5, 0x13, %o5			! 01 0011 binary == ldda
	brnz,pt	%o5, 2f				! skip if not a match
	  or	%o2, %o1, %o2			! or in imm_asi
	st	%o2, [%o0]			! write patched instruction
2:	dec	%o3
	brnz,a,pt %o3, 1b			! loop until we're done
	  add	%o0, I_SIZE, %o0
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_mmu_asi)
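
/*
 * The ldda test above, in C (illustrative sketch): a load/store-format
 * instruction has bits 31:30 == 11, and ldda's op3 field (bits 24:19)
 * is 0x13.
 *
 *	if (((instr >> 30) & 0x3) == 0x3 &&
 *	    ((instr >> 19) & 0x3f) == 0x13) {
 *		instr |= imm_asi;	// patch the imm_asi field
 *		*patchp = instr;
 *	}
 */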

	/*
	 * Patch immediate ASI used to access the TSB in the
	 * trap table.
	 * inputs: %o0 = value of ktsb_phys
	 */
	ENTRY_NP(sfmmu_patch_mmu_asi)
	mov	%o7, %o4			! save return pc in %o4
	mov	ASI_QUAD_LDD_PHYS, %o3		! set QUAD_LDD_PHYS by default

#ifdef sun4v

	/*
	 * Check ktsb_phys. It must be non-zero for sun4v; panic if not.
	 */

	brnz,pt %o0, do_patch
	nop

	sethi	%hi(sfmmu_panic11), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic11), %o0
do_patch:

#else /* sun4v */
	/*
	 * Some non-sun4v platforms deploy a virtual ktsb (ktsb_phys==0).
	 * Note that ASI_NQUAD_LD is not defined/used for sun4v.
	 */
	movrz	%o0, ASI_NQUAD_LD, %o3

#endif /* sun4v */

	sll	%o3, 5, %o1			! imm_asi offset
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb miss
	  or	%o0, %lo(dktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb4m miss
	  or	%o0, %lo(dktsb4m), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb miss
	  or	%o0, %lo(iktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb4m miss
	  or	%o0, %lo(iktsb4m), %o0
	mov	%o4, %o7			! restore return pc -- leaf
	retl
	nop
	SET_SIZE(sfmmu_patch_mmu_asi)


	ENTRY_NP(sfmmu_patch_ktsb)
	/*
	 * We need to fix iktsb, dktsb, et al.
	 */
	save	%sp, -SA(MINFRAME), %sp
	set	ktsb_phys, %o1
	ld	[%o1], %o4
	set	ktsb_base, %o5
	set	ktsb4m_base, %l1
	brz,pt	%o4, 1f
	  nop
	set	ktsb_pbase, %o5
	set	ktsb4m_pbase, %l1
1:
	sethi	%hi(ktsb_szcode), %o1
	ld	[%o1 + %lo(ktsb_szcode)], %o1	/* %o1 = ktsb size code */

	sethi	%hi(iktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb), %o0

	sethi	%hi(dktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb), %o0

	sethi	%hi(ktsb4m_szcode), %o1
	ld	[%o1 + %lo(ktsb4m_szcode)], %o1	/* %o1 = ktsb4m size code */

	sethi	%hi(iktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb4m), %o0

	sethi	%hi(dktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb4m), %o0

#ifndef sun4v
	mov	ASI_N, %o2
	movrnz	%o4, ASI_MEM, %o2	! setup kernel 32bit ASI to patch
	mov	%o2, %o4		! sfmmu_fixup_or needs this in %o4
	sethi	%hi(tsb_kernel_patch_asi), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(tsb_kernel_patch_asi), %o0
#endif /* !sun4v */

	ldx	[%o5], %o4		! load ktsb base addr (VA or PA)

	sethi	%hi(dktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(dktsbbase), %o0

	sethi	%hi(iktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(iktsbbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
#endif /* sun4v */

	ldx	[%l1], %o4		! load ktsb4m base addr (VA or PA)

	sethi	%hi(dktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(dktsb4mbase), %o0

	sethi	%hi(iktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(iktsb4mbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
#endif /* sun4v */

	set	ktsb_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
#endif /* sun4v */
12960Sstevel@tonic-gate#endif /* sun4v */
12970Sstevel@tonic-gate
12980Sstevel@tonic-gate	set	ktsb4m_szcode, %o4
12990Sstevel@tonic-gate	ld	[%o4], %o4
13000Sstevel@tonic-gate	sethi	%hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
13010Sstevel@tonic-gate	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
13020Sstevel@tonic-gate	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0
13030Sstevel@tonic-gate
13040Sstevel@tonic-gate#ifdef sun4v
13050Sstevel@tonic-gate	sethi	%hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
13060Sstevel@tonic-gate	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
13070Sstevel@tonic-gate	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
13080Sstevel@tonic-gate#endif /* sun4v */
13090Sstevel@tonic-gate
13100Sstevel@tonic-gate	ret
13110Sstevel@tonic-gate	restore
13120Sstevel@tonic-gate	SET_SIZE(sfmmu_patch_ktsb)
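
/*
 * The flow of sfmmu_patch_ktsb above, as a hedged C sketch.  The
 * fixup_*() helpers are hypothetical stand-ins for the sfmmu_fixup_*
 * leaf routines, and only the main patch sites are shown:
 *
 *	uint64_t base = ktsb_phys ? ktsb_pbase : ktsb_base;
 *	uint64_t base4m = ktsb_phys ? ktsb4m_pbase : ktsb4m_base;
 *
 *	fix_ktlb_traptable(iktsb, ktsb_szcode);
 *	fix_ktlb_traptable(dktsb, ktsb_szcode);
 *	fix_ktlb_traptable(iktsb4m, ktsb4m_szcode);
 *	fix_ktlb_traptable(dktsb4m, ktsb4m_szcode);
 *
 *	fixup_setx(dktsbbase, base);		// also iktsbbase, kprot
 *	fixup_setx(dktsb4mbase, base4m);	// also iktsb4mbase, kprot
 *	fixup_or(sfmmu_kprot_patch_ktsb_szcode, ktsb_szcode);
 *	fixup_or(sfmmu_kprot_patch_ktsb4m_szcode, ktsb4m_szcode);
 */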
13130Sstevel@tonic-gate
13140Sstevel@tonic-gate	ENTRY_NP(sfmmu_kpm_patch_tlbm)
13150Sstevel@tonic-gate	/*
13160Sstevel@tonic-gate	 * Fix up trap handlers in the common segkpm case.  This is reserved
13170Sstevel@tonic-gate	 * for future use should kpm TSB be changed to be other than the
13180Sstevel@tonic-gate	 * kernel TSB.
13190Sstevel@tonic-gate	 */
13200Sstevel@tonic-gate	retl
13210Sstevel@tonic-gate	nop
13220Sstevel@tonic-gate	SET_SIZE(sfmmu_kpm_patch_tlbm)
13230Sstevel@tonic-gate
13240Sstevel@tonic-gate	ENTRY_NP(sfmmu_kpm_patch_tsbm)
13250Sstevel@tonic-gate	/*
13260Sstevel@tonic-gate	 * nop the branch to sfmmu_kpm_dtsb_miss_small
13270Sstevel@tonic-gate	 * in the case where we are using large pages for
13280Sstevel@tonic-gate	 * seg_kpm (and hence must probe the second TSB for
13290Sstevel@tonic-gate	 * seg_kpm VAs)
13300Sstevel@tonic-gate	 */
13310Sstevel@tonic-gate	set	dktsb4m_kpmcheck_small, %o0
13320Sstevel@tonic-gate	MAKE_NOP_INSTR(%o1)
13330Sstevel@tonic-gate	st	%o1, [%o0]
13340Sstevel@tonic-gate	flush	%o0
13350Sstevel@tonic-gate	retl
13360Sstevel@tonic-gate	nop
13370Sstevel@tonic-gate	SET_SIZE(sfmmu_kpm_patch_tsbm)
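
/*
 * The store/flush pair above is plain runtime code patching.  A hedged
 * C sketch: SPARC "nop" encodes as 0x01000000 ("sethi 0, %g0"), and the
 * patched address must be flushed so the new instruction is fetched:
 *
 *	extern uint32_t dktsb4m_kpmcheck_small[];	// branch to nop out
 *	void kpm_patch_tsbm(void)
 *	{
 *		dktsb4m_kpmcheck_small[0] = 0x01000000;	// nop
 *		__asm__ __volatile__("flush %0" : :
 *		    "r" (dktsb4m_kpmcheck_small));	// iflush the line
 *	}
 */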
13380Sstevel@tonic-gate
13390Sstevel@tonic-gate	ENTRY_NP(sfmmu_patch_utsb)
13401772Sjl139090#ifdef UTSB_PHYS
13410Sstevel@tonic-gate	retl
13420Sstevel@tonic-gate	nop
13431772Sjl139090#else /* UTSB_PHYS */
13440Sstevel@tonic-gate	/*
13450Sstevel@tonic-gate	 * We need to hot-patch utsb_vabase and utsb4m_vabase
13460Sstevel@tonic-gate	 */
13470Sstevel@tonic-gate	save	%sp, -SA(MINFRAME), %sp
13480Sstevel@tonic-gate
13490Sstevel@tonic-gate	/* patch value of utsb_vabase */
13500Sstevel@tonic-gate	set	utsb_vabase, %o1
13510Sstevel@tonic-gate	ldx	[%o1], %o4
13520Sstevel@tonic-gate	sethi	%hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
13530Sstevel@tonic-gate	call	sfmmu_fixup_setx
13540Sstevel@tonic-gate	  or	%o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
13550Sstevel@tonic-gate	sethi	%hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
13560Sstevel@tonic-gate	call	sfmmu_fixup_setx
13570Sstevel@tonic-gate	  or	%o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
13580Sstevel@tonic-gate	sethi	%hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
13590Sstevel@tonic-gate	call	sfmmu_fixup_setx
13600Sstevel@tonic-gate	  or	%o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
13610Sstevel@tonic-gate
13620Sstevel@tonic-gate	/* patch value of utsb4m_vabase */
13630Sstevel@tonic-gate	set	utsb4m_vabase, %o1
13640Sstevel@tonic-gate	ldx	[%o1], %o4
13650Sstevel@tonic-gate	sethi	%hi(sfmmu_uprot_get_2nd_tsb_base), %o0
13660Sstevel@tonic-gate	call	sfmmu_fixup_setx
13670Sstevel@tonic-gate	  or	%o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
13680Sstevel@tonic-gate	sethi	%hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
13690Sstevel@tonic-gate	call	sfmmu_fixup_setx
13700Sstevel@tonic-gate	  or	%o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
13710Sstevel@tonic-gate	sethi	%hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
13720Sstevel@tonic-gate	call	sfmmu_fixup_setx
13730Sstevel@tonic-gate	  or	%o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0
13740Sstevel@tonic-gate
13750Sstevel@tonic-gate	/*
13760Sstevel@tonic-gate	 * Patch TSB base register masks and shifts if needed.
13770Sstevel@tonic-gate	 * By default the TSB base register contents are set up for a 4M slab.
13780Sstevel@tonic-gate	 * If we're using a smaller slab size and reserved VA range, we need
13790Sstevel@tonic-gate	 * to patch up those values here.
13800Sstevel@tonic-gate	 */
13810Sstevel@tonic-gate	set	tsb_slab_shift, %o1
13820Sstevel@tonic-gate	set	MMU_PAGESHIFT4M, %o4
13834528Spaulsan	lduw	[%o1], %o3
13840Sstevel@tonic-gate	subcc	%o4, %o3, %o4
13850Sstevel@tonic-gate	bz,pt	%icc, 1f
13860Sstevel@tonic-gate	  /* delay slot safe */
13870Sstevel@tonic-gate
13880Sstevel@tonic-gate	/* patch reserved VA range size if needed. */
13890Sstevel@tonic-gate	sethi	%hi(sfmmu_tsb_1st_resv_offset), %o0
13900Sstevel@tonic-gate	call	sfmmu_fixup_shiftx
13910Sstevel@tonic-gate	  or	%o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
13920Sstevel@tonic-gate	call	sfmmu_fixup_shiftx
13930Sstevel@tonic-gate	  add	%o0, I_SIZE, %o0
13940Sstevel@tonic-gate	sethi	%hi(sfmmu_tsb_2nd_resv_offset), %o0
13950Sstevel@tonic-gate	call	sfmmu_fixup_shiftx
13960Sstevel@tonic-gate	  or	%o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
13970Sstevel@tonic-gate	call	sfmmu_fixup_shiftx
13980Sstevel@tonic-gate	  add	%o0, I_SIZE, %o0
13990Sstevel@tonic-gate1:
14000Sstevel@tonic-gate	/* patch TSBREG_VAMASK used to set up TSB base register */
14010Sstevel@tonic-gate	set	tsb_slab_mask, %o1
14024528Spaulsan	ldx	[%o1], %o4
14030Sstevel@tonic-gate	sethi	%hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
14040Sstevel@tonic-gate	call	sfmmu_fixup_or
14050Sstevel@tonic-gate	  or	%o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
14060Sstevel@tonic-gate	sethi	%hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
14070Sstevel@tonic-gate	call	sfmmu_fixup_or
14080Sstevel@tonic-gate	  or	%o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0
14090Sstevel@tonic-gate
14100Sstevel@tonic-gate	ret
14110Sstevel@tonic-gate	restore
14121772Sjl139090#endif /* UTSB_PHYS */
14130Sstevel@tonic-gate	SET_SIZE(sfmmu_patch_utsb)
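
/*
 * A hedged C sketch of the slab-size decision above (fixup_shiftx/
 * fixup_or are hypothetical stand-ins for the sfmmu_fixup_* routines):
 *
 *	int delta = MMU_PAGESHIFT4M - tsb_slab_shift;
 *	if (delta != 0) {
 *		// slab smaller than 4M: adjust the shifts that compute
 *		// the reserved VA range offsets (two instructions per
 *		// site, hence the second call at site + I_SIZE above).
 *		fixup_shiftx(sfmmu_tsb_1st_resv_offset, delta);
 *		fixup_shiftx(sfmmu_tsb_2nd_resv_offset, delta);
 *	}
 *	// always patch the VA mask used to build the TSB base register
 *	fixup_or(sfmmu_tsb_1st_tsbreg_vamask, tsb_slab_mask);
 *	fixup_or(sfmmu_tsb_2nd_tsbreg_vamask, tsb_slab_mask);
 */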
14140Sstevel@tonic-gate
14154528Spaulsan	ENTRY_NP(sfmmu_patch_shctx)
14164528Spaulsan#ifdef sun4u
14174528Spaulsan	retl
14184528Spaulsan	  nop
14194528Spaulsan#else /* sun4u */
14204528Spaulsan	set	sfmmu_shctx_cpu_mondo_patch, %o0
14214528Spaulsan	MAKE_JMP_INSTR(5, %o1, %o2)	! jmp       %g5
14224528Spaulsan	st	%o1, [%o0]
14234528Spaulsan	flush	%o0
14244528Spaulsan	MAKE_NOP_INSTR(%o1)
14254528Spaulsan	add	%o0, I_SIZE, %o0	! next instr
14264528Spaulsan	st	%o1, [%o0]
14274528Spaulsan	flush	%o0
14284528Spaulsan
14294528Spaulsan	set	sfmmu_shctx_user_rtt_patch, %o0
14304528Spaulsan	st      %o1, [%o0]		! nop 1st instruction
14314528Spaulsan	flush	%o0
14324528Spaulsan	add     %o0, I_SIZE, %o0
14334528Spaulsan	st      %o1, [%o0]		! nop 2nd instruction
14344528Spaulsan	flush	%o0
14354528Spaulsan	add     %o0, I_SIZE, %o0
14364528Spaulsan	st      %o1, [%o0]		! nop 3rd instruction
14374528Spaulsan	flush	%o0
14384528Spaulsan	add     %o0, I_SIZE, %o0
14394528Spaulsan	st      %o1, [%o0]		! nop 4th instruction
14408574SJason.Beloro@Sun.COM	flush	%o0
14418574SJason.Beloro@Sun.COM	add     %o0, I_SIZE, %o0
14428574SJason.Beloro@Sun.COM	st      %o1, [%o0]		! nop 5th instruction
14438574SJason.Beloro@Sun.COM	flush	%o0
14448574SJason.Beloro@Sun.COM	add     %o0, I_SIZE, %o0
14458574SJason.Beloro@Sun.COM	st      %o1, [%o0]		! nop 6th instruction
14467718SJason.Beloro@Sun.COM	retl
14474528Spaulsan	flush	%o0
14484528Spaulsan#endif /* sun4u */
14494528Spaulsan	SET_SIZE(sfmmu_patch_shctx)
14500Sstevel@tonic-gate
14510Sstevel@tonic-gate	/*
14520Sstevel@tonic-gate	 * Routine that loads an entry into a tsb using virtual addresses.
14530Sstevel@tonic-gate	 * Locking is required since all cpus can use the same TSB.
14540Sstevel@tonic-gate	 * Note that it is no longer required to have a valid context
14550Sstevel@tonic-gate	 * when calling this function.
14560Sstevel@tonic-gate	 */
14570Sstevel@tonic-gate	ENTRY_NP(sfmmu_load_tsbe)
14580Sstevel@tonic-gate	/*
14590Sstevel@tonic-gate	 * %o0 = pointer to tsbe to load
14600Sstevel@tonic-gate	 * %o1 = tsb tag
14610Sstevel@tonic-gate	 * %o2 = virtual pointer to TTE
14620Sstevel@tonic-gate	 * %o3 = 1 if physical address in %o0 else 0
14630Sstevel@tonic-gate	 */
14640Sstevel@tonic-gate	rdpr	%pstate, %o5
14650Sstevel@tonic-gate#ifdef DEBUG
14662241Shuah	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l2, %g1)
14670Sstevel@tonic-gate#endif /* DEBUG */
14680Sstevel@tonic-gate
14690Sstevel@tonic-gate	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */
14700Sstevel@tonic-gate
14710Sstevel@tonic-gate	SETUP_TSB_ASI(%o3, %g3)
14728187SPaul.Sandhu@Sun.COM	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, locked_tsb_l8)
14730Sstevel@tonic-gate
14740Sstevel@tonic-gate	wrpr	%g0, %o5, %pstate		/* enable interrupts */
14750Sstevel@tonic-gate
14760Sstevel@tonic-gate	retl
14770Sstevel@tonic-gate	membar	#StoreStore|#StoreLoad
14780Sstevel@tonic-gate	SET_SIZE(sfmmu_load_tsbe)
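
/*
 * A hedged C sketch of the locked update TSB_UPDATE performs (field and
 * constant names are illustrative).  The tag word doubles as the lock:
 * it is swapped to a "locked" pattern, the TTE data is stored, and the
 * real tag is written last so a concurrent probe never sees a
 * half-written entry:
 *
 *	void tsb_update(struct tsbe *e, uint64_t tag, uint64_t tte)
 *	{
 *		uint64_t old;
 *		do {
 *			old = e->tag;
 *		} while (old == TSBTAG_LOCKED ||
 *		    atomic_cas_64(&e->tag, old, TSBTAG_LOCKED) != old);
 *		e->data = tte;
 *		membar_producer();	// order data before tag
 *		e->tag = tag;		// publish and unlock
 *	}
 *
 * Interrupts are disabled across the update, which keeps the window in
 * which the entry sits locked short.
 */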
14790Sstevel@tonic-gate
14800Sstevel@tonic-gate	/*
14810Sstevel@tonic-gate	 * Flush TSB of a given entry if the tag matches.
14820Sstevel@tonic-gate	 */
14830Sstevel@tonic-gate	ENTRY(sfmmu_unload_tsbe)
14840Sstevel@tonic-gate	/*
14850Sstevel@tonic-gate	 * %o0 = pointer to tsbe to be flushed
14860Sstevel@tonic-gate	 * %o1 = tag to match
14870Sstevel@tonic-gate	 * %o2 = 1 if physical address in %o0 else 0
14880Sstevel@tonic-gate	 */
14890Sstevel@tonic-gate	SETUP_TSB_ASI(%o2, %g1)
14900Sstevel@tonic-gate	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
14910Sstevel@tonic-gate	retl
14920Sstevel@tonic-gate	membar	#StoreStore|#StoreLoad
14930Sstevel@tonic-gate	SET_SIZE(sfmmu_unload_tsbe)
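
/*
 * TSB_INVALIDATE, sketched in C (hedged; names illustrative).  The tag
 * is swapped to the invalid pattern only while it still matches, so a
 * concurrent reload of a different VA into the slot is left alone:
 *
 *	void tsb_invalidate(struct tsbe *e, uint64_t tag)
 *	{
 *		uint64_t old = e->tag;
 *		while (old == tag) {
 *			uint64_t seen = atomic_cas_64(&e->tag, old,
 *			    TSBTAG_INVALID);
 *			if (seen == old)
 *				break;		// invalidated
 *			old = seen;		// lost a race; re-check
 *		}
 *	}
 */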
14940Sstevel@tonic-gate
14950Sstevel@tonic-gate	/*
14960Sstevel@tonic-gate	 * Routine that loads a TTE into the kpm TSB from C code.
14970Sstevel@tonic-gate	 * Locking is required since kpm TSB is shared among all CPUs.
14980Sstevel@tonic-gate	 */
14990Sstevel@tonic-gate	ENTRY_NP(sfmmu_kpm_load_tsb)
15000Sstevel@tonic-gate	/*
15010Sstevel@tonic-gate	 * %o0 = vaddr
15020Sstevel@tonic-gate	 * %o1 = ttep
15030Sstevel@tonic-gate	 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
15040Sstevel@tonic-gate	 */
15050Sstevel@tonic-gate	rdpr	%pstate, %o5			! %o5 = saved pstate
15060Sstevel@tonic-gate#ifdef DEBUG
15072241Shuah	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l3, %g1)
15080Sstevel@tonic-gate#endif /* DEBUG */
15090Sstevel@tonic-gate	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts
15100Sstevel@tonic-gate
15110Sstevel@tonic-gate#ifndef sun4v
15120Sstevel@tonic-gate	sethi	%hi(ktsb_phys), %o4
15130Sstevel@tonic-gate	mov	ASI_N, %o3
15140Sstevel@tonic-gate	ld	[%o4 + %lo(ktsb_phys)], %o4
15150Sstevel@tonic-gate	movrnz	%o4, ASI_MEM, %o3
15160Sstevel@tonic-gate	mov	%o3, %asi
15176127Ssm142603#endif /* !sun4v */
15180Sstevel@tonic-gate	mov	%o0, %g1			! %g1 = vaddr
15190Sstevel@tonic-gate
15200Sstevel@tonic-gate	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
15210Sstevel@tonic-gate	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
15220Sstevel@tonic-gate	/* %g2 = tsbep, %g1 clobbered */
15230Sstevel@tonic-gate
15240Sstevel@tonic-gate	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
15250Sstevel@tonic-gate	/* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
15268187SPaul.Sandhu@Sun.COM	TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, locked_tsb_l9)
15270Sstevel@tonic-gate
15280Sstevel@tonic-gate	wrpr	%g0, %o5, %pstate		! enable interrupts
15290Sstevel@tonic-gate	retl
15300Sstevel@tonic-gate	  membar #StoreStore|#StoreLoad
15310Sstevel@tonic-gate	SET_SIZE(sfmmu_kpm_load_tsb)
15320Sstevel@tonic-gate
15330Sstevel@tonic-gate	/*
15340Sstevel@tonic-gate	 * Routine that shoots down a TTE in the kpm TSB or in the
15350Sstevel@tonic-gate	 * kernel TSB depending on virtpg. Locking is required since
15360Sstevel@tonic-gate	 * kpm/kernel TSB is shared among all CPUs.
15370Sstevel@tonic-gate	 */
15380Sstevel@tonic-gate	ENTRY_NP(sfmmu_kpm_unload_tsb)
15390Sstevel@tonic-gate	/*
15400Sstevel@tonic-gate	 * %o0 = vaddr
15410Sstevel@tonic-gate	 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
15420Sstevel@tonic-gate	 */
15430Sstevel@tonic-gate#ifndef sun4v
15440Sstevel@tonic-gate	sethi	%hi(ktsb_phys), %o4
15450Sstevel@tonic-gate	mov	ASI_N, %o3
15460Sstevel@tonic-gate	ld	[%o4 + %lo(ktsb_phys)], %o4
15470Sstevel@tonic-gate	movrnz	%o4, ASI_MEM, %o3
15480Sstevel@tonic-gate	mov	%o3, %asi
15496127Ssm142603#endif /* !sun4v */
15500Sstevel@tonic-gate	mov	%o0, %g1			! %g1 = vaddr
15510Sstevel@tonic-gate
15520Sstevel@tonic-gate	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
15530Sstevel@tonic-gate	GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
15540Sstevel@tonic-gate	/* %g2 = tsbep, %g1 clobbered */
15550Sstevel@tonic-gate
15560Sstevel@tonic-gate	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
15570Sstevel@tonic-gate	/* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
15580Sstevel@tonic-gate	TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)
15590Sstevel@tonic-gate
15600Sstevel@tonic-gate	retl
15610Sstevel@tonic-gate	  membar	#StoreStore|#StoreLoad
15620Sstevel@tonic-gate	SET_SIZE(sfmmu_kpm_unload_tsb)
15630Sstevel@tonic-gate
15640Sstevel@tonic-gate#endif /* lint */
15650Sstevel@tonic-gate
15660Sstevel@tonic-gate
15670Sstevel@tonic-gate#if defined (lint)
15680Sstevel@tonic-gate
15690Sstevel@tonic-gate/*ARGSUSED*/
15700Sstevel@tonic-gatepfn_t
15710Sstevel@tonic-gatesfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
15720Sstevel@tonic-gate{ return(0); }
15730Sstevel@tonic-gate
15740Sstevel@tonic-gate#else /* lint */
15750Sstevel@tonic-gate
15760Sstevel@tonic-gate	ENTRY_NP(sfmmu_ttetopfn)
15770Sstevel@tonic-gate	ldx	[%o0], %g1			/* read tte */
15780Sstevel@tonic-gate	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
15790Sstevel@tonic-gate	/*
15800Sstevel@tonic-gate	 * g1 = pfn
15810Sstevel@tonic-gate	 */
15820Sstevel@tonic-gate	retl
15830Sstevel@tonic-gate	mov	%g1, %o0
15840Sstevel@tonic-gate	SET_SIZE(sfmmu_ttetopfn)
15850Sstevel@tonic-gate
15860Sstevel@tonic-gate#endif /* !lint */
15870Sstevel@tonic-gate
15880Sstevel@tonic-gate/*
15890Sstevel@tonic-gate * These macros are used to update global sfmmu hme hash statistics
15900Sstevel@tonic-gate * in perf-critical paths. They are only enabled in debug kernels or
15910Sstevel@tonic-gate * if SFMMU_STAT_GATHER is defined.
15920Sstevel@tonic-gate */
15930Sstevel@tonic-gate#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
15940Sstevel@tonic-gate#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
15950Sstevel@tonic-gate	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
15960Sstevel@tonic-gate	mov	HATSTAT_KHASH_SEARCH, tmp2				;\
15970Sstevel@tonic-gate	cmp	tmp1, hatid						;\
15980Sstevel@tonic-gate	movne	%ncc, HATSTAT_UHASH_SEARCH, tmp2			;\
15990Sstevel@tonic-gate	set	sfmmu_global_stat, tmp1					;\
16000Sstevel@tonic-gate	add	tmp1, tmp2, tmp1					;\
16010Sstevel@tonic-gate	ld	[tmp1], tmp2						;\
16020Sstevel@tonic-gate	inc	tmp2							;\
16030Sstevel@tonic-gate	st	tmp2, [tmp1]
16040Sstevel@tonic-gate
16050Sstevel@tonic-gate#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
16060Sstevel@tonic-gate	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
16070Sstevel@tonic-gate	mov	HATSTAT_KHASH_LINKS, tmp2				;\
16080Sstevel@tonic-gate	cmp	tmp1, hatid						;\
16090Sstevel@tonic-gate	movne	%ncc, HATSTAT_UHASH_LINKS, tmp2				;\
16100Sstevel@tonic-gate	set	sfmmu_global_stat, tmp1					;\
16110Sstevel@tonic-gate	add	tmp1, tmp2, tmp1					;\
16120Sstevel@tonic-gate	ld	[tmp1], tmp2						;\
16130Sstevel@tonic-gate	inc	tmp2							;\
16140Sstevel@tonic-gate	st	tmp2, [tmp1]
16150Sstevel@tonic-gate
16160Sstevel@tonic-gate
16170Sstevel@tonic-gate#else /* DEBUG || SFMMU_STAT_GATHER */
16180Sstevel@tonic-gate
16190Sstevel@tonic-gate#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)
16200Sstevel@tonic-gate
16210Sstevel@tonic-gate#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)
16220Sstevel@tonic-gate
16230Sstevel@tonic-gate#endif  /* DEBUG || SFMMU_STAT_GATHER */
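
/*
 * In C, the two macros above amount to the following hedged sketch
 * (counter names simplified); the kernel hat is told apart from user
 * hats by comparing against the cached khatid:
 *
 *	if (hatid == tsbmiss->khatid)
 *		sfmmu_global_stat.sf_khash_searches++;	// or _links
 *	else
 *		sfmmu_global_stat.sf_uhash_searches++;	// or _links
 */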
16240Sstevel@tonic-gate
16250Sstevel@tonic-gate/*
16260Sstevel@tonic-gate * This macro is used to update global sfmmu kstats in
16270Sstevel@tonic-gate * non-perf-critical areas, so it is enabled all the time.
16280Sstevel@tonic-gate */
16290Sstevel@tonic-gate#define	HAT_GLOBAL_STAT(statname, tmp1, tmp2)				\
16300Sstevel@tonic-gate	sethi	%hi(sfmmu_global_stat), tmp1				;\
16310Sstevel@tonic-gate	add	tmp1, statname, tmp1					;\
16320Sstevel@tonic-gate	ld	[tmp1 + %lo(sfmmu_global_stat)], tmp2			;\
16330Sstevel@tonic-gate	inc	tmp2							;\
16340Sstevel@tonic-gate	st	tmp2, [tmp1 + %lo(sfmmu_global_stat)]
16350Sstevel@tonic-gate
16360Sstevel@tonic-gate/*
16370Sstevel@tonic-gate * These macros are used to update per-cpu stats in non-perf-critical
16380Sstevel@tonic-gate * areas, so they are enabled all the time.
16390Sstevel@tonic-gate */
16400Sstevel@tonic-gate#define	HAT_PERCPU_STAT32(tsbarea, stat, tmp1)				\
16410Sstevel@tonic-gate	ld	[tsbarea + stat], tmp1					;\
16420Sstevel@tonic-gate	inc	tmp1							;\
16430Sstevel@tonic-gate	st	tmp1, [tsbarea + stat]
16440Sstevel@tonic-gate
16450Sstevel@tonic-gate/*
16460Sstevel@tonic-gate * These macros are used to update per-cpu stats in non-perf-critical
16470Sstevel@tonic-gate * areas, so they are enabled all the time.
16480Sstevel@tonic-gate */
16490Sstevel@tonic-gate#define	HAT_PERCPU_STAT16(tsbarea, stat, tmp1)				\
16500Sstevel@tonic-gate	lduh	[tsbarea + stat], tmp1					;\
16510Sstevel@tonic-gate	inc	tmp1							;\
16520Sstevel@tonic-gate	stuh	tmp1, [tsbarea + stat]
16530Sstevel@tonic-gate
16540Sstevel@tonic-gate#if defined(KPM_TLBMISS_STATS_GATHER)
16550Sstevel@tonic-gate	/*
16560Sstevel@tonic-gate	 * Count kpm dtlb misses separately to allow a different
16570Sstevel@tonic-gate	 * evaluation of hme and kpm tlbmisses. kpm tsb hits can
16580Sstevel@tonic-gate	 * be calculated as (kpm_dtlb_misses - kpm_tsb_misses).
16590Sstevel@tonic-gate	 */
16600Sstevel@tonic-gate#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)		\
16610Sstevel@tonic-gate	brgez	tagacc, label	/* KPM VA? */				;\
16620Sstevel@tonic-gate	nop								;\
16630Sstevel@tonic-gate	CPU_INDEX(tmp1, tsbma)						;\
16640Sstevel@tonic-gate	sethi	%hi(kpmtsbm_area), tsbma				;\
16650Sstevel@tonic-gate	sllx	tmp1, KPMTSBM_SHIFT, tmp1				;\
16660Sstevel@tonic-gate	or	tsbma, %lo(kpmtsbm_area), tsbma				;\
16670Sstevel@tonic-gate	add	tsbma, tmp1, tsbma		/* kpmtsbm area */	;\
16680Sstevel@tonic-gate	/* VA range check */						;\
16690Sstevel@tonic-gate	ldx	[tsbma + KPMTSBM_VBASE], val				;\
16700Sstevel@tonic-gate	cmp	tagacc, val						;\
16710Sstevel@tonic-gate	blu,pn	%xcc, label						;\
16720Sstevel@tonic-gate	  ldx	[tsbma + KPMTSBM_VEND], tmp1				;\
16730Sstevel@tonic-gate	cmp	tagacc, tmp1						;\
16740Sstevel@tonic-gate	bgeu,pn	%xcc, label						;\
16750Sstevel@tonic-gate	  lduw	[tsbma + KPMTSBM_DTLBMISS], val				;\
16760Sstevel@tonic-gate	inc	val							;\
16770Sstevel@tonic-gate	st	val, [tsbma + KPMTSBM_DTLBMISS]				;\
16780Sstevel@tonic-gatelabel:
16790Sstevel@tonic-gate#else
16800Sstevel@tonic-gate#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
16810Sstevel@tonic-gate#endif	/* KPM_TLBMISS_STATS_GATHER */
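
/*
 * A hedged C sketch of KPM_TLBMISS_STAT_INCR (field names illustrative;
 * kpmtsbm_area is indexed by CPU id):
 *
 *	struct kpmtsbm *m = &kpmtsbm_area[CPU->cpu_id];
 *	if ((int64_t)tagacc < 0 &&		// kpm VAs have bit 63 set
 *	    tagacc >= m->vbase && tagacc < m->vend)
 *		m->kpm_dtlb_misses++;
 *
 * With kpm_tsb_misses counted in the miss handler, kpm TSB hits can
 * then be derived as kpm_dtlb_misses - kpm_tsb_misses.
 */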
16820Sstevel@tonic-gate
16830Sstevel@tonic-gate#if defined (lint)
16840Sstevel@tonic-gate/*
16850Sstevel@tonic-gate * The following routines are jumped to from the mmu trap handlers to do
16860Sstevel@tonic-gate * the setting up to call systrap.  They are separate routines instead of
16870Sstevel@tonic-gate * being part of the handlers because the handlers would exceed 32
16880Sstevel@tonic-gate * instructions, and since this is part of the slow path, the jump
16890Sstevel@tonic-gate * cost is irrelevant.
16900Sstevel@tonic-gate */
16910Sstevel@tonic-gatevoid
16920Sstevel@tonic-gatesfmmu_pagefault(void)
16930Sstevel@tonic-gate{
16940Sstevel@tonic-gate}
16950Sstevel@tonic-gate
16960Sstevel@tonic-gatevoid
16970Sstevel@tonic-gatesfmmu_mmu_trap(void)
16980Sstevel@tonic-gate{
16990Sstevel@tonic-gate}
17000Sstevel@tonic-gate
17010Sstevel@tonic-gatevoid
17020Sstevel@tonic-gatesfmmu_window_trap(void)
17030Sstevel@tonic-gate{
17040Sstevel@tonic-gate}
17050Sstevel@tonic-gate
17060Sstevel@tonic-gatevoid
17070Sstevel@tonic-gatesfmmu_kpm_exception(void)
17080Sstevel@tonic-gate{
17090Sstevel@tonic-gate}
17100Sstevel@tonic-gate
17110Sstevel@tonic-gate#else /* lint */
17120Sstevel@tonic-gate
17130Sstevel@tonic-gate#ifdef	PTL1_PANIC_DEBUG
17140Sstevel@tonic-gate	.seg	".data"
17150Sstevel@tonic-gate	.global	test_ptl1_panic
17160Sstevel@tonic-gatetest_ptl1_panic:
17170Sstevel@tonic-gate	.word	0
17180Sstevel@tonic-gate	.align	8
17190Sstevel@tonic-gate
17200Sstevel@tonic-gate	.seg	".text"
17210Sstevel@tonic-gate	.align	4
17220Sstevel@tonic-gate#endif	/* PTL1_PANIC_DEBUG */
17230Sstevel@tonic-gate
17240Sstevel@tonic-gate
17250Sstevel@tonic-gate	ENTRY_NP(sfmmu_pagefault)
1726526Sarao	SET_GL_REG(1)
17270Sstevel@tonic-gate	USE_ALTERNATE_GLOBALS(%g5)
17280Sstevel@tonic-gate	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
17290Sstevel@tonic-gate	rdpr	%tt, %g6
17300Sstevel@tonic-gate	cmp	%g6, FAST_IMMU_MISS_TT
17310Sstevel@tonic-gate	be,a,pn	%icc, 1f
17320Sstevel@tonic-gate	  mov	T_INSTR_MMU_MISS, %g3
17330Sstevel@tonic-gate	cmp	%g6, T_INSTR_MMU_MISS
17340Sstevel@tonic-gate	be,a,pn	%icc, 1f
17350Sstevel@tonic-gate	  mov	T_INSTR_MMU_MISS, %g3
17360Sstevel@tonic-gate	mov	%g5, %g2
17370Sstevel@tonic-gate	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
17380Sstevel@tonic-gate	cmp	%g6, FAST_DMMU_MISS_TT
17390Sstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
17400Sstevel@tonic-gate	cmp	%g6, T_DATA_MMU_MISS
17410Sstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
17420Sstevel@tonic-gate
17430Sstevel@tonic-gate#ifdef  PTL1_PANIC_DEBUG
17440Sstevel@tonic-gate	/* check if we want to test the tl1 panic */
17450Sstevel@tonic-gate	sethi	%hi(test_ptl1_panic), %g4
17460Sstevel@tonic-gate	ld	[%g4 + %lo(test_ptl1_panic)], %g1
17470Sstevel@tonic-gate	st	%g0, [%g4 + %lo(test_ptl1_panic)]
17480Sstevel@tonic-gate	cmp	%g1, %g0
17490Sstevel@tonic-gate	bne,a,pn %icc, ptl1_panic
17500Sstevel@tonic-gate	  or	%g0, PTL1_BAD_DEBUG, %g1
17510Sstevel@tonic-gate#endif	/* PTL1_PANIC_DEBUG */
17520Sstevel@tonic-gate1:
17530Sstevel@tonic-gate	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
17540Sstevel@tonic-gate	/*
17550Sstevel@tonic-gate	 * g2 = tag access reg
17560Sstevel@tonic-gate	 * g3.l = type
17570Sstevel@tonic-gate	 * g3.h = 0
17580Sstevel@tonic-gate	 */
17590Sstevel@tonic-gate	sethi	%hi(trap), %g1
17600Sstevel@tonic-gate	or	%g1, %lo(trap), %g1
17610Sstevel@tonic-gate2:
17620Sstevel@tonic-gate	ba,pt	%xcc, sys_trap
17630Sstevel@tonic-gate	  mov	-1, %g4
17640Sstevel@tonic-gate	SET_SIZE(sfmmu_pagefault)
17650Sstevel@tonic-gate
17660Sstevel@tonic-gate	ENTRY_NP(sfmmu_mmu_trap)
1767526Sarao	SET_GL_REG(1)
17680Sstevel@tonic-gate	USE_ALTERNATE_GLOBALS(%g5)
17690Sstevel@tonic-gate	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
17700Sstevel@tonic-gate	rdpr	%tt, %g6
17710Sstevel@tonic-gate	cmp	%g6, FAST_IMMU_MISS_TT
17720Sstevel@tonic-gate	be,a,pn	%icc, 1f
17730Sstevel@tonic-gate	  mov	T_INSTR_MMU_MISS, %g3
17740Sstevel@tonic-gate	cmp	%g6, T_INSTR_MMU_MISS
17750Sstevel@tonic-gate	be,a,pn	%icc, 1f
17760Sstevel@tonic-gate	  mov	T_INSTR_MMU_MISS, %g3
17770Sstevel@tonic-gate	mov	%g5, %g2
17780Sstevel@tonic-gate	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
17790Sstevel@tonic-gate	cmp	%g6, FAST_DMMU_MISS_TT
17800Sstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
17810Sstevel@tonic-gate	cmp	%g6, T_DATA_MMU_MISS
17820Sstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
17830Sstevel@tonic-gate1:
17840Sstevel@tonic-gate	/*
17850Sstevel@tonic-gate	 * g2 = tag access reg
17860Sstevel@tonic-gate	 * g3 = type
17870Sstevel@tonic-gate	 */
17880Sstevel@tonic-gate	sethi	%hi(sfmmu_tsbmiss_exception), %g1
17890Sstevel@tonic-gate	or	%g1, %lo(sfmmu_tsbmiss_exception), %g1
17900Sstevel@tonic-gate	ba,pt	%xcc, sys_trap
17910Sstevel@tonic-gate	  mov	-1, %g4
17920Sstevel@tonic-gate	/*NOTREACHED*/
17930Sstevel@tonic-gate	SET_SIZE(sfmmu_mmu_trap)
17940Sstevel@tonic-gate
17950Sstevel@tonic-gate	ENTRY_NP(sfmmu_suspend_tl)
1796526Sarao	SET_GL_REG(1)
17970Sstevel@tonic-gate	USE_ALTERNATE_GLOBALS(%g5)
17980Sstevel@tonic-gate	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
17990Sstevel@tonic-gate	rdpr	%tt, %g6
18000Sstevel@tonic-gate	cmp	%g6, FAST_IMMU_MISS_TT
18010Sstevel@tonic-gate	be,a,pn	%icc, 1f
18020Sstevel@tonic-gate	  mov	T_INSTR_MMU_MISS, %g3
18030Sstevel@tonic-gate	mov	%g5, %g2
18040Sstevel@tonic-gate	cmp	%g6, FAST_DMMU_MISS_TT
18050Sstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g3
18060Sstevel@tonic-gate	movne	%icc, T_DATA_PROT, %g3
18070Sstevel@tonic-gate1:
18080Sstevel@tonic-gate	sethi	%hi(sfmmu_tsbmiss_suspended), %g1
18090Sstevel@tonic-gate	or	%g1, %lo(sfmmu_tsbmiss_suspended), %g1
18100Sstevel@tonic-gate	/* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
18110Sstevel@tonic-gate	ba,pt	%xcc, sys_trap
18120Sstevel@tonic-gate	  mov	PIL_15, %g4
18130Sstevel@tonic-gate	/*NOTREACHED*/
18140Sstevel@tonic-gate	SET_SIZE(sfmmu_suspend_tl)
18150Sstevel@tonic-gate
18160Sstevel@tonic-gate	/*
18170Sstevel@tonic-gate	 * No %g registers in use at this point.
18180Sstevel@tonic-gate	 */
18190Sstevel@tonic-gate	ENTRY_NP(sfmmu_window_trap)
18200Sstevel@tonic-gate	rdpr	%tpc, %g1
18210Sstevel@tonic-gate#ifdef sun4v
18220Sstevel@tonic-gate#ifdef DEBUG
18230Sstevel@tonic-gate	/* We assume previous %gl was 1 */
18240Sstevel@tonic-gate	rdpr	%tstate, %g4
18250Sstevel@tonic-gate	srlx	%g4, TSTATE_GL_SHIFT, %g4
18260Sstevel@tonic-gate	and	%g4, TSTATE_GL_MASK, %g4
18270Sstevel@tonic-gate	cmp	%g4, 1
18280Sstevel@tonic-gate	bne,a,pn %icc, ptl1_panic
18290Sstevel@tonic-gate	  mov	PTL1_BAD_WTRAP, %g1
18300Sstevel@tonic-gate#endif /* DEBUG */
18310Sstevel@tonic-gate	/* user miss at tl>1. better be the window handler or user_rtt */
18320Sstevel@tonic-gate	/* in user_rtt? */
18330Sstevel@tonic-gate	set	rtt_fill_start, %g4
18340Sstevel@tonic-gate	cmp	%g1, %g4
18350Sstevel@tonic-gate	blu,pn %xcc, 6f
18360Sstevel@tonic-gate	 .empty
18370Sstevel@tonic-gate	set	rtt_fill_end, %g4
18380Sstevel@tonic-gate	cmp	%g1, %g4
18390Sstevel@tonic-gate	bgeu,pn %xcc, 6f
18400Sstevel@tonic-gate	 nop
18410Sstevel@tonic-gate	set	fault_rtt_fn1, %g1
18420Sstevel@tonic-gate	wrpr	%g0, %g1, %tnpc
18430Sstevel@tonic-gate	ba,a	7f
18440Sstevel@tonic-gate6:
18450Sstevel@tonic-gate	! must save this trap level before descending trap stack
18460Sstevel@tonic-gate	! no need to save %tnpc, either overwritten or discarded
18470Sstevel@tonic-gate	! already got it: rdpr	%tpc, %g1
18480Sstevel@tonic-gate	rdpr	%tstate, %g6
18490Sstevel@tonic-gate	rdpr	%tt, %g7
18500Sstevel@tonic-gate	! trap level saved, go get underlying trap type
18510Sstevel@tonic-gate	rdpr	%tl, %g5
18520Sstevel@tonic-gate	sub	%g5, 1, %g3
18530Sstevel@tonic-gate	wrpr	%g3, %tl
18540Sstevel@tonic-gate	rdpr	%tt, %g2
18550Sstevel@tonic-gate	wrpr	%g5, %tl
18560Sstevel@tonic-gate	! restore saved trap level
18570Sstevel@tonic-gate	wrpr	%g1, %tpc
18580Sstevel@tonic-gate	wrpr	%g6, %tstate
18590Sstevel@tonic-gate	wrpr	%g7, %tt
18600Sstevel@tonic-gate#else /* sun4v */
18610Sstevel@tonic-gate	/* user miss at tl>1. better be the window handler */
18620Sstevel@tonic-gate	rdpr	%tl, %g5
18630Sstevel@tonic-gate	sub	%g5, 1, %g3
18640Sstevel@tonic-gate	wrpr	%g3, %tl
18650Sstevel@tonic-gate	rdpr	%tt, %g2
18660Sstevel@tonic-gate	wrpr	%g5, %tl
18670Sstevel@tonic-gate#endif /* sun4v */
18680Sstevel@tonic-gate	and	%g2, WTRAP_TTMASK, %g4
18690Sstevel@tonic-gate	cmp	%g4, WTRAP_TYPE
18700Sstevel@tonic-gate	bne,pn	%xcc, 1f
18710Sstevel@tonic-gate	 nop
18720Sstevel@tonic-gate	/* tpc should be in the trap table */
18730Sstevel@tonic-gate	set	trap_table, %g4
18740Sstevel@tonic-gate	cmp	%g1, %g4
18750Sstevel@tonic-gate	blt,pn %xcc, 1f
18760Sstevel@tonic-gate	 .empty
18770Sstevel@tonic-gate	set	etrap_table, %g4
18780Sstevel@tonic-gate	cmp	%g1, %g4
18790Sstevel@tonic-gate	bge,pn %xcc, 1f
18800Sstevel@tonic-gate	 .empty
18810Sstevel@tonic-gate	andn	%g1, WTRAP_ALIGN, %g1	/* 128 byte aligned */
18820Sstevel@tonic-gate	add	%g1, WTRAP_FAULTOFF, %g1
18830Sstevel@tonic-gate	wrpr	%g0, %g1, %tnpc
18840Sstevel@tonic-gate7:
18850Sstevel@tonic-gate	/*
18860Sstevel@tonic-gate	 * Some wbuf handlers will call systrap to resolve the fault;
18870Sstevel@tonic-gate	 * we pass the trap type so they can figure out the correct parameters.
18880Sstevel@tonic-gate	 * g5 = trap type, g6 = tag access reg
18890Sstevel@tonic-gate	 */
18900Sstevel@tonic-gate
18910Sstevel@tonic-gate	/*
18920Sstevel@tonic-gate	 * only use g5, g6, g7 registers after we have switched to alternate
18930Sstevel@tonic-gate	 * globals.
18940Sstevel@tonic-gate	 */
18950Sstevel@tonic-gate	SET_GL_REG(1)
18960Sstevel@tonic-gate	USE_ALTERNATE_GLOBALS(%g5)
18970Sstevel@tonic-gate	GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
18980Sstevel@tonic-gate	rdpr	%tt, %g7
18990Sstevel@tonic-gate	cmp	%g7, FAST_IMMU_MISS_TT
19000Sstevel@tonic-gate	be,a,pn	%icc, ptl1_panic
19010Sstevel@tonic-gate	  mov	PTL1_BAD_WTRAP, %g1
19020Sstevel@tonic-gate	cmp	%g7, T_INSTR_MMU_MISS
19030Sstevel@tonic-gate	be,a,pn	%icc, ptl1_panic
19040Sstevel@tonic-gate	  mov	PTL1_BAD_WTRAP, %g1
19050Sstevel@tonic-gate	mov	T_DATA_PROT, %g5
19060Sstevel@tonic-gate	cmp	%g7, FAST_DMMU_MISS_TT
19070Sstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g5
19080Sstevel@tonic-gate	cmp	%g7, T_DATA_MMU_MISS
19090Sstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g5
19100Sstevel@tonic-gate	! XXXQ AGS re-check out this one
19110Sstevel@tonic-gate	done
19120Sstevel@tonic-gate1:
19135054Swh94709	CPU_PADDR(%g1, %g4)
19145054Swh94709	add	%g1, CPU_TL1_HDLR, %g1
19155054Swh94709	lda	[%g1]ASI_MEM, %g4
19160Sstevel@tonic-gate	brnz,a,pt %g4, sfmmu_mmu_trap
19175054Swh94709	  sta	%g0, [%g1]ASI_MEM
19180Sstevel@tonic-gate	ba,pt	%icc, ptl1_panic
19190Sstevel@tonic-gate	  mov	PTL1_BAD_TRAP, %g1
19200Sstevel@tonic-gate	SET_SIZE(sfmmu_window_trap)
19210Sstevel@tonic-gate
19220Sstevel@tonic-gate	ENTRY_NP(sfmmu_kpm_exception)
19230Sstevel@tonic-gate	/*
19240Sstevel@tonic-gate	 * We have accessed an unmapped segkpm address or a legal segkpm
19250Sstevel@tonic-gate	 * address that is involved in VAC alias conflict prevention.
19260Sstevel@tonic-gate	 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
19270Sstevel@tonic-gate	 * set. If it is, we will instead note that a fault has occurred
19280Sstevel@tonic-gate	 * by setting CPU_DTRACE_BADADDR and issue a "done" (instead of
19290Sstevel@tonic-gate	 * a "retry"). This will step over the faulting instruction.
19300Sstevel@tonic-gate	 * Note that this means that a legal segkpm address involved in
19310Sstevel@tonic-gate	 * VAC alias conflict prevention (a rare case to begin with)
19320Sstevel@tonic-gate	 * cannot be used in DTrace.
19330Sstevel@tonic-gate	 */
19340Sstevel@tonic-gate	CPU_INDEX(%g1, %g2)
19350Sstevel@tonic-gate	set	cpu_core, %g2
19360Sstevel@tonic-gate	sllx	%g1, CPU_CORE_SHIFT, %g1
19370Sstevel@tonic-gate	add	%g1, %g2, %g1
19380Sstevel@tonic-gate	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
19390Sstevel@tonic-gate	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
19400Sstevel@tonic-gate	bz	0f
19410Sstevel@tonic-gate	or	%g2, CPU_DTRACE_BADADDR, %g2
19420Sstevel@tonic-gate	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
19430Sstevel@tonic-gate	GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
19440Sstevel@tonic-gate	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
19450Sstevel@tonic-gate	done
19460Sstevel@tonic-gate0:
19470Sstevel@tonic-gate	TSTAT_CHECK_TL1(1f, %g1, %g2)
19480Sstevel@tonic-gate1:
1949526Sarao	SET_GL_REG(1)
19500Sstevel@tonic-gate	USE_ALTERNATE_GLOBALS(%g5)
19510Sstevel@tonic-gate	GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
19520Sstevel@tonic-gate	mov	T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
19530Sstevel@tonic-gate	/*
19540Sstevel@tonic-gate	 * g2=tagacc g3.l=type g3.h=0
19550Sstevel@tonic-gate	 */
19560Sstevel@tonic-gate	sethi	%hi(trap), %g1
19570Sstevel@tonic-gate	or	%g1, %lo(trap), %g1
19580Sstevel@tonic-gate	ba,pt	%xcc, sys_trap
19590Sstevel@tonic-gate	mov	-1, %g4
19600Sstevel@tonic-gate	SET_SIZE(sfmmu_kpm_exception)
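
/*
 * The DTrace check above, as a hedged C sketch (the real trap() call
 * takes more arguments; fault_vaddr stands for the MMU fault address):
 *
 *	uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
 *	if (*flags & CPU_DTRACE_NOFAULT) {
 *		*flags |= CPU_DTRACE_BADADDR;
 *		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = fault_vaddr;
 *		return;			// "done": skip the instruction
 *	}
 *	trap(T_DATA_MMU_MISS, tagacc);	// otherwise the normal path
 */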
19610Sstevel@tonic-gate
19620Sstevel@tonic-gate#endif /* lint */
19630Sstevel@tonic-gate
19640Sstevel@tonic-gate#if defined (lint)
19650Sstevel@tonic-gate
19660Sstevel@tonic-gatevoid
19670Sstevel@tonic-gatesfmmu_tsb_miss(void)
19680Sstevel@tonic-gate{
19690Sstevel@tonic-gate}
19700Sstevel@tonic-gate
19710Sstevel@tonic-gatevoid
19720Sstevel@tonic-gatesfmmu_kpm_dtsb_miss(void)
19730Sstevel@tonic-gate{
19740Sstevel@tonic-gate}
19750Sstevel@tonic-gate
19760Sstevel@tonic-gatevoid
19770Sstevel@tonic-gatesfmmu_kpm_dtsb_miss_small(void)
19780Sstevel@tonic-gate{
19790Sstevel@tonic-gate}
19800Sstevel@tonic-gate
19810Sstevel@tonic-gate#else /* lint */
19820Sstevel@tonic-gate
19830Sstevel@tonic-gate#if (IMAP_SEG != 0)
19840Sstevel@tonic-gate#error - ism_map->ism_seg offset is not zero
19850Sstevel@tonic-gate#endif
19860Sstevel@tonic-gate
19870Sstevel@tonic-gate/*
19880Sstevel@tonic-gate * Copies ism mapping for this ctx in param "ism" if this is a ISM
19890Sstevel@tonic-gate * Copies the ism mapping for this ctx into param "ismseg" if this is an
19900Sstevel@tonic-gate * ISM tlb miss, and branches to label "ismhit". If this is not an ISM
19910Sstevel@tonic-gate * process or an ISM tlb miss, it falls through.
19920Sstevel@tonic-gate * Checks to see if the vaddr passed in via tagacc is in an ISM segment for
19930Sstevel@tonic-gate * this process.
19940Sstevel@tonic-gate * If so, it will branch to label "ismhit".  If not, it will fall through.
19950Sstevel@tonic-gate *
19960Sstevel@tonic-gate * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
19970Sstevel@tonic-gate * so that any other threads of this process will not try to walk the ism
19980Sstevel@tonic-gate * maps while they are being changed.
19990Sstevel@tonic-gate *
20000Sstevel@tonic-gate * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
20010Sstevel@tonic-gate *       will make sure of that. This means we can terminate our search on
20020Sstevel@tonic-gate *       the first zero mapping we find.
20030Sstevel@tonic-gate *
20040Sstevel@tonic-gate * Parameters:
20053687Sjb145095 * tagacc	= (pseudo-)tag access register (vaddr + ctx) (in)
20060Sstevel@tonic-gate * tsbmiss	= address of tsb miss area (in)
20070Sstevel@tonic-gate * ismseg	= contents of ism_seg for this ism map (out)
20080Sstevel@tonic-gate * ismhat	= physical address of imap_ismhat for this ism map (out)
20090Sstevel@tonic-gate * tmp1		= scratch reg (CLOBBERED)
20100Sstevel@tonic-gate * tmp2		= scratch reg (CLOBBERED)
20110Sstevel@tonic-gate * tmp3		= scratch reg (CLOBBERED)
20120Sstevel@tonic-gate * label:    temporary labels
20130Sstevel@tonic-gate * ismhit:   label to jump to on an ism dtlb miss
20140Sstevel@tonic-gate * exitlabel: label to jump to if the hat is busy due to hat_unshare.
20150Sstevel@tonic-gate */
20160Sstevel@tonic-gate#define ISM_CHECK(tagacc, tsbmiss, ismseg, ismhat, tmp1, tmp2, tmp3, \
20170Sstevel@tonic-gate	label, ismhit)							\
20180Sstevel@tonic-gate	ldx	[tsbmiss + TSBMISS_ISMBLKPA], tmp1 /* tmp1 = &ismblk */	;\
20190Sstevel@tonic-gate	brlz,pt  tmp1, label/**/3		/* exit if -1 */	;\
20200Sstevel@tonic-gate	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0] */	;\
20210Sstevel@tonic-gatelabel/**/1:								;\
20220Sstevel@tonic-gate	ldxa	[ismhat]ASI_MEM, ismseg	/* ismblk.map[0].ism_seg */	;\
20230Sstevel@tonic-gate	mov	tmp1, tmp3	/* update current ismblkpa head */	;\
20240Sstevel@tonic-gatelabel/**/2:								;\
20250Sstevel@tonic-gate	brz,pt  ismseg, label/**/3		/* no mapping */	;\
20260Sstevel@tonic-gate	  add	ismhat, IMAP_VB_SHIFT, tmp1 /* tmp1 = vb_shift addr */	;\
20274528Spaulsan	lduba	[tmp1]ASI_MEM, tmp1 		/* tmp1 = vb shift*/	;\
20280Sstevel@tonic-gate	srlx	ismseg, tmp1, tmp2		/* tmp2 = vbase */	;\
20290Sstevel@tonic-gate	srlx	tagacc, tmp1, tmp1		/* tmp1 =  va seg*/	;\
20300Sstevel@tonic-gate	sub	tmp1, tmp2, tmp2		/* tmp2 = va - vbase */	;\
20310Sstevel@tonic-gate	add	ismhat, IMAP_SZ_MASK, tmp1 /* tmp1 = sz_mask addr */	;\
20320Sstevel@tonic-gate	lda	[tmp1]ASI_MEM, tmp1		/* tmp1 = sz_mask */	;\
20330Sstevel@tonic-gate	and	ismseg, tmp1, tmp1		/* tmp1 = size */	;\
20340Sstevel@tonic-gate	cmp	tmp2, tmp1		 	/* check va <= offset*/	;\
20350Sstevel@tonic-gate	blu,a,pt  %xcc, ismhit			/* ism hit */		;\
20360Sstevel@tonic-gate	  add	ismhat, IMAP_ISMHAT, ismhat 	/* ismhat = &ism_sfmmu*/ ;\
20370Sstevel@tonic-gate									;\
20380Sstevel@tonic-gate	add	ismhat, ISM_MAP_SZ, ismhat /* ismhat += sizeof(map) */ 	;\
20390Sstevel@tonic-gate	add	tmp3, (IBLK_MAPS + ISM_MAP_SLOTS * ISM_MAP_SZ), tmp1	;\
20400Sstevel@tonic-gate	cmp	ismhat, tmp1						;\
20410Sstevel@tonic-gate	bl,pt	%xcc, label/**/2		/* keep looking  */	;\
20420Sstevel@tonic-gate	  ldxa	[ismhat]ASI_MEM, ismseg	/* ismseg = map[ismhat] */	;\
20430Sstevel@tonic-gate									;\
20440Sstevel@tonic-gate	add	tmp3, IBLK_NEXTPA, tmp1					;\
20450Sstevel@tonic-gate	ldxa	[tmp1]ASI_MEM, tmp1		/* check blk->nextpa */	;\
20460Sstevel@tonic-gate	brgez,pt tmp1, label/**/1		/* continue if not -1*/	;\
20470Sstevel@tonic-gate	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0]*/	;\
20480Sstevel@tonic-gatelabel/**/3:
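
/*
 * The walk ISM_CHECK implements, as a hedged C sketch.  The blocks are
 * really chained by physical address (-1 terminated) and every load
 * goes through ASI_MEM; ordinary pointers are used here for clarity:
 *
 *	for (blk = tsbmiss->ism_blkpa; blk != NULL; blk = blk->iblk_nextpa) {
 *		for (i = 0; i < ISM_MAP_SLOTS; i++) {
 *			ism_map_t *map = &blk->iblk_maps[i];
 *			if (map->imap_seg == 0)
 *				return;		// no holes: fall through
 *			uint64_t vbase = map->imap_seg >> map->imap_vb_shift;
 *			uint64_t seg = tagacc >> map->imap_vb_shift;
 *			if (seg - vbase <
 *			    (map->imap_seg & map->imap_sz_mask))
 *				goto ismhit;	// vaddr is in this segment
 *		}
 *	}
 */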
20490Sstevel@tonic-gate
20500Sstevel@tonic-gate/*
20510Sstevel@tonic-gate * Returns the hme hash bucket (hmebp) given the vaddr and the hatid.
20520Sstevel@tonic-gate * It also returns the virtual pg for vaddr (i.e., vaddr >> hmeshift)
20530Sstevel@tonic-gate * Parameters:
20543687Sjb145095 * tagacc = reg containing virtual address
20550Sstevel@tonic-gate * hatid = reg containing sfmmu pointer
20560Sstevel@tonic-gate * hmeshift = constant/register to shift vaddr to obtain vapg
20570Sstevel@tonic-gate * hmebp = register where bucket pointer will be stored
20580Sstevel@tonic-gate * vapg = register where virtual page will be stored
20590Sstevel@tonic-gate * tmp1, tmp2 = tmp registers
20600Sstevel@tonic-gate */
20610Sstevel@tonic-gate
20620Sstevel@tonic-gate
20630Sstevel@tonic-gate#define	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, hmebp,	\
20640Sstevel@tonic-gate	vapg, label, tmp1, tmp2)					\
20650Sstevel@tonic-gate	sllx	tagacc, TAGACC_CTX_LSHIFT, tmp1				;\
20660Sstevel@tonic-gate	brnz,a,pt tmp1, label/**/1					;\
20670Sstevel@tonic-gate	  ld    [tsbarea + TSBMISS_UHASHSZ], hmebp			;\
20680Sstevel@tonic-gate	ld	[tsbarea + TSBMISS_KHASHSZ], hmebp			;\
20690Sstevel@tonic-gate	ba,pt	%xcc, label/**/2					;\
20700Sstevel@tonic-gate	  ldx	[tsbarea + TSBMISS_KHASHSTART], tmp1			;\
20710Sstevel@tonic-gatelabel/**/1:								;\
20720Sstevel@tonic-gate	ldx	[tsbarea + TSBMISS_UHASHSTART], tmp1			;\
20730Sstevel@tonic-gatelabel/**/2:								;\
20740Sstevel@tonic-gate	srlx	tagacc, hmeshift, vapg					;\
20750Sstevel@tonic-gate	xor	vapg, hatid, tmp2	/* hatid ^ (vaddr >> shift) */	;\
20760Sstevel@tonic-gate	and	tmp2, hmebp, hmebp	/* index into hme_hash */	;\
20770Sstevel@tonic-gate	mulx	hmebp, HMEBUCK_SIZE, hmebp				;\
20780Sstevel@tonic-gate	add	hmebp, tmp1, hmebp
20790Sstevel@tonic-gate
20800Sstevel@tonic-gate/*
20810Sstevel@tonic-gate * hashtag includes bspage + hashno (64 bits).
20820Sstevel@tonic-gate */
20830Sstevel@tonic-gate
20840Sstevel@tonic-gate#define	MAKE_HASHTAG(vapg, hatid, hmeshift, hashno, hblktag)		\
20850Sstevel@tonic-gate	sllx	vapg, hmeshift, vapg					;\
20864528Spaulsan	mov	hashno, hblktag						;\
20874528Spaulsan	sllx	hblktag, HTAG_REHASH_SHIFT, hblktag			;\
20884528Spaulsan	or	vapg, hblktag, hblktag
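
/*
 * In C, HMEHASH_FUNC_ASM and MAKE_HASHTAG compute roughly the following
 * (hedged sketch; the loaded *HASHSZ value is applied as a mask, and a
 * kernel context selects the khash table while a user context selects
 * uhash):
 *
 *	vapg = tagacc >> hmeshift;
 *	hmebp = hash_start +
 *	    ((vapg ^ (uintptr_t)hatid) & hash_mask) * HMEBUCK_SIZE;
 *	hblktag = (vapg << hmeshift) |		// base page address
 *	    ((uint64_t)hashno << HTAG_REHASH_SHIFT);
 */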
20890Sstevel@tonic-gate
20900Sstevel@tonic-gate/*
20910Sstevel@tonic-gate * Function to traverse hmeblk hash link list and find corresponding match.
20920Sstevel@tonic-gate * The search is done using physical pointers. It returns the physical address
20938187SPaul.Sandhu@Sun.COM * pointer to the hmeblk that matches the tag provided.
20940Sstevel@tonic-gate * Parameters:
20950Sstevel@tonic-gate * hmebp	= register that points to hme hash bucket, also used as
20960Sstevel@tonic-gate *		  tmp reg (clobbered)
20970Sstevel@tonic-gate * hmeblktag	= register with hmeblk tag match
20980Sstevel@tonic-gate * hatid	= register with hatid
20990Sstevel@tonic-gate * hmeblkpa	= register where physical ptr will be stored
21000Sstevel@tonic-gate * tmp1		= tmp reg
21010Sstevel@tonic-gate * label: temporary label
21020Sstevel@tonic-gate */
21030Sstevel@tonic-gate
21048187SPaul.Sandhu@Sun.COM#define	HMEHASH_SEARCH(hmebp, hmeblktag, hatid, hmeblkpa, tsbarea, 	\
21058187SPaul.Sandhu@Sun.COM	tmp1, label)							\
21060Sstevel@tonic-gate	add     hmebp, HMEBUCK_NEXTPA, hmeblkpa				;\
21070Sstevel@tonic-gate	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
21080Sstevel@tonic-gate	HAT_HSEARCH_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
21090Sstevel@tonic-gatelabel/**/1:								;\
21108187SPaul.Sandhu@Sun.COM	cmp	hmeblkpa, HMEBLK_ENDPA					;\
21118187SPaul.Sandhu@Sun.COM	be,pn   %xcc, label/**/2					;\
21120Sstevel@tonic-gate	HAT_HLINK_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
21130Sstevel@tonic-gate	add	hmeblkpa, HMEBLK_TAG, hmebp				;\
21140Sstevel@tonic-gate	ldxa	[hmebp]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
21150Sstevel@tonic-gate	add	hmebp, CLONGSIZE, hmebp					;\
21160Sstevel@tonic-gate	ldxa	[hmebp]ASI_MEM, hmebp 	/* read 2nd part of tag */	;\
21170Sstevel@tonic-gate	xor	tmp1, hmeblktag, tmp1					;\
21180Sstevel@tonic-gate	xor	hmebp, hatid, hmebp					;\
21190Sstevel@tonic-gate	or	hmebp, tmp1, hmebp					;\
21200Sstevel@tonic-gate	brz,pn	hmebp, label/**/2	/* branch on hit */		;\
21218187SPaul.Sandhu@Sun.COM	  add	hmeblkpa, HMEBLK_NEXTPA, hmebp				;\
21220Sstevel@tonic-gate	ba,pt	%xcc, label/**/1					;\
21230Sstevel@tonic-gate	  ldxa	[hmebp]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */	;\
21240Sstevel@tonic-gatelabel/**/2:
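
/*
 * HMEHASH_SEARCH in C (hedged sketch).  The hmeblk tag is two
 * longwords; the xor/or sequence above compares both with a single
 * branch.  The list is walked entirely by physical address:
 *
 *	for (pa = bucket->hmeblk_nextpa; pa != HMEBLK_ENDPA;
 *	    pa = hblk->hblk_nextpa) {
 *		hblk = pa_to_hmeblk(pa);	// really ldxa ...ASI_MEM
 *		if (((hblk->htag_tag ^ hblktag) |
 *		    ((uint64_t)hblk->htag_id ^ (uint64_t)hatid)) == 0)
 *			return (pa);		// hit
 *	}
 *	return (HMEBLK_ENDPA);			// miss
 */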
21250Sstevel@tonic-gate
21264528Spaulsan/*
21274528Spaulsan * Function to traverse hmeblk hash link list and find corresponding match.
21284528Spaulsan * The search is done using physical pointers. It returns the physical address
21298187SPaul.Sandhu@Sun.COM * pointer to the hmeblk that matches the tag provided.
21314528Spaulsan * Parameters:
21324528Spaulsan * hmeblktag	= register with hmeblk tag match (rid field is 0)
21334528Spaulsan * hatid	= register with hatid (pointer to SRD)
21344528Spaulsan * hmeblkpa	= register where physical ptr will be stored
21354528Spaulsan * tmp1		= tmp reg
21364528Spaulsan * tmp2		= tmp reg
21374528Spaulsan * label: temporary label
21384528Spaulsan */
21394528Spaulsan
21408187SPaul.Sandhu@Sun.COM#define	HMEHASH_SEARCH_SHME(hmeblktag, hatid, hmeblkpa, tsbarea,	\
21418187SPaul.Sandhu@Sun.COM	tmp1, tmp2, label)			 			\
21424528Spaulsanlabel/**/1:								;\
21438187SPaul.Sandhu@Sun.COM	cmp	hmeblkpa, HMEBLK_ENDPA					;\
21448187SPaul.Sandhu@Sun.COM	be,pn   %xcc, label/**/4					;\
21454528Spaulsan	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			;\
21464528Spaulsan	add	hmeblkpa, HMEBLK_TAG, tmp2				;\
21474528Spaulsan	ldxa	[tmp2]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
21484528Spaulsan	add	tmp2, CLONGSIZE, tmp2					;\
21494528Spaulsan	ldxa	[tmp2]ASI_MEM, tmp2 	/* read 2nd part of tag */	;\
21504528Spaulsan	xor	tmp1, hmeblktag, tmp1					;\
21514528Spaulsan	xor	tmp2, hatid, tmp2					;\
21524528Spaulsan	brz,pn	tmp2, label/**/3	/* branch on hit */		;\
21538187SPaul.Sandhu@Sun.COM	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
21544528Spaulsanlabel/**/2:								;\
21554528Spaulsan	ba,pt	%xcc, label/**/1					;\
21564528Spaulsan	  ldxa	[tmp2]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */		;\
21574528Spaulsanlabel/**/3:								;\
21584528Spaulsan	cmp	tmp1, SFMMU_MAX_HME_REGIONS				;\
21594528Spaulsan	bgeu,pt	%xcc, label/**/2					;\
21608187SPaul.Sandhu@Sun.COM	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
21614528Spaulsan	and	tmp1, BT_ULMASK, tmp2					;\
21624528Spaulsan	srlx	tmp1, BT_ULSHIFT, tmp1					;\
21634528Spaulsan	sllx	tmp1, CLONGSHIFT, tmp1					;\
21644528Spaulsan	add	tsbarea, tmp1, tmp1					;\
21654528Spaulsan	ldx	[tmp1 + TSBMISS_SHMERMAP], tmp1				;\
21664528Spaulsan	srlx	tmp1, tmp2, tmp1					;\
21674528Spaulsan	btst	0x1, tmp1						;\
21684528Spaulsan	bz,pn	%xcc, label/**/2					;\
21698187SPaul.Sandhu@Sun.COM	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
21704528Spaulsanlabel/**/4:
21710Sstevel@tonic-gate
21720Sstevel@tonic-gate#if ((1 << SFHME_SHIFT) != SFHME_SIZE)
21730Sstevel@tonic-gate#error HMEBLK_TO_HMENT assumes sf_hment is power of 2 in size
21740Sstevel@tonic-gate#endif
21750Sstevel@tonic-gate
21760Sstevel@tonic-gate/*
21770Sstevel@tonic-gate * HMEBLK_TO_HMENT is a macro that, given an hmeblk and a vaddr, returns
21780Sstevel@tonic-gate * the offset for the corresponding hment.
21790Sstevel@tonic-gate * Parameters:
21804528Spaulsan * In:
21814528Spaulsan *	vaddr = register with virtual address
21824528Spaulsan *	hmeblkpa = physical pointer to hme_blk
21834528Spaulsan * Out:
21844528Spaulsan *	hmentoff = register where hment offset will be stored
21854528Spaulsan *	hmemisc = hblk_misc
21864528Spaulsan * Scratch:
21874528Spaulsan *	tmp1
21880Sstevel@tonic-gate */
21894528Spaulsan#define	HMEBLK_TO_HMENT(vaddr, hmeblkpa, hmentoff, hmemisc, tmp1, label1)\
21900Sstevel@tonic-gate	add	hmeblkpa, HMEBLK_MISC, hmentoff				;\
21914528Spaulsan	lda	[hmentoff]ASI_MEM, hmemisc 				;\
21924528Spaulsan	andcc	hmemisc, HBLK_SZMASK, %g0				;\
21930Sstevel@tonic-gate	bnz,a,pn  %icc, label1		/* if sz != TTE8K branch */	;\
21940Sstevel@tonic-gate	  or	%g0, HMEBLK_HME1, hmentoff				;\
21950Sstevel@tonic-gate	srl	vaddr, MMU_PAGESHIFT, tmp1				;\
21960Sstevel@tonic-gate	and	tmp1, NHMENTS - 1, tmp1		/* tmp1 = index */	;\
21970Sstevel@tonic-gate	sllx	tmp1, SFHME_SHIFT, tmp1					;\
21980Sstevel@tonic-gate	add	tmp1, HMEBLK_HME1, hmentoff				;\
21990Sstevel@tonic-gatelabel1:
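
/*
 * HMEBLK_TO_HMENT in C (hedged sketch).  An 8K hmeblk holds NHMENTS
 * hments indexed by the page number within the block; any larger page
 * size always uses slot 0:
 *
 *	hmemisc = hblk->hblk_misc;
 *	if (hmemisc & HBLK_SZMASK)		// TTE size != 8K
 *		hmentoff = HMEBLK_HME1;
 *	else
 *		hmentoff = HMEBLK_HME1 +
 *		    (((vaddr >> MMU_PAGESHIFT) & (NHMENTS - 1)) <<
 *		    SFHME_SHIFT);
 */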
22000Sstevel@tonic-gate
22010Sstevel@tonic-gate/*
22020Sstevel@tonic-gate * GET_TTE is a macro that returns a TTE given a tag and hatid.
22030Sstevel@tonic-gate *
22043687Sjb145095 * tagacc	= (pseudo-)tag access register (in)
22050Sstevel@tonic-gate * hatid	= sfmmu pointer for TSB miss (in)
22060Sstevel@tonic-gate * tte		= tte for TLB miss if found, otherwise clobbered (out)
22070Sstevel@tonic-gate * hmeblkpa	= PA of hment if found, otherwise clobbered (out)
22080Sstevel@tonic-gate * tsbarea	= pointer to the tsbmiss area for this cpu. (in)
22094528Spaulsan * hmemisc	= hblk_misc if TTE is found (out), otherwise clobbered
22100Sstevel@tonic-gate * hmeshift	= constant/register to shift VA to obtain the virtual pfn
22110Sstevel@tonic-gate *		  for this page size.
22120Sstevel@tonic-gate * hashno	= constant/register hash number
22138187SPaul.Sandhu@Sun.COM * tmp		= temp value - clobbered
22140Sstevel@tonic-gate * label	= temporary label for branching within macro.
22150Sstevel@tonic-gate * foundlabel	= label to jump to when tte is found.
22160Sstevel@tonic-gate * suspendlabel= label to jump to when tte is suspended.
22174528Spaulsan * exitlabel	= label to jump to when tte is not found.
22180Sstevel@tonic-gate *
22190Sstevel@tonic-gate */
22208187SPaul.Sandhu@Sun.COM#define GET_TTE(tagacc, hatid, tte, hmeblkpa, tsbarea, hmemisc, hmeshift, \
22218187SPaul.Sandhu@Sun.COM		 hashno, tmp, label, foundlabel, suspendlabel, exitlabel) \
22220Sstevel@tonic-gate									;\
22230Sstevel@tonic-gate	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
22240Sstevel@tonic-gate	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
22250Sstevel@tonic-gate	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
22268187SPaul.Sandhu@Sun.COM		hmeblkpa, label/**/5, hmemisc, tmp)			;\
22270Sstevel@tonic-gate									;\
22280Sstevel@tonic-gate	/*								;\
22290Sstevel@tonic-gate	 * tagacc = tagacc						;\
22300Sstevel@tonic-gate	 * hatid = hatid						;\
22310Sstevel@tonic-gate	 * tsbarea = tsbarea						;\
22320Sstevel@tonic-gate	 * tte   = hmebp (hme bucket pointer)				;\
22330Sstevel@tonic-gate	 * hmeblkpa  = vapg  (virtual page)				;\
22348187SPaul.Sandhu@Sun.COM	 * hmemisc, tmp = scratch					;\
22350Sstevel@tonic-gate	 */								;\
22364528Spaulsan	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
22374528Spaulsan	or	hmemisc, SFMMU_INVALID_SHMERID, hmemisc			;\
22380Sstevel@tonic-gate									;\
22390Sstevel@tonic-gate	/*								;\
22400Sstevel@tonic-gate	 * tagacc = tagacc						;\
22410Sstevel@tonic-gate	 * hatid = hatid						;\
22420Sstevel@tonic-gate	 * tte   = hmebp						;\
22430Sstevel@tonic-gate	 * hmeblkpa  = CLOBBERED					;\
22444528Spaulsan	 * hmemisc  = htag_bspage+hashno+invalid_rid			;\
22458187SPaul.Sandhu@Sun.COM	 * tmp  = scratch						;\
22460Sstevel@tonic-gate	 */								;\
22470Sstevel@tonic-gate	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
22488187SPaul.Sandhu@Sun.COM	HMEHASH_SEARCH(tte, hmemisc, hatid, hmeblkpa, 	 		\
22490Sstevel@tonic-gate		tsbarea, tagacc, label/**/1)				;\
22500Sstevel@tonic-gate	/*								;\
22510Sstevel@tonic-gate	 * tagacc = CLOBBERED						;\
22520Sstevel@tonic-gate	 * tte = CLOBBERED						;\
22530Sstevel@tonic-gate	 * hmeblkpa = hmeblkpa						;\
22548187SPaul.Sandhu@Sun.COM	 * tmp = scratch						;\
22550Sstevel@tonic-gate	 */								;\
22568187SPaul.Sandhu@Sun.COM	cmp	hmeblkpa, HMEBLK_ENDPA					;\
22578187SPaul.Sandhu@Sun.COM	bne,pn   %xcc, label/**/4       /* branch if hmeblk found */    ;\
22580Sstevel@tonic-gate	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
22590Sstevel@tonic-gate	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
22600Sstevel@tonic-gate	  nop								;\
22610Sstevel@tonic-gatelabel/**/4:								;\
22620Sstevel@tonic-gate	/*								;\
22630Sstevel@tonic-gate	 * We have found the hmeblk containing the hment.		;\
22640Sstevel@tonic-gate	 * Now we calculate the corresponding tte.			;\
22650Sstevel@tonic-gate	 *								;\
22660Sstevel@tonic-gate	 * tagacc = tagacc						;\
22674528Spaulsan	 * hatid = hatid						;\
22684528Spaulsan	 * tte   = clobbered						;\
22690Sstevel@tonic-gate	 * hmeblkpa  = hmeblkpa						;\
22704528Spaulsan	 * hmemisc  = hblktag						;\
22718187SPaul.Sandhu@Sun.COM	 * tmp = scratch						;\
22724528Spaulsan	 */								;\
22734528Spaulsan	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
22744528Spaulsan		label/**/2)						;\
22754528Spaulsan									;\
22764528Spaulsan	/*								;\
22774528Spaulsan	 * tagacc = tagacc						;\
22784528Spaulsan	 * hatid = hmentoff						;\
22794528Spaulsan	 * tte   = clobbered						;\
22804528Spaulsan	 * hmeblkpa  = hmeblkpa						;\
22814528Spaulsan	 * hmemisc  = hblk_misc						;\
22828187SPaul.Sandhu@Sun.COM	 * tmp = scratch						;\
22830Sstevel@tonic-gate	 */								;\
22840Sstevel@tonic-gate									;\
22854528Spaulsan	add	hatid, SFHME_TTE, hatid					;\
22864528Spaulsan	add	hmeblkpa, hatid, hmeblkpa				;\
22870Sstevel@tonic-gate	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
22884528Spaulsan	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
22894528Spaulsan	set	TTE_SUSPEND, hatid					;\
22904528Spaulsan	TTE_SUSPEND_INT_SHIFT(hatid)					;\
22914528Spaulsan	btst	tte, hatid						;\
22924528Spaulsan	bz,pt	%xcc, foundlabel					;\
22930Sstevel@tonic-gate	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
22944528Spaulsan									;\
22954528Spaulsan	/*								;\
22964528Spaulsan	 * Mapping is suspended, so goto suspend label.			;\
22974528Spaulsan	 */								;\
22984528Spaulsan	ba,pt	%xcc, suspendlabel					;\
22994528Spaulsan	  nop
23004528Spaulsan
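
/*
 * GET_TTE end to end, as a hedged C sketch (labels become gotos, the
 * lower-case helpers stand in for the macros above, and all hmeblk
 * accesses are physical):
 *
 *	hmebp = hmehash_func(tagacc, hatid, hmeshift);
 *	tag = make_hashtag(tagacc, hmeshift, hashno) |
 *	    SFMMU_INVALID_SHMERID;		// private hmeblks only
 *	pa = hmehash_search(hmebp, tag, hatid);
 *	if (pa == HMEBLK_ENDPA)
 *		goto exitlabel;			// rehash or page fault
 *	off = hmeblk_to_hment(tagacc, pa);
 *	tte = *(uint64_t *)(pa + off + SFHME_TTE);
 *	if (tte & TTE_SUSPEND)
 *		goto suspendlabel;		// mapping being relocated
 *	goto foundlabel;			// tte in hand
 */
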
23014528Spaulsan/*
23024528Spaulsan * GET_SHME_TTE is similar to GET_TTE() except it searches
23034528Spaulsan * shared hmeblks via HMEHASH_SEARCH_SHME() macro.
23044528Spaulsan * If a valid tte is found, hmemisc = shctx flag, i.e., it is
23054528Spaulsan * either 0 (not part of scd) or 1 (part of scd).
23064528Spaulsan */
23078187SPaul.Sandhu@Sun.COM#define GET_SHME_TTE(tagacc, hatid, tte, hmeblkpa, tsbarea, hmemisc, 	\
23088187SPaul.Sandhu@Sun.COM		hmeshift, hashno, tmp, label, foundlabel,		\
23094528Spaulsan		suspendlabel, exitlabel)				\
23104528Spaulsan									;\
23114528Spaulsan	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
23124528Spaulsan	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
23134528Spaulsan	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
23148187SPaul.Sandhu@Sun.COM		hmeblkpa, label/**/5, hmemisc, tmp)			;\
23154528Spaulsan									;\
23164528Spaulsan	/*								;\
23174528Spaulsan	 * tagacc = tagacc						;\
23184528Spaulsan	 * hatid = hatid						;\
23194528Spaulsan	 * tsbarea = tsbarea						;\
23204528Spaulsan	 * tte   = hmebp (hme bucket pointer)				;\
23214528Spaulsan	 * hmeblkpa  = vapg  (virtual page)				;\
23228187SPaul.Sandhu@Sun.COM	 * hmemisc, tmp = scratch					;\
23234528Spaulsan	 */								;\
23244528Spaulsan	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
23254528Spaulsan									;\
23264528Spaulsan	/*								;\
23274528Spaulsan	 * tagacc = tagacc						;\
23284528Spaulsan	 * hatid = hatid						;\
23294528Spaulsan	 * tsbarea = tsbarea						;\
23304528Spaulsan	 * tte   = hmebp						;\
23314528Spaulsan	 * hmemisc  = htag_bspage + hashno + 0 (for rid)		;\
23324528Spaulsan	 * hmeblkpa  = CLOBBERED					;\
23338187SPaul.Sandhu@Sun.COM	 * tmp = scratch						;\
23344528Spaulsan	 */								;\
23354528Spaulsan	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
23364528Spaulsan									;\
23374528Spaulsan	add     tte, HMEBUCK_NEXTPA, hmeblkpa				;\
23384528Spaulsan	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
23394528Spaulsan	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tagacc, tte)			;\
23404528Spaulsan									;\
23414528Spaulsanlabel/**/8:								;\
23428187SPaul.Sandhu@Sun.COM	HMEHASH_SEARCH_SHME(hmemisc, hatid, hmeblkpa,			\
23434528Spaulsan		tsbarea, tagacc, tte, label/**/1)			;\
23444528Spaulsan	/*								;\
23454528Spaulsan	 * tagacc = CLOBBERED						;\
23464528Spaulsan	 * tte = CLOBBERED						;\
23474528Spaulsan	 * hmeblkpa = hmeblkpa						;\
23488187SPaul.Sandhu@Sun.COM	 * tmp = scratch						;\
23494528Spaulsan	 */								;\
23508187SPaul.Sandhu@Sun.COM	cmp	hmeblkpa, HMEBLK_ENDPA					;\
23518187SPaul.Sandhu@Sun.COM	bne,pn   %xcc, label/**/4       /* branch if hmeblk found */    ;\
23524528Spaulsan	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
23534528Spaulsan	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
23544528Spaulsan	  nop								;\
23554528Spaulsanlabel/**/4:								;\
23564528Spaulsan	/*								;\
23574528Spaulsan	 * We have found the hmeblk containing the hment.		;\
23584528Spaulsan	 * Now we calculate the corresponding tte.			;\
23594528Spaulsan	 *								;\
23604528Spaulsan	 * tagacc = tagacc						;\
23614528Spaulsan	 * hatid = hatid						;\
23624528Spaulsan	 * tte   = clobbered						;\
23634528Spaulsan	 * hmeblkpa  = hmeblkpa						;\
23644528Spaulsan	 * hmemisc  = hblktag						;\
23654528Spaulsan	 * tsbarea = tsbmiss area					;\
23668187SPaul.Sandhu@Sun.COM	 * tmp = scratch						;\
23674528Spaulsan	 */								;\
23684528Spaulsan	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
23694528Spaulsan		label/**/2)						;\
23704528Spaulsan									;\
23714528Spaulsan	/*								;\
23724528Spaulsan	 * tagacc = tagacc						;\
23734528Spaulsan	 * hatid = hmentoff						;\
23744528Spaulsan	 * tte = clobbered						;\
23754528Spaulsan	 * hmeblkpa  = hmeblkpa						;\
23764528Spaulsan	 * hmemisc  = hblk_misc						;\
23774528Spaulsan	 * tsbarea = tsbmiss area					;\
23788187SPaul.Sandhu@Sun.COM	 * tmp = scratch						;\
23794528Spaulsan	 */								;\
23804528Spaulsan									;\
23814528Spaulsan	add	hatid, SFHME_TTE, hatid					;\
23824528Spaulsan	add	hmeblkpa, hatid, hmeblkpa				;\
23834528Spaulsan	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
23844528Spaulsan	brlz,pt tte, label/**/6						;\
23858187SPaul.Sandhu@Sun.COM	  nop								;\
23864528Spaulsan	btst	HBLK_SZMASK, hmemisc					;\
23874528Spaulsan	bnz,a,pt %icc, label/**/7					;\
23884528Spaulsan	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
23894528Spaulsan									;\
23904528Spaulsan	/*								;\
23914528Spaulsan	 * We found an invalid 8K tte in the shme.			;\
23924528Spaulsan	 * It may not belong to the shme's region, since		;\
23934528Spaulsan	 * region size/alignment granularity is 8K but different	;\
23944528Spaulsan	 * regions don't share hmeblks. Continue the search.		;\
23954528Spaulsan	 */								;\
23964528Spaulsan	sub	hmeblkpa, hatid, hmeblkpa				;\
23974528Spaulsan	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
23984528Spaulsan	srlx	tagacc, hmeshift, tte					;\
23994528Spaulsan	add	hmeblkpa, HMEBLK_NEXTPA, hmeblkpa			;\
24004528Spaulsan	ldxa	[hmeblkpa]ASI_MEM, hmeblkpa				;\
24014528Spaulsan	MAKE_HASHTAG(tte, hatid, hmeshift, hashno, hmemisc)		;\
24024528Spaulsan	ba,a,pt	%xcc, label/**/8					;\
24034528Spaulsanlabel/**/6:								;\
24044528Spaulsan	GET_SCDSHMERMAP(tsbarea, hmeblkpa, hatid, hmemisc)		;\
24054528Spaulsan	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
24064528Spaulsanlabel/**/7:								;\
24074528Spaulsan	set	TTE_SUSPEND, hatid					;\
24084528Spaulsan	TTE_SUSPEND_INT_SHIFT(hatid)					;\
24094528Spaulsan	btst	tte, hatid						;\
24100Sstevel@tonic-gate	bz,pt	%xcc, foundlabel					;\
24114528Spaulsan	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
24120Sstevel@tonic-gate									;\
24130Sstevel@tonic-gate	/*								;\
24140Sstevel@tonic-gate	 * Mapping is suspended, so go to the suspend label.		;\
24150Sstevel@tonic-gate	 */								;\
24160Sstevel@tonic-gate	ba,pt	%xcc, suspendlabel					;\
24170Sstevel@tonic-gate	  nop
24180Sstevel@tonic-gate
24190Sstevel@tonic-gate	/*
24200Sstevel@tonic-gate	 * KERNEL PROTECTION HANDLER
24210Sstevel@tonic-gate	 *
24220Sstevel@tonic-gate	 * g1 = tsb8k pointer register (clobbered)
24230Sstevel@tonic-gate	 * g2 = tag access register (ro)
24240Sstevel@tonic-gate	 * g3 - g7 = scratch registers
24250Sstevel@tonic-gate	 *
24260Sstevel@tonic-gate	 * Note: This function is patched at runtime for performance reasons.
24270Sstevel@tonic-gate	 * 	 Any changes here require sfmmu_patch_ktsb to be updated too.
24280Sstevel@tonic-gate	 */
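	/*
	 * A minimal C sketch of what patching a sethi immediate amounts
	 * to (illustrative only; the real work, including the required
	 * I$ flush, is done by sfmmu_patch_ktsb):
	 *
	 *	void
	 *	patch_sethi(uint32_t *ip, uint32_t val)
	 *	{
	 *		*ip = (*ip & ~0x003fffffu) | ((val >> 10) & 0x003fffffu);
	 *	}
	 *
	 * i.e., only the imm22 field of the sethi instruction is rewritten.
	 */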
24290Sstevel@tonic-gate	ENTRY_NP(sfmmu_kprot_trap)
24300Sstevel@tonic-gate	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
24310Sstevel@tonic-gatesfmmu_kprot_patch_ktsb_base:
24320Sstevel@tonic-gate	RUNTIME_PATCH_SETX(%g1, %g6)
24330Sstevel@tonic-gate	/* %g1 = contents of ktsb_base or ktsb_pbase */
24340Sstevel@tonic-gatesfmmu_kprot_patch_ktsb_szcode:
24350Sstevel@tonic-gate	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
24360Sstevel@tonic-gate
24370Sstevel@tonic-gate	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
24380Sstevel@tonic-gate	! %g1 = First TSB entry pointer, as TSB miss handler expects
24390Sstevel@tonic-gate
24400Sstevel@tonic-gate	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
24410Sstevel@tonic-gatesfmmu_kprot_patch_ktsb4m_base:
24420Sstevel@tonic-gate	RUNTIME_PATCH_SETX(%g3, %g6)
24430Sstevel@tonic-gate	/* %g3 = contents of ktsb4m_base or ktsb4m_pbase */
24440Sstevel@tonic-gatesfmmu_kprot_patch_ktsb4m_szcode:
24450Sstevel@tonic-gate	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
24460Sstevel@tonic-gate
24470Sstevel@tonic-gate	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
24480Sstevel@tonic-gate	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
24490Sstevel@tonic-gate
24504528Spaulsan        CPU_TSBMISS_AREA(%g6, %g7)
24514528Spaulsan        HAT_PERCPU_STAT16(%g6, TSBMISS_KPROTS, %g7)
24520Sstevel@tonic-gate	ba,pt	%xcc, sfmmu_tsb_miss_tt
24530Sstevel@tonic-gate	  nop
24540Sstevel@tonic-gate
24550Sstevel@tonic-gate	/*
24560Sstevel@tonic-gate	 * USER PROTECTION HANDLER
24570Sstevel@tonic-gate	 *
24580Sstevel@tonic-gate	 * g1 = tsb8k pointer register (ro)
24590Sstevel@tonic-gate	 * g2 = tag access register (ro)
24600Sstevel@tonic-gate	 * g3 = faulting context (clobbered, currently not used)
24610Sstevel@tonic-gate	 * g4 - g7 = scratch registers
24620Sstevel@tonic-gate	 */
24630Sstevel@tonic-gate	ALTENTRY(sfmmu_uprot_trap)
24646127Ssm142603#ifdef sun4v
24650Sstevel@tonic-gate	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
24660Sstevel@tonic-gate	/* %g1 = first TSB entry ptr now, %g2 preserved */
24670Sstevel@tonic-gate
24680Sstevel@tonic-gate	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
24694528Spaulsan	brlz,pt %g3, 9f				/* check for 2nd TSB */
24704528Spaulsan	  nop
24710Sstevel@tonic-gate
24720Sstevel@tonic-gate	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
24730Sstevel@tonic-gate	/* %g3 = second TSB entry ptr now, %g2 preserved */
24740Sstevel@tonic-gate
24750Sstevel@tonic-gate#else /* sun4v */
24761772Sjl139090#ifdef UTSB_PHYS
24771772Sjl139090	/* g1 = first TSB entry ptr */
24786127Ssm142603	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
24794528Spaulsan	brlz,pt %g3, 9f			/* check for 2nd TSB */
24804528Spaulsan	  nop
24811772Sjl139090
24821772Sjl139090	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
24831772Sjl139090	/* %g3 = second TSB entry ptr now, %g2 preserved */
24841772Sjl139090#else /* UTSB_PHYS */
24850Sstevel@tonic-gate	brgez,pt %g1, 9f		/* check for 2nd TSB */
24864528Spaulsan	  mov	-1, %g3			/* set second tsbe ptr to -1 */
24870Sstevel@tonic-gate
24880Sstevel@tonic-gate	mov	%g2, %g7
24890Sstevel@tonic-gate	GET_2ND_TSBE_PTR(%g7, %g1, %g3, %g4, %g5, sfmmu_uprot)
24900Sstevel@tonic-gate	/* %g3 = second TSB entry ptr now, %g7 clobbered */
24910Sstevel@tonic-gate	mov	%g1, %g7
24920Sstevel@tonic-gate	GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot)
24931772Sjl139090#endif /* UTSB_PHYS */
24941772Sjl139090#endif /* sun4v */
24950Sstevel@tonic-gate9:
24960Sstevel@tonic-gate	CPU_TSBMISS_AREA(%g6, %g7)
24970Sstevel@tonic-gate	HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7)
24980Sstevel@tonic-gate	ba,pt	%xcc, sfmmu_tsb_miss_tt		/* branch TSB miss handler */
24990Sstevel@tonic-gate	  nop
25000Sstevel@tonic-gate
25010Sstevel@tonic-gate	/*
25020Sstevel@tonic-gate	 * Kernel 8K page iTLB miss.  We also get here if we took a
25030Sstevel@tonic-gate	 * fast instruction access mmu miss trap while running in
25040Sstevel@tonic-gate	 * invalid context.
25050Sstevel@tonic-gate	 *
25060Sstevel@tonic-gate	 * %g1 = 8K TSB pointer register (not used, clobbered)
25070Sstevel@tonic-gate	 * %g2 = tag access register (used)
25080Sstevel@tonic-gate	 * %g3 = faulting context id (used)
25094528Spaulsan	 * %g7 = TSB tag to match (used)
25100Sstevel@tonic-gate	 */
25110Sstevel@tonic-gate	.align	64
25120Sstevel@tonic-gate	ALTENTRY(sfmmu_kitlb_miss)
25130Sstevel@tonic-gate	brnz,pn %g3, tsb_tl0_noctxt
25140Sstevel@tonic-gate	  nop
25150Sstevel@tonic-gate
25160Sstevel@tonic-gate	/* kernel miss */
25170Sstevel@tonic-gate	/* get kernel tsb pointer */
25180Sstevel@tonic-gate	/* we patch the next set of instructions at run time */
25190Sstevel@tonic-gate	/* NOTE: any changes here require matching changes in sfmmu_patch_ktsb */
25200Sstevel@tonic-gateiktsbbase:
25210Sstevel@tonic-gate	RUNTIME_PATCH_SETX(%g4, %g5)
25220Sstevel@tonic-gate	/* %g4 = contents of ktsb_base or ktsb_pbase */
25230Sstevel@tonic-gate
25240Sstevel@tonic-gateiktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
25250Sstevel@tonic-gate	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
25260Sstevel@tonic-gate	or	%g4, %g1, %g1			! form tsb ptr
25270Sstevel@tonic-gate	ldda	[%g1]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
25280Sstevel@tonic-gate	cmp	%g4, %g7
25294528Spaulsan	bne,pn	%xcc, iktsb4mbase		! check 4m ktsb
25304528Spaulsan	  srlx    %g2, MMU_PAGESHIFT4M, %g3	! use 4m virt-page as TSB index
25314528Spaulsan
25324528Spaulsan	andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
25334528Spaulsan	bz,pn	%icc, exec_fault
25344528Spaulsan	  nop
25354528Spaulsan	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
25364528Spaulsan	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
25374528Spaulsan	retry
25384528Spaulsan
25394528Spaulsaniktsb4mbase:
25404528Spaulsan        RUNTIME_PATCH_SETX(%g4, %g6)
25414528Spaulsan        /* %g4 = contents of ktsb4m_base or ktsb4m_pbase */
25424528Spaulsaniktsb4m:
25434528Spaulsan	sllx    %g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
25444528Spaulsan        srlx    %g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
25454528Spaulsan	add	%g4, %g3, %g3			! %g3 = 4m tsbe ptr
25464528Spaulsan	ldda	[%g3]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
25474528Spaulsan	cmp	%g4, %g7
25480Sstevel@tonic-gate	bne,pn	%xcc, sfmmu_tsb_miss_tt		! branch on miss
25494528Spaulsan	  andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
25500Sstevel@tonic-gate	bz,pn	%icc, exec_fault
25510Sstevel@tonic-gate	  nop
25520Sstevel@tonic-gate	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
25530Sstevel@tonic-gate	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
25540Sstevel@tonic-gate	retry
25550Sstevel@tonic-gate
25560Sstevel@tonic-gate	/*
25570Sstevel@tonic-gate	 * Kernel dTLB miss.  We also get here if we took a fast data
25580Sstevel@tonic-gate	 * access mmu miss trap while running in invalid context.
25590Sstevel@tonic-gate	 *
25600Sstevel@tonic-gate	 * Note: for now we store kpm TTEs in the kernel TSB as usual.
25610Sstevel@tonic-gate	 *	We select the TSB miss handler to branch to depending on
25620Sstevel@tonic-gate	 *	the virtual address of the access.  In the future it may
25630Sstevel@tonic-gate	 *	be desirable to separate kpm TTEs into their own TSB,
25640Sstevel@tonic-gate	 *	in which case all that needs to be done is to set
25650Sstevel@tonic-gate	 *	kpm_tsbbase/kpm_tsbsz to point to the new TSB and branch
25660Sstevel@tonic-gate	 *	early in the miss if we detect a kpm VA to a new handler.
25670Sstevel@tonic-gate	 *
25680Sstevel@tonic-gate	 * %g1 = 8K TSB pointer register (not used, clobbered)
25690Sstevel@tonic-gate	 * %g2 = tag access register (used)
25700Sstevel@tonic-gate	 * %g3 = faulting context id (used)
25710Sstevel@tonic-gate	 */
25720Sstevel@tonic-gate	.align	64
25730Sstevel@tonic-gate	ALTENTRY(sfmmu_kdtlb_miss)
25740Sstevel@tonic-gate	brnz,pn	%g3, tsb_tl0_noctxt		/* invalid context? */
25750Sstevel@tonic-gate	  nop
25760Sstevel@tonic-gate
25770Sstevel@tonic-gate	/* Gather some stats for kpm misses in the TLB. */
25780Sstevel@tonic-gate	/* KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) */
25790Sstevel@tonic-gate	KPM_TLBMISS_STAT_INCR(%g2, %g4, %g5, %g6, kpmtlbm_stat_out)
25800Sstevel@tonic-gate
25810Sstevel@tonic-gate	/*
25820Sstevel@tonic-gate	 * Get first TSB offset and look for 8K/64K/512K mapping
25830Sstevel@tonic-gate	 * using the 8K virtual page as the index.
25840Sstevel@tonic-gate	 *
25850Sstevel@tonic-gate	 * We patch the next set of instructions at run time;
25860Sstevel@tonic-gate	 * any changes here require sfmmu_patch_ktsb changes too.
25870Sstevel@tonic-gate	 */
25880Sstevel@tonic-gatedktsbbase:
25890Sstevel@tonic-gate	RUNTIME_PATCH_SETX(%g7, %g6)
25900Sstevel@tonic-gate	/* %g7 = contents of ktsb_base or ktsb_pbase */
25910Sstevel@tonic-gate
25920Sstevel@tonic-gatedktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
25930Sstevel@tonic-gate	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
25940Sstevel@tonic-gate
25950Sstevel@tonic-gate	/*
25960Sstevel@tonic-gate	 * At this point %g1 is our index into the TSB.
25970Sstevel@tonic-gate	 * We just masked off enough bits of the VA depending
25980Sstevel@tonic-gate	 * on our TSB size code.
25990Sstevel@tonic-gate	 */
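	/*
	 * Equivalent C sketch of the two patched shifts above (szcode
	 * stands for the boot-time RUNTIME_PATCH value):
	 *
	 *	off = ((tagacc >> TAGACC_SHIFT) &
	 *	    ((1 << (TSB_START_SIZE + szcode)) - 1)) << TSB_ENTRY_SHIFT;
	 *	tsbe = ktsb_base + off;
	 */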
26000Sstevel@tonic-gate	ldda	[%g7 + %g1]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
26010Sstevel@tonic-gate	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
26020Sstevel@tonic-gate	cmp	%g6, %g4			! compare tag
26030Sstevel@tonic-gate	bne,pn	%xcc, dktsb4m_kpmcheck_small
26040Sstevel@tonic-gate	  add	%g7, %g1, %g1			/* form tsb ptr */
26050Sstevel@tonic-gate	TT_TRACE(trace_tsbhit)
26060Sstevel@tonic-gate	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
26070Sstevel@tonic-gate	/* trapstat expects tte in %g5 */
26080Sstevel@tonic-gate	retry
26090Sstevel@tonic-gate
26100Sstevel@tonic-gate	/*
26110Sstevel@tonic-gate	 * If kpm is using large pages, the following instruction needs
26120Sstevel@tonic-gate	 * to be patched to a nop at boot time (by sfmmu_kpm_patch_tsbm)
26130Sstevel@tonic-gate	 * so that we will probe the 4M TSB regardless of the VA.  In
26140Sstevel@tonic-gate	 * the case where kpm uses small pages, we know that no large kernel
26150Sstevel@tonic-gate	 * mappings are located above 0x80000000.00000000 so we skip the
26160Sstevel@tonic-gate	 * probe as an optimization.
26170Sstevel@tonic-gate	 */
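	/*
	 * The kpm test below is just a sign check on the (pseudo-)tag
	 * access register, i.e., in C:
	 *
	 *	if ((int64_t)tagacc < 0)	VA >= 0x80000000.00000000
	 *		goto sfmmu_kpm_dtsb_miss_small;
	 */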
26180Sstevel@tonic-gatedktsb4m_kpmcheck_small:
26190Sstevel@tonic-gate	brlz,pn %g2, sfmmu_kpm_dtsb_miss_small
26200Sstevel@tonic-gate	  /* delay slot safe, below */
26210Sstevel@tonic-gate
26220Sstevel@tonic-gate	/*
26230Sstevel@tonic-gate	 * Get second TSB offset and look for 4M mapping
26240Sstevel@tonic-gate	 * using 4M virtual page as the TSB index.
26250Sstevel@tonic-gate	 *
26260Sstevel@tonic-gate	 * Here:
26270Sstevel@tonic-gate	 * %g1 = 8K TSB pointer.  Don't squash it.
26280Sstevel@tonic-gate	 * %g2 = tag access register (we still need it)
26290Sstevel@tonic-gate	 */
26300Sstevel@tonic-gate	srlx	%g2, MMU_PAGESHIFT4M, %g3
26310Sstevel@tonic-gate
26320Sstevel@tonic-gate	/*
26330Sstevel@tonic-gate	 * We patch the next set of instructions at run time;
26340Sstevel@tonic-gate	 * any changes here require sfmmu_patch_ktsb changes too.
26350Sstevel@tonic-gate	 */
26360Sstevel@tonic-gatedktsb4mbase:
26370Sstevel@tonic-gate	RUNTIME_PATCH_SETX(%g7, %g6)
26380Sstevel@tonic-gate	/* %g7 = contents of ktsb4m_base or ktsb4m_pbase */
26390Sstevel@tonic-gatedktsb4m:
26400Sstevel@tonic-gate	sllx	%g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
26410Sstevel@tonic-gate	srlx	%g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
26420Sstevel@tonic-gate
26430Sstevel@tonic-gate	/*
26440Sstevel@tonic-gate	 * At this point %g3 is our index into the TSB.
26450Sstevel@tonic-gate	 * We just masked off enough bits of the VA depending
26460Sstevel@tonic-gate	 * on our TSB size code.
26470Sstevel@tonic-gate	 */
26480Sstevel@tonic-gate	ldda	[%g7 + %g3]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
26490Sstevel@tonic-gate	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
26500Sstevel@tonic-gate	cmp	%g6, %g4			! compare tag
26510Sstevel@tonic-gate
26520Sstevel@tonic-gatedktsb4m_tsbmiss:
26530Sstevel@tonic-gate	bne,pn	%xcc, dktsb4m_kpmcheck
26540Sstevel@tonic-gate	  add	%g7, %g3, %g3			! %g3 = kernel second TSB ptr
26550Sstevel@tonic-gate	TT_TRACE(trace_tsbhit)
26560Sstevel@tonic-gate	/* we don't check TTE size here since we assume 4M TSB is separate */
26570Sstevel@tonic-gate	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
26580Sstevel@tonic-gate	/* trapstat expects tte in %g5 */
26590Sstevel@tonic-gate	retry
26600Sstevel@tonic-gate
26610Sstevel@tonic-gate	/*
26620Sstevel@tonic-gate	 * So, we failed to find a valid TTE to match the faulting
26630Sstevel@tonic-gate	 * address in either TSB.  There are a few cases that could land
26640Sstevel@tonic-gate	 * us here:
26650Sstevel@tonic-gate	 *
26660Sstevel@tonic-gate	 * 1) This is a kernel VA below 0x80000000.00000000.  We branch
26670Sstevel@tonic-gate	 *    to sfmmu_tsb_miss_tt to handle the miss.
26680Sstevel@tonic-gate	 * 2) We missed on a kpm VA, and we didn't find the mapping in the
26690Sstevel@tonic-gate	 *    4M TSB.  Let segkpm handle it.
26700Sstevel@tonic-gate	 *
26710Sstevel@tonic-gate	 * Note that we shouldn't land here in the case of a kpm VA when
26720Sstevel@tonic-gate	 * kpm_smallpages is active -- we handled that case earlier at
26730Sstevel@tonic-gate	 * dktsb4m_kpmcheck_small.
26740Sstevel@tonic-gate	 *
26750Sstevel@tonic-gate	 * At this point:
26760Sstevel@tonic-gate	 *  g1 = 8K-indexed primary TSB pointer
26770Sstevel@tonic-gate	 *  g2 = tag access register
26780Sstevel@tonic-gate	 *  g3 = 4M-indexed secondary TSB pointer
26790Sstevel@tonic-gate	 */
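	/*
	 * I.e., in C (sketch):
	 *
	 *	if ((int64_t)tagacc < 0)
	 *		goto sfmmu_kpm_dtsb_miss;	case 2, kpm VA
	 *	goto sfmmu_tsb_miss_tt;			case 1
	 */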
26800Sstevel@tonic-gatedktsb4m_kpmcheck:
26810Sstevel@tonic-gate	cmp	%g2, %g0
26820Sstevel@tonic-gate	bl,pn	%xcc, sfmmu_kpm_dtsb_miss
26830Sstevel@tonic-gate	  nop
26840Sstevel@tonic-gate	ba,a,pt	%icc, sfmmu_tsb_miss_tt
26850Sstevel@tonic-gate	  nop
26860Sstevel@tonic-gate
26870Sstevel@tonic-gate#ifdef sun4v
26880Sstevel@tonic-gate	/*
26890Sstevel@tonic-gate	 * User instruction miss w/ single TSB.
26900Sstevel@tonic-gate	 * The first probe covers 8K, 64K, and 512K page sizes,
26910Sstevel@tonic-gate	 * because 64K and 512K mappings are replicated off the 8K
26920Sstevel@tonic-gate	 * pointer.
26930Sstevel@tonic-gate	 *
26940Sstevel@tonic-gate	 * g1 = tsb8k pointer register
26950Sstevel@tonic-gate	 * g2 = tag access register
26960Sstevel@tonic-gate	 * g3 - g6 = scratch registers
26970Sstevel@tonic-gate	 * g7 = TSB tag to match
26980Sstevel@tonic-gate	 */
26990Sstevel@tonic-gate	.align	64
27000Sstevel@tonic-gate	ALTENTRY(sfmmu_uitlb_fastpath)
27016127Ssm142603
27020Sstevel@tonic-gate	PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail)
27030Sstevel@tonic-gate	/* g4 - g5 = clobbered by PROBE_1ST_ITSB */
27040Sstevel@tonic-gate	ba,pn	%xcc, sfmmu_tsb_miss_tt
27054528Spaulsan	  mov	-1, %g3
27060Sstevel@tonic-gate
27070Sstevel@tonic-gate	/*
27080Sstevel@tonic-gate	 * User data miss w/ single TSB.
27090Sstevel@tonic-gate	 * The first probe covers 8K, 64K, and 512K page sizes,
27100Sstevel@tonic-gate	 * because 64K and 512K mappings are replicated off the 8K
27110Sstevel@tonic-gate	 * pointer.
27120Sstevel@tonic-gate	 *
27130Sstevel@tonic-gate	 * g1 = tsb8k pointer register
27140Sstevel@tonic-gate	 * g2 = tag access register
27150Sstevel@tonic-gate	 * g3 - g6 = scratch registers
27160Sstevel@tonic-gate	 * g7 = TSB tag to match
27170Sstevel@tonic-gate	 */
27180Sstevel@tonic-gate	.align 64
27190Sstevel@tonic-gate	ALTENTRY(sfmmu_udtlb_fastpath)
27200Sstevel@tonic-gate
27210Sstevel@tonic-gate	PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail)
27220Sstevel@tonic-gate	/* g4 - g5 = clobbered by PROBE_1ST_DTSB */
27230Sstevel@tonic-gate	ba,pn	%xcc, sfmmu_tsb_miss_tt
27244528Spaulsan	  mov	-1, %g3
27250Sstevel@tonic-gate
27260Sstevel@tonic-gate	/*
27271772Sjl139090	 * User instruction miss w/ multiple TSBs (sun4v).
27280Sstevel@tonic-gate	 * The first probe covers 8K, 64K, and 512K page sizes,
27290Sstevel@tonic-gate	 * because 64K and 512K mappings are replicated off the 8K
27300Sstevel@tonic-gate	 * pointer.  Second probe covers 4M page size only.
27310Sstevel@tonic-gate	 *
27320Sstevel@tonic-gate	 * Just like sfmmu_udtlb_slowpath, except:
27330Sstevel@tonic-gate	 *   o Uses ASI_ITLB_IN
27340Sstevel@tonic-gate	 *   o checks for execute permission
27350Sstevel@tonic-gate	 *   o No ISM prediction.
27360Sstevel@tonic-gate	 *
27370Sstevel@tonic-gate	 * g1 = tsb8k pointer register
27380Sstevel@tonic-gate	 * g2 = tag access register
27390Sstevel@tonic-gate	 * g3 - g6 = scratch registers
27400Sstevel@tonic-gate	 * g7 = TSB tag to match
27410Sstevel@tonic-gate	 */
27420Sstevel@tonic-gate	.align	64
27430Sstevel@tonic-gate	ALTENTRY(sfmmu_uitlb_slowpath)
27440Sstevel@tonic-gate
27450Sstevel@tonic-gate	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
27460Sstevel@tonic-gate	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
27470Sstevel@tonic-gate	/* g4 - g5 = clobbered here */
27480Sstevel@tonic-gate
27490Sstevel@tonic-gate	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
27500Sstevel@tonic-gate	/* g1 = first TSB pointer, g3 = second TSB pointer */
27510Sstevel@tonic-gate	srlx	%g2, TAG_VALO_SHIFT, %g7
27520Sstevel@tonic-gate	PROBE_2ND_ITSB(%g3, %g7)
27530Sstevel@tonic-gate	/* NOT REACHED */
27541772Sjl139090
27550Sstevel@tonic-gate#else /* sun4v */
27561772Sjl139090
27571772Sjl139090	/*
27581772Sjl139090	 * User instruction miss w/ multiple TSBs (sun4u).
27591772Sjl139090	 * The first probe covers 8K, 64K, and 512K page sizes,
27601772Sjl139090	 * because 64K and 512K mappings are replicated off the 8K
27616127Ssm142603	 * pointer.  Probe of 1st TSB has already been done prior to entry
27626127Ssm142603	 * into this routine. For the UTSB_PHYS case we probe up to 3
27636127Ssm142603	 * into this routine. For the UTSB_PHYS case we probe up to 3
27646127Ssm142603	 * other valid TSBs in the following order:
27656127Ssm142603	 * 2) private TSB for 4M-256M pages
27666127Ssm142603	 * 3) shared TSB for 8K-512K pages
27676127Ssm142603	 *
27686127Ssm142603	 * For the non UTSB_PHYS case we probe the 2nd TSB here that backs
27696127Ssm142603	 * 4M-256M pages.
27701772Sjl139090	 *
27711772Sjl139090	 * Just like sfmmu_udtlb_slowpath, except:
27721772Sjl139090	 *   o Uses ASI_ITLB_IN
27731772Sjl139090	 *   o checks for execute permission
27741772Sjl139090	 *   o No ISM prediction.
27751772Sjl139090	 *
27761772Sjl139090	 * g1 = tsb8k pointer register
27771772Sjl139090	 * g2 = tag access register
27781772Sjl139090	 * g4 - g6 = scratch registers
27791772Sjl139090	 * g7 = TSB tag to match
27801772Sjl139090	 */
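	/*
	 * Sketch of the UTSB_PHYS probe cascade below (pseudo-C; a
	 * scratchpad value of -1 means that TSB is not programmed, and
	 * a successful probe stuffs the TLB and retries):
	 *
	 *	if ((tsb = utsbreg[SCRATCHPAD_UTSBREG4]) >= 0)
	 *		probe(tsb);		shared 4M-256M
	 *	if ((tsb = utsbreg[SCRATCHPAD_UTSBREG2]) >= 0)
	 *		probe(tsb);		private 4M-256M
	 *	if ((tsb = utsbreg[SCRATCHPAD_UTSBREG3]) >= 0)
	 *		probe(tsb);		shared 8K-512K
	 *	goto sfmmu_tsb_miss_tt;
	 */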
27811772Sjl139090	.align	64
27821772Sjl139090	ALTENTRY(sfmmu_uitlb_slowpath)
27831772Sjl139090
27841772Sjl139090#ifdef UTSB_PHYS
27856127Ssm142603
27866127Ssm142603	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
27876127Ssm142603        brlz,pt %g6, 1f
27886127Ssm142603          nop
27896127Ssm142603        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
27906127Ssm142603        PROBE_4TH_ITSB(%g6, %g7, uitlb_4m_scd_probefail)
27916127Ssm1426031:
27926127Ssm142603        GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
27936127Ssm142603        brlz,pt %g3, 2f
27946127Ssm142603          nop
27956127Ssm142603        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
27966127Ssm142603        PROBE_2ND_ITSB(%g3, %g7, uitlb_4m_probefail)
27976127Ssm1426032:
27986127Ssm142603        GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
27996127Ssm142603        brlz,pt %g6, sfmmu_tsb_miss_tt
28006127Ssm142603          nop
28016127Ssm142603        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
28026127Ssm142603        PROBE_3RD_ITSB(%g6, %g7, uitlb_8K_scd_probefail)
28036127Ssm142603        ba,pn   %xcc, sfmmu_tsb_miss_tt
28046127Ssm142603          nop
28056127Ssm142603
28061772Sjl139090#else /* UTSB_PHYS */
28070Sstevel@tonic-gate	mov	%g1, %g3	/* save tsb8k reg in %g3 */
28080Sstevel@tonic-gate	GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb)
28090Sstevel@tonic-gate	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
28100Sstevel@tonic-gate	mov	%g2, %g6	/* GET_2ND_TSBE_PTR clobbers tagacc */
28110Sstevel@tonic-gate	mov	%g3, %g7	/* copy tsb8k reg in %g7 */
28120Sstevel@tonic-gate	GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb)
28136127Ssm142603	/* g1 = first TSB pointer, g3 = second TSB pointer */
28146127Ssm142603        srlx    %g2, TAG_VALO_SHIFT, %g7
28156127Ssm142603        PROBE_2ND_ITSB(%g3, %g7, isynth)
28166127Ssm142603	ba,pn	%xcc, sfmmu_tsb_miss_tt
28176127Ssm142603	  nop
28186127Ssm142603
28191772Sjl139090#endif /* UTSB_PHYS */
28200Sstevel@tonic-gate#endif /* sun4v */
28210Sstevel@tonic-gate
28226127Ssm142603#if defined(sun4u) && defined(UTSB_PHYS)
28236127Ssm142603
28246127Ssm142603        /*
28256127Ssm142603	 * We come here for the ISM predict DTLB_MISS case or if
28266127Ssm142603	 * the probe in the first TSB failed.
28276127Ssm142603         */
28286127Ssm142603
28296127Ssm142603        .align 64
28306127Ssm142603        ALTENTRY(sfmmu_udtlb_slowpath_noismpred)
28316127Ssm142603
28326127Ssm142603	/*
28336127Ssm142603         * g1 = tsb8k pointer register
28346127Ssm142603         * g2 = tag access register
28356127Ssm142603         * g4 - %g6 = scratch registers
28366127Ssm142603         * g7 = TSB tag to match
28376127Ssm142603	 */
28386127Ssm142603
28396127Ssm142603	/*
28406127Ssm142603	 * ISM non-predict probe order
28416127Ssm142603         * probe 1ST_TSB (8K index)
28426127Ssm142603         * probe 2ND_TSB (4M index)
28436127Ssm142603         * probe 4TH_TSB (4M index)
28446127Ssm142603         * probe 3RD_TSB (8K index)
28456127Ssm142603	 *
28466127Ssm142603	 * We already probed first TSB in DTLB_MISS handler.
28476127Ssm142603	 */
28486127Ssm142603
28496127Ssm142603        /*
28506127Ssm142603         * Private 2ND TSB 4M-256M pages
28516127Ssm142603         */
28526127Ssm142603	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
28536127Ssm142603	brlz,pt %g3, 1f
28546127Ssm142603	  nop
28556127Ssm142603        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
28566127Ssm142603        PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
28576127Ssm142603
28580Sstevel@tonic-gate	/*
28596127Ssm142603	 * Shared Context 4TH TSB 4M-256M pages
28600Sstevel@tonic-gate	 */
28616127Ssm1426031:
28626127Ssm142603	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
28636127Ssm142603	brlz,pt %g6, 2f
28646127Ssm142603	  nop
28656127Ssm142603        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
28666127Ssm142603        PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail)
28676127Ssm142603
28686127Ssm142603        /*
28696127Ssm142603         * Shared Context 3RD TSB 8K-512K pages
28706127Ssm142603         */
28716127Ssm1426032:
28726127Ssm142603	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
28736127Ssm142603	brlz,pt %g6, sfmmu_tsb_miss_tt
28746127Ssm142603	  nop
28756127Ssm142603        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
28766127Ssm142603        PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail)
28776127Ssm142603	ba,pn	%xcc, sfmmu_tsb_miss_tt
28786127Ssm142603	  nop
28796127Ssm142603
28800Sstevel@tonic-gate	.align 64
28816127Ssm142603        ALTENTRY(sfmmu_udtlb_slowpath_ismpred)
28826127Ssm142603
28836127Ssm142603	/*
28846127Ssm142603         * g1 = tsb8k pointer register
28856127Ssm142603         * g2 = tag access register
28866127Ssm142603         * g4 - g6 = scratch registers
28876127Ssm142603         * g7 = TSB tag to match
28886127Ssm142603	 */
28896127Ssm142603
28906127Ssm142603	/*
28916127Ssm142603	 * ISM predict probe order
28926127Ssm142603	 * probe 4TH_TSB (4M index)
28936127Ssm142603	 * probe 2ND_TSB (4M index)
28946127Ssm142603	 * probe 1ST_TSB (8K index)
28956127Ssm142603	 * probe 3RD_TSB (8K index)
28960Sstevel@tonic-gate	 */
28970Sstevel@tonic-gate	/*
28986127Ssm142603	 * Shared Context 4TH TSB 4M-256M pages
28990Sstevel@tonic-gate	 */
29006127Ssm142603	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
29016127Ssm142603	brlz,pt %g6, 4f
29026127Ssm142603	  nop
29036127Ssm142603        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
29046127Ssm142603        PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail2)
29056127Ssm142603
29066127Ssm142603        /*
29076127Ssm142603         * Private 2ND TSB 4M-256M pages
29086127Ssm142603         */
29096127Ssm1426034:
29106127Ssm142603	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
29116127Ssm142603	brlz,pt %g3, 5f
29126127Ssm142603	  nop
29136127Ssm142603        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
29146127Ssm142603        PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail2)
29156127Ssm142603
29166127Ssm1426035:
29176127Ssm142603        PROBE_1ST_DTSB(%g1, %g7, udtlb_8k_first_probefail2)
29186127Ssm142603
29196127Ssm142603        /*
29206127Ssm142603         * Shared Context 3RD TSB 8K-512K pages
29216127Ssm142603         */
29226127Ssm142603	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
29236127Ssm142603	brlz,pt %g6, 6f
29246127Ssm142603	  nop
29256127Ssm142603        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
29266127Ssm142603        PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail2)
29276127Ssm1426036:
29286127Ssm142603	ba,pn	%xcc, sfmmu_tsb_miss_tt /* ISM Predict and ISM non-predict path */
29296127Ssm142603	  nop
29306127Ssm142603
29316127Ssm142603#else /* sun4u && UTSB_PHYS */
29326127Ssm142603
29336127Ssm142603	.align 64
29346127Ssm142603        ALTENTRY(sfmmu_udtlb_slowpath)
29356127Ssm142603
29360Sstevel@tonic-gate	srax	%g2, PREDISM_BASESHIFT, %g6	/* g6 > 0 : ISM predicted */
29370Sstevel@tonic-gate	brgz,pn %g6, udtlb_miss_probesecond	/* check for ISM */
29380Sstevel@tonic-gate	  mov	%g1, %g3
29390Sstevel@tonic-gate
29400Sstevel@tonic-gateudtlb_miss_probefirst:
29410Sstevel@tonic-gate	/*
29420Sstevel@tonic-gate	 * g1 = 8K TSB pointer register
29430Sstevel@tonic-gate	 * g2 = tag access register
29440Sstevel@tonic-gate	 * g3 = (potentially) second TSB entry ptr
29450Sstevel@tonic-gate	 * g6 = ism pred.
29460Sstevel@tonic-gate	 * g7 = vpg_4m
29470Sstevel@tonic-gate	 */
29480Sstevel@tonic-gate#ifdef sun4v
29490Sstevel@tonic-gate	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
29500Sstevel@tonic-gate	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
29510Sstevel@tonic-gate
29520Sstevel@tonic-gate	/*
29530Sstevel@tonic-gate	 * Here:
29540Sstevel@tonic-gate	 *   g1 = first TSB pointer
29550Sstevel@tonic-gate	 *   g2 = tag access reg
29560Sstevel@tonic-gate	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
29570Sstevel@tonic-gate	 */
29580Sstevel@tonic-gate	brgz,pn	%g6, sfmmu_tsb_miss_tt
29590Sstevel@tonic-gate	  nop
29600Sstevel@tonic-gate#else /* sun4v */
29610Sstevel@tonic-gate	mov	%g1, %g4
29620Sstevel@tonic-gate	GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb)
29630Sstevel@tonic-gate	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
29640Sstevel@tonic-gate
29650Sstevel@tonic-gate	/*
29660Sstevel@tonic-gate	 * Here:
29670Sstevel@tonic-gate	 *   g1 = first TSB pointer
29680Sstevel@tonic-gate	 *   g2 = tag access reg
29690Sstevel@tonic-gate	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
29700Sstevel@tonic-gate	 */
29710Sstevel@tonic-gate	brgz,pn	%g6, sfmmu_tsb_miss_tt
29720Sstevel@tonic-gate	  nop
29730Sstevel@tonic-gate	ldxa	[%g0]ASI_DMMU_TSB_8K, %g3
29740Sstevel@tonic-gate	/* fall through in 8K->4M probe order */
29750Sstevel@tonic-gate#endif /* sun4v */
29760Sstevel@tonic-gate
29770Sstevel@tonic-gateudtlb_miss_probesecond:
29780Sstevel@tonic-gate	/*
29790Sstevel@tonic-gate	 * Look in the second TSB for the TTE
29800Sstevel@tonic-gate	 * g1 = First TSB entry ptr if !ISM pred, TSB8K ptr reg if ISM pred.
29810Sstevel@tonic-gate	 * g2 = tag access reg
29820Sstevel@tonic-gate	 * g3 = 8K TSB pointer register
29830Sstevel@tonic-gate	 * g6 = ism pred.
29840Sstevel@tonic-gate	 * g7 = vpg_4m
29850Sstevel@tonic-gate	 */
29860Sstevel@tonic-gate#ifdef sun4v
29870Sstevel@tonic-gate	/* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */
29880Sstevel@tonic-gate	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
29890Sstevel@tonic-gate	/* %g2 is okay, no need to reload, %g3 = second tsbe ptr */
29901772Sjl139090#else /* sun4v */
29910Sstevel@tonic-gate	mov	%g3, %g7
29920Sstevel@tonic-gate	GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb)
29930Sstevel@tonic-gate	/* %g2 clobbered, %g3 =second tsbe ptr */
29940Sstevel@tonic-gate	mov	MMU_TAG_ACCESS, %g2
29950Sstevel@tonic-gate	ldxa	[%g2]ASI_DMMU, %g2
29961772Sjl139090#endif /* sun4v */
29970Sstevel@tonic-gate
29980Sstevel@tonic-gate	srlx	%g2, TAG_VALO_SHIFT, %g7
29990Sstevel@tonic-gate	PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
30000Sstevel@tonic-gate	/* g4 - g5 = clobbered here; %g7 still vpg_4m at this point */
30010Sstevel@tonic-gate	brgz,pn	%g6, udtlb_miss_probefirst
30020Sstevel@tonic-gate	  nop
30030Sstevel@tonic-gate
30040Sstevel@tonic-gate	/* fall through to sfmmu_tsb_miss_tt */
30056127Ssm142603#endif /* sun4u && UTSB_PHYS */
30066127Ssm142603
30070Sstevel@tonic-gate
30080Sstevel@tonic-gate	ALTENTRY(sfmmu_tsb_miss_tt)
30090Sstevel@tonic-gate	TT_TRACE(trace_tsbmiss)
30100Sstevel@tonic-gate	/*
30110Sstevel@tonic-gate	 * We get here if there is a TSB miss OR a write protect trap.
30120Sstevel@tonic-gate	 *
30130Sstevel@tonic-gate	 * g1 = First TSB entry pointer
30140Sstevel@tonic-gate	 * g2 = tag access register
30154528Spaulsan	 * g3 = 4M TSB entry pointer; -1 if no 2nd TSB
30160Sstevel@tonic-gate	 * g4 - g7 = scratch registers
30170Sstevel@tonic-gate	 */
30180Sstevel@tonic-gate
30190Sstevel@tonic-gate	ALTENTRY(sfmmu_tsb_miss)
30200Sstevel@tonic-gate
30210Sstevel@tonic-gate	/*
30220Sstevel@tonic-gate	 * If trapstat is running, we need to shift the %tpc and %tnpc to
30230Sstevel@tonic-gate	 * point to trapstat's TSB miss return code (note that trapstat
30240Sstevel@tonic-gate	 * itself will patch the correct offset to add).
30250Sstevel@tonic-gate	 */
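	/*
	 * Sketch of the adjustment (pseudo-C; the offset itself is hot
	 * patched by trapstat):
	 *
	 *	if (tl > 1 && tpc < KERNELBASE) {
	 *		tpc += patched_offset;
	 *		tnpc = tpc + 4;
	 *	}
	 */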
30260Sstevel@tonic-gate	rdpr	%tl, %g7
30270Sstevel@tonic-gate	cmp	%g7, 1
30280Sstevel@tonic-gate	ble,pt	%xcc, 0f
30290Sstevel@tonic-gate	  sethi	%hi(KERNELBASE), %g6
30300Sstevel@tonic-gate	rdpr	%tpc, %g7
30310Sstevel@tonic-gate	or	%g6, %lo(KERNELBASE), %g6
30320Sstevel@tonic-gate	cmp	%g7, %g6
30330Sstevel@tonic-gate	bgeu,pt	%xcc, 0f
30340Sstevel@tonic-gate	/* delay slot safe */
30350Sstevel@tonic-gate
30360Sstevel@tonic-gate	ALTENTRY(tsbmiss_trapstat_patch_point)
30370Sstevel@tonic-gate	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
30380Sstevel@tonic-gate	wrpr	%g7, %tpc
30390Sstevel@tonic-gate	add	%g7, 4, %g7
30400Sstevel@tonic-gate	wrpr	%g7, %tnpc
30410Sstevel@tonic-gate0:
30420Sstevel@tonic-gate	CPU_TSBMISS_AREA(%g6, %g7)
30436127Ssm142603	stn	%g1, [%g6 + TSBMISS_TSBPTR]	/* save 1ST tsb pointer */
30446127Ssm142603	stn	%g3, [%g6 + TSBMISS_TSBPTR4M]	/* save 2ND tsb pointer */
30450Sstevel@tonic-gate
30460Sstevel@tonic-gate	sllx	%g2, TAGACC_CTX_LSHIFT, %g3
30470Sstevel@tonic-gate	brz,a,pn %g3, 1f			/* skip ahead if kernel */
30480Sstevel@tonic-gate	  ldn	[%g6 + TSBMISS_KHATID], %g7
30490Sstevel@tonic-gate	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctxnum */
30500Sstevel@tonic-gate	ldn	[%g6 + TSBMISS_UHATID], %g7     /* g7 = hatid */
30510Sstevel@tonic-gate
30520Sstevel@tonic-gate	HAT_PERCPU_STAT32(%g6, TSBMISS_UTSBMISS, %g5)
30530Sstevel@tonic-gate
30540Sstevel@tonic-gate	cmp	%g3, INVALID_CONTEXT
30550Sstevel@tonic-gate	be,pn	%icc, tsb_tl0_noctxt		/* no ctx miss exception */
30560Sstevel@tonic-gate	  stn	%g7, [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)]
30570Sstevel@tonic-gate
30586127Ssm142603#if defined(sun4v) || defined(UTSB_PHYS)
30594528Spaulsan        ldub    [%g6 + TSBMISS_URTTEFLAGS], %g7	/* clear ctx1 flag set from */
30604528Spaulsan        andn    %g7, HAT_CHKCTX1_FLAG, %g7	/* the previous tsb miss    */
30614528Spaulsan        stub    %g7, [%g6 + TSBMISS_URTTEFLAGS]
30626127Ssm142603#endif /* sun4v || UTSB_PHYS */
30634528Spaulsan
30640Sstevel@tonic-gate	ISM_CHECK(%g2, %g6, %g3, %g4, %g5, %g7, %g1, tsb_l1, tsb_ism)
30650Sstevel@tonic-gate	/*
30660Sstevel@tonic-gate	 * The miss wasn't in an ISM segment.
30670Sstevel@tonic-gate	 *
30680Sstevel@tonic-gate	 * %g1 %g3, %g4, %g5, %g7 all clobbered
30693687Sjb145095	 * %g2 = (pseudo) tag access
30700Sstevel@tonic-gate	 */
30710Sstevel@tonic-gate
30720Sstevel@tonic-gate	ba,pt	%icc, 2f
30730Sstevel@tonic-gate	  ldn	[%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)], %g7
30740Sstevel@tonic-gate
30750Sstevel@tonic-gate1:
30760Sstevel@tonic-gate	HAT_PERCPU_STAT32(%g6, TSBMISS_KTSBMISS, %g5)
30770Sstevel@tonic-gate	/*
30780Sstevel@tonic-gate	 * 8K and 64K hash.
30790Sstevel@tonic-gate	 */
30800Sstevel@tonic-gate2:
30810Sstevel@tonic-gate
30828187SPaul.Sandhu@Sun.COM	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
30838187SPaul.Sandhu@Sun.COM		MMU_PAGESHIFT64K, TTE64K, %g5, tsb_l8K, tsb_checktte,
30840Sstevel@tonic-gate		sfmmu_suspend_tl, tsb_512K)
30850Sstevel@tonic-gate	/* NOT REACHED */
30860Sstevel@tonic-gate
30870Sstevel@tonic-gatetsb_512K:
30884528Spaulsan	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
30890Sstevel@tonic-gate	brz,pn	%g5, 3f
30904528Spaulsan	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
30910Sstevel@tonic-gate	and	%g4, HAT_512K_FLAG, %g5
30920Sstevel@tonic-gate
30930Sstevel@tonic-gate	/*
30940Sstevel@tonic-gate	 * Note that there is a small window here where we may have
30950Sstevel@tonic-gate	 * a 512k page in the hash list but have not set the HAT_512K_FLAG
30960Sstevel@tonic-gate	 * flag yet, so we will skip searching the 512k hash list.
30970Sstevel@tonic-gate	 * In this case we will end up in pagefault which will find
30980Sstevel@tonic-gate	 * the mapping and return.  So, in this instance we will end up
30990Sstevel@tonic-gate	 * spending a bit more time resolving this TSB miss, but it can
31000Sstevel@tonic-gate	 * only happen once per process and even then, the chances of that
31010Sstevel@tonic-gate	 * are very small, so it's not worth the extra overhead it would
31020Sstevel@tonic-gate	 * take to close this window.
31030Sstevel@tonic-gate	 */
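	/*
	 * In outline (sketch):
	 *
	 *	if (ctx != 0 && !(tteflags & HAT_512K_FLAG))
	 *		goto tsb_4M;	no 512K mappings, skip this hash
	 */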
31040Sstevel@tonic-gate	brz,pn	%g5, tsb_4M
31050Sstevel@tonic-gate	  nop
31060Sstevel@tonic-gate3:
31070Sstevel@tonic-gate	/*
31080Sstevel@tonic-gate	 * 512K hash
31090Sstevel@tonic-gate	 */
31100Sstevel@tonic-gate
31118187SPaul.Sandhu@Sun.COM	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
31128187SPaul.Sandhu@Sun.COM		MMU_PAGESHIFT512K, TTE512K, %g5, tsb_l512K, tsb_checktte,
31130Sstevel@tonic-gate		sfmmu_suspend_tl, tsb_4M)
31140Sstevel@tonic-gate	/* NOT REACHED */
31150Sstevel@tonic-gate
31160Sstevel@tonic-gatetsb_4M:
31174528Spaulsan	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
31180Sstevel@tonic-gate	brz,pn	%g5, 4f
31194528Spaulsan	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
31200Sstevel@tonic-gate	and	%g4, HAT_4M_FLAG, %g5
31210Sstevel@tonic-gate	brz,pn	%g5, tsb_32M
31220Sstevel@tonic-gate	  nop
31230Sstevel@tonic-gate4:
31240Sstevel@tonic-gate	/*
31250Sstevel@tonic-gate	 * 4M hash
31260Sstevel@tonic-gate	 */
31270Sstevel@tonic-gate
31288187SPaul.Sandhu@Sun.COM	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
31298187SPaul.Sandhu@Sun.COM		MMU_PAGESHIFT4M, TTE4M, %g5, tsb_l4M, tsb_checktte,
31300Sstevel@tonic-gate		sfmmu_suspend_tl, tsb_32M)
31310Sstevel@tonic-gate	/* NOT REACHED */
31320Sstevel@tonic-gate
31330Sstevel@tonic-gatetsb_32M:
31344528Spaulsan	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
31356127Ssm142603#ifdef	sun4v
31361772Sjl139090        brz,pn	%g5, 6f
31376127Ssm142603#else
31386127Ssm142603	brz,pn  %g5, tsb_pagefault
31396127Ssm142603#endif
31404528Spaulsan	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
31410Sstevel@tonic-gate	and	%g4, HAT_32M_FLAG, %g5
31420Sstevel@tonic-gate	brz,pn	%g5, tsb_256M
31430Sstevel@tonic-gate	  nop
31440Sstevel@tonic-gate5:
31450Sstevel@tonic-gate	/*
31460Sstevel@tonic-gate	 * 32M hash
31470Sstevel@tonic-gate	 */
31480Sstevel@tonic-gate
31498187SPaul.Sandhu@Sun.COM	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
31508187SPaul.Sandhu@Sun.COM		MMU_PAGESHIFT32M, TTE32M, %g5, tsb_l32M, tsb_checktte,
31510Sstevel@tonic-gate		sfmmu_suspend_tl, tsb_256M)
31520Sstevel@tonic-gate	/* NOT REACHED */
31536127Ssm142603
31546127Ssm142603#if defined(sun4u) && !defined(UTSB_PHYS)
31554528Spaulsan#define tsb_shme        tsb_pagefault
31564528Spaulsan#endif
31570Sstevel@tonic-gatetsb_256M:
31584528Spaulsan	ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
31590Sstevel@tonic-gate	and	%g4, HAT_256M_FLAG, %g5
31604528Spaulsan	brz,pn	%g5, tsb_shme
31610Sstevel@tonic-gate	  nop
31620Sstevel@tonic-gate6:
31630Sstevel@tonic-gate	/*
31640Sstevel@tonic-gate	 * 256M hash
31650Sstevel@tonic-gate	 */
31660Sstevel@tonic-gate
31678187SPaul.Sandhu@Sun.COM	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
31688187SPaul.Sandhu@Sun.COM	    MMU_PAGESHIFT256M, TTE256M, %g5, tsb_l256M, tsb_checktte,
31694528Spaulsan	    sfmmu_suspend_tl, tsb_shme)
31700Sstevel@tonic-gate	/* NOT REACHED */
31710Sstevel@tonic-gate
31720Sstevel@tonic-gatetsb_checktte:
31730Sstevel@tonic-gate	/*
31744528Spaulsan	 * g1 = hblk_misc
31754528Spaulsan	 * g2 = tagacc
31764528Spaulsan	 * g3 = tte
31774528Spaulsan	 * g4 = tte pa
31784528Spaulsan	 * g6 = tsbmiss area
31794528Spaulsan	 * g7 = hatid
31804528Spaulsan	 */
31814528Spaulsan	brlz,a,pt %g3, tsb_validtte
31824528Spaulsan	  rdpr	%tt, %g7
31834528Spaulsan
31846127Ssm142603#if defined(sun4u) && !defined(UTSB_PHYS)
31854528Spaulsan#undef tsb_shme
31866127Ssm142603	ba      tsb_pagefault
31876127Ssm142603	  nop
31886127Ssm142603#else /* sun4u && !UTSB_PHYS */
31894528Spaulsan
31904528Spaulsantsb_shme:
31914528Spaulsan	/*
31924528Spaulsan	 * g2 = tagacc
31934528Spaulsan	 * g6 = tsbmiss area
31944528Spaulsan	 */
31954528Spaulsan	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
31964528Spaulsan	brz,pn	%g5, tsb_pagefault
31974528Spaulsan	  nop
31984528Spaulsan	ldx	[%g6 + TSBMISS_SHARED_UHATID], %g7	/* g7 = srdp */
31994528Spaulsan	brz,pn	%g7, tsb_pagefault
32004528Spaulsan	  nop
32014528Spaulsan
32028187SPaul.Sandhu@Sun.COM	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
32038187SPaul.Sandhu@Sun.COM		MMU_PAGESHIFT64K, TTE64K, %g5, tsb_shme_l8K, tsb_shme_checktte,
32044528Spaulsan		sfmmu_suspend_tl, tsb_shme_512K)
32054528Spaulsan	/* NOT REACHED */
32064528Spaulsan
32074528Spaulsantsb_shme_512K:
32084528Spaulsan	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
32094528Spaulsan	and	%g4, HAT_512K_FLAG, %g5
32104528Spaulsan	brz,pn	%g5, tsb_shme_4M
32114528Spaulsan	  nop
32124528Spaulsan
32134528Spaulsan	/*
32144528Spaulsan	 * 512K hash
32154528Spaulsan	 */
32164528Spaulsan
32178187SPaul.Sandhu@Sun.COM	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
32188187SPaul.Sandhu@Sun.COM		MMU_PAGESHIFT512K, TTE512K, %g5, tsb_shme_l512K, tsb_shme_checktte,
32194528Spaulsan		sfmmu_suspend_tl, tsb_shme_4M)
32204528Spaulsan	/* NOT REACHED */
32214528Spaulsan
32224528Spaulsantsb_shme_4M:
32234528Spaulsan	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
32244528Spaulsan	and	%g4, HAT_4M_FLAG, %g5
32254528Spaulsan	brz,pn	%g5, tsb_shme_32M
32264528Spaulsan	  nop
32274528Spaulsan4:
32284528Spaulsan	/*
32294528Spaulsan	 * 4M hash
32304528Spaulsan	 */
32318187SPaul.Sandhu@Sun.COM	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
32328187SPaul.Sandhu@Sun.COM		MMU_PAGESHIFT4M, TTE4M, %g5, tsb_shme_l4M, tsb_shme_checktte,
32334528Spaulsan		sfmmu_suspend_tl, tsb_shme_32M)
32344528Spaulsan	/* NOT REACHED */
32354528Spaulsan
32364528Spaulsantsb_shme_32M:
32374528Spaulsan	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
32384528Spaulsan	and	%g4, HAT_32M_FLAG, %g5
32394528Spaulsan	brz,pn	%g5, tsb_shme_256M
32404528Spaulsan	  nop
32414528Spaulsan
32424528Spaulsan	/*
32434528Spaulsan	 * 32M hash
32444528Spaulsan	 */
32454528Spaulsan
32468187SPaul.Sandhu@Sun.COM	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
32478187SPaul.Sandhu@Sun.COM		MMU_PAGESHIFT32M, TTE32M, %g5, tsb_shme_l32M, tsb_shme_checktte,
32484528Spaulsan		sfmmu_suspend_tl, tsb_shme_256M)
32494528Spaulsan	/* NOT REACHED */
32504528Spaulsan
32514528Spaulsantsb_shme_256M:
32524528Spaulsan	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
32534528Spaulsan	and	%g4, HAT_256M_FLAG, %g5
32544528Spaulsan	brz,pn	%g5, tsb_pagefault
32554528Spaulsan	  nop
32564528Spaulsan
32574528Spaulsan	/*
32584528Spaulsan	 * 256M hash
32594528Spaulsan	 */
32604528Spaulsan
32618187SPaul.Sandhu@Sun.COM	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
32628187SPaul.Sandhu@Sun.COM	    MMU_PAGESHIFT256M, TTE256M, %g5, tsb_shme_l256M, tsb_shme_checktte,
32634528Spaulsan	    sfmmu_suspend_tl, tsb_pagefault)
32644528Spaulsan	/* NOT REACHED */
32654528Spaulsan
32664528Spaulsantsb_shme_checktte:
32674528Spaulsan
32684528Spaulsan	brgez,pn %g3, tsb_pagefault
32694528Spaulsan	  rdpr	%tt, %g7
32704528Spaulsan	/*
32714528Spaulsan	 * g1 = ctx1 flag
32724528Spaulsan	 * g3 = tte
32734528Spaulsan	 * g4 = tte pa
32744528Spaulsan	 * g6 = tsbmiss area
32754528Spaulsan	 * g7 = tt
32764528Spaulsan	 */
32774528Spaulsan
32784528Spaulsan	brz,pt  %g1, tsb_validtte
32794528Spaulsan	  nop
32804528Spaulsan	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g1
32814528Spaulsan	or	%g1, HAT_CHKCTX1_FLAG, %g1
32824528Spaulsan	stub    %g1, [%g6 + TSBMISS_URTTEFLAGS]
32834528Spaulsan
32846127Ssm142603	SAVE_CTX1(%g7, %g2, %g1, tsb_shmel)
32856127Ssm142603#endif /* sun4u && !UTSB_PHYS */
32864528Spaulsan
32874528Spaulsantsb_validtte:
32884528Spaulsan	/*
32890Sstevel@tonic-gate	 * g3 = tte
32900Sstevel@tonic-gate	 * g4 = tte pa
32910Sstevel@tonic-gate	 * g6 = tsbmiss area
32924528Spaulsan	 * g7 = tt
32930Sstevel@tonic-gate	 */
32944528Spaulsan
32950Sstevel@tonic-gate	/*
32960Sstevel@tonic-gate	 * Set ref/mod bits if this is a prot trap.  Usually, it isn't.
32970Sstevel@tonic-gate	 */
32980Sstevel@tonic-gate	cmp	%g7, FAST_PROT_TT
32990Sstevel@tonic-gate	bne,pt	%icc, 4f
33000Sstevel@tonic-gate	  nop
33010Sstevel@tonic-gate
33028187SPaul.Sandhu@Sun.COM	TTE_SET_REFMOD_ML(%g3, %g4, %g6, %g7, %g5, tsb_lset_refmod,
33030Sstevel@tonic-gate	    tsb_protfault)
33040Sstevel@tonic-gate
33050Sstevel@tonic-gate	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
33064528Spaulsan#ifdef sun4v
33074528Spaulsan	MMU_FAULT_STATUS_AREA(%g7)
33086127Ssm142603	ldx	[%g7 + MMFSA_D_ADDR], %g5	/* load fault addr for later */
33096127Ssm142603#else /* sun4v */
33106127Ssm142603	mov     MMU_TAG_ACCESS, %g5
33116127Ssm142603	ldxa    [%g5]ASI_DMMU, %g5
33126127Ssm142603#endif /* sun4v */
33130Sstevel@tonic-gate	ba,pt	%xcc, tsb_update_tl1
33146127Ssm142603	  nop
33150Sstevel@tonic-gate4:
331610271SJason.Beloro@Sun.COM	/*
331810271SJason.Beloro@Sun.COM	 * If this is an ITLB miss, check the exec bit.
331910271SJason.Beloro@Sun.COM	 * If it is not set, treat the TTE as invalid.
33190Sstevel@tonic-gate	 */
33200Sstevel@tonic-gate	cmp     %g7, T_INSTR_MMU_MISS
33210Sstevel@tonic-gate	be,pn	%icc, 5f
33220Sstevel@tonic-gate	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
33230Sstevel@tonic-gate	cmp     %g7, FAST_IMMU_MISS_TT
33240Sstevel@tonic-gate	bne,pt %icc, 3f
33250Sstevel@tonic-gate	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
33260Sstevel@tonic-gate5:
33270Sstevel@tonic-gate	bz,pn %icc, tsb_protfault
33280Sstevel@tonic-gate	  nop
332910271SJason.Beloro@Sun.COM
33300Sstevel@tonic-gate3:
33310Sstevel@tonic-gate	/*
33320Sstevel@tonic-gate	 * Set reference bit if not already set
33330Sstevel@tonic-gate	 */
33348187SPaul.Sandhu@Sun.COM	TTE_SET_REF_ML(%g3, %g4, %g6, %g7, %g5, tsb_lset_ref)
33350Sstevel@tonic-gate
33360Sstevel@tonic-gate	/*
33370Sstevel@tonic-gate	 * Now, load into TSB/TLB.  At this point:
33380Sstevel@tonic-gate	 * g3 = tte
33390Sstevel@tonic-gate	 * g4 = patte
33400Sstevel@tonic-gate	 * g6 = tsbmiss area
33410Sstevel@tonic-gate	 */
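	/*
	 * The tag target assembled below is, in effect (sketch):
	 *
	 *	ttarget = (ctx << TTARGET_CTX_SHIFT) | (va >> TTARGET_VA_SHIFT);
	 */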
33426127Ssm142603	rdpr	%tt, %g7
33430Sstevel@tonic-gate#ifdef sun4v
33440Sstevel@tonic-gate	MMU_FAULT_STATUS_AREA(%g2)
33456127Ssm142603	cmp	%g7, T_INSTR_MMU_MISS
33460Sstevel@tonic-gate	be,a,pt	%icc, 9f
33470Sstevel@tonic-gate	  nop
33486127Ssm142603	cmp	%g7, FAST_IMMU_MISS_TT
33490Sstevel@tonic-gate	be,a,pt	%icc, 9f
33500Sstevel@tonic-gate	  nop
33510Sstevel@tonic-gate	add	%g2, MMFSA_D_, %g2
33520Sstevel@tonic-gate9:
33530Sstevel@tonic-gate	ldx	[%g2 + MMFSA_CTX_], %g7
33540Sstevel@tonic-gate	sllx	%g7, TTARGET_CTX_SHIFT, %g7
33550Sstevel@tonic-gate	ldx	[%g2 + MMFSA_ADDR_], %g2
33566127Ssm142603	mov	%g2, %g5		! load the fault addr for later use
33570Sstevel@tonic-gate	srlx	%g2, TTARGET_VA_SHIFT, %g2
33580Sstevel@tonic-gate	or	%g2, %g7, %g2
33596127Ssm142603#else /* sun4v */
33606127Ssm142603	mov     MMU_TAG_ACCESS, %g5
33616127Ssm142603	cmp     %g7, FAST_IMMU_MISS_TT
33626127Ssm142603	be,a,pt %icc, 9f
33636127Ssm142603	   ldxa  [%g0]ASI_IMMU, %g2
33646127Ssm142603	ldxa    [%g0]ASI_DMMU, %g2
33656127Ssm142603	ba,pt   %icc, tsb_update_tl1
33666127Ssm142603	   ldxa  [%g5]ASI_DMMU, %g5
33676127Ssm1426039:
33686127Ssm142603	ldxa    [%g5]ASI_IMMU, %g5
33696127Ssm142603#endif /* sun4v */
33706127Ssm142603
33710Sstevel@tonic-gatetsb_update_tl1:
33720Sstevel@tonic-gate	srlx	%g2, TTARGET_CTX_SHIFT, %g7
33730Sstevel@tonic-gate	brz,pn	%g7, tsb_kernel
33740Sstevel@tonic-gate#ifdef sun4v
33750Sstevel@tonic-gate	  and	%g3, TTE_SZ_BITS, %g7	! assumes TTE_SZ_SHFT is 0
33766127Ssm142603#else  /* sun4v */
33770Sstevel@tonic-gate	  srlx	%g3, TTE_SZ_SHFT, %g7
33786127Ssm142603#endif /* sun4v */
33790Sstevel@tonic-gate
33800Sstevel@tonic-gatetsb_user:
33810Sstevel@tonic-gate#ifdef sun4v
33820Sstevel@tonic-gate	cmp	%g7, TTE4M
33830Sstevel@tonic-gate	bge,pn	%icc, tsb_user4m
33840Sstevel@tonic-gate	  nop
33851903Sjimand#else /* sun4v */
33860Sstevel@tonic-gate	cmp	%g7, TTESZ_VALID | TTE4M
33870Sstevel@tonic-gate	be,pn	%icc, tsb_user4m
33880Sstevel@tonic-gate	  srlx	%g3, TTE_SZ2_SHFT, %g7
33890Sstevel@tonic-gate	andcc	%g7, TTE_SZ2_BITS, %g7		! check 32/256MB
33901903Sjimand#ifdef ITLB_32M_256M_SUPPORT
33911903Sjimand	bnz,pn	%icc, tsb_user4m
33921903Sjimand	  nop
33931903Sjimand#else /* ITLB_32M_256M_SUPPORT */
3394490Ssusans	bnz,a,pn %icc, tsb_user_pn_synth
33956127Ssm142603	 nop
33961903Sjimand#endif /* ITLB_32M_256M_SUPPORT */
33971903Sjimand#endif /* sun4v */
33980Sstevel@tonic-gate
33990Sstevel@tonic-gatetsb_user8k:
34006127Ssm142603#if defined(sun4v) || defined(UTSB_PHYS)
34014528Spaulsan	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
34024528Spaulsan	and	%g7, HAT_CHKCTX1_FLAG, %g1
34034528Spaulsan	brz,a,pn %g1, 1f
34046127Ssm142603	  ldn	[%g6 + TSBMISS_TSBPTR], %g1		! g1 = 1ST TSB ptr
34054528Spaulsan	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR, %g1)
34066127Ssm142603	brlz,a,pn %g1, ptl1_panic			! if no shared 3RD tsb
34074528Spaulsan	  mov PTL1_NO_SCDTSB8K, %g1			! panic
34086127Ssm142603        GET_3RD_TSBE_PTR(%g5, %g1, %g6, %g7)
34094528Spaulsan1:
34106127Ssm142603#else /* defined(sun4v) || defined(UTSB_PHYS) */
34116127Ssm142603	ldn   [%g6 + TSBMISS_TSBPTR], %g1             ! g1 = 1ST TSB ptr
34126127Ssm142603#endif /* defined(sun4v) || defined(UTSB_PHYS) */
34130Sstevel@tonic-gate
34141772Sjl139090#ifndef UTSB_PHYS
34151772Sjl139090	mov	ASI_N, %g7	! user TSBs accessed by VA
34160Sstevel@tonic-gate	mov	%g7, %asi
34176127Ssm142603#endif /* !UTSB_PHYS */
34184528Spaulsan
34198187SPaul.Sandhu@Sun.COM	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l3)
34200Sstevel@tonic-gate
34216127Ssm142603	rdpr    %tt, %g5
34220Sstevel@tonic-gate#ifdef sun4v
34230Sstevel@tonic-gate	cmp	%g5, T_INSTR_MMU_MISS
34240Sstevel@tonic-gate	be,a,pn	%xcc, 9f
34250Sstevel@tonic-gate	  mov	%g3, %g5
34260Sstevel@tonic-gate#endif /* sun4v */
34270Sstevel@tonic-gate	cmp	%g5, FAST_IMMU_MISS_TT
34280Sstevel@tonic-gate	be,pn	%xcc, 9f
34290Sstevel@tonic-gate	  mov	%g3, %g5
34300Sstevel@tonic-gate
34310Sstevel@tonic-gate	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
34320Sstevel@tonic-gate	! trapstat wants TTE in %g5
34330Sstevel@tonic-gate	retry
34340Sstevel@tonic-gate9:
34350Sstevel@tonic-gate	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
34360Sstevel@tonic-gate	! trapstat wants TTE in %g5
34370Sstevel@tonic-gate	retry
34380Sstevel@tonic-gate
34390Sstevel@tonic-gatetsb_user4m:
34406127Ssm142603#if defined(sun4v) || defined(UTSB_PHYS)
34414528Spaulsan	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
34424528Spaulsan	and	%g7, HAT_CHKCTX1_FLAG, %g1
34434528Spaulsan	brz,a,pn %g1, 4f
34446127Ssm142603	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		! g1 = 2ND TSB ptr
34456127Ssm142603	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR4M, %g1)! g1 = 4TH TSB ptr
34466127Ssm142603	brlz,a,pn %g1, 5f				! if no shared 4TH TSB
34474528Spaulsan	  nop
34486127Ssm142603        GET_4TH_TSBE_PTR(%g5, %g1, %g6, %g7)
34496332Ssm142603
34506127Ssm142603#else /* defined(sun4v) || defined(UTSB_PHYS) */
34516127Ssm142603	ldn   [%g6 + TSBMISS_TSBPTR4M], %g1             ! g1 = 2ND TSB ptr
34526127Ssm142603#endif /* defined(sun4v) || defined(UTSB_PHYS) */
34536332Ssm1426034:
34544528Spaulsan	brlz,pn %g1, 5f	/* Check to see if we have 2nd TSB programmed */
34550Sstevel@tonic-gate	  nop
34560Sstevel@tonic-gate
34571772Sjl139090#ifndef UTSB_PHYS
34581772Sjl139090	mov	ASI_N, %g7	! user TSBs accessed by VA
34591772Sjl139090	mov	%g7, %asi
34601772Sjl139090#endif /* UTSB_PHYS */
34610Sstevel@tonic-gate
34628187SPaul.Sandhu@Sun.COM        TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l4)
34630Sstevel@tonic-gate
34640Sstevel@tonic-gate5:
34656127Ssm142603	rdpr    %tt, %g5
34660Sstevel@tonic-gate#ifdef sun4v
3467490Ssusans        cmp     %g5, T_INSTR_MMU_MISS
3468490Ssusans        be,a,pn %xcc, 9f
3469490Ssusans          mov   %g3, %g5
34700Sstevel@tonic-gate#endif /* sun4v */
3471490Ssusans        cmp     %g5, FAST_IMMU_MISS_TT
3472490Ssusans        be,pn   %xcc, 9f
3473490Ssusans          mov     %g3, %g5
3474490Ssusans
3475490Ssusans        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3476490Ssusans        ! trapstat wants TTE in %g5
3477490Ssusans        retry
34780Sstevel@tonic-gate9:
3479490Ssusans        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3480490Ssusans        ! trapstat wants TTE in %g5
3481490Ssusans        retry
3482490Ssusans
34831903Sjimand#if !defined(sun4v) && !defined(ITLB_32M_256M_SUPPORT)
3484490Ssusans	/*
3485490Ssusans	 * Panther ITLB synthesis.
3486490Ssusans	 * The Panther 32M and 256M ITLB code simulates these two large page
3487490Ssusans	 * sizes with 4M pages, to provide support for programs, for example
3488490Ssusans	 * Java, that may copy instructions into a 32M or 256M data page and
3489490Ssusans	 * then execute them. The code below generates the 4M pfn bits and
3490490Ssusans	 * saves them in the modified 32M/256M ttes in the TSB. If the tte is
3491490Ssusans	 * stored in the DTLB to map a 32M/256M page, the 4M pfn offset bits
3492490Ssusans	 * are ignored by the hardware.
3493490Ssusans	 *
3494490Ssusans	 * Now, load into TSB/TLB.  At this point:
3495490Ssusans	 * g2 = tagtarget
3496490Ssusans	 * g3 = tte
3497490Ssusans	 * g4 = patte
3498490Ssusans	 * g5 = tt
3499490Ssusans	 * g6 = tsbmiss area
3500490Ssusans	 */
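	/*
	 * In effect (sketch; the exact field placement is encoded in
	 * GET_4M_PFN_OFF, and pg_shift is 25 for 32M or 28 for 256M):
	 *
	 *	subpage = (va & ((1 << pg_shift) - 1)) >> MMU_PAGESHIFT4M;
	 *	pfn |= subpage << (MMU_PAGESHIFT4M - MMU_PAGESHIFT);
	 */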
3501490Ssusanstsb_user_pn_synth:
35026127Ssm142603	rdpr %tt, %g5
35036127Ssm142603	cmp    %g5, FAST_IMMU_MISS_TT
3504490Ssusans	be,pt	%xcc, tsb_user_itlb_synth	/* ITLB miss */
3505490Ssusans	  andcc %g3, TTE_EXECPRM_INT, %g0	/* is execprm bit set */
3506490Ssusans	bz,pn %icc, 4b				/* if not, been here before */
3507490Ssusans	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	/* g1 = tsbp */
35084528Spaulsan	brlz,a,pn %g1, 5f			/* no 2nd tsb */
3509490Ssusans	  mov	%g3, %g5
3510490Ssusans
3511490Ssusans	mov	MMU_TAG_ACCESS, %g7
3512490Ssusans	ldxa	[%g7]ASI_DMMU, %g6		/* get tag access va */
3513490Ssusans	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 1)	/* make 4M pfn offset */
3514490Ssusans
3515490Ssusans	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3516490Ssusans	mov	%g7, %asi
35178187SPaul.Sandhu@Sun.COM	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, locked_tsb_l5) /* update TSB */
3518490Ssusans5:
3519490Ssusans        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3520490Ssusans        retry
3521490Ssusans
3522490Ssusanstsb_user_itlb_synth:
35236127Ssm142603	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 =  2ND TSB */
3524490Ssusans
3525490Ssusans	mov	MMU_TAG_ACCESS, %g7
3526490Ssusans	ldxa	[%g7]ASI_IMMU, %g6		/* get tag access va */
3527490Ssusans	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 2)	/* make 4M pfn offset */
35284528Spaulsan	brlz,a,pn %g1, 7f	/* Check to see if we have 2nd TSB programmed */
3529490Ssusans	  or	%g5, %g3, %g5			/* add 4M bits to TTE */
3530490Ssusans
3531490Ssusans	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3532490Ssusans	mov	%g7, %asi
35338187SPaul.Sandhu@Sun.COM	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, locked_tsb_l6) /* update TSB */
3534490Ssusans7:
3535490Ssusans	SET_TTE4M_PN(%g5, %g7)			/* add TTE4M pagesize to TTE */
3536490Ssusans        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3537490Ssusans        retry
35381903Sjimand#endif /* !sun4v && !ITLB_32M_256M_SUPPORT */
35390Sstevel@tonic-gate
3540773Seg155566tsb_kernel:
35416127Ssm142603	rdpr	%tt, %g5
35420Sstevel@tonic-gate#ifdef sun4v
35430Sstevel@tonic-gate	cmp	%g7, TTE4M
3544773Seg155566	bge,pn	%icc, 5f
35450Sstevel@tonic-gate#else
3546773Seg155566	cmp	%g7, TTESZ_VALID | TTE4M	! no 32M or 256M support
3547773Seg155566	be,pn	%icc, 5f
35486127Ssm142603#endif /* sun4v */
35490Sstevel@tonic-gate	  nop
35506127Ssm142603	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = 8K TSB ptr
35510Sstevel@tonic-gate	ba,pt	%xcc, 6f
35520Sstevel@tonic-gate	  nop
35530Sstevel@tonic-gate5:
35546127Ssm142603	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	! g1 = 4M TSB ptr
35556127Ssm142603	brlz,pn	%g1, 3f		/* skip programming if 4M TSB ptr is -1 */
35560Sstevel@tonic-gate	  nop
35570Sstevel@tonic-gate6:
35580Sstevel@tonic-gate#ifndef sun4v
35590Sstevel@tonic-gatetsb_kernel_patch_asi:
35600Sstevel@tonic-gate	or	%g0, RUNTIME_PATCH, %g6
35610Sstevel@tonic-gate	mov	%g6, %asi	! XXX avoid writing to %asi !!
35620Sstevel@tonic-gate#endif
35638187SPaul.Sandhu@Sun.COM	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l7)
35640Sstevel@tonic-gate3:
35650Sstevel@tonic-gate#ifdef sun4v
35660Sstevel@tonic-gate	cmp	%g5, T_INSTR_MMU_MISS
35670Sstevel@tonic-gate	be,a,pn	%icc, 1f
35680Sstevel@tonic-gate	  mov	%g3, %g5			! trapstat wants TTE in %g5
35690Sstevel@tonic-gate#endif /* sun4v */
35700Sstevel@tonic-gate	cmp	%g5, FAST_IMMU_MISS_TT
35710Sstevel@tonic-gate	be,pn	%icc, 1f
35720Sstevel@tonic-gate	  mov	%g3, %g5			! trapstat wants TTE in %g5
35730Sstevel@tonic-gate	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
35740Sstevel@tonic-gate	! trapstat wants TTE in %g5
35750Sstevel@tonic-gate	retry
35760Sstevel@tonic-gate1:
35770Sstevel@tonic-gate	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
35780Sstevel@tonic-gate	! trapstat wants TTE in %g5
35790Sstevel@tonic-gate	retry
35800Sstevel@tonic-gate
35810Sstevel@tonic-gatetsb_ism:
35820Sstevel@tonic-gate	/*
35830Sstevel@tonic-gate	 * This is an ISM [i|d]tlb miss.  We probe from the largest
35840Sstevel@tonic-gate	 * page size down to the smallest.
35850Sstevel@tonic-gate	 *
35863687Sjb145095	 * g2 = vaddr + ctx (or ctxtype on sun4v), aka (pseudo-)tag access
35873687Sjb145095	 *	register
35880Sstevel@tonic-gate	 * g3 = ismmap->ism_seg
35890Sstevel@tonic-gate	 * g4 = physical address of ismmap->ism_sfmmu
35900Sstevel@tonic-gate	 * g6 = tsbmiss area
35910Sstevel@tonic-gate	 */
35920Sstevel@tonic-gate	ldna	[%g4]ASI_MEM, %g7		/* g7 = ism hatid */
35930Sstevel@tonic-gate	brz,a,pn %g7, ptl1_panic		/* if zero, panic */
35940Sstevel@tonic-gate	  mov	PTL1_BAD_ISM, %g1
35950Sstevel@tonic-gate						/* g5 = pa of imap_vb_shift */
35960Sstevel@tonic-gate	sub	%g4, (IMAP_ISMHAT - IMAP_VB_SHIFT), %g5
35974528Spaulsan	lduba	[%g5]ASI_MEM, %g4		/* g4 = imap_vb_shift */
35980Sstevel@tonic-gate	srlx	%g3, %g4, %g3			/* clr size field */
35994528Spaulsan	set	TAGACC_CTX_MASK, %g1		/* mask off ctx number */
36004528Spaulsan	sllx    %g3, %g4, %g3                   /* g3 = ism vbase */
36014528Spaulsan	and     %g2, %g1, %g4                   /* g4 = ctx number */
36024528Spaulsan	andn    %g2, %g1, %g1                   /* g1 = tlb miss vaddr */
36034528Spaulsan	sub     %g1, %g3, %g2                   /* g2 = offset in ISM seg */
36044528Spaulsan	or      %g2, %g4, %g2                   /* g2 = (pseudo-)tagacc */
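	/*
	 * A minimal C sketch of the (pseudo-)tagacc recomputation just
	 * done above, using hypothetical variable names:
	 *
	 *	ism_vbase = (ism_seg >> imap_vb_shift) << imap_vb_shift;
	 *	tagacc = ((tagacc & ~TAGACC_CTX_MASK) - ism_vbase) |
	 *	    (tagacc & TAGACC_CTX_MASK);
	 */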
36054528Spaulsan	sub     %g5, (IMAP_VB_SHIFT - IMAP_HATFLAGS), %g5 /* pa of hatflags */
36064528Spaulsan	lduha   [%g5]ASI_MEM, %g4               /* g4 = imap_hatflags */
36076127Ssm142603#if defined(sun4v) || defined(UTSB_PHYS)
36084528Spaulsan	and     %g4, HAT_CTX1_FLAG, %g5         /* g5 = HAT_CTX1_FLAG bit */
36094528Spaulsan	brz,pt %g5, tsb_chk4M_ism
36104528Spaulsan	  nop
36114528Spaulsan	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g5
36124528Spaulsan	or      %g5, HAT_CHKCTX1_FLAG, %g5
36134528Spaulsan	stub    %g5, [%g6 + TSBMISS_URTTEFLAGS]
361410271SJason.Beloro@Sun.COM	rdpr    %tt, %g5
361510271SJason.Beloro@Sun.COM	SAVE_CTX1(%g5, %g3, %g1, tsb_shctxl)
36166127Ssm142603#endif /* defined(sun4v) || defined(UTSB_PHYS) */
361710271SJason.Beloro@Sun.COM
36180Sstevel@tonic-gate	/*
36190Sstevel@tonic-gate	 * ISM pages are always locked down.
36200Sstevel@tonic-gate	 * If we can't find the tte then pagefault
36214528Spaulsan	 * and let the spt segment driver resolve it.
36220Sstevel@tonic-gate	 *
36234528Spaulsan	 * g2 = tagacc w/ISM vaddr (offset in ISM seg)
36244528Spaulsan	 * g4 = imap_hatflags
36250Sstevel@tonic-gate	 * g6 = tsb miss area
36260Sstevel@tonic-gate	 * g7 = ISM hatid
36270Sstevel@tonic-gate	 */
36284528Spaulsan
36294528Spaulsantsb_chk4M_ism:
36300Sstevel@tonic-gate	and	%g4, HAT_4M_FLAG, %g5		/* g4 = imap_hatflags */
36310Sstevel@tonic-gate	brnz,pt	%g5, tsb_ism_4M			/* branch if 4M pages */
36320Sstevel@tonic-gate	  nop
36330Sstevel@tonic-gate
36340Sstevel@tonic-gatetsb_ism_32M:
36350Sstevel@tonic-gate	and	%g4, HAT_32M_FLAG, %g5		/* check default 32M next */
36360Sstevel@tonic-gate	brz,pn	%g5, tsb_ism_256M
36370Sstevel@tonic-gate	  nop
36380Sstevel@tonic-gate
36390Sstevel@tonic-gate	/*
36400Sstevel@tonic-gate	 * 32M hash.
36410Sstevel@tonic-gate	 */
36420Sstevel@tonic-gate
36438187SPaul.Sandhu@Sun.COM	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT32M,
36448187SPaul.Sandhu@Sun.COM	    TTE32M, %g5, tsb_ism_l32M, tsb_ism_32M_found, sfmmu_suspend_tl,
36450Sstevel@tonic-gate	    tsb_ism_4M)
36460Sstevel@tonic-gate	/* NOT REACHED */
36470Sstevel@tonic-gate
36480Sstevel@tonic-gatetsb_ism_32M_found:
364910271SJason.Beloro@Sun.COM	brlz,a,pt %g3, tsb_validtte
36504528Spaulsan	  rdpr	%tt, %g7
36510Sstevel@tonic-gate	ba,pt	%xcc, tsb_ism_4M
36520Sstevel@tonic-gate	  nop
36530Sstevel@tonic-gate
36540Sstevel@tonic-gatetsb_ism_256M:
36550Sstevel@tonic-gate	and	%g4, HAT_256M_FLAG, %g5		/* 256M is last resort */
36560Sstevel@tonic-gate	brz,a,pn %g5, ptl1_panic
36570Sstevel@tonic-gate	  mov	PTL1_BAD_ISM, %g1
36580Sstevel@tonic-gate
36590Sstevel@tonic-gate	/*
36600Sstevel@tonic-gate	 * 256M hash.
36610Sstevel@tonic-gate	 */
36628187SPaul.Sandhu@Sun.COM	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT256M,
36638187SPaul.Sandhu@Sun.COM	    TTE256M, %g5, tsb_ism_l256M, tsb_ism_256M_found, sfmmu_suspend_tl,
36640Sstevel@tonic-gate	    tsb_ism_4M)
36650Sstevel@tonic-gate
36660Sstevel@tonic-gatetsb_ism_256M_found:
366710271SJason.Beloro@Sun.COM	brlz,a,pt %g3, tsb_validtte
36684528Spaulsan	  rdpr	%tt, %g7
36690Sstevel@tonic-gate
36700Sstevel@tonic-gatetsb_ism_4M:
36710Sstevel@tonic-gate	/*
36720Sstevel@tonic-gate	 * 4M hash.
36730Sstevel@tonic-gate	 */
36748187SPaul.Sandhu@Sun.COM	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT4M,
36758187SPaul.Sandhu@Sun.COM	    TTE4M, %g5, tsb_ism_l4M, tsb_ism_4M_found, sfmmu_suspend_tl,
36760Sstevel@tonic-gate	    tsb_ism_8K)
36770Sstevel@tonic-gate	/* NOT REACHED */
36780Sstevel@tonic-gate
36790Sstevel@tonic-gatetsb_ism_4M_found:
368010271SJason.Beloro@Sun.COM	brlz,a,pt %g3, tsb_validtte
36814528Spaulsan	  rdpr	%tt, %g7
36820Sstevel@tonic-gate
36830Sstevel@tonic-gatetsb_ism_8K:
36840Sstevel@tonic-gate	/*
36850Sstevel@tonic-gate	 * 8K and 64K hash.
36860Sstevel@tonic-gate	 */
36870Sstevel@tonic-gate
36888187SPaul.Sandhu@Sun.COM	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT64K,
36898187SPaul.Sandhu@Sun.COM	    TTE64K, %g5, tsb_ism_l8K, tsb_ism_8K_found, sfmmu_suspend_tl,
36900Sstevel@tonic-gate	    tsb_pagefault)
36910Sstevel@tonic-gate	/* NOT REACHED */
36920Sstevel@tonic-gate
36930Sstevel@tonic-gatetsb_ism_8K_found:
369410271SJason.Beloro@Sun.COM	brlz,a,pt %g3, tsb_validtte
36954528Spaulsan	  rdpr	%tt, %g7
36960Sstevel@tonic-gate
36970Sstevel@tonic-gatetsb_pagefault:
36980Sstevel@tonic-gate	rdpr	%tt, %g7
36990Sstevel@tonic-gate	cmp	%g7, FAST_PROT_TT
37000Sstevel@tonic-gate	be,a,pn	%icc, tsb_protfault
37010Sstevel@tonic-gate	  wrpr	%g0, FAST_DMMU_MISS_TT, %tt
37020Sstevel@tonic-gate
37030Sstevel@tonic-gatetsb_protfault:
37040Sstevel@tonic-gate	/*
37050Sstevel@tonic-gate	 * we get here if we couldn't find a valid tte in the hash.
37060Sstevel@tonic-gate	 *
37070Sstevel@tonic-gate	 * If this is a user fault at tl>1, we go to window handling code.
37080Sstevel@tonic-gate	 *
37090Sstevel@tonic-gate	 * If kernel and the fault is on the same page as our stack
37100Sstevel@tonic-gate	 * pointer, then we know the stack is bad and the trap handler
37110Sstevel@tonic-gate	 * will fail, so we call ptl1_panic with PTL1_BAD_STACK.
37120Sstevel@tonic-gate	 *
37130Sstevel@tonic-gate	 * If this is a kernel trap and tl>1, panic.
37140Sstevel@tonic-gate	 *
37150Sstevel@tonic-gate	 * Otherwise we call pagefault.
37160Sstevel@tonic-gate	 */
37170Sstevel@tonic-gate	cmp	%g7, FAST_IMMU_MISS_TT
37180Sstevel@tonic-gate#ifdef sun4v
37190Sstevel@tonic-gate	MMU_FAULT_STATUS_AREA(%g4)
37200Sstevel@tonic-gate	ldx	[%g4 + MMFSA_I_CTX], %g5
37210Sstevel@tonic-gate	ldx	[%g4 + MMFSA_D_CTX], %g4
37220Sstevel@tonic-gate	move	%icc, %g5, %g4
37230Sstevel@tonic-gate	cmp	%g7, T_INSTR_MMU_MISS
37240Sstevel@tonic-gate	move	%icc, %g5, %g4
37250Sstevel@tonic-gate#else
37260Sstevel@tonic-gate	mov	MMU_TAG_ACCESS, %g4
37270Sstevel@tonic-gate	ldxa	[%g4]ASI_DMMU, %g2
37280Sstevel@tonic-gate	ldxa	[%g4]ASI_IMMU, %g5
37290Sstevel@tonic-gate	move	%icc, %g5, %g2
37300Sstevel@tonic-gate	cmp	%g7, T_INSTR_MMU_MISS
37310Sstevel@tonic-gate	move	%icc, %g5, %g2
37320Sstevel@tonic-gate	sllx	%g2, TAGACC_CTX_LSHIFT, %g4
37336127Ssm142603#endif /* sun4v */
37340Sstevel@tonic-gate	brnz,pn	%g4, 3f				/* skip if not kernel */
37350Sstevel@tonic-gate	  rdpr	%tl, %g5
37360Sstevel@tonic-gate
37370Sstevel@tonic-gate	add	%sp, STACK_BIAS, %g3
37380Sstevel@tonic-gate	srlx	%g3, MMU_PAGESHIFT, %g3
37390Sstevel@tonic-gate	srlx	%g2, MMU_PAGESHIFT, %g4
37400Sstevel@tonic-gate	cmp	%g3, %g4
37410Sstevel@tonic-gate	be,a,pn	%icc, ptl1_panic		/* panic if bad %sp */
37420Sstevel@tonic-gate	  mov	PTL1_BAD_STACK, %g1
37430Sstevel@tonic-gate
37440Sstevel@tonic-gate	cmp	%g5, 1
37450Sstevel@tonic-gate	ble,pt	%icc, 2f
37460Sstevel@tonic-gate	  nop
37470Sstevel@tonic-gate	TSTAT_CHECK_TL1(2f, %g1, %g2)
37480Sstevel@tonic-gate	rdpr	%tt, %g2
37490Sstevel@tonic-gate	cmp	%g2, FAST_PROT_TT
37500Sstevel@tonic-gate	mov	PTL1_BAD_KPROT_FAULT, %g1
37510Sstevel@tonic-gate	movne	%icc, PTL1_BAD_KMISS, %g1
37520Sstevel@tonic-gate	ba,pt	%icc, ptl1_panic
37530Sstevel@tonic-gate	  nop
37540Sstevel@tonic-gate
37550Sstevel@tonic-gate2:
37560Sstevel@tonic-gate	/*
37570Sstevel@tonic-gate	 * We are taking a pagefault in the kernel on a kernel address.  If
37580Sstevel@tonic-gate	 * CPU_DTRACE_NOFAULT is set in the cpuc_dtrace_flags, we don't actually
37590Sstevel@tonic-gate	 * want to call sfmmu_pagefault -- we will instead note that a fault
37600Sstevel@tonic-gate	 * has occurred by setting CPU_DTRACE_BADADDR and issue a "done"
37610Sstevel@tonic-gate	 * (instead of a "retry").  This will step over the faulting
37620Sstevel@tonic-gate	 * instruction.
37630Sstevel@tonic-gate	 */
37640Sstevel@tonic-gate	CPU_INDEX(%g1, %g2)
37650Sstevel@tonic-gate	set	cpu_core, %g2
37660Sstevel@tonic-gate	sllx	%g1, CPU_CORE_SHIFT, %g1
37670Sstevel@tonic-gate	add	%g1, %g2, %g1
37680Sstevel@tonic-gate	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
37690Sstevel@tonic-gate	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
37700Sstevel@tonic-gate	bz	sfmmu_pagefault
37710Sstevel@tonic-gate	or	%g2, CPU_DTRACE_BADADDR, %g2
37720Sstevel@tonic-gate	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
37730Sstevel@tonic-gate	GET_MMU_D_ADDR(%g3, %g4)
37740Sstevel@tonic-gate	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
37750Sstevel@tonic-gate	done
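	/*
	 * A rough C rendering of the NOFAULT handling above; the helper
	 * is hypothetical and the pointers stand in for the cpu_core
	 * fields reached via the assym offsets:
	 *
	 *	static int
	 *	dtrace_nofault(uint16_t *flagsp, uint64_t *illvalp,
	 *	    uint64_t fault_va)
	 *	{
	 *		if ((*flagsp & CPU_DTRACE_NOFAULT) == 0)
	 *			return (0);	// fall into sfmmu_pagefault
	 *		*flagsp |= CPU_DTRACE_BADADDR;
	 *		*illvalp = fault_va;	// consumed by DTrace
	 *		return (1);		// "done": skip the instr.
	 *	}
	 */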
37760Sstevel@tonic-gate
37770Sstevel@tonic-gate3:
37780Sstevel@tonic-gate	cmp	%g5, 1
37790Sstevel@tonic-gate	ble,pt	%icc, 4f
37800Sstevel@tonic-gate	  nop
37810Sstevel@tonic-gate	TSTAT_CHECK_TL1(4f, %g1, %g2)
37820Sstevel@tonic-gate	ba,pt	%icc, sfmmu_window_trap
37830Sstevel@tonic-gate	  nop
37840Sstevel@tonic-gate
37850Sstevel@tonic-gate4:
37860Sstevel@tonic-gate	/*
37870Sstevel@tonic-gate	 * We are taking a pagefault on a non-kernel address.  If we are in
37880Sstevel@tonic-gate	 * the kernel (e.g., due to a copyin()), we will check cpuc_dtrace_flags
37890Sstevel@tonic-gate	 * and (if CPU_DTRACE_NOFAULT is set) will proceed as outlined above.
37900Sstevel@tonic-gate	 */
37910Sstevel@tonic-gate	CPU_INDEX(%g1, %g2)
37920Sstevel@tonic-gate	set	cpu_core, %g2
37930Sstevel@tonic-gate	sllx	%g1, CPU_CORE_SHIFT, %g1
37940Sstevel@tonic-gate	add	%g1, %g2, %g1
37950Sstevel@tonic-gate	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
37960Sstevel@tonic-gate	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
37974528Spaulsan	bz	sfmmu_mmu_trap
37980Sstevel@tonic-gate	or	%g2, CPU_DTRACE_BADADDR, %g2
37990Sstevel@tonic-gate	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
38000Sstevel@tonic-gate	GET_MMU_D_ADDR(%g3, %g4)
38010Sstevel@tonic-gate	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
38020Sstevel@tonic-gate
38030Sstevel@tonic-gate	/*
38040Sstevel@tonic-gate	 * Be sure that we're actually taking this miss from the kernel --
38050Sstevel@tonic-gate	 * otherwise we have managed to return to user-level with
38060Sstevel@tonic-gate	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
38070Sstevel@tonic-gate	 */
38080Sstevel@tonic-gate	rdpr	%tstate, %g2
38090Sstevel@tonic-gate	btst	TSTATE_PRIV, %g2
38100Sstevel@tonic-gate	bz,a	ptl1_panic
38110Sstevel@tonic-gate	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
38120Sstevel@tonic-gate	done
38130Sstevel@tonic-gate
38140Sstevel@tonic-gate	ALTENTRY(tsb_tl0_noctxt)
38150Sstevel@tonic-gate	/*
38160Sstevel@tonic-gate	 * If we have no context, check to see if CPU_DTRACE_NOFAULT is set;
38170Sstevel@tonic-gate	 * if it is, indicate that we have faulted and issue a done.
38180Sstevel@tonic-gate	 */
38190Sstevel@tonic-gate	CPU_INDEX(%g5, %g6)
38200Sstevel@tonic-gate	set	cpu_core, %g6
38210Sstevel@tonic-gate	sllx	%g5, CPU_CORE_SHIFT, %g5
38220Sstevel@tonic-gate	add	%g5, %g6, %g5
38230Sstevel@tonic-gate	lduh	[%g5 + CPUC_DTRACE_FLAGS], %g6
38240Sstevel@tonic-gate	andcc	%g6, CPU_DTRACE_NOFAULT, %g0
38250Sstevel@tonic-gate	bz	1f
38260Sstevel@tonic-gate	or	%g6, CPU_DTRACE_BADADDR, %g6
38270Sstevel@tonic-gate	stuh	%g6, [%g5 + CPUC_DTRACE_FLAGS]
38280Sstevel@tonic-gate	GET_MMU_D_ADDR(%g3, %g4)
38290Sstevel@tonic-gate	stx	%g3, [%g5 + CPUC_DTRACE_ILLVAL]
38300Sstevel@tonic-gate
38310Sstevel@tonic-gate	/*
38320Sstevel@tonic-gate	 * Be sure that we're actually taking this miss from the kernel --
38330Sstevel@tonic-gate	 * otherwise we have managed to return to user-level with
38340Sstevel@tonic-gate	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
38350Sstevel@tonic-gate	 */
38360Sstevel@tonic-gate	rdpr	%tstate, %g5
38370Sstevel@tonic-gate	btst	TSTATE_PRIV, %g5
38380Sstevel@tonic-gate	bz,a	ptl1_panic
38390Sstevel@tonic-gate	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
38404565Sjhaslam	TSTAT_CHECK_TL1(2f, %g1, %g2);
38414565Sjhaslam2:
38420Sstevel@tonic-gate	done
38430Sstevel@tonic-gate
38440Sstevel@tonic-gate1:
38450Sstevel@tonic-gate	rdpr	%tt, %g5
38460Sstevel@tonic-gate	cmp	%g5, FAST_IMMU_MISS_TT
38470Sstevel@tonic-gate#ifdef sun4v
38480Sstevel@tonic-gate	MMU_FAULT_STATUS_AREA(%g2)
38490Sstevel@tonic-gate	be,a,pt	%icc, 2f
38500Sstevel@tonic-gate	  ldx	[%g2 + MMFSA_I_CTX], %g3
38510Sstevel@tonic-gate	cmp	%g5, T_INSTR_MMU_MISS
38520Sstevel@tonic-gate	be,a,pt	%icc, 2f
38530Sstevel@tonic-gate	  ldx	[%g2 + MMFSA_I_CTX], %g3
38540Sstevel@tonic-gate	ldx	[%g2 + MMFSA_D_CTX], %g3
38550Sstevel@tonic-gate2:
38560Sstevel@tonic-gate#else
38570Sstevel@tonic-gate	mov	MMU_TAG_ACCESS, %g2
38580Sstevel@tonic-gate	be,a,pt	%icc, 2f
38590Sstevel@tonic-gate	  ldxa	[%g2]ASI_IMMU, %g3
38600Sstevel@tonic-gate	ldxa	[%g2]ASI_DMMU, %g3
38610Sstevel@tonic-gate2:	sllx	%g3, TAGACC_CTX_LSHIFT, %g3
38626127Ssm142603#endif /* sun4v */
38630Sstevel@tonic-gate	brz,a,pn %g3, ptl1_panic		! panic if called for kernel
38640Sstevel@tonic-gate	  mov	PTL1_BAD_CTX_STEAL, %g1		! since kernel ctx was stolen
38650Sstevel@tonic-gate	rdpr	%tl, %g5
38660Sstevel@tonic-gate	cmp	%g5, 1
38670Sstevel@tonic-gate	ble,pt	%icc, sfmmu_mmu_trap
38680Sstevel@tonic-gate	  nop
38690Sstevel@tonic-gate	TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
38700Sstevel@tonic-gate	ba,pt	%icc, sfmmu_window_trap
38710Sstevel@tonic-gate	  nop
38720Sstevel@tonic-gate	SET_SIZE(sfmmu_tsb_miss)
38736127Ssm142603#endif  /* lint */
38740Sstevel@tonic-gate
38750Sstevel@tonic-gate#if defined (lint)
38760Sstevel@tonic-gate/*
38770Sstevel@tonic-gate * This routine will look for a user or kernel vaddr in the hash
38780Sstevel@tonic-gate * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't
38790Sstevel@tonic-gate * grab any locks.  It should only be used by other sfmmu routines.
38800Sstevel@tonic-gate */
38810Sstevel@tonic-gate/* ARGSUSED */
38820Sstevel@tonic-gatepfn_t
38830Sstevel@tonic-gatesfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
38840Sstevel@tonic-gate{
38850Sstevel@tonic-gate	return(0);
38860Sstevel@tonic-gate}
38870Sstevel@tonic-gate
38883351Saguzovsk/* ARGSUSED */
38893351Saguzovskpfn_t
38903351Saguzovsksfmmu_kvaszc2pfn(caddr_t vaddr, int hashno)
38913351Saguzovsk{
38923351Saguzovsk	return(0);
38933351Saguzovsk}
38943351Saguzovsk
38950Sstevel@tonic-gate#else /* lint */
38960Sstevel@tonic-gate
38970Sstevel@tonic-gate	ENTRY_NP(sfmmu_vatopfn)
38980Sstevel@tonic-gate 	/*
38990Sstevel@tonic-gate 	 * save the current %pstate (interrupts are disabled below)
39000Sstevel@tonic-gate 	 */
39010Sstevel@tonic-gate 	rdpr	%pstate, %o3
39020Sstevel@tonic-gate#ifdef DEBUG
39032241Shuah	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l5, %g1)
39040Sstevel@tonic-gate#endif
39050Sstevel@tonic-gate	/*
39060Sstevel@tonic-gate	 * disable interrupts to protect the TSBMISS area
39070Sstevel@tonic-gate	 */
39080Sstevel@tonic-gate	andn    %o3, PSTATE_IE, %o5
39090Sstevel@tonic-gate	wrpr    %o5, 0, %pstate
39100Sstevel@tonic-gate
39110Sstevel@tonic-gate	/*
39120Sstevel@tonic-gate	 * o0 = vaddr
39130Sstevel@tonic-gate	 * o1 = sfmmup
39140Sstevel@tonic-gate	 * o2 = ttep
39150Sstevel@tonic-gate	 */
39160Sstevel@tonic-gate	CPU_TSBMISS_AREA(%g1, %o5)
39170Sstevel@tonic-gate	ldn	[%g1 + TSBMISS_KHATID], %o4
39180Sstevel@tonic-gate	cmp	%o4, %o1
39190Sstevel@tonic-gate	bne,pn	%ncc, vatopfn_nokernel
39200Sstevel@tonic-gate	  mov	TTE64K, %g5			/* g5 = rehash # */
39210Sstevel@tonic-gate	mov %g1,%o5				/* o5 = tsbmiss_area */
39220Sstevel@tonic-gate	/*
39230Sstevel@tonic-gate	 * o0 = vaddr
39240Sstevel@tonic-gate	 * o1 & o4 = hatid
39250Sstevel@tonic-gate	 * o2 = ttep
39260Sstevel@tonic-gate	 * o5 = tsbmiss area
39270Sstevel@tonic-gate	 */
39280Sstevel@tonic-gate	mov	HBLK_RANGE_SHIFT, %g6
39290Sstevel@tonic-gate1:
39300Sstevel@tonic-gate
39310Sstevel@tonic-gate	/*
39320Sstevel@tonic-gate	 * o0 = vaddr
39330Sstevel@tonic-gate	 * o1 = sfmmup
39340Sstevel@tonic-gate	 * o2 = ttep
39350Sstevel@tonic-gate	 * o3 = old %pstate
39360Sstevel@tonic-gate	 * o4 = hatid
39370Sstevel@tonic-gate	 * o5 = tsbmiss
39380Sstevel@tonic-gate	 * g5 = rehash #
39390Sstevel@tonic-gate	 * g6 = hmeshift
39400Sstevel@tonic-gate	 *
39410Sstevel@tonic-gate	 * The first arg to GET_TTE is actually the tag access register,
39420Sstevel@tonic-gate	 * not just the vaddr. Since this call is for the kernel we need to
39430Sstevel@tonic-gate	 * clear any lower vaddr bits that would be interpreted as ctx bits.
39440Sstevel@tonic-gate	 */
39450Sstevel@tonic-gate	set     TAGACC_CTX_MASK, %g1
39460Sstevel@tonic-gate	andn    %o0, %g1, %o0
39478187SPaul.Sandhu@Sun.COM	GET_TTE(%o0, %o4, %g1, %g2, %o5, %g4, %g6, %g5, %g3,
39480Sstevel@tonic-gate		vatopfn_l1, kvtop_hblk_found, tsb_suspend, kvtop_nohblk)
39490Sstevel@tonic-gate
39500Sstevel@tonic-gatekvtop_hblk_found:
39510Sstevel@tonic-gate	/*
39520Sstevel@tonic-gate	 * o0 = vaddr
39530Sstevel@tonic-gate	 * o1 = sfmmup
39540Sstevel@tonic-gate	 * o2 = ttep
39550Sstevel@tonic-gate	 * g1 = tte
39560Sstevel@tonic-gate	 * g2 = tte pa
39578187SPaul.Sandhu@Sun.COM	 * g3 = scratch
39580Sstevel@tonic-gate	 * o5 = tsbmiss area
39590Sstevel@tonic-gate	 * o1 = hat id
39600Sstevel@tonic-gate	 */
39610Sstevel@tonic-gate	brgez,a,pn %g1, 6f			/* if tte invalid goto tl0 */
39620Sstevel@tonic-gate	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
39630Sstevel@tonic-gate	stx %g1,[%o2]				/* put tte into *ttep */
39640Sstevel@tonic-gate	TTETOPFN(%g1, %o0, vatopfn_l2, %g2, %g3, %g4)
39650Sstevel@tonic-gate	/*
39660Sstevel@tonic-gate	 * o0 = vaddr
39670Sstevel@tonic-gate	 * o1 = sfmmup
39680Sstevel@tonic-gate	 * o2 = ttep
39690Sstevel@tonic-gate	 * g1 = pfn
39700Sstevel@tonic-gate	 */
39710Sstevel@tonic-gate	ba,pt	%xcc, 6f
39720Sstevel@tonic-gate	  mov	%g1, %o0
39730Sstevel@tonic-gate
39740Sstevel@tonic-gatekvtop_nohblk:
39750Sstevel@tonic-gate	/*
39760Sstevel@tonic-gate	 * we get here if we couldn't find a valid hblk in the hash.  We
39770Sstevel@tonic-gate	 * rehash if necessary.
39780Sstevel@tonic-gate	 */
39790Sstevel@tonic-gate	ldn	[%o5 + (TSBMISS_SCRATCH + TSB_TAGACC)], %o0
39800Sstevel@tonic-gate#ifdef sun4v
39810Sstevel@tonic-gate	cmp	%g5, MAX_HASHCNT
39820Sstevel@tonic-gate#else
39830Sstevel@tonic-gate	cmp	%g5, DEFAULT_MAX_HASHCNT	/* no 32/256M kernel pages */
39846127Ssm142603#endif /* sun4v */
39850Sstevel@tonic-gate	be,a,pn	%icc, 6f
39860Sstevel@tonic-gate	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
39870Sstevel@tonic-gate	mov	%o1, %o4			/* restore hatid */
39880Sstevel@tonic-gate#ifdef sun4v
39890Sstevel@tonic-gate        add	%g5, 2, %g5
39900Sstevel@tonic-gate	cmp	%g5, 3
39910Sstevel@tonic-gate	move	%icc, MMU_PAGESHIFT4M, %g6
39920Sstevel@tonic-gate	ba,pt	%icc, 1b
39930Sstevel@tonic-gate	movne	%icc, MMU_PAGESHIFT256M, %g6
39940Sstevel@tonic-gate#else
39950Sstevel@tonic-gate        inc	%g5
39960Sstevel@tonic-gate	cmp	%g5, 2
39970Sstevel@tonic-gate	move	%icc, MMU_PAGESHIFT512K, %g6
39980Sstevel@tonic-gate	ba,pt	%icc, 1b
39990Sstevel@tonic-gate	movne	%icc, MMU_PAGESHIFT4M, %g6
40006127Ssm142603#endif /* sun4v */
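	/*
	 * The rehash progression above, sketched in C; probe() stands
	 * in for GET_TTE and is hypothetical:
	 *
	 *	hashno = TTE64K; hmeshift = HBLK_RANGE_SHIFT;
	 *	while (!probe(tagacc, hashno, hmeshift, &tte)) {
	 *	#ifdef sun4v
	 *		if (hashno == MAX_HASHCNT)
	 *			return (PFN_INVALID);
	 *		hashno += 2;		// TTE4M, then TTE256M
	 *		hmeshift = (hashno == TTE4M) ?
	 *		    MMU_PAGESHIFT4M : MMU_PAGESHIFT256M;
	 *	#else
	 *		if (hashno == DEFAULT_MAX_HASHCNT)
	 *			return (PFN_INVALID);
	 *		hashno++;		// TTE512K, then TTE4M
	 *		hmeshift = (hashno == TTE512K) ?
	 *		    MMU_PAGESHIFT512K : MMU_PAGESHIFT4M;
	 *	#endif
	 *	}
	 *	// tte found: convert it to a pfn (TTETOPFN above)
	 */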
40010Sstevel@tonic-gate6:
40020Sstevel@tonic-gate	retl
40030Sstevel@tonic-gate 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
40040Sstevel@tonic-gate
40050Sstevel@tonic-gatetsb_suspend:
40060Sstevel@tonic-gate	/*
40070Sstevel@tonic-gate	 * o0 = vaddr
40080Sstevel@tonic-gate	 * o1 = sfmmup
40090Sstevel@tonic-gate	 * o2 = ttep
40100Sstevel@tonic-gate	 * g1 = tte
40110Sstevel@tonic-gate	 * g2 = tte pa
40120Sstevel@tonic-gate	 * g3 = tte va
40130Sstevel@tonic-gate	 * o5 = tsbmiss area (use o5, not o2, for tsbmiss)
40140Sstevel@tonic-gate	 */
40150Sstevel@tonic-gate	stx %g1,[%o2]				/* put tte into *ttep */
40160Sstevel@tonic-gate	brgez,a,pn %g1, 8f			/* if tte invalid goto 8: */
40174489Sjj204856	  sub	%g0, 1, %o0			/* output = PFN_INVALID */
40180Sstevel@tonic-gate	sub	%g0, 2, %o0			/* output = PFN_SUSPENDED */
40190Sstevel@tonic-gate8:
40200Sstevel@tonic-gate	retl
40210Sstevel@tonic-gate	 wrpr	%g0, %o3, %pstate		/* enable interrupts */
40220Sstevel@tonic-gate
40230Sstevel@tonic-gatevatopfn_nokernel:
40240Sstevel@tonic-gate	/*
40250Sstevel@tonic-gate	 * This routine does NOT support user addresses.
40260Sstevel@tonic-gate	 * There is a routine in C that supports this.
40270Sstevel@tonic-gate	 * The only reason the C routine doesn't also support
40280Sstevel@tonic-gate	 * kernel addresses is that we do va_to_pa while
40290Sstevel@tonic-gate	 * holding the hashlock.
40300Sstevel@tonic-gate	 */
40310Sstevel@tonic-gate 	wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
40320Sstevel@tonic-gate	save	%sp, -SA(MINFRAME), %sp
40330Sstevel@tonic-gate	sethi	%hi(sfmmu_panic3), %o0
40340Sstevel@tonic-gate	call	panic
40350Sstevel@tonic-gate	 or	%o0, %lo(sfmmu_panic3), %o0
40360Sstevel@tonic-gate
40370Sstevel@tonic-gate	SET_SIZE(sfmmu_vatopfn)
40383351Saguzovsk
40393351Saguzovsk	/*
40403351Saguzovsk	 * %o0 = vaddr
40413351Saguzovsk	 * %o1 = hashno (aka szc)
40423351Saguzovsk	 *
40443351Saguzovsk	 * This routine is similar to sfmmu_vatopfn() but will only look for
40453351Saguzovsk	 * a kernel vaddr in the hash structure for the specified rehash value.
40463351Saguzovsk	 * It's just an optimization for the case when pagesize for a given
40473351Saguzovsk	 * va range is already known (e.g. large page heap) and we don't want
40483351Saguzovsk	 * to start the search with rehash value 1 as sfmmu_vatopfn() does.
40493351Saguzovsk	 *
40503351Saguzovsk	 * Returns a valid pfn, or PFN_INVALID if the tte for the specified
40513351Saguzovsk	 * rehash # is not found, is invalid, or is suspended.
40523351Saguzovsk	 */
40533351Saguzovsk	ENTRY_NP(sfmmu_kvaszc2pfn)
40543351Saguzovsk 	/*
40553351Saguzovsk 	 * save the current %pstate (interrupts are disabled below)
40563351Saguzovsk 	 */
40573351Saguzovsk 	rdpr	%pstate, %o3
40583351Saguzovsk#ifdef DEBUG
40593351Saguzovsk	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l6, %g1)
40603351Saguzovsk#endif
40613351Saguzovsk	/*
40623351Saguzovsk	 * disable interrupts to protect the TSBMISS area
40633351Saguzovsk	 */
40643351Saguzovsk	andn    %o3, PSTATE_IE, %o5
40653351Saguzovsk	wrpr    %o5, 0, %pstate
40663351Saguzovsk
40673351Saguzovsk	CPU_TSBMISS_AREA(%g1, %o5)
40683351Saguzovsk	ldn	[%g1 + TSBMISS_KHATID], %o4
40693351Saguzovsk	sll	%o1, 1, %g6
40703351Saguzovsk	add	%g6, %o1, %g6			/* %g6 = 3 * hashno */
40713351Saguzovsk	add	%g6, MMU_PAGESHIFT, %g6	/* hmeshift = MMU_PAGESHIFT + 3 * hashno */
40723351Saguzovsk	/*
40733351Saguzovsk	 * %o0 = vaddr
40743351Saguzovsk	 * %o1 = hashno
40753351Saguzovsk	 * %o3 = old %pstate
40763351Saguzovsk	 * %o4 = ksfmmup
40773351Saguzovsk	 * %g1 = tsbmiss area
40783351Saguzovsk	 * %g6 = hmeshift
40793351Saguzovsk	 */
40803351Saguzovsk
40813351Saguzovsk	/*
40823351Saguzovsk	 * The first arg to GET_TTE is actually the tag access register,
40833351Saguzovsk	 * not just the vaddr. Since this call is for the kernel we need to
40843351Saguzovsk	 * clear any lower vaddr bits that would be interpreted as ctx bits.
40853351Saguzovsk	 */
40863351Saguzovsk	srlx	%o0, MMU_PAGESHIFT, %o0
40873351Saguzovsk	sllx	%o0, MMU_PAGESHIFT, %o0
40888187SPaul.Sandhu@Sun.COM	GET_TTE(%o0, %o4, %g3, %g4, %g1, %o5, %g6, %o1, %g5,
40893351Saguzovsk		kvaszc2pfn_l1, kvaszc2pfn_hblk_found, kvaszc2pfn_nohblk,
40903351Saguzovsk		kvaszc2pfn_nohblk)
40913351Saguzovsk
40923351Saguzovskkvaszc2pfn_hblk_found:
40933351Saguzovsk	/*
40943351Saguzovsk	 * %g3 = tte
40953351Saguzovsk	 * %o0 = vaddr
40963351Saguzovsk	 */
40973351Saguzovsk	brgez,a,pn %g3, 1f			/* check if tte is invalid */
40983351Saguzovsk	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
40993351Saguzovsk	TTETOPFN(%g3, %o0, kvaszc2pfn_l2, %g2, %g4, %g5)
41003351Saguzovsk	/*
41013351Saguzovsk	 * g3 = pfn
41023351Saguzovsk	 */
41033351Saguzovsk	ba,pt	%xcc, 1f
41043351Saguzovsk	  mov	%g3, %o0
41053351Saguzovsk
41063351Saguzovskkvaszc2pfn_nohblk:
41073351Saguzovsk	mov	-1, %o0
41083351Saguzovsk
41093351Saguzovsk1:
41103351Saguzovsk	retl
41113351Saguzovsk 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
41123351Saguzovsk
41133351Saguzovsk	SET_SIZE(sfmmu_kvaszc2pfn)
41143351Saguzovsk
41150Sstevel@tonic-gate#endif /* lint */
41160Sstevel@tonic-gate
41170Sstevel@tonic-gate
41180Sstevel@tonic-gate
41190Sstevel@tonic-gate#if !defined(lint)
41200Sstevel@tonic-gate
41210Sstevel@tonic-gate/*
41220Sstevel@tonic-gate * kpm lock used between trap level tsbmiss handler and kpm C level.
41230Sstevel@tonic-gate */
41240Sstevel@tonic-gate#define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)			\
41250Sstevel@tonic-gate	mov     0xff, tmp1						;\
41260Sstevel@tonic-gatelabel1:									;\
41270Sstevel@tonic-gate	casa    [kpmlckp]asi, %g0, tmp1					;\
41280Sstevel@tonic-gate	brnz,pn tmp1, label1						;\
41290Sstevel@tonic-gate	mov     0xff, tmp1						;\
41300Sstevel@tonic-gate	membar  #LoadLoad
41310Sstevel@tonic-gate
41320Sstevel@tonic-gate#define KPMLOCK_EXIT(kpmlckp, asi)					\
41330Sstevel@tonic-gate	membar  #LoadStore|#StoreStore					;\
41340Sstevel@tonic-gate	sta     %g0, [kpmlckp]asi
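/*
 * A C11-style sketch of the spin lock above; illustrative only, the
 * handlers use the casa/membar sequence directly:
 *
 *	#include <stdatomic.h>
 *
 *	static void
 *	kpmlock_enter(atomic_uint *lp)
 *	{
 *		unsigned int expect;
 *		do {
 *			expect = 0;	// casa compares against %g0
 *		} while (!atomic_compare_exchange_weak(lp, &expect, 0xff));
 *		atomic_thread_fence(memory_order_acquire); // membar #LoadLoad
 *	}
 *
 *	static void
 *	kpmlock_exit(atomic_uint *lp)
 *	{
 *		// membar #LoadStore|#StoreStore, then clear the lock word
 *		atomic_store_explicit(lp, 0, memory_order_release);
 *	}
 */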
41350Sstevel@tonic-gate
41360Sstevel@tonic-gate/*
41370Sstevel@tonic-gate * Look up a memseg for a given pfn and, if found, return the physical
41380Sstevel@tonic-gate * address of the corresponding struct memseg in mseg; otherwise
41390Sstevel@tonic-gate * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
41400Sstevel@tonic-gate * tsbmp; %asi is assumed to be ASI_MEM.
41410Sstevel@tonic-gate * This lookup is done by strictly traversing only the physical memseg
41420Sstevel@tonic-gate * linkage. The more generic approach, checking the virtual linkage
41430Sstevel@tonic-gate * before using the physical (used e.g. with hmehash buckets), cannot
41440Sstevel@tonic-gate * be used here. Memory DR operations can run in parallel with this
41450Sstevel@tonic-gate * lookup w/o any locks, and updates of the physical and virtual
41460Sstevel@tonic-gate * linkage cannot be done atomically with respect to each other. Because
41470Sstevel@tonic-gate * physical address zero can be a valid physical address, MSEG_NULLPTR_PA
41480Sstevel@tonic-gate * acts as the "physical NULL" pointer.
41490Sstevel@tonic-gate */
41500Sstevel@tonic-gate#define	PAGE_NUM2MEMSEG_NOLOCK_PA(pfn, mseg, tsbmp, tmp1, tmp2, tmp3, label) \
41510Sstevel@tonic-gate	sethi	%hi(mhash_per_slot), tmp3 /* no tsbmp use due to DR */	;\
41520Sstevel@tonic-gate	ldx	[tmp3 + %lo(mhash_per_slot)], mseg			;\
41530Sstevel@tonic-gate	udivx	pfn, mseg, mseg						;\
41540Sstevel@tonic-gate	ldx	[tsbmp + KPMTSBM_MSEGPHASHPA], tmp1			;\
41550Sstevel@tonic-gate	and	mseg, SFMMU_N_MEM_SLOTS - 1, mseg			;\
41560Sstevel@tonic-gate	sllx	mseg, SFMMU_MEM_HASH_ENTRY_SHIFT, mseg			;\
41570Sstevel@tonic-gate	add	tmp1, mseg, tmp1					;\
41580Sstevel@tonic-gate	ldxa	[tmp1]%asi, mseg					;\
41590Sstevel@tonic-gate	cmp	mseg, MSEG_NULLPTR_PA					;\
41600Sstevel@tonic-gate	be,pn	%xcc, label/**/1		/* if not found */	;\
41610Sstevel@tonic-gate	  nop								;\
41620Sstevel@tonic-gate	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
41630Sstevel@tonic-gate	cmp	pfn, tmp1			/* pfn - pages_base */	;\
41640Sstevel@tonic-gate	blu,pn	%xcc, label/**/1					;\
41650Sstevel@tonic-gate	  ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
41660Sstevel@tonic-gate	cmp	pfn, tmp2			/* pfn - pages_end */	;\
41670Sstevel@tonic-gate	bgeu,pn	%xcc, label/**/1					;\
41680Sstevel@tonic-gate	  sub	pfn, tmp1, tmp1			/* pfn - pages_base */	;\
41690Sstevel@tonic-gate	mulx	tmp1, PAGE_SIZE, tmp1					;\
41700Sstevel@tonic-gate	ldxa	[mseg + MEMSEG_PAGESPA]%asi, tmp2	/* pages */	;\
41710Sstevel@tonic-gate	add	tmp2, tmp1, tmp1			/* pp */	;\
41720Sstevel@tonic-gate	lduwa	[tmp1 + PAGE_PAGENUM]%asi, tmp2				;\
41730Sstevel@tonic-gate	cmp	tmp2, pfn						;\
41740Sstevel@tonic-gate	be,pt	%xcc, label/**/_ok			/* found */	;\
41750Sstevel@tonic-gatelabel/**/1:								;\
41760Sstevel@tonic-gate	/* brute force lookup */					;\
41770Sstevel@tonic-gate	sethi	%hi(memsegspa), tmp3 /* no tsbmp use due to DR */	;\
41780Sstevel@tonic-gate	ldx	[tmp3 + %lo(memsegspa)], mseg				;\
41790Sstevel@tonic-gatelabel/**/2:								;\
41800Sstevel@tonic-gate	cmp	mseg, MSEG_NULLPTR_PA					;\
41810Sstevel@tonic-gate	be,pn	%xcc, label/**/_ok		/* if not found */	;\
41820Sstevel@tonic-gate	  nop								;\
41830Sstevel@tonic-gate	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
41840Sstevel@tonic-gate	cmp	pfn, tmp1			/* pfn - pages_base */	;\
41850Sstevel@tonic-gate	blu,a,pt %xcc, label/**/2					;\
41860Sstevel@tonic-gate	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
41870Sstevel@tonic-gate	ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
41880Sstevel@tonic-gate	cmp	pfn, tmp2			/* pfn - pages_end */	;\
41890Sstevel@tonic-gate	bgeu,a,pt %xcc, label/**/2					;\
41900Sstevel@tonic-gate	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
41910Sstevel@tonic-gatelabel/**/_ok:
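/*
 * The same lookup as a C sketch; ldphys()/ldphys32() are hypothetical
 * stand-ins for the ASI_MEM loads:
 *
 *	uint64_t
 *	page_num2memseg_nolock_pa(pfn_t pfn, uint64_t phash_pa)
 *	{
 *		uint64_t mseg, pp;
 *		uint64_t slot = (pfn / mhash_per_slot) &
 *		    (SFMMU_N_MEM_SLOTS - 1);
 *
 *		mseg = ldphys(phash_pa +
 *		    (slot << SFMMU_MEM_HASH_ENTRY_SHIFT));
 *		if (mseg != MSEG_NULLPTR_PA &&
 *		    pfn >= ldphys(mseg + MEMSEG_PAGES_BASE) &&
 *		    pfn < ldphys(mseg + MEMSEG_PAGES_END)) {
 *			pp = ldphys(mseg + MEMSEG_PAGESPA) +
 *			    (pfn - ldphys(mseg + MEMSEG_PAGES_BASE)) *
 *			    PAGE_SIZE;
 *			if (ldphys32(pp + PAGE_PAGENUM) == pfn)
 *				return (mseg);	// hash hit verified
 *		}
 *		// brute force: walk the physical memseg list
 *		for (mseg = memsegspa; mseg != MSEG_NULLPTR_PA;
 *		    mseg = ldphys(mseg + MEMSEG_NEXTPA)) {
 *			if (pfn >= ldphys(mseg + MEMSEG_PAGES_BASE) &&
 *			    pfn < ldphys(mseg + MEMSEG_PAGES_END))
 *				break;
 *		}
 *		return (mseg);	// MSEG_NULLPTR_PA if not found
 *	}
 */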
41920Sstevel@tonic-gate
41930Sstevel@tonic-gate	/*
41940Sstevel@tonic-gate	 * kpm tsb miss handler large pages
41950Sstevel@tonic-gate	 * g1 = 8K kpm TSB entry pointer
41960Sstevel@tonic-gate	 * g2 = tag access register
41970Sstevel@tonic-gate	 * g3 = 4M kpm TSB entry pointer
41980Sstevel@tonic-gate	 */
41990Sstevel@tonic-gate	ALTENTRY(sfmmu_kpm_dtsb_miss)
42000Sstevel@tonic-gate	TT_TRACE(trace_tsbmiss)
42010Sstevel@tonic-gate
42020Sstevel@tonic-gate	CPU_INDEX(%g7, %g6)
42030Sstevel@tonic-gate	sethi	%hi(kpmtsbm_area), %g6
42040Sstevel@tonic-gate	sllx	%g7, KPMTSBM_SHIFT, %g7
42050Sstevel@tonic-gate	or	%g6, %lo(kpmtsbm_area), %g6
42060Sstevel@tonic-gate	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
42070Sstevel@tonic-gate
42080Sstevel@tonic-gate	/* check enable flag */
42090Sstevel@tonic-gate	ldub	[%g6 + KPMTSBM_FLAGS], %g4
42100Sstevel@tonic-gate	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
42110Sstevel@tonic-gate	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
42120Sstevel@tonic-gate	  nop
42130Sstevel@tonic-gate
42140Sstevel@tonic-gate	/* VA range check */
42150Sstevel@tonic-gate	ldx	[%g6 + KPMTSBM_VBASE], %g7
42160Sstevel@tonic-gate	cmp	%g2, %g7
42170Sstevel@tonic-gate	blu,pn	%xcc, sfmmu_tsb_miss
42180Sstevel@tonic-gate	  ldx	[%g6 + KPMTSBM_VEND], %g5
42190Sstevel@tonic-gate	cmp	%g2, %g5
42200Sstevel@tonic-gate	bgeu,pn	%xcc, sfmmu_tsb_miss
42210Sstevel@tonic-gate	  stx	%g3, [%g6 + KPMTSBM_TSBPTR]
42220Sstevel@tonic-gate
42230Sstevel@tonic-gate	/*
42240Sstevel@tonic-gate	 * check TL tsbmiss handling flag
42250Sstevel@tonic-gate	 * bump tsbmiss counter
42260Sstevel@tonic-gate	 */
42270Sstevel@tonic-gate	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
42280Sstevel@tonic-gate#ifdef	DEBUG
42290Sstevel@tonic-gate	and	%g4, KPMTSBM_TLTSBM_FLAG, %g3
42300Sstevel@tonic-gate	inc	%g5
42310Sstevel@tonic-gate	brz,pn	%g3, sfmmu_kpm_exception
42320Sstevel@tonic-gate	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
42330Sstevel@tonic-gate#else
42340Sstevel@tonic-gate	inc	%g5
42350Sstevel@tonic-gate	st	%g5, [%g6 + KPMTSBM_TSBMISS]
42360Sstevel@tonic-gate#endif
42370Sstevel@tonic-gate	/*
42380Sstevel@tonic-gate	 * At this point:
42390Sstevel@tonic-gate	 *  g1 = 8K kpm TSB pointer (not used)
42400Sstevel@tonic-gate	 *  g2 = tag access register
42410Sstevel@tonic-gate	 *  g3 = clobbered
42420Sstevel@tonic-gate	 *  g6 = per-CPU kpm tsbmiss area
42430Sstevel@tonic-gate	 *  g7 = kpm_vbase
42440Sstevel@tonic-gate	 */
42450Sstevel@tonic-gate
42460Sstevel@tonic-gate	/* vaddr2pfn */
42470Sstevel@tonic-gate	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
42480Sstevel@tonic-gate	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
42490Sstevel@tonic-gate	srax    %g4, %g3, %g2			/* which alias range (r) */
42500Sstevel@tonic-gate	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
42510Sstevel@tonic-gate	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
42520Sstevel@tonic-gate
42530Sstevel@tonic-gate	/*
42540Sstevel@tonic-gate	 * Setup %asi
42550Sstevel@tonic-gate	 * mseg_pa = page_numtomemseg_nolock(pfn)
42560Sstevel@tonic-gate	 * if (mseg_pa == NULL) sfmmu_kpm_exception
42570Sstevel@tonic-gate	 * g2=pfn
42580Sstevel@tonic-gate	 */
42590Sstevel@tonic-gate	mov	ASI_MEM, %asi
42600Sstevel@tonic-gate	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmp2m)
42610Sstevel@tonic-gate	cmp	%g3, MSEG_NULLPTR_PA
42620Sstevel@tonic-gate	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
42630Sstevel@tonic-gate	  nop
42640Sstevel@tonic-gate
42650Sstevel@tonic-gate	/*
42660Sstevel@tonic-gate	 * inx = ptokpmp((kpmptop((ptopkpmp(pfn))) - mseg_pa->kpm_pbase));
42670Sstevel@tonic-gate	 * g2=pfn g3=mseg_pa
42680Sstevel@tonic-gate	 */
42690Sstevel@tonic-gate	ldub	[%g6 + KPMTSBM_KPMP2PSHFT], %g5
42700Sstevel@tonic-gate	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
42710Sstevel@tonic-gate	srlx	%g2, %g5, %g4
42720Sstevel@tonic-gate	sllx	%g4, %g5, %g4
42730Sstevel@tonic-gate	sub	%g4, %g7, %g4
42740Sstevel@tonic-gate	srlx	%g4, %g5, %g4
42750Sstevel@tonic-gate
42760Sstevel@tonic-gate	/*
42770Sstevel@tonic-gate	 * Validate inx value
42780Sstevel@tonic-gate	 * g2=pfn g3=mseg_pa g4=inx
42790Sstevel@tonic-gate	 */
42800Sstevel@tonic-gate#ifdef	DEBUG
42810Sstevel@tonic-gate	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
42820Sstevel@tonic-gate	cmp	%g4, %g5			/* inx - nkpmpgs */
42830Sstevel@tonic-gate	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
42840Sstevel@tonic-gate	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
42850Sstevel@tonic-gate#else
42860Sstevel@tonic-gate	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
42870Sstevel@tonic-gate#endif
42880Sstevel@tonic-gate	/*
42890Sstevel@tonic-gate	 * kp = &mseg_pa->kpm_pages[inx]
42900Sstevel@tonic-gate	 */
42910Sstevel@tonic-gate	sllx	%g4, KPMPAGE_SHIFT, %g4		/* kpm_pages offset */
42920Sstevel@tonic-gate	ldxa	[%g3 + MEMSEG_KPM_PAGES]%asi, %g5 /* kpm_pages */
42930Sstevel@tonic-gate	add	%g5, %g4, %g5			/* kp */
42940Sstevel@tonic-gate
42950Sstevel@tonic-gate	/*
42960Sstevel@tonic-gate	 * KPMP_HASH(kp)
42970Sstevel@tonic-gate	 * g2=pfn g3=mseg_pa g4=offset g5=kp g7=kpmp_table_sz
42980Sstevel@tonic-gate	 */
42990Sstevel@tonic-gate	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
43000Sstevel@tonic-gate	sub	%g7, 1, %g7			/* mask */
43010Sstevel@tonic-gate	srlx	%g5, %g1, %g1			/* x = ksp >> kpmp_shift */
43020Sstevel@tonic-gate	add	%g5, %g1, %g5			/* y = ksp + x */
43030Sstevel@tonic-gate	and 	%g5, %g7, %g5			/* hashinx = y & mask */
43040Sstevel@tonic-gate
43050Sstevel@tonic-gate	/*
43060Sstevel@tonic-gate	 * Calculate physical kpm_page pointer
43070Sstevel@tonic-gate	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
43080Sstevel@tonic-gate	 */
43090Sstevel@tonic-gate	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_pagespa */
43100Sstevel@tonic-gate	add	%g1, %g4, %g1			/* kp_pa */
43110Sstevel@tonic-gate
43120Sstevel@tonic-gate	/*
43130Sstevel@tonic-gate	 * Calculate physical hash lock address
43140Sstevel@tonic-gate	 * g1=kp_refcntc_pa g2=pfn g5=hashinx
43150Sstevel@tonic-gate	 */
43160Sstevel@tonic-gate	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_tablepa */
43170Sstevel@tonic-gate	sllx	%g5, KPMHLK_SHIFT, %g5
43180Sstevel@tonic-gate	add	%g4, %g5, %g3
43190Sstevel@tonic-gate	add	%g3, KPMHLK_LOCK, %g3		/* hlck_pa */
43200Sstevel@tonic-gate
43210Sstevel@tonic-gate	/*
43220Sstevel@tonic-gate	 * Assemble tte
43230Sstevel@tonic-gate	 * g1=kp_pa g2=pfn g3=hlck_pa
43240Sstevel@tonic-gate	 */
43250Sstevel@tonic-gate#ifdef sun4v
43260Sstevel@tonic-gate	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
43270Sstevel@tonic-gate	sllx	%g5, 32, %g5
43280Sstevel@tonic-gate	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
43290Sstevel@tonic-gate	or	%g4, TTE4M, %g4
43300Sstevel@tonic-gate	or	%g5, %g4, %g5
43310Sstevel@tonic-gate#else
43320Sstevel@tonic-gate	sethi	%hi(TTE_VALID_INT), %g4
43330Sstevel@tonic-gate	mov	TTE4M, %g5
43340Sstevel@tonic-gate	sllx	%g5, TTE_SZ_SHFT_INT, %g5
43350Sstevel@tonic-gate	or	%g5, %g4, %g5			/* upper part */
43360Sstevel@tonic-gate	sllx	%g5, 32, %g5
43370Sstevel@tonic-gate	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
43380Sstevel@tonic-gate	or	%g5, %g4, %g5
43390Sstevel@tonic-gate#endif
43400Sstevel@tonic-gate	sllx	%g2, MMU_PAGESHIFT, %g4
43410Sstevel@tonic-gate	or	%g5, %g4, %g5			/* tte */
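	/*
	 * The tte just assembled, in C terms for the sun4v branch (the
	 * sun4u branch differs only in carrying the size field in the
	 * upper word via TTE_SZ_SHFT_INT):
	 *
	 *	tte = ((uint64_t)TTE_VALID_INT << 32) |
	 *	    TTE_CP_INT | TTE_CV_INT | TTE_PRIV_INT | TTE_HWWR_INT |
	 *	    TTE4M | ((uint64_t)pfn << MMU_PAGESHIFT);
	 */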
43420Sstevel@tonic-gate	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
43430Sstevel@tonic-gate	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
43440Sstevel@tonic-gate
43450Sstevel@tonic-gate	/*
43460Sstevel@tonic-gate	 * tsb dropin
43470Sstevel@tonic-gate	 * g1=kp_pa g2=ttarget g3=hlck_pa g4=kpmtsbp4m g5=tte g6=kpmtsbm_area
43480Sstevel@tonic-gate	 */
43490Sstevel@tonic-gate
43500Sstevel@tonic-gate	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
43510Sstevel@tonic-gate	KPMLOCK_ENTER(%g3, %g7, kpmtsbmhdlr1, ASI_MEM)
43520Sstevel@tonic-gate
43530Sstevel@tonic-gate	/* use C-handler if there's no go for dropin */
43540Sstevel@tonic-gate	ldsha	[%g1 + KPMPAGE_REFCNTC]%asi, %g7 /* kp_refcntc */
43550Sstevel@tonic-gate	cmp	%g7, -1
43560Sstevel@tonic-gate	bne,pn	%xcc, 5f			/* if kp_refcntc != -1 */
43570Sstevel@tonic-gate	  nop
43580Sstevel@tonic-gate
43590Sstevel@tonic-gate#ifdef	DEBUG
43600Sstevel@tonic-gate	/* double check refcnt */
43610Sstevel@tonic-gate	ldsha	[%g1 + KPMPAGE_REFCNT]%asi, %g7
43620Sstevel@tonic-gate	brz,pn	%g7, 5f			/* let C-handler deal with this */
43630Sstevel@tonic-gate	  nop
43640Sstevel@tonic-gate#endif
43650Sstevel@tonic-gate
43660Sstevel@tonic-gate#ifndef sun4v
43670Sstevel@tonic-gate	ldub	[%g6 + KPMTSBM_FLAGS], %g7
43680Sstevel@tonic-gate	mov	ASI_N, %g1
43690Sstevel@tonic-gate	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
43700Sstevel@tonic-gate	movnz	%icc, ASI_MEM, %g1
43710Sstevel@tonic-gate	mov	%g1, %asi
43720Sstevel@tonic-gate#endif
43730Sstevel@tonic-gate
43748187SPaul.Sandhu@Sun.COM	/*
43758187SPaul.Sandhu@Sun.COM	 * TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set)
43768187SPaul.Sandhu@Sun.COM	 * If we fail to lock the TSB entry then just load the tte into the
43778187SPaul.Sandhu@Sun.COM	 * TLB.
43788187SPaul.Sandhu@Sun.COM	 */
43798187SPaul.Sandhu@Sun.COM	TSB_LOCK_ENTRY(%g4, %g1, %g7, locked_tsb_l1)
43800Sstevel@tonic-gate
43810Sstevel@tonic-gate	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
43820Sstevel@tonic-gate	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
43838187SPaul.Sandhu@Sun.COMlocked_tsb_l1:
43840Sstevel@tonic-gate	DTLB_STUFF(%g5, %g1, %g2, %g4, %g6)
43850Sstevel@tonic-gate
43860Sstevel@tonic-gate	/* KPMLOCK_EXIT(kpmlckp, asi) */
43870Sstevel@tonic-gate	KPMLOCK_EXIT(%g3, ASI_MEM)
43880Sstevel@tonic-gate
43890Sstevel@tonic-gate	/*
43900Sstevel@tonic-gate	 * If trapstat is running, we need to shift the %tpc and %tnpc to
43910Sstevel@tonic-gate	 * point to trapstat's TSB miss return code (note that trapstat
43920Sstevel@tonic-gate	 * itself will patch the correct offset to add).
43930Sstevel@tonic-gate	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
43940Sstevel@tonic-gate	 */
43950Sstevel@tonic-gate	rdpr	%tl, %g7
43960Sstevel@tonic-gate	cmp	%g7, 1
43970Sstevel@tonic-gate	ble	%icc, 0f
43980Sstevel@tonic-gate	sethi	%hi(KERNELBASE), %g6
43990Sstevel@tonic-gate	rdpr	%tpc, %g7
44000Sstevel@tonic-gate	or	%g6, %lo(KERNELBASE), %g6
44010Sstevel@tonic-gate	cmp	%g7, %g6
44020Sstevel@tonic-gate	bgeu	%xcc, 0f
44030Sstevel@tonic-gate	ALTENTRY(tsbmiss_trapstat_patch_point_kpm)
44040Sstevel@tonic-gate	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
44050Sstevel@tonic-gate	wrpr	%g7, %tpc
44060Sstevel@tonic-gate	add	%g7, 4, %g7
44070Sstevel@tonic-gate	wrpr	%g7, %tnpc
44080Sstevel@tonic-gate0:
44090Sstevel@tonic-gate	retry
44100Sstevel@tonic-gate5:
44110Sstevel@tonic-gate	/* g3=hlck_pa */
44120Sstevel@tonic-gate	KPMLOCK_EXIT(%g3, ASI_MEM)
44130Sstevel@tonic-gate	ba,pt	%icc, sfmmu_kpm_exception
44140Sstevel@tonic-gate	  nop
44150Sstevel@tonic-gate	SET_SIZE(sfmmu_kpm_dtsb_miss)
44160Sstevel@tonic-gate
44170Sstevel@tonic-gate	/*
44180Sstevel@tonic-gate	 * kpm tsbmiss handler for smallpages
44190Sstevel@tonic-gate	 * g1 = 8K kpm TSB pointer
44200Sstevel@tonic-gate	 * g2 = tag access register
44210Sstevel@tonic-gate	 * g3 = 4M kpm TSB pointer
44220Sstevel@tonic-gate	 */
44230Sstevel@tonic-gate	ALTENTRY(sfmmu_kpm_dtsb_miss_small)
44240Sstevel@tonic-gate	TT_TRACE(trace_tsbmiss)
44250Sstevel@tonic-gate	CPU_INDEX(%g7, %g6)
44260Sstevel@tonic-gate	sethi	%hi(kpmtsbm_area), %g6
44270Sstevel@tonic-gate	sllx	%g7, KPMTSBM_SHIFT, %g7
44280Sstevel@tonic-gate	or	%g6, %lo(kpmtsbm_area), %g6
44290Sstevel@tonic-gate	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
44300Sstevel@tonic-gate
44310Sstevel@tonic-gate	/* check enable flag */
44320Sstevel@tonic-gate	ldub	[%g6 + KPMTSBM_FLAGS], %g4
44330Sstevel@tonic-gate	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
44340Sstevel@tonic-gate	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
44350Sstevel@tonic-gate	  nop
44360Sstevel@tonic-gate
44370Sstevel@tonic-gate	/*
44380Sstevel@tonic-gate	 * VA range check
44390Sstevel@tonic-gate	 * On fail: goto sfmmu_tsb_miss
44400Sstevel@tonic-gate	 */
44410Sstevel@tonic-gate	ldx	[%g6 + KPMTSBM_VBASE], %g7
44420Sstevel@tonic-gate	cmp	%g2, %g7
44430Sstevel@tonic-gate	blu,pn	%xcc, sfmmu_tsb_miss
44440Sstevel@tonic-gate	  ldx	[%g6 + KPMTSBM_VEND], %g5
44450Sstevel@tonic-gate	cmp	%g2, %g5
44460Sstevel@tonic-gate	bgeu,pn	%xcc, sfmmu_tsb_miss
44470Sstevel@tonic-gate	  stx	%g1, [%g6 + KPMTSBM_TSBPTR]	/* save 8K kpm TSB pointer */
44480Sstevel@tonic-gate
44490Sstevel@tonic-gate	/*
44500Sstevel@tonic-gate	 * check TL tsbmiss handling flag
44510Sstevel@tonic-gate	 * bump tsbmiss counter
44520Sstevel@tonic-gate	 */
44530Sstevel@tonic-gate	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
44540Sstevel@tonic-gate#ifdef	DEBUG
44550Sstevel@tonic-gate	and	%g4, KPMTSBM_TLTSBM_FLAG, %g1
44560Sstevel@tonic-gate	inc	%g5
44570Sstevel@tonic-gate	brz,pn	%g1, sfmmu_kpm_exception
44580Sstevel@tonic-gate	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
44590Sstevel@tonic-gate#else
44600Sstevel@tonic-gate	inc	%g5
44610Sstevel@tonic-gate	st	%g5, [%g6 + KPMTSBM_TSBMISS]
44620Sstevel@tonic-gate#endif
44630Sstevel@tonic-gate	/*
44640Sstevel@tonic-gate	 * At this point:
44650Sstevel@tonic-gate	 *  g1 = clobbered
44660Sstevel@tonic-gate	 *  g2 = tag access register
44670Sstevel@tonic-gate	 *  g3 = 4M kpm TSB pointer (not used)
44680Sstevel@tonic-gate	 *  g6 = per-CPU kpm tsbmiss area
44690Sstevel@tonic-gate	 *  g7 = kpm_vbase
44700Sstevel@tonic-gate	 */
44710Sstevel@tonic-gate
44727393SDonghai.Qiao@Sun.COM	/*
44737393SDonghai.Qiao@Sun.COM	 * Assembly implementation of SFMMU_KPM_VTOP(vaddr, paddr)
44747393SDonghai.Qiao@Sun.COM	 * which is defined in mach_kpm.h. Any changes in that macro
44757393SDonghai.Qiao@Sun.COM	 * should also be ported back to this assembly code.
44767393SDonghai.Qiao@Sun.COM	 */
44777393SDonghai.Qiao@Sun.COM	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3	/* g3 = kpm_size_shift */
44780Sstevel@tonic-gate	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
44797393SDonghai.Qiao@Sun.COM	srax    %g4, %g3, %g7			/* which alias range (r) */
44807393SDonghai.Qiao@Sun.COM	brz,pt	%g7, 2f
44817393SDonghai.Qiao@Sun.COM	  sethi   %hi(vac_colors_mask), %g5
44827393SDonghai.Qiao@Sun.COM	ld	[%g5 + %lo(vac_colors_mask)], %g5
44837393SDonghai.Qiao@Sun.COM
44847393SDonghai.Qiao@Sun.COM	srlx	%g2, MMU_PAGESHIFT, %g1		/* vaddr >> MMU_PAGESHIFT */
44857393SDonghai.Qiao@Sun.COM	and	%g1, %g5, %g1			/* g1 = v */
44867393SDonghai.Qiao@Sun.COM	sllx	%g7, %g3, %g5			/* g5 = r << kpm_size_shift */
44877393SDonghai.Qiao@Sun.COM	cmp	%g7, %g1			/* if (r > v) */
44887393SDonghai.Qiao@Sun.COM	bleu,pn %xcc, 1f
44897393SDonghai.Qiao@Sun.COM	  sub   %g4, %g5, %g4			/* paddr -= r << kpm_size_shift */
44907393SDonghai.Qiao@Sun.COM	sub	%g7, %g1, %g5			/* g5 = r - v */
44917393SDonghai.Qiao@Sun.COM	sllx	%g5, MMU_PAGESHIFT, %g7		/* (r-v) << MMU_PAGESHIFT */
44927393SDonghai.Qiao@Sun.COM	add	%g4, %g7, %g4			/* paddr += (r-v)<<MMU_PAGESHIFT */
44937393SDonghai.Qiao@Sun.COM	ba	2f
44947393SDonghai.Qiao@Sun.COM	  nop
44957393SDonghai.Qiao@Sun.COM1:
44967393SDonghai.Qiao@Sun.COM	sllx	%g7, MMU_PAGESHIFT, %g5		/* else */
44977393SDonghai.Qiao@Sun.COM	sub	%g4, %g5, %g4			/* paddr -= r << MMU_PAGESHIFT */
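	/*
	 * The logic above in C; this mirrors SFMMU_KPM_VTOP() from
	 * mach_kpm.h, reconstructed here from the assembly:
	 *
	 *	paddr = vaddr - kpm_vbase;
	 *	r = paddr >> kpm_size_shift;	// which alias range
	 *	if (r != 0) {
	 *		v = (vaddr >> MMU_PAGESHIFT) & vac_colors_mask;
	 *		paddr -= r << kpm_size_shift;
	 *		if (r > v)
	 *			paddr += (r - v) << MMU_PAGESHIFT;
	 *		else
	 *			paddr -= r << MMU_PAGESHIFT;
	 *	}
	 */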
44987393SDonghai.Qiao@Sun.COM
44997393SDonghai.Qiao@Sun.COM	/*
45007393SDonghai.Qiao@Sun.COM	 * paddr2pfn
45017393SDonghai.Qiao@Sun.COM	 *  g1 = vcolor (not used)
45027393SDonghai.Qiao@Sun.COM	 *  g2 = tag access register
45037393SDonghai.Qiao@Sun.COM	 *  g3 = clobbered
45047393SDonghai.Qiao@Sun.COM	 *  g4 = paddr
45057393SDonghai.Qiao@Sun.COM	 *  g5 = clobbered
45067393SDonghai.Qiao@Sun.COM	 *  g6 = per-CPU kpm tsbmiss area
45077393SDonghai.Qiao@Sun.COM	 *  g7 = clobbered
45087393SDonghai.Qiao@Sun.COM	 */
45097393SDonghai.Qiao@Sun.COM2:
45107393SDonghai.Qiao@Sun.COM	srlx	%g4, MMU_PAGESHIFT, %g2		/* g2 = pfn */
45110Sstevel@tonic-gate
45120Sstevel@tonic-gate	/*
45130Sstevel@tonic-gate	 * Setup %asi
45140Sstevel@tonic-gate	 * mseg_pa = page_numtomemseg_nolock_pa(pfn)
45150Sstevel@tonic-gate	 * if (mseg not found) sfmmu_kpm_exception
45167393SDonghai.Qiao@Sun.COM	 * g2=pfn g6=per-CPU kpm tsbmiss area
45177393SDonghai.Qiao@Sun.COM	 * g4 g5 g7 for scratch use.
45180Sstevel@tonic-gate	 */
45190Sstevel@tonic-gate	mov	ASI_MEM, %asi
45200Sstevel@tonic-gate	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
45217393SDonghai.Qiao@Sun.COM	cmp	%g3, MSEG_NULLPTR_PA
45220Sstevel@tonic-gate	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
45230Sstevel@tonic-gate	  nop
45240Sstevel@tonic-gate
45250Sstevel@tonic-gate	/*
45260Sstevel@tonic-gate	 * inx = pfn - mseg_pa->kpm_pbase
45277393SDonghai.Qiao@Sun.COM	 * g2=pfn  g3=mseg_pa  g6=per-CPU kpm tsbmiss area
45280Sstevel@tonic-gate	 */
45290Sstevel@tonic-gate	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
45307393SDonghai.Qiao@Sun.COM	sub	%g2, %g7, %g4
45310Sstevel@tonic-gate
45320Sstevel@tonic-gate#ifdef	DEBUG
45330Sstevel@tonic-gate	/*
45340Sstevel@tonic-gate	 * Validate inx value
45357393SDonghai.Qiao@Sun.COM	 * g2=pfn g3=mseg_pa g4=inx g6=per-CPU tsbmiss area
45360Sstevel@tonic-gate	 */
45370Sstevel@tonic-gate	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
45380Sstevel@tonic-gate	cmp	%g4, %g5			/* inx - nkpmpgs */
45390Sstevel@tonic-gate	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
45400Sstevel@tonic-gate	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
45410Sstevel@tonic-gate#else
45420Sstevel@tonic-gate	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
45430Sstevel@tonic-gate#endif
45440Sstevel@tonic-gate	/* ksp = &mseg_pa->kpm_spages[inx] */
45450Sstevel@tonic-gate	ldxa	[%g3 + MEMSEG_KPM_SPAGES]%asi, %g5
45460Sstevel@tonic-gate	add	%g5, %g4, %g5			/* ksp */
45470Sstevel@tonic-gate
45480Sstevel@tonic-gate	/*
45490Sstevel@tonic-gate	 * KPMP_SHASH(kp)
45507393SDonghai.Qiao@Sun.COM	 * g2=pfn g3=mseg_pa g4=inx g5=ksp
45517393SDonghai.Qiao@Sun.COM	 * g6=per-CPU kpm tsbmiss area  g7=kpmp_stable_sz
45520Sstevel@tonic-gate	 */
45530Sstevel@tonic-gate	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
45540Sstevel@tonic-gate	sub	%g7, 1, %g7			/* mask */
45550Sstevel@tonic-gate	sllx	%g5, %g1, %g1			/* x = ksp << kpmp_shift */
45560Sstevel@tonic-gate	add	%g5, %g1, %g5			/* y = ksp + x */
45570Sstevel@tonic-gate	and 	%g5, %g7, %g5			/* hashinx = y & mask */
45580Sstevel@tonic-gate
45590Sstevel@tonic-gate	/*
45600Sstevel@tonic-gate	 * Calculate physical kpm_spage pointer
45610Sstevel@tonic-gate	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
45627393SDonghai.Qiao@Sun.COM	 * g6=per-CPU kpm tsbmiss area
45630Sstevel@tonic-gate	 */
45640Sstevel@tonic-gate	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
45650Sstevel@tonic-gate	add	%g1, %g4, %g1			/* ksp_pa */
45660Sstevel@tonic-gate
45670Sstevel@tonic-gate	/*
45680Sstevel@tonic-gate	 * Calculate physical hash lock address.
45690Sstevel@tonic-gate	 * Note: Changes in kpm_shlk_t must be reflected here.
45700Sstevel@tonic-gate	 * g1=ksp_pa g2=pfn g5=hashinx
45717393SDonghai.Qiao@Sun.COM	 * g6=per-CPU kpm tsbmiss area
45720Sstevel@tonic-gate	 */
45730Sstevel@tonic-gate	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
45740Sstevel@tonic-gate	sllx	%g5, KPMSHLK_SHIFT, %g5
45750Sstevel@tonic-gate	add	%g4, %g5, %g3			/* hlck_pa */
45760Sstevel@tonic-gate
45770Sstevel@tonic-gate	/*
45787393SDonghai.Qiao@Sun.COM	 * Assemble non-cacheable tte initially
45790Sstevel@tonic-gate	 * g1=ksp_pa g2=pfn g3=hlck_pa
45807393SDonghai.Qiao@Sun.COM	 * g6=per-CPU kpm tsbmiss area
45810Sstevel@tonic-gate	 */
45820Sstevel@tonic-gate	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
45830Sstevel@tonic-gate	sllx	%g5, 32, %g5
45847393SDonghai.Qiao@Sun.COM	mov	(TTE_CP_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
45850Sstevel@tonic-gate	or	%g5, %g4, %g5
45860Sstevel@tonic-gate	sllx	%g2, MMU_PAGESHIFT, %g4
45870Sstevel@tonic-gate	or	%g5, %g4, %g5			/* tte */
45880Sstevel@tonic-gate	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
45890Sstevel@tonic-gate	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
45900Sstevel@tonic-gate
45910Sstevel@tonic-gate	/*
45920Sstevel@tonic-gate	 * tsb dropin
45937393SDonghai.Qiao@Sun.COM	 * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte (non-cacheable)
45947393SDonghai.Qiao@Sun.COM	 * g6=per-CPU kpm tsbmiss area  g7=scratch register
45950Sstevel@tonic-gate	 */
45960Sstevel@tonic-gate
45970Sstevel@tonic-gate	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
45980Sstevel@tonic-gate	KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)
45990Sstevel@tonic-gate
46000Sstevel@tonic-gate	/* use C-handler if there's no go for dropin */
46017393SDonghai.Qiao@Sun.COM	ldsba	[%g1 + KPMSPAGE_MAPPED]%asi, %g7	/* kp_mapped */
46027393SDonghai.Qiao@Sun.COM	andcc	%g7, KPM_MAPPED_GO, %g0			/* go or no go ? */
46037393SDonghai.Qiao@Sun.COM	bz,pt	%icc, 5f				/* no go */
46047393SDonghai.Qiao@Sun.COM	  nop
46057393SDonghai.Qiao@Sun.COM	and	%g7, KPM_MAPPED_MASK, %g7		/* go */
46067393SDonghai.Qiao@Sun.COM	cmp	%g7, KPM_MAPPEDS			/* cacheable ? */
46077393SDonghai.Qiao@Sun.COM	be,a,pn	%xcc, 3f
46087393SDonghai.Qiao@Sun.COM	  or	%g5, TTE_CV_INT, %g5			/* cacheable */
46097393SDonghai.Qiao@Sun.COM3:
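	/*
	 * In C terms, the go/no-go decision just made (sketch):
	 *
	 *	if (!(kp_mapped & KPM_MAPPED_GO))
	 *		goto use_c_handler;	// label 5 below
	 *	if ((kp_mapped & KPM_MAPPED_MASK) == KPM_MAPPEDS)
	 *		tte |= TTE_CV_INT;	// no VAC conflict: cacheable
	 */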
46100Sstevel@tonic-gate#ifndef sun4v
46110Sstevel@tonic-gate	ldub	[%g6 + KPMTSBM_FLAGS], %g7
46120Sstevel@tonic-gate	mov	ASI_N, %g1
46130Sstevel@tonic-gate	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
46140Sstevel@tonic-gate	movnz	%icc, ASI_MEM, %g1
46150Sstevel@tonic-gate	mov	%g1, %asi
46160Sstevel@tonic-gate#endif
46170Sstevel@tonic-gate
46188187SPaul.Sandhu@Sun.COM	/*
46198187SPaul.Sandhu@Sun.COM	 * TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set)
46208187SPaul.Sandhu@Sun.COM	 * If we fail to lock the TSB entry then just load the tte into the
46218187SPaul.Sandhu@Sun.COM	 * TLB.
46228187SPaul.Sandhu@Sun.COM	 */
46238187SPaul.Sandhu@Sun.COM	TSB_LOCK_ENTRY(%g4, %g1, %g7, locked_tsb_l2)
46240Sstevel@tonic-gate
46250Sstevel@tonic-gate	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
46260Sstevel@tonic-gate	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
46278187SPaul.Sandhu@Sun.COMlocked_tsb_l2:
46280Sstevel@tonic-gate	DTLB_STUFF(%g5, %g2, %g4, %g5, %g6)
46290Sstevel@tonic-gate
46300Sstevel@tonic-gate	/* KPMLOCK_EXIT(kpmlckp, asi) */
46310Sstevel@tonic-gate	KPMLOCK_EXIT(%g3, ASI_MEM)
46320Sstevel@tonic-gate
46330Sstevel@tonic-gate	/*
46340Sstevel@tonic-gate	 * If trapstat is running, we need to shift the %tpc and %tnpc to
46350Sstevel@tonic-gate	 * point to trapstat's TSB miss return code (note that trapstat
46360Sstevel@tonic-gate	 * itself will patch the correct offset to add).
46370Sstevel@tonic-gate	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
46380Sstevel@tonic-gate	 */
46390Sstevel@tonic-gate	rdpr	%tl, %g7
46400Sstevel@tonic-gate	cmp	%g7, 1
46410Sstevel@tonic-gate	ble	%icc, 0f
46420Sstevel@tonic-gate	sethi	%hi(KERNELBASE), %g6
46430Sstevel@tonic-gate	rdpr	%tpc, %g7
46440Sstevel@tonic-gate	or	%g6, %lo(KERNELBASE), %g6
46450Sstevel@tonic-gate	cmp	%g7, %g6
46460Sstevel@tonic-gate	bgeu	%xcc, 0f
46470Sstevel@tonic-gate	ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
46480Sstevel@tonic-gate	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
46490Sstevel@tonic-gate	wrpr	%g7, %tpc
46500Sstevel@tonic-gate	add	%g7, 4, %g7
46510Sstevel@tonic-gate	wrpr	%g7, %tnpc
46520Sstevel@tonic-gate0:
46530Sstevel@tonic-gate	retry
46540Sstevel@tonic-gate5:
46550Sstevel@tonic-gate	/* g3=hlck_pa */
46560Sstevel@tonic-gate	KPMLOCK_EXIT(%g3, ASI_MEM)
46570Sstevel@tonic-gate	ba,pt	%icc, sfmmu_kpm_exception
46580Sstevel@tonic-gate	  nop
46590Sstevel@tonic-gate	SET_SIZE(sfmmu_kpm_dtsb_miss_small)
46600Sstevel@tonic-gate
46610Sstevel@tonic-gate#if (1 << KPMTSBM_SHIFT) != KPMTSBM_SIZE
46620Sstevel@tonic-gate#error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
46630Sstevel@tonic-gate#endif
46640Sstevel@tonic-gate
46650Sstevel@tonic-gate#endif /* lint */
46660Sstevel@tonic-gate
46670Sstevel@tonic-gate#ifdef	lint
46680Sstevel@tonic-gate/*
46690Sstevel@tonic-gate * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
46700Sstevel@tonic-gate * Called from C-level, sets/clears "go" indication for trap level handler.
46710Sstevel@tonic-gate * khl_lock is a low level spin lock to protect the kp_tsbmtl field.
46720Sstevel@tonic-gate * It is assumed that &kp->kp_refcntc is checked for zero or -1 at C-level.
46730Sstevel@tonic-gate * Assumes khl_mutex is held when called from C-level.
46740Sstevel@tonic-gate */
46750Sstevel@tonic-gate/* ARGSUSED */
46760Sstevel@tonic-gatevoid
46770Sstevel@tonic-gatesfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
46780Sstevel@tonic-gate{
46790Sstevel@tonic-gate}
46800Sstevel@tonic-gate
46810Sstevel@tonic-gate/*
46820Sstevel@tonic-gate * kpm_smallpages: stores val to the byte at address "mapped", within
46830Sstevel@tonic-gate * low level spin lock brackets. The old value is returned.
46840Sstevel@tonic-gate * Called from C-level.
46850Sstevel@tonic-gate */
46860Sstevel@tonic-gate/* ARGSUSED */
46870Sstevel@tonic-gateint
46887393SDonghai.Qiao@Sun.COMsfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
46890Sstevel@tonic-gate{
46900Sstevel@tonic-gate	return (0);
46910Sstevel@tonic-gate}
46920Sstevel@tonic-gate
46930Sstevel@tonic-gate#else /* lint */
46940Sstevel@tonic-gate
46950Sstevel@tonic-gate	.seg	".data"
46960Sstevel@tonic-gatesfmmu_kpm_tsbmtl_panic:
46970Sstevel@tonic-gate	.ascii	"sfmmu_kpm_tsbmtl: interrupts disabled"
46980Sstevel@tonic-gate	.byte	0
46990Sstevel@tonic-gatesfmmu_kpm_stsbmtl_panic:
47000Sstevel@tonic-gate	.ascii	"sfmmu_kpm_stsbmtl: interrupts disabled"
47010Sstevel@tonic-gate	.byte	0
47020Sstevel@tonic-gate	.align	4
47030Sstevel@tonic-gate	.seg	".text"
47040Sstevel@tonic-gate
47050Sstevel@tonic-gate	ENTRY_NP(sfmmu_kpm_tsbmtl)
47060Sstevel@tonic-gate	rdpr	%pstate, %o3
47070Sstevel@tonic-gate	/*
47080Sstevel@tonic-gate	 * %o0 = &kp_refcntc
47090Sstevel@tonic-gate	 * %o1 = &khl_lock
47100Sstevel@tonic-gate	 * %o2 = 0/1 (off/on)
47110Sstevel@tonic-gate	 * %o3 = pstate save
47120Sstevel@tonic-gate	 */
47130Sstevel@tonic-gate#ifdef DEBUG
47140Sstevel@tonic-gate	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
47150Sstevel@tonic-gate	bnz,pt %icc, 1f				/* disabled, panic	 */
47160Sstevel@tonic-gate	  nop
47170Sstevel@tonic-gate	save	%sp, -SA(MINFRAME), %sp
47180Sstevel@tonic-gate	sethi	%hi(sfmmu_kpm_tsbmtl_panic), %o0
47190Sstevel@tonic-gate	call	panic
47200Sstevel@tonic-gate	 or	%o0, %lo(sfmmu_kpm_tsbmtl_panic), %o0
47210Sstevel@tonic-gate	ret
47220Sstevel@tonic-gate	restore
47230Sstevel@tonic-gate1:
47240Sstevel@tonic-gate#endif /* DEBUG */
47250Sstevel@tonic-gate	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
47260Sstevel@tonic-gate
47270Sstevel@tonic-gate	KPMLOCK_ENTER(%o1, %o4, kpmtsbmtl1, ASI_N)
47280Sstevel@tonic-gate	mov	-1, %o5			/* assume "on": -1 */
47290Sstevel@tonic-gate	brz,a	%o2, 2f			/* cmd == 0 means "off" */
47300Sstevel@tonic-gate	  mov	0, %o5			/* annulled unless cmd == 0 */
47310Sstevel@tonic-gate2:
47320Sstevel@tonic-gate	sth	%o5, [%o0]		/* set kp_refcntc */
47330Sstevel@tonic-gate	KPMLOCK_EXIT(%o1, ASI_N)
47340Sstevel@tonic-gate
47350Sstevel@tonic-gate	retl
47360Sstevel@tonic-gate	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
47370Sstevel@tonic-gate	SET_SIZE(sfmmu_kpm_tsbmtl)
47380Sstevel@tonic-gate
47390Sstevel@tonic-gate	ENTRY_NP(sfmmu_kpm_stsbmtl)
47400Sstevel@tonic-gate	rdpr	%pstate, %o3
47410Sstevel@tonic-gate	/*
47420Sstevel@tonic-gate	 * %o0 = &mapped
47430Sstevel@tonic-gate	 * %o1 = &kshl_lock
47440Sstevel@tonic-gate	 * %o2 = val
47450Sstevel@tonic-gate	 * %o3 = pstate save
47460Sstevel@tonic-gate	 */
47470Sstevel@tonic-gate#ifdef DEBUG
47480Sstevel@tonic-gate	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
47490Sstevel@tonic-gate	bnz,pt %icc, 1f				/* disabled, panic	 */
47500Sstevel@tonic-gate	  nop
47510Sstevel@tonic-gate	save	%sp, -SA(MINFRAME), %sp
47520Sstevel@tonic-gate	sethi	%hi(sfmmu_kpm_stsbmtl_panic), %o0
47530Sstevel@tonic-gate	call	panic
47540Sstevel@tonic-gate	  or	%o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
47550Sstevel@tonic-gate	ret
47560Sstevel@tonic-gate	restore
47570Sstevel@tonic-gate1:
47580Sstevel@tonic-gate#endif /* DEBUG */
47590Sstevel@tonic-gate	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
47600Sstevel@tonic-gate
47610Sstevel@tonic-gate	KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
47620Sstevel@tonic-gate	ldsb	[%o0], %o5		/* load old mapped state */
47630Sstevel@tonic-gate	stb	%o2, [%o0]		/* store new val */
47640Sstevel@tonic-gate	KPMLOCK_EXIT(%o1, ASI_N)
47650Sstevel@tonic-gate
47667393SDonghai.Qiao@Sun.COM	and	%o5, KPM_MAPPED_MASK, %o0	/* return old val */
47670Sstevel@tonic-gate	retl
47680Sstevel@tonic-gate	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
47690Sstevel@tonic-gate	SET_SIZE(sfmmu_kpm_stsbmtl)
47700Sstevel@tonic-gate
47710Sstevel@tonic-gate#endif /* lint */
47720Sstevel@tonic-gate
47730Sstevel@tonic-gate#ifndef lint
47740Sstevel@tonic-gate#ifdef sun4v
47750Sstevel@tonic-gate	/*
47760Sstevel@tonic-gate	 * User/kernel data miss w/ multiple TSBs
47770Sstevel@tonic-gate	 * The first probe covers 8K, 64K, and 512K page sizes,
47780Sstevel@tonic-gate	 * because 64K and 512K mappings are replicated off the 8K
47790Sstevel@tonic-gate	 * pointer.  The second probe covers the 4M page size only.
47800Sstevel@tonic-gate	 *
47810Sstevel@tonic-gate	 * MMU fault area contains miss address and context.
47820Sstevel@tonic-gate	 */
47830Sstevel@tonic-gate	ALTENTRY(sfmmu_slow_dmmu_miss)
47843687Sjb145095	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	! %g2 = ptagacc, %g3 = ctx type
47850Sstevel@tonic-gate
47860Sstevel@tonic-gateslow_miss_common:
47870Sstevel@tonic-gate	/*
47880Sstevel@tonic-gate	 *  %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
47890Sstevel@tonic-gate	 *  %g3 = ctx (cannot be INVALID_CONTEXT)
47900Sstevel@tonic-gate	 */
47910Sstevel@tonic-gate	brnz,pt	%g3, 8f			! check for user context
47920Sstevel@tonic-gate	  nop
47930Sstevel@tonic-gate
47940Sstevel@tonic-gate	/*
47950Sstevel@tonic-gate	 * Kernel miss
47960Sstevel@tonic-gate	 * Get 8K and 4M TSB pointers in %g1 and %g3 and
47970Sstevel@tonic-gate	 * branch to sfmmu_tsb_miss_tt to handle it.
47980Sstevel@tonic-gate	 */
47990Sstevel@tonic-gate	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
48000Sstevel@tonic-gatesfmmu_dslow_patch_ktsb_base:
48010Sstevel@tonic-gate	RUNTIME_PATCH_SETX(%g1, %g6)	! %g1 = contents of ktsb_pbase
48020Sstevel@tonic-gatesfmmu_dslow_patch_ktsb_szcode:
48030Sstevel@tonic-gate	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
48040Sstevel@tonic-gate
48050Sstevel@tonic-gate	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
48060Sstevel@tonic-gate	! %g1 = First TSB entry pointer, as TSB miss handler expects
48070Sstevel@tonic-gate
48080Sstevel@tonic-gate	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
48090Sstevel@tonic-gatesfmmu_dslow_patch_ktsb4m_base:
48100Sstevel@tonic-gate	RUNTIME_PATCH_SETX(%g3, %g6)	! %g3 = contents of ktsb4m_pbase
48110Sstevel@tonic-gatesfmmu_dslow_patch_ktsb4m_szcode:
48120Sstevel@tonic-gate	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
48130Sstevel@tonic-gate
48140Sstevel@tonic-gate	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
48150Sstevel@tonic-gate	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
48160Sstevel@tonic-gate	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
48170Sstevel@tonic-gate	.empty
48180Sstevel@tonic-gate
48190Sstevel@tonic-gate8:
48200Sstevel@tonic-gate	/*
48210Sstevel@tonic-gate	 * User miss
48220Sstevel@tonic-gate	 * Get first TSB pointer in %g1
48230Sstevel@tonic-gate	 * Get second TSB pointer (or NULL if no second TSB) in %g3
48240Sstevel@tonic-gate	 * Branch to sfmmu_tsb_miss_tt to handle it
48250Sstevel@tonic-gate	 */
48260Sstevel@tonic-gate	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
48270Sstevel@tonic-gate	/* %g1 = first TSB entry ptr now, %g2 preserved */
48280Sstevel@tonic-gate
48290Sstevel@tonic-gate	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
48304528Spaulsan	brlz,pt %g3, sfmmu_tsb_miss_tt		/* done if no 2nd TSB */
48314528Spaulsan	  nop
48320Sstevel@tonic-gate
48330Sstevel@tonic-gate	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
48340Sstevel@tonic-gate	/* %g3 = second TSB entry ptr now, %g2 preserved */
48350Sstevel@tonic-gate9:
48360Sstevel@tonic-gate	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
48370Sstevel@tonic-gate	.empty
48380Sstevel@tonic-gate	SET_SIZE(sfmmu_slow_dmmu_miss)
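
/*
 * For reference, GET_TSBE_POINTER above computes a TSB entry address
 * along these lines (a sketch under the usual sfmmu TSB layout of
 * (512 << szcode) entries of 16 bytes each; the variable names are
 * illustrative, not the macro's actual temporaries):
 *
 *	nentries = 512 << tsb_szcode;
 *	idx = (vaddr >> pageshift) & (nentries - 1);
 *	tsbep = tsb_base + (idx << 4);
 */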
48390Sstevel@tonic-gate
48400Sstevel@tonic-gate
48410Sstevel@tonic-gate	/*
48420Sstevel@tonic-gate	 * User/kernel instruction miss w/ multiple TSBs
48430Sstevel@tonic-gate	 * The first probe covers 8K, 64K, and 512K page sizes,
48440Sstevel@tonic-gate	 * because 64K and 512K mappings are replicated off the 8K
48450Sstevel@tonic-gate	 * pointer.  The second probe covers the 4M page size only.
48460Sstevel@tonic-gate	 *
48470Sstevel@tonic-gate	 * MMU fault area contains miss address and context.
48480Sstevel@tonic-gate	 */
48490Sstevel@tonic-gate	ALTENTRY(sfmmu_slow_immu_miss)
48503687Sjb145095	GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)	! %g2 = ptagacc, %g3 = ctx type
48513687Sjb145095	ba,a,pt	%xcc, slow_miss_common
48520Sstevel@tonic-gate	SET_SIZE(sfmmu_slow_immu_miss)
48530Sstevel@tonic-gate
48540Sstevel@tonic-gate#endif /* sun4v */
48550Sstevel@tonic-gate#endif	/* lint */
48560Sstevel@tonic-gate
48570Sstevel@tonic-gate#ifndef lint
48580Sstevel@tonic-gate
48590Sstevel@tonic-gate/*
48600Sstevel@tonic-gate * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
48610Sstevel@tonic-gate */
48620Sstevel@tonic-gate	.seg	".data"
48630Sstevel@tonic-gate	.align	64
48640Sstevel@tonic-gate	.global tsbmiss_area
48650Sstevel@tonic-gatetsbmiss_area:
48660Sstevel@tonic-gate	.skip	(TSBMISS_SIZE * NCPU)
48670Sstevel@tonic-gate
48680Sstevel@tonic-gate	.align	64
48690Sstevel@tonic-gate	.global kpmtsbm_area
48700Sstevel@tonic-gatekpmtsbm_area:
48710Sstevel@tonic-gate	.skip	(KPMTSBM_SIZE * NCPU)
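
/*
 * The miss handlers locate a CPU's private slice of these arrays by
 * scaling the CPU number; in C terms (a sketch, with cpuid standing
 * in for the hardware CPU index the handlers read):
 *
 *	struct tsbmiss *tsbmissp = (struct tsbmiss *)
 *	    ((uintptr_t)tsbmiss_area + cpuid * TSBMISS_SIZE);
 *	struct kpmtsbm *kpmtsbmp = (struct kpmtsbm *)
 *	    ((uintptr_t)kpmtsbm_area + cpuid * KPMTSBM_SIZE);
 */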
48720Sstevel@tonic-gate#endif	/* lint */