xref: /onnv-gate/usr/src/uts/sun4u/vm/mach_sfmmu_asm.s (revision 6674:e2e0bb793ecb)
10Sstevel@tonic-gate/*
20Sstevel@tonic-gate * CDDL HEADER START
30Sstevel@tonic-gate *
40Sstevel@tonic-gate * The contents of this file are subject to the terms of the
51772Sjl139090 * Common Development and Distribution License (the "License").
61772Sjl139090 * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate *
80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate * See the License for the specific language governing permissions
110Sstevel@tonic-gate * and limitations under the License.
120Sstevel@tonic-gate *
130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate *
190Sstevel@tonic-gate * CDDL HEADER END
200Sstevel@tonic-gate */
210Sstevel@tonic-gate/*
226127Ssm142603 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate * Use is subject to license terms.
240Sstevel@tonic-gate */
250Sstevel@tonic-gate
260Sstevel@tonic-gate#pragma ident	"%Z%%M%	%I%	%E% SMI"
270Sstevel@tonic-gate
280Sstevel@tonic-gate/*
290Sstevel@tonic-gate * SFMMU primitives.  These primitives should only be used by sfmmu
300Sstevel@tonic-gate * routines.
310Sstevel@tonic-gate */
320Sstevel@tonic-gate
330Sstevel@tonic-gate#if defined(lint)
340Sstevel@tonic-gate#include <sys/types.h>
350Sstevel@tonic-gate#else	/* lint */
360Sstevel@tonic-gate#include "assym.h"
370Sstevel@tonic-gate#endif	/* lint */
380Sstevel@tonic-gate
390Sstevel@tonic-gate#include <sys/asm_linkage.h>
400Sstevel@tonic-gate#include <sys/machtrap.h>
410Sstevel@tonic-gate#include <sys/machasi.h>
420Sstevel@tonic-gate#include <sys/sun4asi.h>
430Sstevel@tonic-gate#include <sys/pte.h>
440Sstevel@tonic-gate#include <sys/mmu.h>
450Sstevel@tonic-gate#include <vm/hat_sfmmu.h>
460Sstevel@tonic-gate#include <vm/seg_spt.h>
470Sstevel@tonic-gate#include <sys/machparam.h>
480Sstevel@tonic-gate#include <sys/privregs.h>
490Sstevel@tonic-gate#include <sys/scb.h>
500Sstevel@tonic-gate#include <sys/intreg.h>
510Sstevel@tonic-gate#include <sys/machthread.h>
520Sstevel@tonic-gate#include <sys/clock.h>
530Sstevel@tonic-gate#include <sys/trapstat.h>
540Sstevel@tonic-gate
550Sstevel@tonic-gate/*
560Sstevel@tonic-gate * sfmmu related subroutines
570Sstevel@tonic-gate */
580Sstevel@tonic-gate
590Sstevel@tonic-gate#if defined (lint)
600Sstevel@tonic-gate
610Sstevel@tonic-gate/*
620Sstevel@tonic-gate * sfmmu related subroutines
630Sstevel@tonic-gate */
/*
 * NOTE: the functions below are empty stubs seen only by lint; the real
 * implementations are the SPARC assembly routines in the #else branch of
 * this file.  Keep these prototypes in sync with the corresponding
 * ENTRY/ENTRY_NP points.
 */
640Sstevel@tonic-gate/* ARGSUSED */
650Sstevel@tonic-gatevoid
662241Shuahsfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx)
670Sstevel@tonic-gate{}
680Sstevel@tonic-gate
690Sstevel@tonic-gate/* ARGSUSED */
700Sstevel@tonic-gatevoid
712241Shuahsfmmu_itlb_ld_kva(caddr_t vaddr, tte_t *tte)
720Sstevel@tonic-gate{}
730Sstevel@tonic-gate
740Sstevel@tonic-gate/* ARGSUSED */
750Sstevel@tonic-gatevoid
762241Shuahsfmmu_dtlb_ld_kva(caddr_t vaddr, tte_t *tte)
770Sstevel@tonic-gate{}
780Sstevel@tonic-gate
790Sstevel@tonic-gateint
800Sstevel@tonic-gatesfmmu_getctx_pri()
810Sstevel@tonic-gate{ return(0); }
820Sstevel@tonic-gate
830Sstevel@tonic-gateint
840Sstevel@tonic-gatesfmmu_getctx_sec()
850Sstevel@tonic-gate{ return(0); }
860Sstevel@tonic-gate
870Sstevel@tonic-gate/* ARGSUSED */
880Sstevel@tonic-gatevoid
894528Spaulsansfmmu_setctx_sec(uint_t ctx)
900Sstevel@tonic-gate{}
910Sstevel@tonic-gate
920Sstevel@tonic-gate/* ARGSUSED */
930Sstevel@tonic-gatevoid
940Sstevel@tonic-gatesfmmu_load_mmustate(sfmmu_t *sfmmup)
950Sstevel@tonic-gate{
960Sstevel@tonic-gate}
970Sstevel@tonic-gate
980Sstevel@tonic-gate#else	/* lint */
996127Ssm142603
1000Sstevel@tonic-gate/*
1012241Shuah * Invalidate either the context of a specific victim or any process
1022241Shuah * currently running on this CPU.
1030Sstevel@tonic-gate *
1042241Shuah * %g1 = sfmmup whose ctx is being invalidated
1052241Shuah *	 when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT
1062241Shuah * Note %g1 is the only input argument used by this xcall handler.
1070Sstevel@tonic-gate */
1082241Shuah	ENTRY(sfmmu_raise_tsb_exception)
1090Sstevel@tonic-gate	!
1106127Ssm142603	! if (victim == INVALID_CONTEXT ||
1116127Ssm142603	!     current CPU tsbmiss->usfmmup == victim sfmmup) {
1126127Ssm142603	!       if (shctx_on) {
1136127Ssm142603	!               shctx = INVALID;
1146127Ssm142603	!       }
1152241Shuah	!	if (sec-ctx > INVALID_CONTEXT) {
1162241Shuah	!		write INVALID_CONTEXT to sec-ctx
1172241Shuah	!	}
1182241Shuah	!	if (pri-ctx > INVALID_CONTEXT) {
1192241Shuah	!		write INVALID_CONTEXT to pri-ctx
1202241Shuah	!	}
1210Sstevel@tonic-gate	! }
1222241Shuah
	! Uses only %g registers and exits via retry (per the block comment
	! above, %g1 — the victim sfmmup — is the only input argument).
	! Panic rather than ever invalidate the kernel's context.
1232241Shuah	sethi   %hi(ksfmmup), %g3
1242241Shuah        ldx     [%g3 + %lo(ksfmmup)], %g3
1252241Shuah	cmp	%g1, %g3
1266127Ssm142603	be,a,pn %xcc, ptl1_panic		/* can't invalidate kernel ctx */
1272241Shuah	  mov	PTL1_BAD_RAISE_TSBEXCP, %g1
1282241Shuah
1292241Shuah	set	INVALID_CONTEXT, %g2
1302241Shuah	cmp	%g1, INVALID_CONTEXT
1316127Ssm142603	be,pn	%xcc, 0f			/* called from wrap_around? */
1322241Shuah	  mov	MMU_SCONTEXT, %g3
1332241Shuah
1346127Ssm142603	CPU_TSBMISS_AREA(%g5, %g6)		/* load cpu tsbmiss area */
1356127Ssm142603	ldx	[%g5 + TSBMISS_UHATID], %g5     /* load usfmmup */
1366127Ssm142603	cmp	%g5, %g1			/* hat toBe-invalid running? */
1376127Ssm142603	bne,pt	%xcc, 3f
1386127Ssm142603	  nop
1396127Ssm142603
1406127Ssm1426030:
	! Clear the shared context register first when shctx_on is enabled.
1416127Ssm142603	sethi   %hi(shctx_on), %g5
1426127Ssm142603        ld      [%g5 + %lo(shctx_on)], %g5
1436127Ssm142603        brz     %g5, 1f
1446127Ssm142603          mov     MMU_SHARED_CONTEXT, %g5
1456127Ssm142603        sethi   %hi(FLUSH_ADDR), %g4
1466127Ssm142603        stxa    %g0, [%g5]ASI_MMU_CTX
1476127Ssm142603        flush   %g4
1486127Ssm142603
1496127Ssm1426031:
1502241Shuah	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = pgsz | sec-ctx */
1512241Shuah	set     CTXREG_CTX_MASK, %g4
1522241Shuah	and	%g5, %g4, %g5			/* %g5 = sec-ctx */
1532241Shuah	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invald ctx? */
1546127Ssm142603	ble,pn	%xcc, 2f			/* yes, no need to change */
1556127Ssm142603	  mov   MMU_PCONTEXT, %g7
1562241Shuah
1572241Shuah	stxa	%g2, [%g3]ASI_MMU_CTX		/* set invalid ctx */
1580Sstevel@tonic-gate	membar	#Sync
1595584Sjimand
1606127Ssm1426032:
	! Primary context: unlike sec-ctx above, the nucleus page-size bits
	! must be preserved when writing INVALID_CONTEXT.
1615584Sjimand	ldxa	[%g7]ASI_MMU_CTX, %g3		/* get pgz | pri-ctx */
1625584Sjimand	and     %g3, %g4, %g5			/* %g5 = pri-ctx */
1632241Shuah	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invald ctx? */
1646127Ssm142603	ble,pn	%xcc, 3f			/* yes, no need to change */
1655584Sjimand	  srlx	%g3, CTXREG_NEXT_SHIFT, %g3	/* %g3 = nucleus pgsz */
1665584Sjimand	sllx	%g3, CTXREG_NEXT_SHIFT, %g3	/* need to preserve nucleus pgsz */
1675584Sjimand	or	%g3, %g2, %g2			/* %g2 = nucleus pgsz | INVALID_CONTEXT */
1685584Sjimand
1692241Shuah	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
1706127Ssm1426033:
1710Sstevel@tonic-gate	retry
1722241Shuah	SET_SIZE(sfmmu_raise_tsb_exception)
1736127Ssm142603
1746127Ssm142603
1750Sstevel@tonic-gate
1762241Shuah	/*
	 * Load a TTE into the ITLB (kernel virtual address).
	 *
1772241Shuah	 * %o0 = virtual address
1782241Shuah	 * %o1 = address of TTE to be loaded
	 *
	 * Interrupts are disabled for the duration; the caller's %pstate is
	 * saved in %o3 and restored in the retl delay slot.
1792241Shuah	 */
1802241Shuah	ENTRY_NP(sfmmu_itlb_ld_kva)
1810Sstevel@tonic-gate	rdpr	%pstate, %o3
1820Sstevel@tonic-gate#ifdef DEBUG
1832241Shuah	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l1, %g1)
1840Sstevel@tonic-gate#endif /* DEBUG */
1850Sstevel@tonic-gate	wrpr	%o3, PSTATE_IE, %pstate		! Disable interrupts
1860Sstevel@tonic-gate	srln	%o0, MMU_PAGESHIFT, %o0
1870Sstevel@tonic-gate	slln	%o0, MMU_PAGESHIFT, %o0		! Clear page offset
1882241Shuah
1892241Shuah	ldx	[%o1], %g1
1900Sstevel@tonic-gate	set	MMU_TAG_ACCESS, %o5
1910Sstevel@tonic-gate#ifdef	CHEETAHPLUS_ERRATUM_34
1920Sstevel@tonic-gate	!
1930Sstevel@tonic-gate	! If this is Cheetah or derivative and the specified TTE is locked
1940Sstevel@tonic-gate	! and hence to be loaded into the T16, fully-associative TLB, we
1950Sstevel@tonic-gate	! must avoid Cheetah+ erratum 34.  In Cheetah+ erratum 34, under
1960Sstevel@tonic-gate	! certain conditions an ITLB locked index 0 TTE will erroneously be
1970Sstevel@tonic-gate	! displaced when a new TTE is loaded via ASI_ITLB_IN.  To avoid
1980Sstevel@tonic-gate	! this erratum, we scan the T16 top down for an unlocked TTE and
1990Sstevel@tonic-gate	! explicitly load the specified TTE into that index.
2000Sstevel@tonic-gate	!
2010Sstevel@tonic-gate	GET_CPU_IMPL(%g2)
2020Sstevel@tonic-gate	cmp	%g2, CHEETAH_IMPL
2030Sstevel@tonic-gate	bl,pn	%icc, 0f
2040Sstevel@tonic-gate	  nop
2050Sstevel@tonic-gate
2060Sstevel@tonic-gate	andcc	%g1, TTE_LCK_INT, %g0
2070Sstevel@tonic-gate	bz	%icc, 0f			! Lock bit is not set;
2080Sstevel@tonic-gate						!   load normally.
2090Sstevel@tonic-gate	  or	%g0, (15 << 3), %g3		! Start searching from the
2100Sstevel@tonic-gate						!   top down.
2110Sstevel@tonic-gate
2120Sstevel@tonic-gate1:
2130Sstevel@tonic-gate	ldxa	[%g3]ASI_ITLB_ACCESS, %g4	! Load TTE from t16
2140Sstevel@tonic-gate
2150Sstevel@tonic-gate	!
2160Sstevel@tonic-gate	! If this entry isn't valid, we'll choose to displace it (regardless
2170Sstevel@tonic-gate	! of the lock bit).
2180Sstevel@tonic-gate	!
2190Sstevel@tonic-gate	cmp	%g4, %g0
2200Sstevel@tonic-gate	bge	%xcc, 2f			! TTE is > 0 iff not valid
2210Sstevel@tonic-gate	  andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
2220Sstevel@tonic-gate	bz	%icc, 2f			! If unlocked, go displace
2230Sstevel@tonic-gate	  nop
2240Sstevel@tonic-gate	sub	%g3, (1 << 3), %g3
2250Sstevel@tonic-gate	brgz	%g3, 1b				! Still more TLB entries
2260Sstevel@tonic-gate	  nop					! to search
2270Sstevel@tonic-gate
2280Sstevel@tonic-gate	sethi   %hi(sfmmu_panic5), %o0          ! We searched all entries and
2290Sstevel@tonic-gate	call    panic                           ! found no unlocked TTE so
2300Sstevel@tonic-gate	  or    %o0, %lo(sfmmu_panic5), %o0     ! give up.
2310Sstevel@tonic-gate
2322241Shuah
2330Sstevel@tonic-gate2:
2340Sstevel@tonic-gate	!
2350Sstevel@tonic-gate	! We have found an unlocked or non-valid entry; we'll explicitly load
2360Sstevel@tonic-gate	! our locked entry here.
2370Sstevel@tonic-gate	!
2380Sstevel@tonic-gate	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
2390Sstevel@tonic-gate	stxa	%o0, [%o5]ASI_IMMU
2400Sstevel@tonic-gate	stxa	%g1, [%g3]ASI_ITLB_ACCESS
2410Sstevel@tonic-gate	flush	%o1				! Flush required for I-MMU
2420Sstevel@tonic-gate	ba	3f				! Delay slot of ba is empty
2432241Shuah	  nop					!   per Erratum 64
2440Sstevel@tonic-gate
2450Sstevel@tonic-gate0:
2460Sstevel@tonic-gate#endif	/* CHEETAHPLUS_ERRATUM_34 */
	! Normal path: write the tag access register, then let the MMU pick
	! the TLB slot via ASI_ITLB_IN.
2470Sstevel@tonic-gate	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
2480Sstevel@tonic-gate	stxa	%o0, [%o5]ASI_IMMU
2490Sstevel@tonic-gate	stxa	%g1, [%g0]ASI_ITLB_IN
2500Sstevel@tonic-gate	flush	%o1				! Flush required for I-MMU
2510Sstevel@tonic-gate3:
2520Sstevel@tonic-gate	retl
2530Sstevel@tonic-gate	  wrpr	%g0, %o3, %pstate		! Enable interrupts
2540Sstevel@tonic-gate	SET_SIZE(sfmmu_itlb_ld_kva)
2550Sstevel@tonic-gate
2560Sstevel@tonic-gate	/*
2570Sstevel@tonic-gate	 * Load an entry into the DTLB.
2580Sstevel@tonic-gate	 *
2590Sstevel@tonic-gate	 * Special handling is required for locked entries since there
2600Sstevel@tonic-gate	 * are some TLB slots that are reserved for the kernel but not
2610Sstevel@tonic-gate	 * always held locked.  We want to avoid loading locked TTEs
2620Sstevel@tonic-gate	 * into those slots since they could be displaced.
2632241Shuah	 *
2642241Shuah	 * %o0 = virtual address
2652241Shuah	 * %o1 = address of TTE to be loaded
2660Sstevel@tonic-gate	 */
2672241Shuah	ENTRY_NP(sfmmu_dtlb_ld_kva)
2680Sstevel@tonic-gate	rdpr	%pstate, %o3
2690Sstevel@tonic-gate#ifdef DEBUG
2702241Shuah	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l2, %g1)
2710Sstevel@tonic-gate#endif /* DEBUG */
2720Sstevel@tonic-gate	wrpr	%o3, PSTATE_IE, %pstate		! disable interrupts
2730Sstevel@tonic-gate	srln	%o0, MMU_PAGESHIFT, %o0
2740Sstevel@tonic-gate	slln	%o0, MMU_PAGESHIFT, %o0		! clear page offset
2752241Shuah
2762241Shuah	ldx	[%o1], %g1
2772241Shuah
2782241Shuah	set	MMU_TAG_ACCESS, %o5
2792241Shuah
	! On dual-pagesize implementations also program the extended tag
	! access register with the kernel hat's cext value.
2802241Shuah	set	cpu_impl_dual_pgsz, %o2
2812241Shuah	ld	[%o2], %o2
2820Sstevel@tonic-gate	brz	%o2, 1f
2832241Shuah	  nop
2842241Shuah
2852241Shuah	sethi	%hi(ksfmmup), %o2
2862241Shuah	ldx	[%o2 + %lo(ksfmmup)], %o2
2872241Shuah	ldub    [%o2 + SFMMU_CEXT], %o2
2882241Shuah        sll     %o2, TAGACCEXT_SHIFT, %o2
2892241Shuah
2900Sstevel@tonic-gate	set	MMU_TAG_ACCESS_EXT, %o4		! can go into T8 if unlocked
2910Sstevel@tonic-gate	stxa	%o2,[%o4]ASI_DMMU
2920Sstevel@tonic-gate	membar	#Sync
2930Sstevel@tonic-gate1:
2940Sstevel@tonic-gate	andcc	%g1, TTE_LCK_INT, %g0		! Locked entries require
2950Sstevel@tonic-gate	bnz,pn	%icc, 2f			! special handling
2960Sstevel@tonic-gate	  sethi	%hi(dtlb_resv_ttenum), %g3
2970Sstevel@tonic-gate	stxa	%o0,[%o5]ASI_DMMU		! Load unlocked TTE
2980Sstevel@tonic-gate	stxa	%g1,[%g0]ASI_DTLB_IN		! via DTLB_IN
2990Sstevel@tonic-gate	membar	#Sync
3000Sstevel@tonic-gate	retl
3010Sstevel@tonic-gate	  wrpr	%g0, %o3, %pstate		! enable interrupts
3020Sstevel@tonic-gate2:
	! Locked-TTE path: scan downward from index (dtlb_resv_ttenum - 1)
	! in TLB 0 for an invalid or unlocked entry to displace.
303*6674Sjfrank#ifdef	CHEETAHPLUS_ERRATUM_34
304*6674Sjfrank	GET_CPU_IMPL(%g2)
305*6674Sjfrank#endif
3060Sstevel@tonic-gate	ld	[%g3 + %lo(dtlb_resv_ttenum)], %g3
3070Sstevel@tonic-gate	sll	%g3, 3, %g3			! First reserved idx in TLB 0
3080Sstevel@tonic-gate	sub	%g3, (1 << 3), %g3		! Decrement idx
309*6674Sjfrank	! Erratum 15 workaround due to ld [%g3 + %lo(dtlb_resv_ttenum)], %g3
310*6674Sjfrank	ldxa	[%g3]ASI_DTLB_ACCESS, %g4	! Load TTE from TLB 0
3110Sstevel@tonic-gate3:
3120Sstevel@tonic-gate	ldxa	[%g3]ASI_DTLB_ACCESS, %g4	! Load TTE from TLB 0
3130Sstevel@tonic-gate	!
3140Sstevel@tonic-gate	! If this entry isn't valid, we'll choose to displace it (regardless
3150Sstevel@tonic-gate	! of the lock bit).
3160Sstevel@tonic-gate	!
3170Sstevel@tonic-gate	brgez,pn %g4, 4f			! TTE is > 0 iff not valid
3180Sstevel@tonic-gate	  nop
3190Sstevel@tonic-gate	andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
3200Sstevel@tonic-gate	bz,pn	%icc, 4f			! If unlocked, go displace
3210Sstevel@tonic-gate	  nop
3220Sstevel@tonic-gate	sub	%g3, (1 << 3), %g3		! Decrement idx
323*6674Sjfrank#ifdef	CHEETAHPLUS_ERRATUM_34
324*6674Sjfrank	!
325*6674Sjfrank	! If this is a Cheetah or derivative, we must work around Erratum 34
326*6674Sjfrank	! for the DTLB.  Erratum 34 states that under certain conditions,
327*6674Sjfrank	! a locked entry 0 TTE may be improperly displaced.  To avoid this,
328*6674Sjfrank	! we do not place a locked TTE in entry 0.
329*6674Sjfrank	!
330*6674Sjfrank	brgz	%g3, 3b
331*6674Sjfrank	  nop
332*6674Sjfrank	cmp	%g2, CHEETAH_IMPL
333*6674Sjfrank	bge,pt	%icc, 5f
334*6674Sjfrank	  nop
335*6674Sjfrank	brz	%g3, 3b
336*6674Sjfrank	 nop
337*6674Sjfrank#else	/* CHEETAHPLUS_ERRATUM_34 */
3382241Shuah	brgez	%g3, 3b
3390Sstevel@tonic-gate	  nop
340*6674Sjfrank#endif	/* CHEETAHPLUS_ERRATUM_34 */
341*6674Sjfrank5:
3420Sstevel@tonic-gate	sethi	%hi(sfmmu_panic5), %o0		! We searched all entries and
3430Sstevel@tonic-gate	call	panic				! found no unlocked TTE so
3440Sstevel@tonic-gate	  or	%o0, %lo(sfmmu_panic5), %o0	! give up.
3450Sstevel@tonic-gate4:
3460Sstevel@tonic-gate	stxa	%o0,[%o5]ASI_DMMU		! Setup tag access
3471772Sjl139090#ifdef	OLYMPUS_SHARED_FTLB
3481772Sjl139090	stxa	%g1,[%g0]ASI_DTLB_IN
3491772Sjl139090#else
3500Sstevel@tonic-gate	stxa	%g1,[%g3]ASI_DTLB_ACCESS	! Displace entry at idx
3511772Sjl139090#endif
3520Sstevel@tonic-gate	membar	#Sync
3530Sstevel@tonic-gate	retl
3540Sstevel@tonic-gate	  wrpr	%g0, %o3, %pstate		! enable interrupts
3552241Shuah	SET_SIZE(sfmmu_dtlb_ld_kva)
3560Sstevel@tonic-gate
3570Sstevel@tonic-gate	ENTRY_NP(sfmmu_getctx_pri)
	! Return the raw primary context register value (read in the retl
	! delay slot; no masking — contrast with sfmmu_getctx_sec).
3580Sstevel@tonic-gate	set	MMU_PCONTEXT, %o0
3590Sstevel@tonic-gate	retl
3600Sstevel@tonic-gate	  ldxa	[%o0]ASI_MMU_CTX, %o0
3610Sstevel@tonic-gate	SET_SIZE(sfmmu_getctx_pri)
3620Sstevel@tonic-gate
3630Sstevel@tonic-gate	ENTRY_NP(sfmmu_getctx_sec)
	! Return only the secondary context number: the non-context bits are
	! masked off with CTXREG_CTX_MASK (applied in the retl delay slot).
3640Sstevel@tonic-gate	set	MMU_SCONTEXT, %o0
3650Sstevel@tonic-gate	set	CTXREG_CTX_MASK, %o1
3660Sstevel@tonic-gate	ldxa	[%o0]ASI_MMU_CTX, %o0
3670Sstevel@tonic-gate	retl
3682241Shuah	  and	%o0, %o1, %o0
3690Sstevel@tonic-gate	SET_SIZE(sfmmu_getctx_sec)
3700Sstevel@tonic-gate
3710Sstevel@tonic-gate	/*
3720Sstevel@tonic-gate	 * Set the secondary context register for this process.
3732241Shuah	 * %o0 = page_size | context number for this process.
3740Sstevel@tonic-gate	 */
3750Sstevel@tonic-gate	ENTRY_NP(sfmmu_setctx_sec)
3760Sstevel@tonic-gate	/*
3770Sstevel@tonic-gate	 * From resume we call sfmmu_setctx_sec with interrupts disabled.
3780Sstevel@tonic-gate	 * But we can also get called from C with interrupts enabled. So,
3792241Shuah	 * we need to check first.
3800Sstevel@tonic-gate	 */
3810Sstevel@tonic-gate
3820Sstevel@tonic-gate	/* If interrupts are not disabled, then disable them */
3830Sstevel@tonic-gate	rdpr	%pstate, %g1
3840Sstevel@tonic-gate	btst	PSTATE_IE, %g1
3850Sstevel@tonic-gate	bnz,a,pt %icc, 1f
3862241Shuah	  wrpr	%g1, PSTATE_IE, %pstate		/* disable interrupts */
3872241Shuah
3880Sstevel@tonic-gate1:
3890Sstevel@tonic-gate	mov	MMU_SCONTEXT, %o1
3902241Shuah
3910Sstevel@tonic-gate	sethi	%hi(FLUSH_ADDR), %o4
3920Sstevel@tonic-gate	stxa	%o0, [%o1]ASI_MMU_CTX		/* set 2nd context reg. */
3930Sstevel@tonic-gate	flush	%o4
	/*
	 * When shared contexts are enabled (shctx_on) and the new secondary
	 * context is INVALID_CONTEXT, also clear the shared context register.
	 */
3946127Ssm142603        sethi   %hi(shctx_on), %g3
3956127Ssm142603        ld      [%g3 + %lo(shctx_on)], %g3
3966127Ssm142603	brz     %g3, 2f
3976127Ssm142603	  nop
3986127Ssm142603	set	CTXREG_CTX_MASK, %o4
3996127Ssm142603	and	%o0,%o4,%o1
4006127Ssm142603	cmp	%o1, INVALID_CONTEXT
4016127Ssm142603	bne,pn %icc, 2f
4026127Ssm142603   	  mov     MMU_SHARED_CONTEXT, %o1
4036127Ssm142603        sethi   %hi(FLUSH_ADDR), %o4
4046127Ssm142603        stxa    %g0, [%o1]ASI_MMU_CTX           /* set 2nd context reg. */
4056127Ssm142603        flush   %o4
4060Sstevel@tonic-gate
4072241Shuah	/*
4082241Shuah	 * if the routine was entered with intr enabled, then enable intr now.
4092241Shuah	 * otherwise, keep intr disabled, return without enabing intr.
4102241Shuah	 * %g1 - old intr state
4112241Shuah	 */
4126127Ssm1426032:	btst	PSTATE_IE, %g1
4136127Ssm142603	bnz,a,pt %icc, 3f
4142241Shuah	  wrpr	%g0, %g1, %pstate		/* enable interrupts */
4156127Ssm1426033:	retl
4162241Shuah	  nop
4170Sstevel@tonic-gate	SET_SIZE(sfmmu_setctx_sec)
4180Sstevel@tonic-gate
4190Sstevel@tonic-gate	/*
4200Sstevel@tonic-gate	 * set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS.
4210Sstevel@tonic-gate	 * returns the detection value in %o0.
4221772Sjl139090	 *
4231772Sjl139090	 * Currently ASI_QUAD_LDD_PHYS is supported in processors as follows
4241772Sjl139090	 *  - cheetah+ and later (greater or equal to CHEETAH_PLUS_IMPL)
4251772Sjl139090	 *  - FJ OPL Olympus-C and later  (less than SPITFIRE_IMPL)
4261772Sjl139090	 *
4270Sstevel@tonic-gate	 */
4280Sstevel@tonic-gate	ENTRY_NP(sfmmu_setup_4lp)
	! %o1 = detection result (1 if ASI_QUAD_LDD_PHYS is supported, else
	! 0); it is stored to ktsb_phys and returned in %o0.  On pre-Cheetah+
	! UltraSPARC (impl >= SPITFIRE_IMPL) the annulled branch to 3f skips
	! the ktsb_phys store and simply returns 0.
4290Sstevel@tonic-gate	GET_CPU_IMPL(%o0);
4300Sstevel@tonic-gate	cmp	%o0, CHEETAH_PLUS_IMPL
4311772Sjl139090	bge,pt	%icc, 4f
4321772Sjl139090	  mov	1, %o1
4331772Sjl139090	cmp	%o0, SPITFIRE_IMPL
4341772Sjl139090	bge,a,pn %icc, 3f
4350Sstevel@tonic-gate	  clr	%o1
4361772Sjl1390904:
4370Sstevel@tonic-gate	set	ktsb_phys, %o2
4380Sstevel@tonic-gate	st	%o1, [%o2]
4391772Sjl1390903:	retl
4400Sstevel@tonic-gate	mov	%o1, %o0
4410Sstevel@tonic-gate	SET_SIZE(sfmmu_setup_4lp)
4420Sstevel@tonic-gate
4430Sstevel@tonic-gate
4440Sstevel@tonic-gate	/*
4450Sstevel@tonic-gate	 * Called to load MMU registers and tsbmiss area
4460Sstevel@tonic-gate	 * for the active process.  This function should
4470Sstevel@tonic-gate	 * only be called from TL=0.
4480Sstevel@tonic-gate	 *
4490Sstevel@tonic-gate	 * %o0 - hat pointer
4506127Ssm142603	 *
4510Sstevel@tonic-gate	 */
4520Sstevel@tonic-gate	ENTRY_NP(sfmmu_load_mmustate)
4530Sstevel@tonic-gate
4542241Shuah#ifdef DEBUG
4556127Ssm142603        PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l3, %g1)
4562241Shuah#endif /* DEBUG */
4572241Shuah
4586127Ssm142603        sethi   %hi(ksfmmup), %o3
4596127Ssm142603        ldx     [%o3 + %lo(ksfmmup)], %o3
4606127Ssm142603        cmp     %o3, %o0
4616127Ssm142603        be,pn   %xcc, 8f			! if kernel as, do nothing
4626127Ssm142603          nop
4636127Ssm142603        /*
4646127Ssm142603         * We need to set up the TSB base register, tsbmiss
4656127Ssm142603         * area, and load locked TTE(s) for the TSB.
4666127Ssm142603         */
4676127Ssm142603        ldx     [%o0 + SFMMU_TSB], %o1          ! %o1 = first tsbinfo
4686127Ssm142603        ldx     [%o1 + TSBINFO_NEXTPTR], %g2    ! %g2 = second tsbinfo
4691772Sjl139090
4701772Sjl139090#ifdef UTSB_PHYS
4716127Ssm142603        /*
4726127Ssm142603         * UTSB_PHYS accesses user TSBs via physical addresses.  The first
4736127Ssm142603         * TSB is in the MMU I/D TSB Base registers.  The 2nd, 3rd and
4746127Ssm142603	 * 4th TSBs use designated ASI_SCRATCHPAD regs as pseudo TSB base regs.
4750Sstevel@tonic-gate	 */
4766127Ssm142603
4776127Ssm142603        /* create/set first UTSBREG actually loaded into MMU_TSB  */
4786127Ssm142603        MAKE_UTSBREG(%o1, %o2, %o3)             ! %o2 = first utsbreg
4796127Ssm142603 	LOAD_TSBREG(%o2, %o3, %o4)              ! write TSB base register
4806127Ssm142603
4816127Ssm142603        brz,a,pt  %g2, 2f
4826127Ssm142603          mov   -1, %o2                         ! use -1 if no second TSB
4836127Ssm142603
4846127Ssm142603        MAKE_UTSBREG(%g2, %o2, %o3)             ! %o2 = second utsbreg
4856127Ssm1426032:
4866127Ssm142603        SET_UTSBREG(SCRATCHPAD_UTSBREG2, %o2, %o3)
4876127Ssm142603
4886127Ssm142603	/* make 3rd and 4th TSB */
4896127Ssm142603	CPU_TSBMISS_AREA(%o4, %o3) 		! %o4 = tsbmiss area
4906127Ssm142603
4916127Ssm142603        ldx     [%o0 + SFMMU_SCDP], %g2         ! %g2 = sfmmu_scd
4926127Ssm142603        brz,pt  %g2, 3f
4936127Ssm142603          mov   -1, %o2                         ! use -1 if no third TSB
4946127Ssm142603
4956127Ssm142603        ldx     [%g2 + SCD_SFMMUP], %g3         ! %g3 = scdp->scd_sfmmup
4966127Ssm142603        ldx     [%g3 + SFMMU_TSB], %o1          ! %o1 = first scd tsbinfo
4976127Ssm142603        brz,pn %o1, 5f
4986127Ssm142603          nop                                   ! panic if no third TSB
4996127Ssm142603
5006127Ssm142603	/* make 3rd UTSBREG */
5016127Ssm142603        MAKE_UTSBREG(%o1, %o2, %o3)             ! %o2 = third utsbreg
5026127Ssm1426033:
5036127Ssm142603        SET_UTSBREG(SCRATCHPAD_UTSBREG3, %o2, %o3)
5046127Ssm142603	stn	%o2, [%o4 + TSBMISS_TSBSCDPTR]
5056127Ssm142603
5066127Ssm142603        brz,pt  %g2, 4f
5076127Ssm142603          mov   -1, %o2                         ! use -1 if no 3rd or 4th TSB
5086127Ssm142603
5096127Ssm142603        ldx     [%o1 + TSBINFO_NEXTPTR], %g2    ! %g2 = second scd tsbinfo
5106127Ssm142603        brz,pt  %g2, 4f
5116127Ssm142603          mov   -1, %o2                         ! use -1 if no 4th TSB
5126127Ssm142603
5136127Ssm142603	/* make 4th UTSBREG */
5146127Ssm142603        MAKE_UTSBREG(%g2, %o2, %o3)             ! %o2 = fourth utsbreg
5156127Ssm1426034:
5166127Ssm142603        SET_UTSBREG(SCRATCHPAD_UTSBREG4, %o2, %o3)
5176127Ssm142603	stn	%o2, [%o4 + TSBMISS_TSBSCDPTR4M]
5186127Ssm142603	ba,pt	%icc, 6f
5196127Ssm142603	  mov	%o4, %o2			! %o2 = tsbmiss area
5206127Ssm1426035:
	! Missing 3rd TSB: suppress the panic if a panic is already in
	! progress (panicstr set), otherwise panic with sfmmu_panic10.
5216127Ssm142603        sethi   %hi(panicstr), %g1              ! panic if no 3rd TSB
5226127Ssm142603        ldx     [%g1 + %lo(panicstr)], %g1
5236127Ssm142603        tst     %g1
5246127Ssm142603
5256127Ssm142603        bnz,pn  %xcc, 8f
5266127Ssm142603          nop
5276127Ssm142603
5286127Ssm142603        sethi   %hi(sfmmu_panic10), %o0
5296127Ssm142603        call    panic
5306127Ssm142603          or     %o0, %lo(sfmmu_panic10), %o0
5316127Ssm142603
5326127Ssm142603#else /* UTSBREG_PHYS */
5330Sstevel@tonic-gate
5346127Ssm142603        brz,pt  %g2, 4f
5356127Ssm142603          nop
5366127Ssm142603        /*
5376127Ssm142603         * We have a second TSB for this process, so we need to
5386127Ssm142603         * encode data for both the first and second TSB in our single
5396127Ssm142603         * TSB base register.  See hat_sfmmu.h for details on what bits
5406127Ssm142603         * correspond to which TSB.
5416127Ssm142603         * We also need to load a locked TTE into the TLB for the second TSB
5426127Ssm142603         * in this case.
5436127Ssm142603         */
5446127Ssm142603        MAKE_TSBREG_SECTSB(%o2, %o1, %g2, %o3, %o4, %g3, sfmmu_tsb_2nd)
5456127Ssm142603        ! %o2 = tsbreg
5466127Ssm142603        sethi   %hi(utsb4m_dtlb_ttenum), %o3
5476127Ssm142603        sethi   %hi(utsb4m_vabase), %o4
5486127Ssm142603        ld      [%o3 + %lo(utsb4m_dtlb_ttenum)], %o3
5496127Ssm142603        ldx     [%o4 + %lo(utsb4m_vabase)], %o4 ! %o4 = TLB tag for sec TSB
5506127Ssm142603        sll     %o3, DTACC_SHIFT, %o3           ! %o3 = sec TSB TLB index
5516127Ssm142603        RESV_OFFSET(%g2, %o4, %g3, sfmmu_tsb_2nd)       ! or-in bits of TSB VA
5526127Ssm142603        LOAD_TSBTTE(%g2, %o3, %o4, %g3)         ! load sec TSB locked TTE
5536127Ssm142603        sethi   %hi(utsb_vabase), %g3
5546127Ssm142603        ldx     [%g3 + %lo(utsb_vabase)], %g3   ! %g3 = TLB tag for first TSB
5556127Ssm142603        ba,pt   %xcc, 5f
5566127Ssm142603          nop
5570Sstevel@tonic-gate
5586127Ssm1426034:      sethi   %hi(utsb_vabase), %g3
5596127Ssm142603        ldx     [%g3 + %lo(utsb_vabase)], %g3   ! %g3 = TLB tag for first TSB
5606127Ssm142603        MAKE_TSBREG(%o2, %o1, %g3, %o3, %o4, sfmmu_tsb_1st)     ! %o2 = tsbreg
5616127Ssm142603
5626127Ssm1426035:      LOAD_TSBREG(%o2, %o3, %o4)              ! write TSB base register
5630Sstevel@tonic-gate
5646127Ssm142603        /*
5656127Ssm142603         * Load the TTE for the first TSB at the appropriate location in
5666127Ssm142603         * the TLB
5676127Ssm142603         */
5686127Ssm142603        sethi   %hi(utsb_dtlb_ttenum), %o2
5696127Ssm142603        ld      [%o2 + %lo(utsb_dtlb_ttenum)], %o2
5706127Ssm142603        sll     %o2, DTACC_SHIFT, %o2           ! %o1 = first TSB TLB index
5716127Ssm142603        RESV_OFFSET(%o1, %g3, %o3, sfmmu_tsb_1st)       ! or-in bits of TSB VA
5726127Ssm142603        LOAD_TSBTTE(%o1, %o2, %g3, %o4)         ! load first TSB locked TTE
5736127Ssm142603	CPU_TSBMISS_AREA(%o2, %o3)
5746127Ssm142603#endif /* UTSB_PHYS */
	! Common tail: on both the UTSB_PHYS and non-UTSB_PHYS paths %o2 now
	! points at this CPU's tsbmiss area.
5756127Ssm1426036:
5766127Ssm142603	ldx     [%o0 + SFMMU_ISMBLKPA], %o1     ! copy members of sfmmu
5776127Ssm142603	              				! we need to access from
5786127Ssm142603        stx     %o1, [%o2 + TSBMISS_ISMBLKPA]   ! sfmmu_tsb_miss into the
5796127Ssm142603        ldub    [%o0 + SFMMU_TTEFLAGS], %o3     ! per-CPU tsbmiss area.
5806127Ssm142603        stx     %o0, [%o2 + TSBMISS_UHATID]
5816127Ssm142603        stub    %o3, [%o2 + TSBMISS_UTTEFLAGS]
5826127Ssm142603#ifdef UTSB_PHYS
5836127Ssm142603        ldx     [%o0 + SFMMU_SRDP], %o1
5846127Ssm142603        ldub    [%o0 + SFMMU_RTTEFLAGS], %o4
5856127Ssm142603        stub    %o4,  [%o2 + TSBMISS_URTTEFLAGS]
5866127Ssm142603        stx     %o1, [%o2 +  TSBMISS_SHARED_UHATID]
5876127Ssm142603        brz,pn  %o1, 8f				! check for sfmmu_srdp
5886127Ssm142603          add   %o0, SFMMU_HMERMAP, %o1
5896127Ssm142603        add     %o2, TSBMISS_SHMERMAP, %o2
5906127Ssm142603        mov     SFMMU_HMERGNMAP_WORDS, %o3
5916127Ssm142603                                                ! set tsbmiss shmermap
5926127Ssm142603        SET_REGION_MAP(%o1, %o2, %o3, %o4, load_shme_mmustate)
5936127Ssm142603
5946127Ssm142603	ldx     [%o0 + SFMMU_SCDP], %o4         ! %o4 = sfmmu_scd
5956127Ssm142603        CPU_TSBMISS_AREA(%o2, %o3)              ! %o2 = tsbmiss area
5966127Ssm142603        mov     SFMMU_HMERGNMAP_WORDS, %o3
5976127Ssm142603        brnz,pt %o4, 7f                       ! check for sfmmu_scdp else
5986127Ssm142603          add   %o2, TSBMISS_SCDSHMERMAP, %o2 ! zero tsbmiss scd_shmermap
5996127Ssm142603        ZERO_REGION_MAP(%o2, %o3, zero_scd_mmustate)
6006127Ssm142603	ba 8f
6016127Ssm142603	  nop
6026127Ssm1426037:
6036127Ssm142603        add     %o4, SCD_HMERMAP, %o1
6046127Ssm142603        SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate)
6051772Sjl139090#endif /* UTSB_PHYS */
6060Sstevel@tonic-gate
6076127Ssm1426038:
6086127Ssm142603	retl
6096127Ssm142603          nop
6106127Ssm142603        SET_SIZE(sfmmu_load_mmustate)
6110Sstevel@tonic-gate
6120Sstevel@tonic-gate#endif /* lint */
6130Sstevel@tonic-gate
6140Sstevel@tonic-gate#if defined (lint)
6150Sstevel@tonic-gate/*
6160Sstevel@tonic-gate * Invalidate all of the entries within the tsb, by setting the inv bit
6170Sstevel@tonic-gate * in the tte_tag field of each tsbe.
6180Sstevel@tonic-gate *
6190Sstevel@tonic-gate * We take advantage of the fact TSBs are page aligned and a multiple of
6200Sstevel@tonic-gate * PAGESIZE to use block stores.
6210Sstevel@tonic-gate *
6220Sstevel@tonic-gate * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
6230Sstevel@tonic-gate * (in short, we set all bits in the upper word of the tag, and we give the
6240Sstevel@tonic-gate * invalid bit precedence over other tag bits in both places).
6250Sstevel@tonic-gate */
/* Lint-only stub; the real implementation is the assembly routine below. */
6260Sstevel@tonic-gate/* ARGSUSED */
6270Sstevel@tonic-gatevoid
6280Sstevel@tonic-gatesfmmu_inv_tsb_fast(caddr_t tsb_base, uint_t tsb_bytes)
6290Sstevel@tonic-gate{}
6300Sstevel@tonic-gate
6310Sstevel@tonic-gate#else /* lint */
6320Sstevel@tonic-gate
6330Sstevel@tonic-gate#define	VIS_BLOCKSIZE	64
6340Sstevel@tonic-gate
6350Sstevel@tonic-gate	ENTRY(sfmmu_inv_tsb_fast)
	! %i0 = tsb_base, %i1 = tsb_bytes (after save).  Invalidate the TSB
	! by block-storing an all-ones tag / all-zeros TTE pattern using
	! 64-byte VIS block stores, 4 blocks (256 bytes) per loop iteration.
	! Preemption is disabled and any live fp state is saved/restored.
6360Sstevel@tonic-gate
6370Sstevel@tonic-gate	! Get space for aligned block of saved fp regs.
6380Sstevel@tonic-gate	save	%sp, -SA(MINFRAME + 2*VIS_BLOCKSIZE), %sp
6390Sstevel@tonic-gate
6400Sstevel@tonic-gate	! kpreempt_disable();
6410Sstevel@tonic-gate	ldsb	[THREAD_REG + T_PREEMPT], %l3
6420Sstevel@tonic-gate	inc	%l3
6430Sstevel@tonic-gate	stb	%l3, [THREAD_REG + T_PREEMPT]
6440Sstevel@tonic-gate
6450Sstevel@tonic-gate	! See if fpu was in use.  If it was, we need to save off the
6460Sstevel@tonic-gate	! floating point registers to the stack.
6470Sstevel@tonic-gate	rd	%fprs, %l0			! %l0 = cached copy of fprs
6480Sstevel@tonic-gate	btst	FPRS_FEF, %l0
6490Sstevel@tonic-gate	bz,pt	%icc, 4f
6500Sstevel@tonic-gate	  nop
6510Sstevel@tonic-gate
6520Sstevel@tonic-gate	! save in-use fpregs on stack
6530Sstevel@tonic-gate	membar	#Sync				! make sure tranx to fp regs
6540Sstevel@tonic-gate						! have completed
6550Sstevel@tonic-gate	add	%fp, STACK_BIAS - 65, %l1	! get stack frame for fp regs
6560Sstevel@tonic-gate	and	%l1, -VIS_BLOCKSIZE, %l1	! block align frame
6570Sstevel@tonic-gate	stda	%d0, [%l1]ASI_BLK_P		! %l1 = addr of saved fp regs
6580Sstevel@tonic-gate
6590Sstevel@tonic-gate	! enable fp
6600Sstevel@tonic-gate4:	membar	#StoreStore|#StoreLoad|#LoadStore
6610Sstevel@tonic-gate	wr	%g0, FPRS_FEF, %fprs
6620Sstevel@tonic-gate	wr	%g0, ASI_BLK_P, %asi
6630Sstevel@tonic-gate
6640Sstevel@tonic-gate	! load up FP registers with invalid TSB tag.
6650Sstevel@tonic-gate	fone	%d0			! ones in tag
6660Sstevel@tonic-gate	fzero	%d2			! zeros in TTE
6670Sstevel@tonic-gate	fone	%d4			! ones in tag
6680Sstevel@tonic-gate	fzero	%d6			! zeros in TTE
6690Sstevel@tonic-gate	fone	%d8			! ones in tag
6700Sstevel@tonic-gate	fzero	%d10			! zeros in TTE
6710Sstevel@tonic-gate	fone	%d12			! ones in tag
6720Sstevel@tonic-gate	fzero	%d14			! zeros in TTE
6730Sstevel@tonic-gate	ba,pt	%xcc, .sfmmu_inv_doblock
6740Sstevel@tonic-gate	  mov	(4*VIS_BLOCKSIZE), %i4	! we do 4 stda's each loop below
6750Sstevel@tonic-gate
6760Sstevel@tonic-gate.sfmmu_inv_blkstart:
6770Sstevel@tonic-gate      ! stda	%d0, [%i0+192]%asi  ! in dly slot of branch that got us here
6780Sstevel@tonic-gate	stda	%d0, [%i0+128]%asi
6790Sstevel@tonic-gate	stda	%d0, [%i0+64]%asi
6800Sstevel@tonic-gate	stda	%d0, [%i0]%asi
6810Sstevel@tonic-gate
6820Sstevel@tonic-gate	add	%i0, %i4, %i0
6830Sstevel@tonic-gate	sub	%i1, %i4, %i1
6840Sstevel@tonic-gate
6850Sstevel@tonic-gate.sfmmu_inv_doblock:
6860Sstevel@tonic-gate	cmp	%i1, (4*VIS_BLOCKSIZE)	! check for completion
6870Sstevel@tonic-gate	bgeu,a	%icc, .sfmmu_inv_blkstart
6880Sstevel@tonic-gate	  stda	%d0, [%i0+192]%asi
6890Sstevel@tonic-gate
6900Sstevel@tonic-gate.sfmmu_inv_finish:
6910Sstevel@tonic-gate	membar	#Sync
6920Sstevel@tonic-gate	btst	FPRS_FEF, %l0		! saved from above
6930Sstevel@tonic-gate	bz,a	.sfmmu_inv_finished
6940Sstevel@tonic-gate	  wr	%l0, 0, %fprs		! restore fprs
6950Sstevel@tonic-gate
6960Sstevel@tonic-gate	! restore fpregs from stack
6970Sstevel@tonic-gate	ldda    [%l1]ASI_BLK_P, %d0
6980Sstevel@tonic-gate	membar	#Sync
6990Sstevel@tonic-gate	wr	%l0, 0, %fprs		! restore fprs
7000Sstevel@tonic-gate
7010Sstevel@tonic-gate.sfmmu_inv_finished:
7020Sstevel@tonic-gate	! kpreempt_enable();
7030Sstevel@tonic-gate	ldsb	[THREAD_REG + T_PREEMPT], %l3
7040Sstevel@tonic-gate	dec	%l3
7050Sstevel@tonic-gate	stb	%l3, [THREAD_REG + T_PREEMPT]
7060Sstevel@tonic-gate	ret
7072241Shuah	  restore
7080Sstevel@tonic-gate	SET_SIZE(sfmmu_inv_tsb_fast)
7090Sstevel@tonic-gate
7100Sstevel@tonic-gate#endif /* lint */
7110Sstevel@tonic-gate
7120Sstevel@tonic-gate#if defined(lint)
7130Sstevel@tonic-gate
7140Sstevel@tonic-gate/*
7150Sstevel@tonic-gate * Prefetch "struct tsbe" while walking TSBs.
7160Sstevel@tonic-gate * prefetch 7 cache lines ahead of where we are at now.
7170Sstevel@tonic-gate * #n_reads is being used since #one_read only applies to
7180Sstevel@tonic-gate * floating point reads, and we are not doing floating point
7190Sstevel@tonic-gate * reads.  However, this has the negative side effect of polluting
7200Sstevel@tonic-gate * the ecache.
7210Sstevel@tonic-gate * The 448 comes from (7 * 64) which is how far ahead of our current
7220Sstevel@tonic-gate * address, we want to prefetch.
7230Sstevel@tonic-gate */
/* Lint-only stubs; the real implementations are the assembly leaves below. */
7240Sstevel@tonic-gate/*ARGSUSED*/
7250Sstevel@tonic-gatevoid
7260Sstevel@tonic-gateprefetch_tsbe_read(struct tsbe *tsbep)
7270Sstevel@tonic-gate{}
7280Sstevel@tonic-gate
7290Sstevel@tonic-gate/* Prefetch the tsbe that we are about to write */
7300Sstevel@tonic-gate/*ARGSUSED*/
7310Sstevel@tonic-gatevoid
7320Sstevel@tonic-gateprefetch_tsbe_write(struct tsbe *tsbep)
7330Sstevel@tonic-gate{}
7340Sstevel@tonic-gate
7350Sstevel@tonic-gate#else /* lint */
7360Sstevel@tonic-gate
7370Sstevel@tonic-gate	ENTRY(prefetch_tsbe_read)
	! Prefetch 448 bytes (7 x 64-byte lines) ahead of %o0 with #n_reads;
	! see the block comment in the lint section above for the rationale.
7380Sstevel@tonic-gate	retl
7392241Shuah	  prefetch	[%o0+448], #n_reads
7400Sstevel@tonic-gate	SET_SIZE(prefetch_tsbe_read)
7410Sstevel@tonic-gate
7420Sstevel@tonic-gate	ENTRY(prefetch_tsbe_write)
	! Prefetch the tsbe at %o0 itself with #n_writes ahead of a store.
7430Sstevel@tonic-gate	retl
7442241Shuah	  prefetch	[%o0], #n_writes
7450Sstevel@tonic-gate	SET_SIZE(prefetch_tsbe_write)
7460Sstevel@tonic-gate#endif /* lint */
7470Sstevel@tonic-gate
748