10Sstevel@tonic-gate/* 20Sstevel@tonic-gate * CDDL HEADER START 30Sstevel@tonic-gate * 40Sstevel@tonic-gate * The contents of this file are subject to the terms of the 52241Shuah * Common Development and Distribution License (the "License"). 62241Shuah * You may not use this file except in compliance with the License. 70Sstevel@tonic-gate * 80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 100Sstevel@tonic-gate * See the License for the specific language governing permissions 110Sstevel@tonic-gate * and limitations under the License. 120Sstevel@tonic-gate * 130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 180Sstevel@tonic-gate * 190Sstevel@tonic-gate * CDDL HEADER END 200Sstevel@tonic-gate */ 210Sstevel@tonic-gate/* 22*6553Sjimand * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 230Sstevel@tonic-gate * Use is subject to license terms. 240Sstevel@tonic-gate */ 250Sstevel@tonic-gate 260Sstevel@tonic-gate#pragma ident "%Z%%M% %I% %E% SMI" 270Sstevel@tonic-gate 280Sstevel@tonic-gate#if !defined(lint) 290Sstevel@tonic-gate#include "assym.h" 300Sstevel@tonic-gate#endif /* lint */ 310Sstevel@tonic-gate 320Sstevel@tonic-gate#include <sys/asm_linkage.h> 330Sstevel@tonic-gate#include <sys/mmu.h> 340Sstevel@tonic-gate#include <vm/hat_sfmmu.h> 350Sstevel@tonic-gate#include <sys/machparam.h> 360Sstevel@tonic-gate#include <sys/machcpuvar.h> 370Sstevel@tonic-gate#include <sys/machthread.h> 380Sstevel@tonic-gate#include <sys/privregs.h> 390Sstevel@tonic-gate#include <sys/asm_linkage.h> 400Sstevel@tonic-gate#include <sys/machasi.h> 410Sstevel@tonic-gate#include <sys/trap.h> 420Sstevel@tonic-gate#include <sys/spitregs.h> 430Sstevel@tonic-gate#include <sys/xc_impl.h> 440Sstevel@tonic-gate#include <sys/intreg.h> 450Sstevel@tonic-gate#include <sys/async.h> 460Sstevel@tonic-gate 470Sstevel@tonic-gate#ifdef TRAPTRACE 480Sstevel@tonic-gate#include <sys/traptrace.h> 490Sstevel@tonic-gate#endif /* TRAPTRACE */ 500Sstevel@tonic-gate 510Sstevel@tonic-gate#ifndef lint 520Sstevel@tonic-gate 530Sstevel@tonic-gate/* BEGIN CSTYLED */ 540Sstevel@tonic-gate#define DCACHE_FLUSHPAGE(arg1, arg2, tmp1, tmp2, tmp3) \ 550Sstevel@tonic-gate ldxa [%g0]ASI_LSU, tmp1 ;\ 560Sstevel@tonic-gate btst LSU_DC, tmp1 /* is dcache enabled? 
*/ ;\ 570Sstevel@tonic-gate bz,pn %icc, 1f ;\ 580Sstevel@tonic-gate sethi %hi(dcache_linesize), tmp1 ;\ 590Sstevel@tonic-gate ld [tmp1 + %lo(dcache_linesize)], tmp1 ;\ 600Sstevel@tonic-gate sethi %hi(dflush_type), tmp2 ;\ 610Sstevel@tonic-gate ld [tmp2 + %lo(dflush_type)], tmp2 ;\ 620Sstevel@tonic-gate cmp tmp2, FLUSHPAGE_TYPE ;\ 630Sstevel@tonic-gate be,pt %icc, 2f ;\ 640Sstevel@tonic-gate sllx arg1, SF_DC_VBIT_SHIFT, arg1 /* tag to compare */ ;\ 650Sstevel@tonic-gate sethi %hi(dcache_size), tmp3 ;\ 660Sstevel@tonic-gate ld [tmp3 + %lo(dcache_size)], tmp3 ;\ 670Sstevel@tonic-gate cmp tmp2, FLUSHMATCH_TYPE ;\ 680Sstevel@tonic-gate be,pt %icc, 3f ;\ 690Sstevel@tonic-gate nop ;\ 700Sstevel@tonic-gate /* \ 710Sstevel@tonic-gate * flushtype = FLUSHALL_TYPE, flush the whole thing \ 720Sstevel@tonic-gate * tmp3 = cache size \ 730Sstevel@tonic-gate * tmp1 = cache line size \ 740Sstevel@tonic-gate */ \ 750Sstevel@tonic-gate sub tmp3, tmp1, tmp2 ;\ 760Sstevel@tonic-gate4: \ 770Sstevel@tonic-gate stxa %g0, [tmp2]ASI_DC_TAG ;\ 780Sstevel@tonic-gate membar #Sync ;\ 790Sstevel@tonic-gate cmp %g0, tmp2 ;\ 800Sstevel@tonic-gate bne,pt %icc, 4b ;\ 810Sstevel@tonic-gate sub tmp2, tmp1, tmp2 ;\ 820Sstevel@tonic-gate ba,pt %icc, 1f ;\ 830Sstevel@tonic-gate nop ;\ 840Sstevel@tonic-gate /* \ 850Sstevel@tonic-gate * flushtype = FLUSHPAGE_TYPE \ 860Sstevel@tonic-gate * arg1 = tag to compare against \ 870Sstevel@tonic-gate * arg2 = virtual color \ 880Sstevel@tonic-gate * tmp1 = cache line size \ 890Sstevel@tonic-gate * tmp2 = tag from cache \ 900Sstevel@tonic-gate * tmp3 = counter \ 910Sstevel@tonic-gate */ \ 920Sstevel@tonic-gate2: \ 930Sstevel@tonic-gate set MMU_PAGESIZE, tmp3 ;\ 940Sstevel@tonic-gate sllx arg2, MMU_PAGESHIFT, arg2 /* color to dcache page */ ;\ 950Sstevel@tonic-gate sub tmp3, tmp1, tmp3 ;\ 960Sstevel@tonic-gate4: \ 970Sstevel@tonic-gate ldxa [arg2 + tmp3]ASI_DC_TAG, tmp2 /* read tag */ ;\ 980Sstevel@tonic-gate btst SF_DC_VBIT_MASK, tmp2 ;\ 990Sstevel@tonic-gate bz,pn %icc, 5f /* branch if no valid sub-blocks */ ;\ 1000Sstevel@tonic-gate andn tmp2, SF_DC_VBIT_MASK, tmp2 /* clear out v bits */ ;\ 1010Sstevel@tonic-gate cmp tmp2, arg1 ;\ 1020Sstevel@tonic-gate bne,pn %icc, 5f /* br if tag miss */ ;\ 1030Sstevel@tonic-gate nop ;\ 1040Sstevel@tonic-gate stxa %g0, [arg2 + tmp3]ASI_DC_TAG ;\ 1050Sstevel@tonic-gate membar #Sync ;\ 1060Sstevel@tonic-gate5: \ 1070Sstevel@tonic-gate cmp %g0, tmp3 ;\ 1080Sstevel@tonic-gate bnz,pt %icc, 4b /* branch if not done */ ;\ 1090Sstevel@tonic-gate sub tmp3, tmp1, tmp3 ;\ 1100Sstevel@tonic-gate ba,pt %icc, 1f ;\ 1110Sstevel@tonic-gate nop ;\ 1120Sstevel@tonic-gate /* \ 1130Sstevel@tonic-gate * flushtype = FLUSHMATCH_TYPE \ 1140Sstevel@tonic-gate * arg1 = tag to compare against \ 1150Sstevel@tonic-gate * tmp1 = cache line size \ 1160Sstevel@tonic-gate * tmp3 = cache size \ 1170Sstevel@tonic-gate * arg2 = counter \ 1180Sstevel@tonic-gate * tmp2 = cache tag \ 1190Sstevel@tonic-gate */ \ 1200Sstevel@tonic-gate3: \ 1210Sstevel@tonic-gate sub tmp3, tmp1, arg2 ;\ 1220Sstevel@tonic-gate4: \ 1230Sstevel@tonic-gate ldxa [arg2]ASI_DC_TAG, tmp2 /* read tag */ ;\ 1240Sstevel@tonic-gate btst SF_DC_VBIT_MASK, tmp2 ;\ 1250Sstevel@tonic-gate bz,pn %icc, 5f /* br if no valid sub-blocks */ ;\ 1260Sstevel@tonic-gate andn tmp2, SF_DC_VBIT_MASK, tmp2 /* clear out v bits */ ;\ 1270Sstevel@tonic-gate cmp tmp2, arg1 ;\ 1280Sstevel@tonic-gate bne,pn %icc, 5f /* branch if tag miss */ ;\ 1290Sstevel@tonic-gate nop ;\ 1300Sstevel@tonic-gate stxa %g0, [arg2]ASI_DC_TAG ;\ 1310Sstevel@tonic-gate 
membar #Sync ;\ 1320Sstevel@tonic-gate5: \ 1330Sstevel@tonic-gate cmp %g0, arg2 ;\ 1340Sstevel@tonic-gate bne,pt %icc, 4b /* branch if not done */ ;\ 1350Sstevel@tonic-gate sub arg2, tmp1, arg2 ;\ 1360Sstevel@tonic-gate1: 1370Sstevel@tonic-gate 1380Sstevel@tonic-gate/* 1390Sstevel@tonic-gate * macro that flushes the entire dcache color 1400Sstevel@tonic-gate */ 1410Sstevel@tonic-gate#define DCACHE_FLUSHCOLOR(arg, tmp1, tmp2) \ 1420Sstevel@tonic-gate ldxa [%g0]ASI_LSU, tmp1; \ 1430Sstevel@tonic-gate btst LSU_DC, tmp1; /* is dcache enabled? */ \ 1440Sstevel@tonic-gate bz,pn %icc, 1f; \ 1450Sstevel@tonic-gate sethi %hi(dcache_linesize), tmp1; \ 1460Sstevel@tonic-gate ld [tmp1 + %lo(dcache_linesize)], tmp1; \ 1470Sstevel@tonic-gate set MMU_PAGESIZE, tmp2; \ 1480Sstevel@tonic-gate /* \ 1490Sstevel@tonic-gate * arg = virtual color \ 1500Sstevel@tonic-gate * tmp2 = page size \ 1510Sstevel@tonic-gate * tmp1 = cache line size \ 1520Sstevel@tonic-gate */ \ 1530Sstevel@tonic-gate sllx arg, MMU_PAGESHIFT, arg; /* color to dcache page */ \ 1540Sstevel@tonic-gate sub tmp2, tmp1, tmp2; \ 1550Sstevel@tonic-gate2: \ 1560Sstevel@tonic-gate stxa %g0, [arg + tmp2]ASI_DC_TAG; \ 1570Sstevel@tonic-gate membar #Sync; \ 1580Sstevel@tonic-gate cmp %g0, tmp2; \ 1590Sstevel@tonic-gate bne,pt %icc, 2b; \ 1600Sstevel@tonic-gate sub tmp2, tmp1, tmp2; \ 1610Sstevel@tonic-gate1: 1620Sstevel@tonic-gate 1630Sstevel@tonic-gate/* 1640Sstevel@tonic-gate * macro that flushes the entire dcache 1650Sstevel@tonic-gate */ 1660Sstevel@tonic-gate#define DCACHE_FLUSHALL(size, linesize, tmp) \ 1670Sstevel@tonic-gate ldxa [%g0]ASI_LSU, tmp; \ 1680Sstevel@tonic-gate btst LSU_DC, tmp; /* is dcache enabled? */ \ 1690Sstevel@tonic-gate bz,pn %icc, 1f; \ 1700Sstevel@tonic-gate \ 1710Sstevel@tonic-gate sub size, linesize, tmp; \ 1720Sstevel@tonic-gate2: \ 1730Sstevel@tonic-gate stxa %g0, [tmp]ASI_DC_TAG; \ 1740Sstevel@tonic-gate membar #Sync; \ 1750Sstevel@tonic-gate cmp %g0, tmp; \ 1760Sstevel@tonic-gate bne,pt %icc, 2b; \ 1770Sstevel@tonic-gate sub tmp, linesize, tmp; \ 1780Sstevel@tonic-gate1: 1790Sstevel@tonic-gate 1800Sstevel@tonic-gate/* 1810Sstevel@tonic-gate * macro that flushes the entire icache 1820Sstevel@tonic-gate */ 1830Sstevel@tonic-gate#define ICACHE_FLUSHALL(size, linesize, tmp) \ 1840Sstevel@tonic-gate ldxa [%g0]ASI_LSU, tmp; \ 1850Sstevel@tonic-gate btst LSU_IC, tmp; \ 1860Sstevel@tonic-gate bz,pn %icc, 1f; \ 1870Sstevel@tonic-gate \ 1880Sstevel@tonic-gate sub size, linesize, tmp; \ 1890Sstevel@tonic-gate2: \ 1900Sstevel@tonic-gate stxa %g0, [tmp]ASI_IC_TAG; \ 1910Sstevel@tonic-gate membar #Sync; \ 1920Sstevel@tonic-gate cmp %g0, tmp; \ 1930Sstevel@tonic-gate bne,pt %icc, 2b; \ 1940Sstevel@tonic-gate sub tmp, linesize, tmp; \ 1950Sstevel@tonic-gate1: 1960Sstevel@tonic-gate 1972241Shuah#ifdef SF_ERRATA_32 1982241Shuah#define SF_WORKAROUND(tmp1, tmp2) \ 1992241Shuah sethi %hi(FLUSH_ADDR), tmp2 ;\ 2002241Shuah set MMU_PCONTEXT, tmp1 ;\ 2012241Shuah stxa %g0, [tmp1]ASI_DMMU ;\ 2022241Shuah flush tmp2 ; 2032241Shuah#else 2042241Shuah#define SF_WORKAROUND(tmp1, tmp2) 2052241Shuah#endif /* SF_ERRATA_32 */ 2062241Shuah 2072241Shuah/* 2082241Shuah * arg1 = vaddr 2092241Shuah * arg2 = ctxnum 2102241Shuah * - disable interrupts and clear address mask 2112241Shuah * to access 64 bit physaddr 2122241Shuah * - Blow out the TLB, flush user page. 2132241Shuah * . use secondary context. 
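 *
 * For reference, a rough C-level sketch of what this macro does
 * (illustrative pseudocode only, not part of the build; rdpr_pstate(),
 * wrpr_pstate(), ldxa(), stxa() and flush() stand in for the
 * corresponding privileged instructions):
 *
 *	pstate = rdpr_pstate();
 *	wrpr_pstate(pstate & ~PSTATE_IE);	-- block interrupts
 *	old = ldxa(ASI_DMMU, MMU_SCONTEXT);	-- current sec. ctxnum
 *	va = vaddr | DEMAP_SECOND | DEMAP_PAGE_TYPE;
 *	if (old != ctxnum)
 *		stxa(ASI_DMMU, MMU_SCONTEXT, ctxnum);
 *	stxa(ASI_DTLB_DEMAP, va, 0);		-- demap dtlb page
 *	stxa(ASI_ITLB_DEMAP, va, 0);		-- demap itlb page
 *	flush(FLUSH_ADDR);
 *	if (old != ctxnum)
 *		stxa(ASI_DMMU, MMU_SCONTEXT, old);	-- restore ctxnum
 *	wrpr_pstate(pstate);			-- restore interrupts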
2142241Shuah */ 2152241Shuah#define VTAG_FLUSHUPAGE(lbl, arg1, arg2, tmp1, tmp2, tmp3, tmp4) \ 2162241Shuah rdpr %pstate, tmp1 ;\ 2172241Shuah andn tmp1, PSTATE_IE, tmp2 ;\ 2182241Shuah wrpr tmp2, 0, %pstate ;\ 2192241Shuah sethi %hi(FLUSH_ADDR), tmp2 ;\ 2202241Shuah set MMU_SCONTEXT, tmp3 ;\ 2212241Shuah ldxa [tmp3]ASI_DMMU, tmp4 ;\ 2222241Shuah or DEMAP_SECOND | DEMAP_PAGE_TYPE, arg1, arg1 ;\ 2232241Shuah cmp tmp4, arg2 ;\ 2242241Shuah be,a,pt %icc, lbl/**/4 ;\ 2252241Shuah nop ;\ 2262241Shuah stxa arg2, [tmp3]ASI_DMMU ;\ 2272241Shuahlbl/**/4: ;\ 2282241Shuah stxa %g0, [arg1]ASI_DTLB_DEMAP ;\ 2292241Shuah stxa %g0, [arg1]ASI_ITLB_DEMAP ;\ 2302241Shuah flush tmp2 ;\ 2312241Shuah be,a,pt %icc, lbl/**/5 ;\ 2322241Shuah nop ;\ 2332241Shuah stxa tmp4, [tmp3]ASI_DMMU ;\ 2342241Shuah flush tmp2 ;\ 2352241Shuahlbl/**/5: ;\ 2362241Shuah wrpr %g0, tmp1, %pstate 2372241Shuah 2382241Shuah 2392241Shuah/* 2402241Shuah * macro that flushes all the user entries in dtlb 2412241Shuah * arg1 = dtlb entries 2422241Shuah * - Before first compare: 2432241Shuah * tmp4 = tte 2442241Shuah * tmp5 = vaddr 2452241Shuah * tmp6 = cntxnum 2462241Shuah */ 2472241Shuah#define DTLB_FLUSH_UNLOCKED_UCTXS(lbl, arg1, tmp1, tmp2, tmp3, \ 2482241Shuah tmp4, tmp5, tmp6) \ 2492241Shuahlbl/**/0: ;\ 2502241Shuah sllx arg1, 3, tmp3 ;\ 2512241Shuah SF_WORKAROUND(tmp1, tmp2) ;\ 2522241Shuah ldxa [tmp3]ASI_DTLB_ACCESS, tmp4 ;\ 2532241Shuah srlx tmp4, 6, tmp4 ;\ 2542241Shuah andcc tmp4, 1, %g0 ;\ 2552241Shuah bnz,pn %xcc, lbl/**/1 ;\ 2562241Shuah srlx tmp4, 57, tmp4 ;\ 2572241Shuah andcc tmp4, 1, %g0 ;\ 2582241Shuah beq,pn %xcc, lbl/**/1 ;\ 2592241Shuah nop ;\ 2602241Shuah set TAGREAD_CTX_MASK, tmp1 ;\ 2612241Shuah ldxa [tmp3]ASI_DTLB_TAGREAD, tmp2 ;\ 2622241Shuah and tmp2, tmp1, tmp6 ;\ 2632241Shuah andn tmp2, tmp1, tmp5 ;\ 2642241Shuah set KCONTEXT, tmp4 ;\ 2652241Shuah cmp tmp6, tmp4 ;\ 2662241Shuah be lbl/**/1 ;\ 2672241Shuah nop ;\ 2682241Shuah VTAG_FLUSHUPAGE(VD/**/lbl, tmp5, tmp6, tmp1, tmp2, tmp3, tmp4) ;\ 2692241Shuahlbl/**/1: ;\ 2702241Shuah brgz,pt arg1, lbl/**/0 ;\ 2712241Shuah sub arg1, 1, arg1 2722241Shuah 2732241Shuah 2742241Shuah/* 2752241Shuah * macro that flushes all the user entries in itlb 2762241Shuah * arg1 = itlb entries 2772241Shuah * - Before first compare: 2782241Shuah * tmp4 = tte 2792241Shuah * tmp5 = vaddr 2802241Shuah * tmp6 = cntxnum 2812241Shuah */ 2822241Shuah#define ITLB_FLUSH_UNLOCKED_UCTXS(lbl, arg1, tmp1, tmp2, tmp3, \ 2832241Shuah tmp4, tmp5, tmp6) \ 2842241Shuahlbl/**/0: ;\ 2852241Shuah sllx arg1, 3, tmp3 ;\ 2862241Shuah SF_WORKAROUND(tmp1, tmp2) ;\ 2872241Shuah ldxa [tmp3]ASI_ITLB_ACCESS, tmp4 ;\ 2882241Shuah srlx tmp4, 6, tmp4 ;\ 2892241Shuah andcc tmp4, 1, %g0 ;\ 2902241Shuah bnz,pn %xcc, lbl/**/1 ;\ 2912241Shuah srlx tmp4, 57, tmp4 ;\ 2922241Shuah andcc tmp4, 1, %g0 ;\ 2932241Shuah beq,pn %xcc, lbl/**/1 ;\ 2942241Shuah nop ;\ 2952241Shuah set TAGREAD_CTX_MASK, tmp1 ;\ 2962241Shuah ldxa [tmp3]ASI_ITLB_TAGREAD, tmp2 ;\ 2972241Shuah and tmp2, tmp1, tmp6 ;\ 2982241Shuah andn tmp2, tmp1, tmp5 ;\ 2992241Shuah set KCONTEXT, tmp4 ;\ 3002241Shuah cmp tmp6, tmp4 ;\ 3012241Shuah be lbl/**/1 ;\ 3022241Shuah nop ;\ 3032241Shuah VTAG_FLUSHUPAGE(VI/**/lbl, tmp5, tmp6, tmp1, tmp2, tmp3, tmp4) ;\ 3042241Shuahlbl/**/1: ;\ 3052241Shuah brgz,pt arg1, lbl/**/0 ;\ 3062241Shuah sub arg1, 1, arg1 3072241Shuah 3082241Shuah 3092241Shuah 3100Sstevel@tonic-gate/* 3110Sstevel@tonic-gate * Macro for getting to offset from 'cpu_private' ptr. The 'cpu_private' 3120Sstevel@tonic-gate * ptr is in the machcpu structure. 
 * r_or_s:	Register or symbol offset from 'cpu_private' ptr.
 * scr1:	Scratch, ptr is returned in this register.
 * scr2:	Scratch
 */
#define	GET_CPU_PRIVATE_PTR(r_or_s, scr1, scr2, label)		\
	CPU_ADDR(scr1, scr2);						\
	ldn	[scr1 + CPU_PRIVATE], scr1;				\
	cmp	scr1, 0;						\
	be	label;							\
	nop;								\
	add	scr1, r_or_s, scr1;					\

#ifdef HUMMINGBIRD
/*
 * UltraSPARC-IIe processor supports both 4-way set associative and
 * direct map E$. For performance reasons, we flush E$ by placing it
 * in direct map mode for data load/store and restore the state after
 * we are done flushing it. Keep interrupts off while flushing in this
 * manner.
 *
 * We flush the entire ecache by starting at one end and loading each
 * successive ecache line for the 2*ecache-size range. We have to repeat
 * the flush operation to guarantee that the entire ecache has been
 * flushed.
 *
 * For flushing a specific physical address, we start at the aliased
 * address and load at set-size stride, wrapping around at 2*ecache-size
 * boundary and skipping the physical address being flushed. It takes
 * 10 loads to guarantee that the physical address has been flushed.
 */

#define	HB_ECACHE_FLUSH_CNT	2
#define	HB_PHYS_FLUSH_CNT	10	/* #loads to flush specific paddr */
#endif /* HUMMINGBIRD */

/* END CSTYLED */

#endif	/* !lint */

/*
 * Spitfire MMU and Cache operations.
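 *
 * As background for the E$ (ecache) flush routines further down, the
 * basic displacement flush is nothing more than reading a physically
 * contiguous region through the MMU-bypass ASI so that every cache
 * line gets replaced.  A rough C equivalent (illustrative only;
 * ldxa_mem() stands in for the ldxa [paddr]ASI_MEM access) is:
 *
 *	for (off = size - linesize; (int64_t)off >= 0; off -= linesize)
 *		(void) ldxa_mem(physaddr + off);
 *
 * The Hummingbird (UltraSPARC-IIe) variant additionally drops the E$
 * into direct-map mode and repeats the pass HB_ECACHE_FLUSH_CNT times,
 * as described above.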
3540Sstevel@tonic-gate */ 3550Sstevel@tonic-gate 3560Sstevel@tonic-gate#if defined(lint) 3570Sstevel@tonic-gate 3580Sstevel@tonic-gate/*ARGSUSED*/ 3590Sstevel@tonic-gatevoid 3602241Shuahvtag_flushpage(caddr_t vaddr, uint64_t sfmmup) 3610Sstevel@tonic-gate{} 3620Sstevel@tonic-gate 3630Sstevel@tonic-gate/*ARGSUSED*/ 3640Sstevel@tonic-gatevoid 3650Sstevel@tonic-gatevtag_flushall(void) 3660Sstevel@tonic-gate{} 3672241Shuah 3680Sstevel@tonic-gate/*ARGSUSED*/ 3690Sstevel@tonic-gatevoid 3702241Shuahvtag_flushall_uctxs(void) 3712241Shuah{} 3722241Shuah 3732241Shuah/*ARGSUSED*/ 3742241Shuahvoid 3752241Shuahvtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup) 3760Sstevel@tonic-gate{} 3770Sstevel@tonic-gate 3780Sstevel@tonic-gate/*ARGSUSED*/ 3790Sstevel@tonic-gatevoid 3802241Shuahvtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt) 3810Sstevel@tonic-gate{} 3820Sstevel@tonic-gate 3830Sstevel@tonic-gate/*ARGSUSED*/ 3840Sstevel@tonic-gatevoid 3850Sstevel@tonic-gatevtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2) 3860Sstevel@tonic-gate{} 3870Sstevel@tonic-gate 3880Sstevel@tonic-gate/*ARGSUSED*/ 3890Sstevel@tonic-gatevoid 3900Sstevel@tonic-gatevac_flushpage(pfn_t pfnum, int vcolor) 3910Sstevel@tonic-gate{} 3920Sstevel@tonic-gate 3930Sstevel@tonic-gate/*ARGSUSED*/ 3940Sstevel@tonic-gatevoid 3950Sstevel@tonic-gatevac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor) 3960Sstevel@tonic-gate{} 3970Sstevel@tonic-gate 3980Sstevel@tonic-gate/*ARGSUSED*/ 3990Sstevel@tonic-gatevoid 4000Sstevel@tonic-gateinit_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2) 4010Sstevel@tonic-gate{} 4020Sstevel@tonic-gate 4030Sstevel@tonic-gate/*ARGSUSED*/ 4040Sstevel@tonic-gatevoid 4050Sstevel@tonic-gateinit_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2) 4060Sstevel@tonic-gate{} 4070Sstevel@tonic-gate 4080Sstevel@tonic-gate/*ARGSUSED*/ 4090Sstevel@tonic-gatevoid 4100Sstevel@tonic-gateflush_instr_mem(caddr_t vaddr, size_t len) 4110Sstevel@tonic-gate{} 4120Sstevel@tonic-gate 4130Sstevel@tonic-gate/*ARGSUSED*/ 4140Sstevel@tonic-gatevoid 4150Sstevel@tonic-gateflush_ecache(uint64_t physaddr, size_t size, size_t linesize) 4160Sstevel@tonic-gate{} 4170Sstevel@tonic-gate 4180Sstevel@tonic-gate/*ARGSUSED*/ 4190Sstevel@tonic-gatevoid 4200Sstevel@tonic-gateget_ecache_dtag(uint32_t ecache_idx, uint64_t *ecache_data, 4210Sstevel@tonic-gate uint64_t *ecache_tag, uint64_t *oafsr, uint64_t *acc_afsr) 4220Sstevel@tonic-gate{} 4230Sstevel@tonic-gate 4240Sstevel@tonic-gate/* ARGSUSED */ 4250Sstevel@tonic-gateuint64_t 4260Sstevel@tonic-gateget_ecache_tag(uint32_t id, uint64_t *nafsr, uint64_t *acc_afsr) 4270Sstevel@tonic-gate{ 4280Sstevel@tonic-gate return ((uint64_t)0); 4290Sstevel@tonic-gate} 4300Sstevel@tonic-gate 4310Sstevel@tonic-gate/* ARGSUSED */ 4320Sstevel@tonic-gateuint64_t 4330Sstevel@tonic-gatecheck_ecache_line(uint32_t id, uint64_t *acc_afsr) 4340Sstevel@tonic-gate{ 4350Sstevel@tonic-gate return ((uint64_t)0); 4360Sstevel@tonic-gate} 4370Sstevel@tonic-gate 4380Sstevel@tonic-gate/*ARGSUSED*/ 4390Sstevel@tonic-gatevoid 4400Sstevel@tonic-gatekdi_flush_idcache(int dcache_size, int dcache_lsize, 4410Sstevel@tonic-gate int icache_size, int icache_lsize) 4420Sstevel@tonic-gate{} 4430Sstevel@tonic-gate 4440Sstevel@tonic-gate#else /* lint */ 4450Sstevel@tonic-gate 4460Sstevel@tonic-gate ENTRY_NP(vtag_flushpage) 4470Sstevel@tonic-gate /* 4480Sstevel@tonic-gate * flush page from the tlb 4490Sstevel@tonic-gate * 4500Sstevel@tonic-gate * %o0 = vaddr 4512241Shuah * %o1 = sfmmup 4520Sstevel@tonic-gate */ 4530Sstevel@tonic-gate rdpr 
%pstate, %o5 4540Sstevel@tonic-gate#ifdef DEBUG 4552241Shuah PANIC_IF_INTR_DISABLED_PSTR(%o5, sfdi_label1, %g1) 4560Sstevel@tonic-gate#endif /* DEBUG */ 4570Sstevel@tonic-gate /* 4580Sstevel@tonic-gate * disable ints 4590Sstevel@tonic-gate */ 4600Sstevel@tonic-gate andn %o5, PSTATE_IE, %o4 4610Sstevel@tonic-gate wrpr %o4, 0, %pstate 4620Sstevel@tonic-gate 4630Sstevel@tonic-gate /* 4640Sstevel@tonic-gate * Then, blow out the tlb 4650Sstevel@tonic-gate * Interrupts are disabled to prevent the secondary ctx register 4660Sstevel@tonic-gate * from changing underneath us. 4670Sstevel@tonic-gate */ 4682241Shuah sethi %hi(ksfmmup), %o3 4692241Shuah ldx [%o3 + %lo(ksfmmup)], %o3 4702241Shuah cmp %o3, %o1 4712241Shuah bne,pt %xcc, 1f ! if not kernel as, go to 1 4722241Shuah sethi %hi(FLUSH_ADDR), %o3 4730Sstevel@tonic-gate /* 4740Sstevel@tonic-gate * For KCONTEXT demaps use primary. type = page implicitly 4750Sstevel@tonic-gate */ 4760Sstevel@tonic-gate stxa %g0, [%o0]ASI_DTLB_DEMAP /* dmmu flush for KCONTEXT */ 4770Sstevel@tonic-gate stxa %g0, [%o0]ASI_ITLB_DEMAP /* immu flush for KCONTEXT */ 4782241Shuah flush %o3 4790Sstevel@tonic-gate b 5f 4802241Shuah nop 4810Sstevel@tonic-gate1: 4820Sstevel@tonic-gate /* 4830Sstevel@tonic-gate * User demap. We need to set the secondary context properly. 4840Sstevel@tonic-gate * %o0 = vaddr 4852241Shuah * %o1 = sfmmup 4860Sstevel@tonic-gate * %o3 = FLUSH_ADDR 4870Sstevel@tonic-gate */ 4882241Shuah SFMMU_CPU_CNUM(%o1, %g1, %g2) /* %g1 = sfmmu cnum on this CPU */ 4892241Shuah 4900Sstevel@tonic-gate set MMU_SCONTEXT, %o4 4910Sstevel@tonic-gate ldxa [%o4]ASI_DMMU, %o2 /* rd old ctxnum */ 4920Sstevel@tonic-gate or DEMAP_SECOND | DEMAP_PAGE_TYPE, %o0, %o0 4932241Shuah cmp %o2, %g1 4942241Shuah be,pt %icc, 4f 4950Sstevel@tonic-gate nop 4962241Shuah stxa %g1, [%o4]ASI_DMMU /* wr new ctxum */ 4970Sstevel@tonic-gate4: 4980Sstevel@tonic-gate stxa %g0, [%o0]ASI_DTLB_DEMAP 4990Sstevel@tonic-gate stxa %g0, [%o0]ASI_ITLB_DEMAP 5000Sstevel@tonic-gate flush %o3 5012241Shuah be,pt %icc, 5f 5020Sstevel@tonic-gate nop 5030Sstevel@tonic-gate stxa %o2, [%o4]ASI_DMMU /* restore old ctxnum */ 5040Sstevel@tonic-gate flush %o3 5050Sstevel@tonic-gate5: 5060Sstevel@tonic-gate retl 5070Sstevel@tonic-gate wrpr %g0, %o5, %pstate /* enable interrupts */ 5080Sstevel@tonic-gate SET_SIZE(vtag_flushpage) 5092241Shuah 5102241Shuah .seg ".text" 5112241Shuah.flushallmsg: 5122241Shuah .asciz "sfmmu_asm: unimplemented flush operation" 5130Sstevel@tonic-gate 5142241Shuah ENTRY_NP(vtag_flushall) 5152241Shuah sethi %hi(.flushallmsg), %o0 5162241Shuah call panic 5172241Shuah or %o0, %lo(.flushallmsg), %o0 5182241Shuah SET_SIZE(vtag_flushall) 5190Sstevel@tonic-gate 5202241Shuah ENTRY_NP(vtag_flushall_uctxs) 5212241Shuah /* 5222241Shuah * flush entire DTLB/ITLB. 5232241Shuah */ 5242241Shuah CPU_INDEX(%g1, %g2) 5252241Shuah mulx %g1, CPU_NODE_SIZE, %g1 5262241Shuah set cpunodes, %g2 5272241Shuah add %g1, %g2, %g1 5282241Shuah lduh [%g1 + ITLB_SIZE], %g2 ! %g2 = # entries in ITLB 5292241Shuah lduh [%g1 + DTLB_SIZE], %g1 ! %g1 = # entries in DTLB 5302241Shuah sub %g2, 1, %g2 ! %g2 = # entries in ITLB - 1 5312241Shuah sub %g1, 1, %g1 ! %g1 = # entries in DTLB - 1 5322241Shuah 5332241Shuah ! 5342241Shuah ! Flush itlb's 5352241Shuah ! 5362241Shuah ITLB_FLUSH_UNLOCKED_UCTXS(I, %g2, %g3, %g4, %o2, %o3, %o4, %o5) 5372241Shuah 5382241Shuah ! 5392241Shuah ! Flush dtlb's 5402241Shuah ! 
5412241Shuah DTLB_FLUSH_UNLOCKED_UCTXS(D, %g1, %g3, %g4, %o2, %o3, %o4, %o5) 5422241Shuah 5432241Shuah membar #Sync 5442241Shuah retl 5450Sstevel@tonic-gate nop 5462241Shuah 5472241Shuah SET_SIZE(vtag_flushall_uctxs) 5480Sstevel@tonic-gate 5490Sstevel@tonic-gate ENTRY_NP(vtag_flushpage_tl1) 5500Sstevel@tonic-gate /* 5510Sstevel@tonic-gate * x-trap to flush page from tlb and tsb 5520Sstevel@tonic-gate * 5530Sstevel@tonic-gate * %g1 = vaddr, zero-extended on 32-bit kernel 5542241Shuah * %g2 = sfmmup 5550Sstevel@tonic-gate * 5560Sstevel@tonic-gate * assumes TSBE_TAG = 0 5570Sstevel@tonic-gate */ 5580Sstevel@tonic-gate srln %g1, MMU_PAGESHIFT, %g1 5590Sstevel@tonic-gate slln %g1, MMU_PAGESHIFT, %g1 /* g1 = vaddr */ 5602241Shuah 5612241Shuah SFMMU_CPU_CNUM(%g2, %g3, %g4) /* %g3 = sfmmu cnum on this CPU */ 5622241Shuah 5630Sstevel@tonic-gate /* We need to set the secondary context properly. */ 5640Sstevel@tonic-gate set MMU_SCONTEXT, %g4 5650Sstevel@tonic-gate ldxa [%g4]ASI_DMMU, %g5 /* rd old ctxnum */ 5660Sstevel@tonic-gate or DEMAP_SECOND | DEMAP_PAGE_TYPE, %g1, %g1 5672241Shuah stxa %g3, [%g4]ASI_DMMU /* wr new ctxum */ 5680Sstevel@tonic-gate stxa %g0, [%g1]ASI_DTLB_DEMAP 5690Sstevel@tonic-gate stxa %g0, [%g1]ASI_ITLB_DEMAP 5700Sstevel@tonic-gate stxa %g5, [%g4]ASI_DMMU /* restore old ctxnum */ 5710Sstevel@tonic-gate membar #Sync 5720Sstevel@tonic-gate retry 5730Sstevel@tonic-gate SET_SIZE(vtag_flushpage_tl1) 5740Sstevel@tonic-gate 5750Sstevel@tonic-gate ENTRY_NP(vtag_flush_pgcnt_tl1) 5760Sstevel@tonic-gate /* 5770Sstevel@tonic-gate * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb 5780Sstevel@tonic-gate * 5790Sstevel@tonic-gate * %g1 = vaddr, zero-extended on 32-bit kernel 5802241Shuah * %g2 = <sfmmup58 | pgcnt6> 5810Sstevel@tonic-gate * 5820Sstevel@tonic-gate * NOTE: this handler relies on the fact that no 5830Sstevel@tonic-gate * interrupts or traps can occur during the loop 5840Sstevel@tonic-gate * issuing the TLB_DEMAP operations. It is assumed 5850Sstevel@tonic-gate * that interrupts are disabled and this code is 5860Sstevel@tonic-gate * fetching from the kernel locked text address. 5870Sstevel@tonic-gate * 5880Sstevel@tonic-gate * assumes TSBE_TAG = 0 5890Sstevel@tonic-gate */ 5900Sstevel@tonic-gate srln %g1, MMU_PAGESHIFT, %g1 5910Sstevel@tonic-gate slln %g1, MMU_PAGESHIFT, %g1 /* g1 = vaddr */ 5920Sstevel@tonic-gate or DEMAP_SECOND | DEMAP_PAGE_TYPE, %g1, %g1 5932241Shuah 5942241Shuah set SFMMU_PGCNT_MASK, %g4 5952241Shuah and %g4, %g2, %g3 /* g3 = pgcnt - 1 */ 5962241Shuah add %g3, 1, %g3 /* g3 = pgcnt */ 5972241Shuah 5982241Shuah andn %g2, SFMMU_PGCNT_MASK, %g2 /* g2 = sfmmup */ 5992241Shuah 6002241Shuah SFMMU_CPU_CNUM(%g2, %g5, %g6) ! %g5 = sfmmu cnum on this CPU 6012241Shuah 6020Sstevel@tonic-gate /* We need to set the secondary context properly. 
*/ 6030Sstevel@tonic-gate set MMU_SCONTEXT, %g4 6042241Shuah ldxa [%g4]ASI_DMMU, %g6 /* read old ctxnum */ 6052241Shuah stxa %g5, [%g4]ASI_DMMU /* write new ctxum */ 6060Sstevel@tonic-gate 6070Sstevel@tonic-gate set MMU_PAGESIZE, %g2 /* g2 = pgsize */ 6082241Shuah sethi %hi(FLUSH_ADDR), %g5 6090Sstevel@tonic-gate1: 6100Sstevel@tonic-gate stxa %g0, [%g1]ASI_DTLB_DEMAP 6110Sstevel@tonic-gate stxa %g0, [%g1]ASI_ITLB_DEMAP 6122241Shuah flush %g5 6130Sstevel@tonic-gate deccc %g3 /* decr pgcnt */ 6140Sstevel@tonic-gate bnz,pt %icc,1b 6152241Shuah add %g1, %g2, %g1 /* go to nextpage */ 6160Sstevel@tonic-gate 6172241Shuah stxa %g6, [%g4]ASI_DMMU /* restore old ctxnum */ 6180Sstevel@tonic-gate membar #Sync 6190Sstevel@tonic-gate retry 6200Sstevel@tonic-gate SET_SIZE(vtag_flush_pgcnt_tl1) 6210Sstevel@tonic-gate 6220Sstevel@tonic-gate ! Not implemented on US1/US2 6230Sstevel@tonic-gate ENTRY_NP(vtag_flushall_tl1) 6240Sstevel@tonic-gate retry 6250Sstevel@tonic-gate SET_SIZE(vtag_flushall_tl1) 6260Sstevel@tonic-gate 6270Sstevel@tonic-gate/* 6280Sstevel@tonic-gate * vac_flushpage(pfnum, color) 6290Sstevel@tonic-gate * Flush 1 8k page of the D-$ with physical page = pfnum 6300Sstevel@tonic-gate * Algorithm: 6310Sstevel@tonic-gate * The spitfire dcache is a 16k direct mapped virtual indexed, 6320Sstevel@tonic-gate * physically tagged cache. Given the pfnum we read all cache 6330Sstevel@tonic-gate * lines for the corresponding page in the cache (determined by 6340Sstevel@tonic-gate * the color). Each cache line is compared with 6350Sstevel@tonic-gate * the tag created from the pfnum. If the tags match we flush 6360Sstevel@tonic-gate * the line. 6370Sstevel@tonic-gate */ 6380Sstevel@tonic-gate .seg ".data" 6390Sstevel@tonic-gate .align 8 6400Sstevel@tonic-gate .global dflush_type 6410Sstevel@tonic-gatedflush_type: 6420Sstevel@tonic-gate .word FLUSHPAGE_TYPE 6430Sstevel@tonic-gate .seg ".text" 6440Sstevel@tonic-gate 6450Sstevel@tonic-gate ENTRY(vac_flushpage) 6460Sstevel@tonic-gate /* 6470Sstevel@tonic-gate * flush page from the d$ 6480Sstevel@tonic-gate * 6490Sstevel@tonic-gate * %o0 = pfnum, %o1 = color 6500Sstevel@tonic-gate */ 6510Sstevel@tonic-gate DCACHE_FLUSHPAGE(%o0, %o1, %o2, %o3, %o4) 6520Sstevel@tonic-gate retl 6530Sstevel@tonic-gate nop 6540Sstevel@tonic-gate SET_SIZE(vac_flushpage) 6550Sstevel@tonic-gate 6560Sstevel@tonic-gate ENTRY_NP(vac_flushpage_tl1) 6570Sstevel@tonic-gate /* 6580Sstevel@tonic-gate * x-trap to flush page from the d$ 6590Sstevel@tonic-gate * 6600Sstevel@tonic-gate * %g1 = pfnum, %g2 = color 6610Sstevel@tonic-gate */ 6620Sstevel@tonic-gate DCACHE_FLUSHPAGE(%g1, %g2, %g3, %g4, %g5) 6630Sstevel@tonic-gate retry 6640Sstevel@tonic-gate SET_SIZE(vac_flushpage_tl1) 6650Sstevel@tonic-gate 6660Sstevel@tonic-gate ENTRY(vac_flushcolor) 6670Sstevel@tonic-gate /* 6680Sstevel@tonic-gate * %o0 = vcolor 6690Sstevel@tonic-gate */ 6700Sstevel@tonic-gate DCACHE_FLUSHCOLOR(%o0, %o1, %o2) 6710Sstevel@tonic-gate retl 6720Sstevel@tonic-gate nop 6730Sstevel@tonic-gate SET_SIZE(vac_flushcolor) 6740Sstevel@tonic-gate 6750Sstevel@tonic-gate ENTRY(vac_flushcolor_tl1) 6760Sstevel@tonic-gate /* 6770Sstevel@tonic-gate * %g1 = vcolor 6780Sstevel@tonic-gate */ 6790Sstevel@tonic-gate DCACHE_FLUSHCOLOR(%g1, %g2, %g3) 6800Sstevel@tonic-gate retry 6810Sstevel@tonic-gate SET_SIZE(vac_flushcolor_tl1) 6820Sstevel@tonic-gate 6830Sstevel@tonic-gate 6840Sstevel@tonic-gate .global _dispatch_status_busy 6850Sstevel@tonic-gate_dispatch_status_busy: 6860Sstevel@tonic-gate .asciz "ASI_INTR_DISPATCH_STATUS error: busy" 
	.align	4

/*
 * Determine whether or not the IDSR is busy.
 * Entry: no arguments
 * Returns: 1 if busy, 0 otherwise
 */
	ENTRY(idsr_busy)
	ldxa	[%g0]ASI_INTR_DISPATCH_STATUS, %g1
	clr	%o0
	btst	IDSR_BUSY, %g1
	bz,a,pt	%xcc, 1f
	mov	1, %o0
1:
	retl
	nop
	SET_SIZE(idsr_busy)

/*
 * Setup interrupt dispatch data registers
 * Entry:
 *	%o0 - function or inumber to call
 *	%o1, %o2 - arguments (2 uint64_t's)
 */
	.seg "text"

	ENTRY(init_mondo)
#ifdef DEBUG
	!
	! IDSR should not be busy at the moment
	!
	ldxa	[%g0]ASI_INTR_DISPATCH_STATUS, %g1
	btst	IDSR_BUSY, %g1
	bz,pt	%xcc, 1f
	nop

	sethi	%hi(_dispatch_status_busy), %o0
	call	panic
	or	%o0, %lo(_dispatch_status_busy), %o0
#endif /* DEBUG */

	ALTENTRY(init_mondo_nocheck)
	!
	! interrupt vector dispatch data reg 0
	!
1:
	mov	IDDR_0, %g1
	mov	IDDR_1, %g2
	mov	IDDR_2, %g3
	stxa	%o0, [%g1]ASI_INTR_DISPATCH

	!
	! interrupt vector dispatch data reg 1
	!
	stxa	%o1, [%g2]ASI_INTR_DISPATCH

	!
	! interrupt vector dispatch data reg 2
	!
	stxa	%o2, [%g3]ASI_INTR_DISPATCH

	retl
	membar	#Sync			! allowed to be in the delay slot
	SET_SIZE(init_mondo)

/*
 * Ship mondo to upaid
 */
	ENTRY_NP(shipit)
	sll	%o0, IDCR_PID_SHIFT, %g1	! IDCR<18:14> = upa id
	or	%g1, IDCR_OFFSET, %g1		! IDCR<13:0> = 0x70
	stxa	%g0, [%g1]ASI_INTR_DISPATCH	! interrupt vector dispatch
#if defined(SF_ERRATA_54)
	membar	#Sync			! store must occur before load
	mov	0x20, %g3		!
UDBH Control Register Read 7620Sstevel@tonic-gate ldxa [%g3]ASI_SDB_INTR_R, %g0 7630Sstevel@tonic-gate#endif 7640Sstevel@tonic-gate retl 7650Sstevel@tonic-gate membar #Sync 7660Sstevel@tonic-gate SET_SIZE(shipit) 7670Sstevel@tonic-gate 7680Sstevel@tonic-gate 7690Sstevel@tonic-gate/* 7700Sstevel@tonic-gate * flush_instr_mem: 7710Sstevel@tonic-gate * Flush a portion of the I-$ starting at vaddr 7720Sstevel@tonic-gate * %o0 vaddr 7730Sstevel@tonic-gate * %o1 bytes to be flushed 7740Sstevel@tonic-gate */ 7750Sstevel@tonic-gate 7760Sstevel@tonic-gate ENTRY(flush_instr_mem) 7770Sstevel@tonic-gate membar #StoreStore ! Ensure the stores 7780Sstevel@tonic-gate ! are globally visible 7790Sstevel@tonic-gate1: 7800Sstevel@tonic-gate flush %o0 7810Sstevel@tonic-gate subcc %o1, ICACHE_FLUSHSZ, %o1 ! bytes = bytes-0x20 7820Sstevel@tonic-gate bgu,pt %ncc, 1b 7830Sstevel@tonic-gate add %o0, ICACHE_FLUSHSZ, %o0 ! vaddr = vaddr+0x20 7840Sstevel@tonic-gate 7850Sstevel@tonic-gate retl 7860Sstevel@tonic-gate nop 7870Sstevel@tonic-gate SET_SIZE(flush_instr_mem) 7880Sstevel@tonic-gate 7890Sstevel@tonic-gate/* 7900Sstevel@tonic-gate * flush_ecache: 7910Sstevel@tonic-gate * Flush the entire e$ using displacement flush by reading through a 7920Sstevel@tonic-gate * physically contiguous area. We use mmu bypass asi (ASI_MEM) while 7930Sstevel@tonic-gate * reading this physical address range so that data doesn't go to d$. 7940Sstevel@tonic-gate * incoming arguments: 7950Sstevel@tonic-gate * %o0 - 64 bit physical address 7960Sstevel@tonic-gate * %o1 - size of address range to read 7970Sstevel@tonic-gate * %o2 - ecache linesize 7980Sstevel@tonic-gate */ 7990Sstevel@tonic-gate ENTRY(flush_ecache) 8000Sstevel@tonic-gate#ifndef HUMMINGBIRD 8010Sstevel@tonic-gate b 2f 8020Sstevel@tonic-gate nop 8030Sstevel@tonic-gate1: 8040Sstevel@tonic-gate ldxa [%o0 + %o1]ASI_MEM, %g0 ! start reading from physaddr + size 8050Sstevel@tonic-gate2: 8060Sstevel@tonic-gate subcc %o1, %o2, %o1 8070Sstevel@tonic-gate bcc,a,pt %ncc, 1b 8080Sstevel@tonic-gate nop 8090Sstevel@tonic-gate 8100Sstevel@tonic-gate#else /* HUMMINGBIRD */ 8110Sstevel@tonic-gate /* 8120Sstevel@tonic-gate * UltraSPARC-IIe processor supports both 4-way set associative 8130Sstevel@tonic-gate * and direct map E$. For performance reasons, we flush E$ by 8140Sstevel@tonic-gate * placing it in direct map mode for data load/store and restore 8150Sstevel@tonic-gate * the state after we are done flushing it. It takes 2 iterations 8160Sstevel@tonic-gate * to guarantee that the entire ecache has been flushed. 8170Sstevel@tonic-gate * 8180Sstevel@tonic-gate * Keep the interrupts disabled while flushing E$ in this manner. 8190Sstevel@tonic-gate */ 8200Sstevel@tonic-gate rdpr %pstate, %g4 ! current pstate (restored later) 8210Sstevel@tonic-gate andn %g4, PSTATE_IE, %g5 8220Sstevel@tonic-gate wrpr %g0, %g5, %pstate ! disable interrupts 8230Sstevel@tonic-gate 8240Sstevel@tonic-gate ! Place E$ in direct map mode for data access 8250Sstevel@tonic-gate or %g0, 1, %g5 8260Sstevel@tonic-gate sllx %g5, HB_UPA_DMAP_DATA_BIT, %g5 8270Sstevel@tonic-gate ldxa [%g0]ASI_UPA_CONFIG, %g1 ! current UPA config (restored later) 8280Sstevel@tonic-gate or %g1, %g5, %g5 8290Sstevel@tonic-gate membar #Sync 8300Sstevel@tonic-gate stxa %g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access 8310Sstevel@tonic-gate membar #Sync 8320Sstevel@tonic-gate 8330Sstevel@tonic-gate ! 
flush entire ecache HB_ECACHE_FLUSH_CNT times 8340Sstevel@tonic-gate mov HB_ECACHE_FLUSH_CNT-1, %g5 8350Sstevel@tonic-gate2: 8360Sstevel@tonic-gate sub %o1, %o2, %g3 ! start from last entry 8370Sstevel@tonic-gate1: 8380Sstevel@tonic-gate ldxa [%o0 + %g3]ASI_MEM, %g0 ! start reading from physaddr + size 8390Sstevel@tonic-gate subcc %g3, %o2, %g3 8400Sstevel@tonic-gate bgeu,a,pt %ncc, 1b 8410Sstevel@tonic-gate nop 8420Sstevel@tonic-gate brgz,a,pt %g5, 2b 8430Sstevel@tonic-gate dec %g5 8440Sstevel@tonic-gate 8450Sstevel@tonic-gate membar #Sync 8460Sstevel@tonic-gate stxa %g1, [%g0]ASI_UPA_CONFIG ! restore UPA config reg 8470Sstevel@tonic-gate membar #Sync 8480Sstevel@tonic-gate wrpr %g0, %g4, %pstate ! restore earlier pstate 8490Sstevel@tonic-gate#endif /* HUMMINGBIRD */ 8500Sstevel@tonic-gate 8510Sstevel@tonic-gate retl 8520Sstevel@tonic-gate nop 8530Sstevel@tonic-gate SET_SIZE(flush_ecache) 8540Sstevel@tonic-gate 8550Sstevel@tonic-gate/* 8560Sstevel@tonic-gate * void kdi_flush_idcache(int dcache_size, int dcache_linesize, 8570Sstevel@tonic-gate * int icache_size, int icache_linesize) 8580Sstevel@tonic-gate */ 8590Sstevel@tonic-gate ENTRY(kdi_flush_idcache) 8600Sstevel@tonic-gate DCACHE_FLUSHALL(%o0, %o1, %g1) 8610Sstevel@tonic-gate ICACHE_FLUSHALL(%o2, %o3, %g1) 8620Sstevel@tonic-gate membar #Sync 8630Sstevel@tonic-gate retl 8640Sstevel@tonic-gate nop 8650Sstevel@tonic-gate SET_SIZE(kdi_flush_idcache) 8660Sstevel@tonic-gate 8670Sstevel@tonic-gate 8680Sstevel@tonic-gate/* 8690Sstevel@tonic-gate * void get_ecache_dtag(uint32_t ecache_idx, uint64_t *data, uint64_t *tag, 8700Sstevel@tonic-gate * uint64_t *oafsr, uint64_t *acc_afsr) 8710Sstevel@tonic-gate * 8720Sstevel@tonic-gate * Get ecache data and tag. The ecache_idx argument is assumed to be aligned 8730Sstevel@tonic-gate * on a 64-byte boundary. The corresponding AFSR value is also read for each 8740Sstevel@tonic-gate * 8 byte ecache data obtained. The ecache data is assumed to be a pointer 8750Sstevel@tonic-gate * to an array of 16 uint64_t's (e$data & afsr value). The action to read the 8760Sstevel@tonic-gate * data and tag should be atomic to make sense. We will be executing at PIL15 8770Sstevel@tonic-gate * and will disable IE, so nothing can occur between the two reads. We also 8780Sstevel@tonic-gate * assume that the execution of this code does not interfere with what we are 8790Sstevel@tonic-gate * reading - not really possible, but we'll live with it for now. 8800Sstevel@tonic-gate * We also pass the old AFSR value before clearing it, and caller will take 8810Sstevel@tonic-gate * appropriate actions if the important bits are non-zero. 8820Sstevel@tonic-gate * 8830Sstevel@tonic-gate * If the caller wishes to track the AFSR in cases where the CP bit is 8840Sstevel@tonic-gate * set, an address should be passed in for acc_afsr. Otherwise, this 8850Sstevel@tonic-gate * argument may be null. 8860Sstevel@tonic-gate * 8870Sstevel@tonic-gate * Register Usage: 8880Sstevel@tonic-gate * i0: In: 32-bit e$ index 8890Sstevel@tonic-gate * i1: In: addr of e$ data 8900Sstevel@tonic-gate * i2: In: addr of e$ tag 8910Sstevel@tonic-gate * i3: In: addr of old afsr 8920Sstevel@tonic-gate * i4: In: addr of accumulated afsr - may be null 8930Sstevel@tonic-gate */ 8940Sstevel@tonic-gate ENTRY(get_ecache_dtag) 8950Sstevel@tonic-gate save %sp, -SA(MINFRAME), %sp 8960Sstevel@tonic-gate or %g0, 1, %l4 8970Sstevel@tonic-gate sllx %l4, 39, %l4 ! set bit 39 for e$ data access 8980Sstevel@tonic-gate or %i0, %l4, %g6 ! 
%g6 = e$ addr for data read 8990Sstevel@tonic-gate sllx %l4, 1, %l4 ! set bit 40 for e$ tag access 9000Sstevel@tonic-gate or %i0, %l4, %l4 ! %l4 = e$ addr for tag read 9010Sstevel@tonic-gate 9020Sstevel@tonic-gate rdpr %pstate, %i5 9030Sstevel@tonic-gate andn %i5, PSTATE_IE | PSTATE_AM, %i0 9040Sstevel@tonic-gate wrpr %i0, %g0, %pstate ! clear IE, AM bits 9050Sstevel@tonic-gate 9060Sstevel@tonic-gate ldxa [%g0]ASI_ESTATE_ERR, %g1 9070Sstevel@tonic-gate stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors 9080Sstevel@tonic-gate membar #Sync 9090Sstevel@tonic-gate 9100Sstevel@tonic-gate ldxa [%g0]ASI_AFSR, %i0 ! grab the old-afsr before tag read 9110Sstevel@tonic-gate stx %i0, [%i3] ! write back the old-afsr 9120Sstevel@tonic-gate 9130Sstevel@tonic-gate ldxa [%l4]ASI_EC_R, %g0 ! read tag into E$ tag reg 9140Sstevel@tonic-gate ldxa [%g0]ASI_EC_DIAG, %i0 ! read tag from E$ tag reg 9150Sstevel@tonic-gate stx %i0, [%i2] ! write back tag result 9160Sstevel@tonic-gate 9170Sstevel@tonic-gate clr %i2 ! loop count 9180Sstevel@tonic-gate 9190Sstevel@tonic-gate brz %i4, 1f ! acc_afsr == NULL? 9200Sstevel@tonic-gate ldxa [%g0]ASI_AFSR, %i0 ! grab the old-afsr before clearing 9210Sstevel@tonic-gate srlx %i0, P_AFSR_CP_SHIFT, %l0 9220Sstevel@tonic-gate btst 1, %l0 9230Sstevel@tonic-gate bz 1f 9240Sstevel@tonic-gate nop 9250Sstevel@tonic-gate ldx [%i4], %g4 9260Sstevel@tonic-gate or %g4, %i0, %g4 ! aggregate AFSR in cpu private 9270Sstevel@tonic-gate stx %g4, [%i4] 9280Sstevel@tonic-gate1: 9290Sstevel@tonic-gate stxa %i0, [%g0]ASI_AFSR ! clear AFSR 9300Sstevel@tonic-gate membar #Sync 9310Sstevel@tonic-gate ldxa [%g6]ASI_EC_R, %i0 ! read the 8byte E$data 9320Sstevel@tonic-gate stx %i0, [%i1] ! save the E$data 9330Sstevel@tonic-gate add %g6, 8, %g6 9340Sstevel@tonic-gate add %i1, 8, %i1 9350Sstevel@tonic-gate ldxa [%g0]ASI_AFSR, %i0 ! read AFSR for this 16byte read 9360Sstevel@tonic-gate srlx %i0, P_AFSR_CP_SHIFT, %l0 9370Sstevel@tonic-gate btst 1, %l0 9380Sstevel@tonic-gate bz 2f 9390Sstevel@tonic-gate stx %i0, [%i1] ! save the AFSR 9400Sstevel@tonic-gate 9410Sstevel@tonic-gate brz %i4, 2f ! acc_afsr == NULL? 9420Sstevel@tonic-gate nop 9430Sstevel@tonic-gate ldx [%i4], %g4 9440Sstevel@tonic-gate or %g4, %i0, %g4 ! aggregate AFSR in cpu private 9450Sstevel@tonic-gate stx %g4, [%i4] 9460Sstevel@tonic-gate2: 9470Sstevel@tonic-gate add %i2, 8, %i2 9480Sstevel@tonic-gate cmp %i2, 64 9490Sstevel@tonic-gate bl,a 1b 9500Sstevel@tonic-gate add %i1, 8, %i1 9510Sstevel@tonic-gate stxa %i0, [%g0]ASI_AFSR ! clear AFSR 9520Sstevel@tonic-gate membar #Sync 9530Sstevel@tonic-gate stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable 9540Sstevel@tonic-gate membar #Sync 9550Sstevel@tonic-gate wrpr %g0, %i5, %pstate 9560Sstevel@tonic-gate ret 9570Sstevel@tonic-gate restore 9580Sstevel@tonic-gate SET_SIZE(get_ecache_dtag) 9590Sstevel@tonic-gate#endif /* lint */ 9600Sstevel@tonic-gate 9610Sstevel@tonic-gate#if defined(lint) 9620Sstevel@tonic-gate/* 9630Sstevel@tonic-gate * The ce_err function handles trap type 0x63 (corrected_ECC_error) at tl=0. 9640Sstevel@tonic-gate * Steps: 1. GET AFSR 2. Get AFAR <40:4> 3. Get datapath error status 9650Sstevel@tonic-gate * 4. Clear datapath error bit(s) 5. Clear AFSR error bit 9660Sstevel@tonic-gate * 6. package data in %g2 and %g3 7. 
call cpu_ce_error via sys_trap
 * %g2: [ 52:43 UDB lower | 42:33 UDB upper | 32:0 afsr ] - arg #3/arg #1
 * %g3: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
 */
void
ce_err(void)
{}

void
ce_err_tl1(void)
{}


/*
 * The async_err function handles trap types 0x0A (instruction_access_error)
 * and 0x32 (data_access_error) at TL = 0 and TL > 0. When we branch here,
 * %g5 will have the trap type (with 0x200 set if we're at TL > 0).
 *
 * Steps: 1. Get AFSR 2. Get AFAR <40:4> 3. If not UE error skip UDB registers.
 * 4. Else get and clear datapath error bit(s) 5. Clear AFSR error bits
 * 6. package data in %g2 and %g3 7. disable all cpu errors, because
 * trap is likely to be fatal 8. call cpu_async_error via sys_trap
 *
 * %g3: [ 63:53 tt | 52:43 UDB_L | 42:33 UDB_U | 32:0 afsr ] - arg #3/arg #1
 * %g2: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
 */
void
async_err(void)
{}

/*
 * The clr_datapath function clears any error bits set in the UDB regs.
 */
void
clr_datapath(void)
{}

/*
 * The get_udb_errors() function gets the current value of the
 * Datapath Error Registers.
 */
/*ARGSUSED*/
void
get_udb_errors(uint64_t *udbh, uint64_t *udbl)
{
	*udbh = 0;
	*udbl = 0;
}

#else /* lint */

	ENTRY_NP(ce_err)
	ldxa	[%g0]ASI_AFSR, %g3	! save afsr in g3

	!
	! Check for a UE... From Kevin.Normoyle:
	! We try to switch to the trap for the UE, but since that's
	! a hardware pipeline, we might get to the CE trap before we
	! can switch. The UDB and AFSR registers will have both the
	! UE and CE bits set but the UDB syndrome and the AFAR will be
	! for the UE.
	!
	or	%g0, 1, %g1		! put 1 in g1
	sllx	%g1, 21, %g1		! shift left to <21> afsr UE
	andcc	%g1, %g3, %g0		! check for UE in afsr
	bnz	async_err		! handle the UE, not the CE
	or	%g0, 0x63, %g5		! pass along the CE ttype
	!
	! Disable further CE traps to avoid recursion (stack overflow)
	! and staying above XCALL_PIL for extended periods.
	!
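	! In rough C terms this step is just (illustrative pseudocode,
	! with ldxa()/stxa() standing in for the ASI accesses):
	!
	!	err = ldxa(ASI_ESTATE_ERR);
	!	stxa(ASI_ESTATE_ERR, err & ~0x1);	! bit 0 = CEEN
	!	membar_sync();
	!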
10370Sstevel@tonic-gate ldxa [%g0]ASI_ESTATE_ERR, %g2 10380Sstevel@tonic-gate andn %g2, 0x1, %g2 ! clear bit 0 - CEEN 10390Sstevel@tonic-gate stxa %g2, [%g0]ASI_ESTATE_ERR 10400Sstevel@tonic-gate membar #Sync ! required 10410Sstevel@tonic-gate ! 10420Sstevel@tonic-gate ! handle the CE 10430Sstevel@tonic-gate ldxa [%g0]ASI_AFAR, %g2 ! save afar in g2 10440Sstevel@tonic-gate 10450Sstevel@tonic-gate set P_DER_H, %g4 ! put P_DER_H in g4 10460Sstevel@tonic-gate ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb upper half into g5 10470Sstevel@tonic-gate or %g0, 1, %g6 ! put 1 in g6 10480Sstevel@tonic-gate sllx %g6, 8, %g6 ! shift g6 to <8> sdb CE 10490Sstevel@tonic-gate andcc %g5, %g6, %g1 ! check for CE in upper half 10500Sstevel@tonic-gate sllx %g5, 33, %g5 ! shift upper bits to <42:33> 10510Sstevel@tonic-gate or %g3, %g5, %g3 ! or with afsr bits 10520Sstevel@tonic-gate bz,a 1f ! no error, goto 1f 10530Sstevel@tonic-gate nop 10540Sstevel@tonic-gate stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg error bit 10550Sstevel@tonic-gate membar #Sync ! membar sync required 10560Sstevel@tonic-gate1: 10570Sstevel@tonic-gate set P_DER_L, %g4 ! put P_DER_L in g4 10580Sstevel@tonic-gate ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb lower half into g6 10590Sstevel@tonic-gate andcc %g5, %g6, %g1 ! check for CE in lower half 10600Sstevel@tonic-gate sllx %g5, 43, %g5 ! shift upper bits to <52:43> 10610Sstevel@tonic-gate or %g3, %g5, %g3 ! or with afsr bits 10620Sstevel@tonic-gate bz,a 2f ! no error, goto 2f 10630Sstevel@tonic-gate nop 10640Sstevel@tonic-gate stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg error bit 10650Sstevel@tonic-gate membar #Sync ! membar sync required 10660Sstevel@tonic-gate2: 10670Sstevel@tonic-gate or %g0, 1, %g4 ! put 1 in g4 10680Sstevel@tonic-gate sllx %g4, 20, %g4 ! shift left to <20> afsr CE 10690Sstevel@tonic-gate stxa %g4, [%g0]ASI_AFSR ! use g4 to clear afsr CE error 10700Sstevel@tonic-gate membar #Sync ! membar sync required 10710Sstevel@tonic-gate 10720Sstevel@tonic-gate set cpu_ce_error, %g1 ! put *cpu_ce_error() in g1 10730Sstevel@tonic-gate rdpr %pil, %g6 ! read pil into %g6 10740Sstevel@tonic-gate subcc %g6, PIL_15, %g0 10750Sstevel@tonic-gate movneg %icc, PIL_14, %g4 ! run at pil 14 unless already at 15 10760Sstevel@tonic-gate sethi %hi(sys_trap), %g5 10770Sstevel@tonic-gate jmp %g5 + %lo(sys_trap) ! goto sys_trap 10780Sstevel@tonic-gate movge %icc, PIL_15, %g4 ! already at pil 15 10790Sstevel@tonic-gate SET_SIZE(ce_err) 10800Sstevel@tonic-gate 10810Sstevel@tonic-gate ENTRY_NP(ce_err_tl1) 10820Sstevel@tonic-gate#ifndef TRAPTRACE 10830Sstevel@tonic-gate ldxa [%g0]ASI_AFSR, %g7 10840Sstevel@tonic-gate stxa %g7, [%g0]ASI_AFSR 10850Sstevel@tonic-gate membar #Sync 10860Sstevel@tonic-gate retry 10870Sstevel@tonic-gate#else 10880Sstevel@tonic-gate set ce_trap_tl1, %g1 10890Sstevel@tonic-gate sethi %hi(dis_err_panic1), %g4 10900Sstevel@tonic-gate jmp %g4 + %lo(dis_err_panic1) 10910Sstevel@tonic-gate nop 10920Sstevel@tonic-gate#endif 10930Sstevel@tonic-gate SET_SIZE(ce_err_tl1) 10940Sstevel@tonic-gate 10950Sstevel@tonic-gate#ifdef TRAPTRACE 10960Sstevel@tonic-gate.celevel1msg: 10970Sstevel@tonic-gate .asciz "Softerror with trap tracing at tl1: AFAR 0x%08x.%08x AFSR 0x%08x.%08x"; 10980Sstevel@tonic-gate 10990Sstevel@tonic-gate ENTRY_NP(ce_trap_tl1) 11000Sstevel@tonic-gate ! upper 32 bits of AFSR already in o3 11010Sstevel@tonic-gate mov %o4, %o0 ! save AFAR upper 32 bits 11020Sstevel@tonic-gate mov %o2, %o4 ! lower 32 bits of AFSR 11030Sstevel@tonic-gate mov %o1, %o2 ! 
lower 32 bits of AFAR 11040Sstevel@tonic-gate mov %o0, %o1 ! upper 32 bits of AFAR 11050Sstevel@tonic-gate set .celevel1msg, %o0 11060Sstevel@tonic-gate call panic 11070Sstevel@tonic-gate nop 11080Sstevel@tonic-gate SET_SIZE(ce_trap_tl1) 11090Sstevel@tonic-gate#endif 11100Sstevel@tonic-gate 11110Sstevel@tonic-gate ! 11120Sstevel@tonic-gate ! async_err is the assembly glue code to get us from the actual trap 11130Sstevel@tonic-gate ! into the CPU module's C error handler. Note that we also branch 11140Sstevel@tonic-gate ! here from ce_err() above. 11150Sstevel@tonic-gate ! 11160Sstevel@tonic-gate ENTRY_NP(async_err) 11170Sstevel@tonic-gate stxa %g0, [%g0]ASI_ESTATE_ERR ! disable ecc and other cpu errors 11180Sstevel@tonic-gate membar #Sync ! membar sync required 11190Sstevel@tonic-gate 11200Sstevel@tonic-gate ldxa [%g0]ASI_AFSR, %g3 ! save afsr in g3 11210Sstevel@tonic-gate ldxa [%g0]ASI_AFAR, %g2 ! save afar in g2 11220Sstevel@tonic-gate 11230Sstevel@tonic-gate sllx %g5, 53, %g5 ! move ttype to <63:53> 11240Sstevel@tonic-gate or %g3, %g5, %g3 ! or to afsr in g3 11250Sstevel@tonic-gate 11260Sstevel@tonic-gate or %g0, 1, %g1 ! put 1 in g1 11270Sstevel@tonic-gate sllx %g1, 21, %g1 ! shift left to <21> afsr UE 11280Sstevel@tonic-gate andcc %g1, %g3, %g0 ! check for UE in afsr 11290Sstevel@tonic-gate bz,a,pn %icc, 2f ! if !UE skip sdb read/clear 11300Sstevel@tonic-gate nop 11310Sstevel@tonic-gate 11320Sstevel@tonic-gate set P_DER_H, %g4 ! put P_DER_H in g4 11330Sstevel@tonic-gate ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb upper half into 56 11340Sstevel@tonic-gate or %g0, 1, %g6 ! put 1 in g6 11350Sstevel@tonic-gate sllx %g6, 9, %g6 ! shift g6 to <9> sdb UE 11360Sstevel@tonic-gate andcc %g5, %g6, %g1 ! check for UE in upper half 11370Sstevel@tonic-gate sllx %g5, 33, %g5 ! shift upper bits to <42:33> 11380Sstevel@tonic-gate or %g3, %g5, %g3 ! or with afsr bits 11390Sstevel@tonic-gate bz,a 1f ! no error, goto 1f 11400Sstevel@tonic-gate nop 11410Sstevel@tonic-gate stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg UE error bit 11420Sstevel@tonic-gate membar #Sync ! membar sync required 11430Sstevel@tonic-gate1: 11440Sstevel@tonic-gate set P_DER_L, %g4 ! put P_DER_L in g4 11450Sstevel@tonic-gate ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb lower half into g5 11460Sstevel@tonic-gate andcc %g5, %g6, %g1 ! check for UE in lower half 11470Sstevel@tonic-gate sllx %g5, 43, %g5 ! shift upper bits to <52:43> 11480Sstevel@tonic-gate or %g3, %g5, %g3 ! or with afsr bits 11490Sstevel@tonic-gate bz,a 2f ! no error, goto 2f 11500Sstevel@tonic-gate nop 11510Sstevel@tonic-gate stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg UE error bit 11520Sstevel@tonic-gate membar #Sync ! membar sync required 11530Sstevel@tonic-gate2: 11540Sstevel@tonic-gate stxa %g3, [%g0]ASI_AFSR ! clear all the sticky bits 11550Sstevel@tonic-gate membar #Sync ! membar sync required 11560Sstevel@tonic-gate 1157*6553Sjimand RESET_USER_RTT_REGS(%g4, %g5, async_err_resetskip) 1158*6553Sjimandasync_err_resetskip: 11591270Sbs21162 11600Sstevel@tonic-gate set cpu_async_error, %g1 ! put cpu_async_error in g1 11610Sstevel@tonic-gate sethi %hi(sys_trap), %g5 11620Sstevel@tonic-gate jmp %g5 + %lo(sys_trap) ! goto sys_trap 11630Sstevel@tonic-gate or %g0, PIL_15, %g4 ! run at pil 15 11640Sstevel@tonic-gate SET_SIZE(async_err) 11650Sstevel@tonic-gate 11660Sstevel@tonic-gate ENTRY_NP(dis_err_panic1) 11670Sstevel@tonic-gate stxa %g0, [%g0]ASI_ESTATE_ERR ! disable all error traps 11680Sstevel@tonic-gate membar #Sync 11690Sstevel@tonic-gate ! 
save destination routine is in g1
	ldxa	[%g0]ASI_AFAR, %g2	! read afar
	ldxa	[%g0]ASI_AFSR, %g3	! read afsr
	set	P_DER_H, %g4		! put P_DER_H in g4
	ldxa	[%g4]ASI_SDB_INTR_R, %g5 ! read sdb upper half into g5
	sllx	%g5, 33, %g5		! shift upper bits to <42:33>
	or	%g3, %g5, %g3		! or with afsr bits
	set	P_DER_L, %g4		! put P_DER_L in g4
	ldxa	[%g4]ASI_SDB_INTR_R, %g5 ! read sdb lower half into g5
	sllx	%g5, 43, %g5		! shift upper bits to <52:43>
	or	%g3, %g5, %g3		! or with afsr bits

	RESET_USER_RTT_REGS(%g4, %g5, dis_err_panic1_resetskip)
dis_err_panic1_resetskip:

	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)	! goto sys_trap
	sub	%g0, 1, %g4
	SET_SIZE(dis_err_panic1)

	ENTRY(clr_datapath)
	set	P_DER_H, %o4		! put P_DER_H in o4
	ldxa	[%o4]ASI_SDB_INTR_R, %o5 ! read sdb upper half into o5
	or	%g0, 0x3, %o2		! put 0x3 in o2
	sllx	%o2, 8, %o2		! shift o2 to <9:8> sdb
	andcc	%o5, %o2, %o1		! check for UE,CE in upper half
	bz,a	1f			! no error, goto 1f
	nop
	stxa	%o1, [%o4]ASI_SDB_INTR_W ! clear sdb reg UE,CE error bits
	membar	#Sync			! membar sync required
1:
	set	P_DER_L, %o4		! put P_DER_L in o4
	ldxa	[%o4]ASI_SDB_INTR_R, %o5 ! read sdb lower half into o5
	andcc	%o5, %o2, %o1		! check for UE,CE in lower half
	bz,a	2f			! no error, goto 2f
	nop
	stxa	%o1, [%o4]ASI_SDB_INTR_W ! clear sdb reg UE,CE error bits
	membar	#Sync
2:
	retl
	nop
	SET_SIZE(clr_datapath)

	ENTRY(get_udb_errors)
	set	P_DER_H, %o3
	ldxa	[%o3]ASI_SDB_INTR_R, %o2
	stx	%o2, [%o0]
	set	P_DER_L, %o3
	ldxa	[%o3]ASI_SDB_INTR_R, %o2
	retl
	stx	%o2, [%o1]
	SET_SIZE(get_udb_errors)

#endif /* lint */

#if defined(lint)
/*
 * The itlb_rd_entry and dtlb_rd_entry functions return the tag portion of the
 * tte, the virtual address, and the ctxnum of the specified tlb entry. They
 * should only be used in places where you have no choice but to look at the
 * tlb itself.
 *
 * Note: These two routines are required by the Estar "cpr" loadable module.
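 *
 * A minimal usage sketch (illustrative only, not from the original
 * source -- dtlb_size would come from the cpunodes entry for the cpu
 * being examined):
 *
 *	tte_t tte;
 *	uint64_t va_tag;
 *	uint_t i;
 *
 *	for (i = 0; i < dtlb_size; i++) {
 *		dtlb_rd_entry(i, &tte, &va_tag);
 *		if (TTE_IS_VALID(&tte))
 *			cmn_err(CE_CONT, "entry %u: va_tag 0x%llx\n",
 *			    i, (u_longlong_t)va_tag);
 *	}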
12320Sstevel@tonic-gate */ 12330Sstevel@tonic-gate/*ARGSUSED*/ 12340Sstevel@tonic-gatevoid 12350Sstevel@tonic-gateitlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag) 12360Sstevel@tonic-gate{} 12370Sstevel@tonic-gate 12380Sstevel@tonic-gate/*ARGSUSED*/ 12390Sstevel@tonic-gatevoid 12400Sstevel@tonic-gatedtlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag) 12410Sstevel@tonic-gate{} 12420Sstevel@tonic-gate#else /* lint */ 12430Sstevel@tonic-gate/* 12440Sstevel@tonic-gate * NB - In Spitfire cpus, when reading a tte from the hardware, we 12450Sstevel@tonic-gate * need to clear [42-41] because the general definitions in pte.h 12460Sstevel@tonic-gate * define the PA to be [42-13] whereas Spitfire really uses [40-13]. 12470Sstevel@tonic-gate * When cloning these routines for other cpus the "andn" below is not 12480Sstevel@tonic-gate * necessary. 12490Sstevel@tonic-gate */ 12500Sstevel@tonic-gate ENTRY_NP(itlb_rd_entry) 12510Sstevel@tonic-gate sllx %o0, 3, %o0 12520Sstevel@tonic-gate#if defined(SF_ERRATA_32) 12530Sstevel@tonic-gate sethi %hi(FLUSH_ADDR), %g2 12540Sstevel@tonic-gate set MMU_PCONTEXT, %g1 12550Sstevel@tonic-gate stxa %g0, [%g1]ASI_DMMU ! KCONTEXT 12560Sstevel@tonic-gate flush %g2 12570Sstevel@tonic-gate#endif 12580Sstevel@tonic-gate ldxa [%o0]ASI_ITLB_ACCESS, %g1 12590Sstevel@tonic-gate set TTE_SPITFIRE_PFNHI_CLEAR, %g2 ! spitfire only 12600Sstevel@tonic-gate sllx %g2, TTE_SPITFIRE_PFNHI_SHIFT, %g2 ! see comment above 12610Sstevel@tonic-gate andn %g1, %g2, %g1 ! for details 12620Sstevel@tonic-gate stx %g1, [%o1] 12630Sstevel@tonic-gate ldxa [%o0]ASI_ITLB_TAGREAD, %g2 12640Sstevel@tonic-gate set TAGREAD_CTX_MASK, %o4 12650Sstevel@tonic-gate andn %g2, %o4, %o5 12660Sstevel@tonic-gate retl 12670Sstevel@tonic-gate stx %o5, [%o2] 12680Sstevel@tonic-gate SET_SIZE(itlb_rd_entry) 12690Sstevel@tonic-gate 12700Sstevel@tonic-gate ENTRY_NP(dtlb_rd_entry) 12710Sstevel@tonic-gate sllx %o0, 3, %o0 12720Sstevel@tonic-gate#if defined(SF_ERRATA_32) 12730Sstevel@tonic-gate sethi %hi(FLUSH_ADDR), %g2 12740Sstevel@tonic-gate set MMU_PCONTEXT, %g1 12750Sstevel@tonic-gate stxa %g0, [%g1]ASI_DMMU ! KCONTEXT 12760Sstevel@tonic-gate flush %g2 12770Sstevel@tonic-gate#endif 12780Sstevel@tonic-gate ldxa [%o0]ASI_DTLB_ACCESS, %g1 12790Sstevel@tonic-gate set TTE_SPITFIRE_PFNHI_CLEAR, %g2 ! spitfire only 12800Sstevel@tonic-gate sllx %g2, TTE_SPITFIRE_PFNHI_SHIFT, %g2 ! see comment above 12810Sstevel@tonic-gate andn %g1, %g2, %g1 ! itlb_rd_entry 12820Sstevel@tonic-gate stx %g1, [%o1] 12830Sstevel@tonic-gate ldxa [%o0]ASI_DTLB_TAGREAD, %g2 12840Sstevel@tonic-gate set TAGREAD_CTX_MASK, %o4 12850Sstevel@tonic-gate andn %g2, %o4, %o5 12860Sstevel@tonic-gate retl 12870Sstevel@tonic-gate stx %o5, [%o2] 12880Sstevel@tonic-gate SET_SIZE(dtlb_rd_entry) 12890Sstevel@tonic-gate#endif /* lint */ 12900Sstevel@tonic-gate 12910Sstevel@tonic-gate#if defined(lint) 12920Sstevel@tonic-gate 12930Sstevel@tonic-gate/* 12940Sstevel@tonic-gate * routines to get and set the LSU register 12950Sstevel@tonic-gate */ 12960Sstevel@tonic-gateuint64_t 12970Sstevel@tonic-gateget_lsu(void) 12980Sstevel@tonic-gate{ 12990Sstevel@tonic-gate return ((uint64_t)0); 13000Sstevel@tonic-gate} 13010Sstevel@tonic-gate 13020Sstevel@tonic-gate/*ARGSUSED*/ 13030Sstevel@tonic-gatevoid 13040Sstevel@tonic-gateset_lsu(uint64_t lsu) 13050Sstevel@tonic-gate{} 13060Sstevel@tonic-gate 13070Sstevel@tonic-gate#else /* lint */ 13080Sstevel@tonic-gate 13090Sstevel@tonic-gate ENTRY(set_lsu) 13100Sstevel@tonic-gate stxa %o0, [%g0]ASI_LSU ! 
#if defined(lint)

/*
 * routines to get and set the LSU register
 */
uint64_t
get_lsu(void)
{
	return ((uint64_t)0);
}

/*ARGSUSED*/
void
set_lsu(uint64_t lsu)
{}

#else /* lint */

	ENTRY(set_lsu)
	stxa	%o0, [%g0]ASI_LSU		! store to LSU
	retl
	membar	#Sync
	SET_SIZE(set_lsu)

	ENTRY(get_lsu)
	retl
	ldxa	[%g0]ASI_LSU, %o0		! load LSU
	SET_SIZE(get_lsu)

#endif /* lint */

#ifndef lint
	/*
	 * Clear the NPT (non-privileged trap) bit in the %tick
	 * registers. In an effort to make the change in the
	 * tick counter as consistent as possible, we disable
	 * all interrupts while we're changing the registers. We also
	 * ensure that the read and write instructions are in the same
	 * line in the instruction cache.
	 */
	ENTRY_NP(cpu_clearticknpt)
	rdpr	%pstate, %g1		/* save processor state */
	andn	%g1, PSTATE_IE, %g3	/* turn off */
	wrpr	%g0, %g3, %pstate	/*   interrupts */
	rdpr	%tick, %g2		/* get tick register */
	brgez,pn %g2, 1f		/* if NPT bit off, we're done */
	mov	1, %g3			/* create mask */
	sllx	%g3, 63, %g3		/*   for NPT bit */
	ba,a,pt	%xcc, 2f
	.align	64			/* Align to I$ boundary */
2:
	rdpr	%tick, %g2		/* get tick register */
	wrpr	%g3, %g2, %tick		/* write tick register, */
					/*   clearing NPT bit   */
#if defined(BB_ERRATA_1)
	rdpr	%tick, %g0		/* read (s)tick (BB_ERRATA_1) */
#endif
1:
	jmp	%g4 + 4
	wrpr	%g0, %g1, %pstate	/* restore processor state */
	SET_SIZE(cpu_clearticknpt)

	/*
	 * get_ecache_tag()
	 * Register Usage:
	 * %o0: In: 32-bit E$ index
	 *      Out: 64-bit E$ tag value
	 * %o1: In: address to store the 64-bit AFSR value after clearing
	 *          sticky bits
	 * %o2: In: address of cpu private afsr storage
	 */
	ENTRY(get_ecache_tag)
	or	%g0, 1, %o4
	sllx	%o4, 40, %o4		! set bit 40 for e$ tag access
	or	%o0, %o4, %o4		! %o4 = e$ addr for tag read
	rdpr	%pstate, %o5
	andn	%o5, PSTATE_IE | PSTATE_AM, %o0
	wrpr	%o0, %g0, %pstate	! clear IE, AM bits

	ldxa	[%g0]ASI_ESTATE_ERR, %g1
	stxa	%g0, [%g0]ASI_ESTATE_ERR	! Turn off Error enable
	membar	#Sync

	ldxa	[%g0]ASI_AFSR, %o0
	srlx	%o0, P_AFSR_CP_SHIFT, %o3
	btst	1, %o3
	bz	1f
	nop
	ldx	[%o2], %g4
	or	%g4, %o0, %g4		! aggregate AFSR in cpu private
	stx	%g4, [%o2]
1:
	stxa	%o0, [%g0]ASI_AFSR	! clear AFSR
	membar	#Sync

	ldxa	[%o4]ASI_EC_R, %g0
	ldxa	[%g0]ASI_EC_DIAG, %o0	! read tag from e$ tag reg

	ldxa	[%g0]ASI_AFSR, %o3
	srlx	%o3, P_AFSR_CP_SHIFT, %o4
	btst	1, %o4
	bz	2f
	stx	%o3, [%o1]		! AFSR after sticky clear
	ldx	[%o2], %g4
	or	%g4, %o3, %g4		! aggregate AFSR in cpu private
	stx	%g4, [%o2]
2:
	membar	#Sync

	stxa	%g1, [%g0]ASI_ESTATE_ERR	! Turn error enable back on
	membar	#Sync
	retl
	wrpr	%g0, %o5, %pstate
	SET_SIZE(get_ecache_tag)

	/*
	 * check_ecache_line()
	 * Register Usage:
	 * %o0: In: 32-bit E$ index
	 *      Out: 64-bit accumulated AFSR
	 * %o1: In: address of cpu private afsr storage
	 */
	ENTRY(check_ecache_line)
	or	%g0, 1, %o4
	sllx	%o4, 39, %o4		! set bit 39 for e$ data access
	or	%o0, %o4, %o4		! %o4 = e$ addr for data read

	rdpr	%pstate, %o5
	andn	%o5, PSTATE_IE | PSTATE_AM, %o0
	wrpr	%o0, %g0, %pstate	! clear IE, AM bits

	ldxa	[%g0]ASI_ESTATE_ERR, %g1
	stxa	%g0, [%g0]ASI_ESTATE_ERR	! Turn off Error enable
	membar	#Sync

	ldxa	[%g0]ASI_AFSR, %o0
	srlx	%o0, P_AFSR_CP_SHIFT, %o2
	btst	1, %o2
	bz	1f
	clr	%o2			! loop count
	ldx	[%o1], %o3
	or	%o3, %o0, %o3		! aggregate AFSR in cpu private
	stx	%o3, [%o1]
1:
	stxa	%o0, [%g0]ASI_AFSR	! clear AFSR
	membar	#Sync

2:
	ldxa	[%o4]ASI_EC_R, %g0	! Read the E$ data 8 bytes each
	add	%o2, 1, %o2
	cmp	%o2, 8
	bl,a	2b
	add	%o4, 8, %o4

	membar	#Sync
	ldxa	[%g0]ASI_AFSR, %o0	! read accumulated AFSR
	srlx	%o0, P_AFSR_CP_SHIFT, %o2
	btst	1, %o2
	bz	3f
	nop
	ldx	[%o1], %o3
	or	%o3, %o0, %o3		! aggregate AFSR in cpu private
	stx	%o3, [%o1]
3:
	stxa	%o0, [%g0]ASI_AFSR	! clear AFSR
	membar	#Sync
	stxa	%g1, [%g0]ASI_ESTATE_ERR	! Turn error enable back on
	membar	#Sync
	retl
	wrpr	%g0, %o5, %pstate
	SET_SIZE(check_ecache_line)
#endif /* lint */

#if defined(lint)
uint64_t
read_and_clear_afsr()
{
	return ((uint64_t)0);
}
#else /* lint */
	ENTRY(read_and_clear_afsr)
	ldxa	[%g0]ASI_AFSR, %o0
	retl
	stxa	%o0, [%g0]ASI_AFSR	! clear AFSR
	SET_SIZE(read_and_clear_afsr)
#endif /* lint */

#if defined(lint)
/* ARGSUSED */
void
scrubphys(uint64_t paddr, int ecache_size)
{
}

#else /* lint */

/*
 * scrubphys - Pass in the aligned physical memory address that you want
 * to scrub, along with the ecache size.
 *
 *	1) Displacement flush the E$ line corresponding to %addr.
 *	   The first ldxa guarantees that the %addr is no longer in
 *	   M, O, or E (goes to I, or to S if an instruction fetch also
 *	   happens).
 *	2) "Write" the data using a CAS %addr,%g0,%g0.
 *	   The casxa guarantees a transition from I to M or S to M.
 *	3) Displacement flush the E$ line corresponding to %addr.
 *	   The second ldxa pushes the M line out of the ecache, into the
 *	   writeback buffers, on the way to memory.
 *	4) The "membar #Sync" pushes the cache line out of the writeback
 *	   buffers onto the bus, on the way to dram finally.
 *
 * This is a modified version of the algorithm suggested by Gary Lauterbach.
 * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
 * as modified, but then we found out that for spitfire, if it misses in the
 * E$ it will probably install as an M, but if it hits in the E$, then it
 * will stay E, if the store doesn't happen. So the first displacement flush
 * should ensure that the CAS will miss in the E$.  Arrgh.
 */

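/*
 * In C terms the non-HUMMINGBIRD path below is approximately the sketch
 * shown here.  ldphys(), casphys() and membar_sync_all() are hypothetical
 * stand-ins for the ldxa/casxa ASI_MEM accesses and the membar #Sync; they
 * are not real kernel interfaces.
 *
 *	uint64_t alias;
 *
 *	alias = (paddr ^ ecache_size) & (2 * ecache_size - 1);
 *	ldphys(ecache_flushaddr + alias);	// 1) displacement flush
 *	casphys(paddr);				// 2) I->M (or S->M) transition
 *	ldphys(ecache_flushaddr + alias);	// 3) push the M line out
 *	membar_sync_all();			// 4) drain writeback buffers
 */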
	ENTRY(scrubphys)
	or	%o1, %g0, %o2	! put ecache size in %o2
#ifndef HUMMINGBIRD
	xor	%o0, %o2, %o1	! calculate alias address
	add	%o2, %o2, %o3	! 2 * ecachesize in case
				! addr == ecache_flushaddr
	sub	%o3, 1, %o3	! -1 == mask
	and	%o1, %o3, %o1	! and with xor'd address
	set	ecache_flushaddr, %o3
	ldx	[%o3], %o3

	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate	! clear IE, AM bits

	ldxa	[%o1 + %o3]ASI_MEM, %g0	! load ecache_flushaddr + alias
	casxa	[%o0]ASI_MEM, %g0, %g0
	ldxa	[%o1 + %o3]ASI_MEM, %g0	! load ecache_flushaddr + alias

#else /* HUMMINGBIRD */
	/*
	 * UltraSPARC-IIe processor supports both 4-way set associative
	 * and direct map E$. We need to reconfigure E$ to direct map
	 * mode for data load/store before displacement flush. Also, we
	 * need to flush all 4 sets of the E$ to ensure that the physaddr
	 * has been flushed. Keep the interrupts disabled while flushing
	 * E$ in this manner.
	 *
	 * For flushing a specific physical address, we start at the
	 * aliased address and load at set-size stride, wrapping around
	 * at 2*ecache-size boundary and skipping fault physical address.
	 * It takes 10 loads to guarantee that the physical address has
	 * been flushed.
	 *
	 * Usage:
	 *	%o0	physaddr
	 *	%o5	physaddr - ecache_flushaddr
	 *	%g1	UPA config (restored later)
	 *	%g2	E$ set size
	 *	%g3	E$ flush address range mask (i.e. 2 * E$ -1)
	 *	%g4	#loads to flush phys address
	 *	%g5	temp
	 */

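	/*
	 * The flush loop that follows can be pictured in C roughly as in
	 * the sketch below (illustrative only; ldphys() is a hypothetical
	 * stand-in for the ldxa ASI_MEM access):
	 *
	 *	uint64_t setsize = ecache_size / ecache_associativity;
	 *	uint64_t mask = 2 * ecache_size - 1;
	 *	uint64_t skip = paddr - ecache_flushaddr;
	 *	uint64_t off = (paddr ^ ecache_size) & mask;
	 *	int n;
	 *
	 *	for (n = HB_PHYS_FLUSH_CNT; n > 0; n--) {
	 *		ldphys(ecache_flushaddr + off);
	 *		do {
	 *			off = (off + setsize) & mask;
	 *		} while (off == skip);	// never reload paddr itself
	 *	}
	 */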
	sethi	%hi(ecache_associativity), %g5
	ld	[%g5 + %lo(ecache_associativity)], %g5
	udivx	%o2, %g5, %g2	! set size (i.e. ecache_size/#sets)
	xor	%o0, %o2, %o1	! calculate alias address
	add	%o2, %o2, %g3	! 2 * ecachesize in case
				! addr == ecache_flushaddr
	sub	%g3, 1, %g3	! 2 * ecachesize -1 == mask
	and	%o1, %g3, %o1	! and with xor'd address
	sethi	%hi(ecache_flushaddr), %o3
	ldx	[%o3 + %lo(ecache_flushaddr)], %o3

	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate	! clear IE, AM bits

	! Place E$ in direct map mode for data access
	or	%g0, 1, %g5
	sllx	%g5, HB_UPA_DMAP_DATA_BIT, %g5
	ldxa	[%g0]ASI_UPA_CONFIG, %g1 ! current UPA config (restored later)
	or	%g1, %g5, %g5
	membar	#Sync
	stxa	%g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access
	membar	#Sync

	! Displace cache line from each set of E$ starting at the
	! aliased address, at set-size stride, wrapping at 2*ecache_size
	! and skipping load from physaddr. We need 10 loads to flush the
	! physaddr from E$.
	mov	HB_PHYS_FLUSH_CNT-1, %g4 ! #loads to flush phys addr
	sub	%o0, %o3, %o5	! physaddr - ecache_flushaddr
	or	%o1, %g0, %g5	! starting aliased offset
2:
	ldxa	[%g5 + %o3]ASI_MEM, %g0	! load ecache_flushaddr + alias
1:
	add	%g5, %g2, %g5	! calculate offset in next set
	and	%g5, %g3, %g5	! force offset within aliased range
	cmp	%g5, %o5	! skip loads from physaddr
	be,pn	%ncc, 1b
	nop
	brgz,pt	%g4, 2b
	dec	%g4

	casxa	[%o0]ASI_MEM, %g0, %g0

	! Flush %o0 from ecache again.
	! Need single displacement flush at offset %o1 this time as
	! the E$ is already in direct map mode.
	ldxa	[%o1 + %o3]ASI_MEM, %g0	! load ecache_flushaddr + alias

	membar	#Sync
	stxa	%g1, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
	membar	#Sync
#endif /* HUMMINGBIRD */
	wrpr	%g0, %o4, %pstate	! restore earlier pstate register value

	retl
	membar	#Sync			! move the data out of the load buffer
	SET_SIZE(scrubphys)

#endif /* lint */

#if defined(lint)

/*
 * clearphys - Pass in the aligned physical memory address that you want
 * to push out, as a 64 byte block of zeros, from the ecache zero-filled.
 * Since this routine does not bypass the ecache, it is possible that
 * it could generate a UE error while trying to clear a bad line.
 * This routine clears and restores the error enable flag.
 * TBD - Hummingbird may need similar protection
 */
/* ARGSUSED */
void
clearphys(uint64_t paddr, int ecache_size, int ecache_linesize)
{
}

#else /* lint */

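/*
 * Ignoring the error-enable handling, the non-HUMMINGBIRD body of
 * clearphys() amounts to this C sketch.  stphys(), ldphys() and casphys()
 * are hypothetical stand-ins for the ASI_MEM stxa/ldxa/casxa accesses, not
 * real kernel interfaces.
 *
 *	uint64_t alias = (paddr ^ ecache_size) & (2 * ecache_size - 1);
 *	int off;
 *
 *	for (off = ecache_linesize - 8; off >= 0; off -= 8)
 *		stphys(paddr + off, 0);		// zero the ecache line
 *	ldphys(ecache_flushaddr + alias);	// displacement flush
 *	casphys(paddr);
 *	ldphys(ecache_flushaddr + alias);	// push the zeroed line out
 */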
	ENTRY(clearphys)
	or	%o2, %g0, %o3	! ecache linesize
	or	%o1, %g0, %o2	! ecache size
#ifndef HUMMINGBIRD
	or	%o3, %g0, %o4	! save ecache linesize
	xor	%o0, %o2, %o1	! calculate alias address
	add	%o2, %o2, %o3	! 2 * ecachesize
	sub	%o3, 1, %o3	! -1 == mask
	and	%o1, %o3, %o1	! and with xor'd address
	set	ecache_flushaddr, %o3
	ldx	[%o3], %o3
	or	%o4, %g0, %o2	! saved ecache linesize

	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate	! clear IE, AM bits

	ldxa	[%g0]ASI_ESTATE_ERR, %g1
	stxa	%g0, [%g0]ASI_ESTATE_ERR	! disable errors
	membar	#Sync

	! need to put zeros in the cache line before displacing it

	sub	%o2, 8, %o2	! get offset of last double word in ecache line
1:
	stxa	%g0, [%o0 + %o2]ASI_MEM	! put zeros in the ecache line
	sub	%o2, 8, %o2
	brgez,a,pt %o2, 1b
	nop
	ldxa	[%o1 + %o3]ASI_MEM, %g0	! load ecache_flushaddr + alias
	casxa	[%o0]ASI_MEM, %g0, %g0
	ldxa	[%o1 + %o3]ASI_MEM, %g0	! load ecache_flushaddr + alias

	stxa	%g1, [%g0]ASI_ESTATE_ERR	! restore error enable
	membar	#Sync

#else /* HUMMINGBIRD... */
	/*
	 * UltraSPARC-IIe processor supports both 4-way set associative
	 * and direct map E$. We need to reconfigure E$ to direct map
	 * mode for data load/store before displacement flush. Also, we
	 * need to flush all 4 sets of the E$ to ensure that the physaddr
	 * has been flushed. Keep the interrupts disabled while flushing
	 * E$ in this manner.
	 *
	 * For flushing a specific physical address, we start at the
	 * aliased address and load at set-size stride, wrapping around
	 * at 2*ecache-size boundary and skipping fault physical address.
	 * It takes 10 loads to guarantee that the physical address has
	 * been flushed.
	 *
	 * Usage:
	 *	%o0	physaddr
	 *	%o5	physaddr - ecache_flushaddr
	 *	%g1	UPA config (restored later)
	 *	%g2	E$ set size
	 *	%g3	E$ flush address range mask (i.e. 2 * E$ -1)
	 *	%g4	#loads to flush phys address
	 *	%g5	temp
	 */

	or	%o3, %g0, %o4	! save ecache linesize
	sethi	%hi(ecache_associativity), %g5
	ld	[%g5 + %lo(ecache_associativity)], %g5
	udivx	%o2, %g5, %g2	! set size (i.e. ecache_size/#sets)

	xor	%o0, %o2, %o1	! calculate alias address
	add	%o2, %o2, %g3	! 2 * ecachesize
	sub	%g3, 1, %g3	! 2 * ecachesize -1 == mask
	and	%o1, %g3, %o1	! and with xor'd address
	sethi	%hi(ecache_flushaddr), %o3
	ldx	[%o3 + %lo(ecache_flushaddr)], %o3
	or	%o4, %g0, %o2	! saved ecache linesize

	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate	! clear IE, AM bits

	! Place E$ in direct map mode for data access
	or	%g0, 1, %g5
	sllx	%g5, HB_UPA_DMAP_DATA_BIT, %g5
	ldxa	[%g0]ASI_UPA_CONFIG, %g1 ! current UPA config (restored later)
	or	%g1, %g5, %g5
	membar	#Sync
	stxa	%g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access
	membar	#Sync

	! need to put zeros in the cache line before displacing it

	sub	%o2, 8, %o2	! get offset of last double word in ecache line
1:
	stxa	%g0, [%o0 + %o2]ASI_MEM	! put zeros in the ecache line
	sub	%o2, 8, %o2
	brgez,a,pt %o2, 1b
	nop

	! Displace cache line from each set of E$ starting at the
	! aliased address, at set-size stride, wrapping at 2*ecache_size
	! and skipping load from physaddr. We need 10 loads to flush the
	! physaddr from E$.
	mov	HB_PHYS_FLUSH_CNT-1, %g4 ! #loads to flush phys addr
	sub	%o0, %o3, %o5	! physaddr - ecache_flushaddr
	or	%o1, %g0, %g5	! starting offset
2:
	ldxa	[%g5 + %o3]ASI_MEM, %g0	! load ecache_flushaddr + alias
3:
	add	%g5, %g2, %g5	! calculate offset in next set
	and	%g5, %g3, %g5	! force offset within aliased range
	cmp	%g5, %o5	! skip loads from physaddr
	be,pn	%ncc, 3b
	nop
	brgz,pt	%g4, 2b
	dec	%g4

	casxa	[%o0]ASI_MEM, %g0, %g0

	! Flush %o0 from ecache again.
	! Need single displacement flush at offset %o1 this time as
	! the E$ is already in direct map mode.
	ldxa	[%o1 + %o3]ASI_MEM, %g0	! load ecache_flushaddr + alias

	membar	#Sync
	stxa	%g1, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
	membar	#Sync
#endif /* HUMMINGBIRD... */

	retl
	wrpr	%g0, %o4, %pstate	! restore earlier pstate register value
	SET_SIZE(clearphys)

#endif /* lint */

#if defined(lint)
/* ARGSUSED */
void
flushecacheline(uint64_t paddr, int ecache_size)
{
}

#else /* lint */
/*
 * flushecacheline - This is a simpler version of scrubphys
 * which simply does a displacement flush of the line in
 * question. This routine is mainly used in handling async
 * errors where we want to get rid of a bad line in ecache.
 * Note that if the line is modified and it has suffered
 * data corruption - we are guaranteed that the hw will write
 * a UE back to mark the page poisoned.
 */
	ENTRY(flushecacheline)
	or	%o1, %g0, %o2	! put ecache size in %o2
#ifndef HUMMINGBIRD
	xor	%o0, %o2, %o1	! calculate alias address
	add	%o2, %o2, %o3	! 2 * ecachesize in case
				! addr == ecache_flushaddr
	sub	%o3, 1, %o3	! -1 == mask
	and	%o1, %o3, %o1	! and with xor'd address
	set	ecache_flushaddr, %o3
	ldx	[%o3], %o3

	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate	! clear IE, AM bits

	ldxa	[%g0]ASI_ESTATE_ERR, %g1
	stxa	%g0, [%g0]ASI_ESTATE_ERR	! disable errors
	membar	#Sync

	ldxa	[%o1 + %o3]ASI_MEM, %g0	! load ecache_flushaddr + alias
	membar	#Sync
	stxa	%g1, [%g0]ASI_ESTATE_ERR	! restore error enable
	membar	#Sync
#else /* HUMMINGBIRD */
	/*
	 * UltraSPARC-IIe processor supports both 4-way set associative
	 * and direct map E$. We need to reconfigure E$ to direct map
	 * mode for data load/store before displacement flush. Also, we
	 * need to flush all 4 sets of the E$ to ensure that the physaddr
	 * has been flushed. Keep the interrupts disabled while flushing
	 * E$ in this manner.
	 *
	 * For flushing a specific physical address, we start at the
	 * aliased address and load at set-size stride, wrapping around
	 * at 2*ecache-size boundary and skipping fault physical address.
	 * It takes 10 loads to guarantee that the physical address has
	 * been flushed.
	 *
	 * Usage:
	 *	%o0	physaddr
	 *	%o5	physaddr - ecache_flushaddr
	 *	%g1	error enable register
	 *	%g2	E$ set size
	 *	%g3	E$ flush address range mask (i.e. 2 * E$ -1)
	 *	%g4	UPA config (restored later)
	 *	%g5	temp
	 */

	sethi	%hi(ecache_associativity), %g5
	ld	[%g5 + %lo(ecache_associativity)], %g5
	udivx	%o2, %g5, %g2	! set size (i.e. ecache_size/#sets)
	xor	%o0, %o2, %o1	! calculate alias address
	add	%o2, %o2, %g3	! 2 * ecachesize in case
				! addr == ecache_flushaddr
	sub	%g3, 1, %g3	! 2 * ecachesize -1 == mask
	and	%o1, %g3, %o1	! and with xor'd address
	sethi	%hi(ecache_flushaddr), %o3
	ldx	[%o3 + %lo(ecache_flushaddr)], %o3

	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate	! clear IE, AM bits

	! Place E$ in direct map mode for data access
	or	%g0, 1, %g5
	sllx	%g5, HB_UPA_DMAP_DATA_BIT, %g5
	ldxa	[%g0]ASI_UPA_CONFIG, %g4 ! current UPA config (restored later)
	or	%g4, %g5, %g5
	membar	#Sync
	stxa	%g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access
	membar	#Sync

	ldxa	[%g0]ASI_ESTATE_ERR, %g1
	stxa	%g0, [%g0]ASI_ESTATE_ERR	! disable errors
	membar	#Sync

	! Displace cache line from each set of E$ starting at the
	! aliased address, at set-size stride, wrapping at 2*ecache_size
	! and skipping load from physaddr. We need 10 loads to flush the
	! physaddr from E$.
	mov	HB_PHYS_FLUSH_CNT-1, %g5 ! #loads to flush physaddr
	sub	%o0, %o3, %o5	! physaddr - ecache_flushaddr
2:
	ldxa	[%o1 + %o3]ASI_MEM, %g0	! load ecache_flushaddr + alias
3:
	add	%o1, %g2, %o1	! calculate offset in next set
	and	%o1, %g3, %o1	! force offset within aliased range
	cmp	%o1, %o5	! skip loads from physaddr
	be,pn	%ncc, 3b
	nop
	brgz,pt	%g5, 2b
	dec	%g5

	membar	#Sync
	stxa	%g1, [%g0]ASI_ESTATE_ERR	! restore error enable
	membar	#Sync

	stxa	%g4, [%g0]ASI_UPA_CONFIG	! restore UPA config (DM bits)
	membar	#Sync
#endif /* HUMMINGBIRD */
	retl
	wrpr	%g0, %o4, %pstate
	SET_SIZE(flushecacheline)

#endif /* lint */

#if defined(lint)
/* ARGSUSED */
void
ecache_scrubreq_tl1(uint64_t inum, uint64_t dummy)
{
}

#else /* lint */
/*
 * ecache_scrubreq_tl1 is the crosstrap handler called at ecache_calls_a_sec Hz
 * from the clock CPU.  It atomically increments the outstanding request
 * counter and, if there was not already an outstanding request,
 * branches to setsoftint_tl1 to enqueue an intr_vec for the given inum.
 */

	! Register usage:
	!
	! Arguments:
	! %g1 - inum
	!
	! Internal:
	! %g2, %g3, %g5 - scratch
	! %g4 - ptr. to spitfire_scrub_misc ec_scrub_outstanding.
	! %g6 - setsoftint_tl1 address
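	/*
	 * Ignoring the TL1 and register constraints, the handler below
	 * amounts to the following hypothetical C rendering (the lookup of
	 * this CPU's ec_scrub_outstanding counter is abbreviated; only
	 * setsoftint_tl1 is a real routine):
	 *
	 *	static void
	 *	ecache_scrubreq_sketch(uint64_t inum, uint32_t *outstanding)
	 *	{
	 *		// no atomics needed: we are at TL1 on this CPU
	 *		if ((*outstanding)++ == 0)
	 *			setsoftint_tl1(inum, 0);
	 *		// otherwise a softint is already pending; just return
	 *	}
	 */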

	ENTRY_NP(ecache_scrubreq_tl1)
	set	SFPR_SCRUB_MISC + EC_SCRUB_OUTSTANDING, %g2
	GET_CPU_PRIVATE_PTR(%g2, %g4, %g5, 1f);
	ld	[%g4], %g2	! cpu's ec_scrub_outstanding.
	set	setsoftint_tl1, %g6
	!
	! no need to use atomic instructions for the following
	! increment - we're at tl1
	!
	add	%g2, 0x1, %g3
	brnz,pn	%g2, 1f		! no need to enqueue more intr_vec
	st	%g3, [%g4]	! delay - store incremented counter
	jmp	%g6		! setsoftint_tl1(%g1) - queue intr_vec
	nop
	! not reached
1:
	retry
	SET_SIZE(ecache_scrubreq_tl1)

#endif /* lint */

#if defined(lint)
/*ARGSUSED*/
void
write_ec_tag_parity(uint32_t id)
{}
#else /* lint */

	/*
	 * write_ec_tag_parity() zeroes the ecache tag,
	 * marks the state as invalid and writes good parity to the tag.
	 * Input %o0 = 32 bit E$ index
	 */
	ENTRY(write_ec_tag_parity)
	or	%g0, 1, %o4
	sllx	%o4, 39, %o4	! set bit 40 for e$ tag access
	or	%o0, %o4, %o4	! %o4 = ecache addr for tag write

	rdpr	%pstate, %o5
	andn	%o5, PSTATE_IE | PSTATE_AM, %o1
	wrpr	%o1, %g0, %pstate	! clear IE, AM bits

	ldxa	[%g0]ASI_ESTATE_ERR, %g1
	stxa	%g0, [%g0]ASI_ESTATE_ERR	! Turn off Error enable
	membar	#Sync

	ba	1f
	nop
	/*
	 * Align on the ecache boundary in order to force
	 * critical code section onto the same ecache line.
	 */
	.align	64

1:
	set	S_EC_PARITY, %o3	! clear tag, state invalid
	sllx	%o3, S_ECPAR_SHIFT, %o3	! and with good tag parity
	stxa	%o3, [%g0]ASI_EC_DIAG	! update with the above info
	stxa	%g0, [%o4]ASI_EC_W
	membar	#Sync

	stxa	%g1, [%g0]ASI_ESTATE_ERR	! Turn error enable back on
	membar	#Sync
	retl
	wrpr	%g0, %o5, %pstate
	SET_SIZE(write_ec_tag_parity)

#endif /* lint */

#if defined(lint)
/*ARGSUSED*/
void
write_hb_ec_tag_parity(uint32_t id)
{}
#else /* lint */

	/*
	 * write_hb_ec_tag_parity() zeroes the ecache tag,
	 * marks the state as invalid and writes good parity to the tag.
	 * Input %o0 = 32 bit E$ index
	 */
	ENTRY(write_hb_ec_tag_parity)
	or	%g0, 1, %o4
	sllx	%o4, 39, %o4	! set bit 40 for e$ tag access
	or	%o0, %o4, %o4	! %o4 = ecache addr for tag write

	rdpr	%pstate, %o5
	andn	%o5, PSTATE_IE | PSTATE_AM, %o1
	wrpr	%o1, %g0, %pstate	! clear IE, AM bits

	ldxa	[%g0]ASI_ESTATE_ERR, %g1
	stxa	%g0, [%g0]ASI_ESTATE_ERR	! Turn off Error enable
	membar	#Sync

	ba	1f
	nop
	/*
	 * Align on the ecache boundary in order to force
	 * critical code section onto the same ecache line.
	 */
	.align	64
1:
#ifdef HUMMINGBIRD
	set	HB_EC_PARITY, %o3	! clear tag, state invalid
	sllx	%o3, HB_ECPAR_SHIFT, %o3 ! and with good tag parity
#else /* !HUMMINGBIRD */
	set	SB_EC_PARITY, %o3	! clear tag, state invalid
	sllx	%o3, SB_ECPAR_SHIFT, %o3 ! and with good tag parity
#endif /* !HUMMINGBIRD */

	stxa	%o3, [%g0]ASI_EC_DIAG	! update with the above info
	stxa	%g0, [%o4]ASI_EC_W
	membar	#Sync

	stxa	%g1, [%g0]ASI_ESTATE_ERR	! Turn error enable back on
	membar	#Sync
	retl
	wrpr	%g0, %o5, %pstate
	SET_SIZE(write_hb_ec_tag_parity)

#endif /* lint */

#define	VIS_BLOCKSIZE	64

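/*
 * dtrace_blksuword32() below stores one VIS_BLOCKSIZE (64-byte) block of
 * user data with a block-commit store under lofault protection.  A caller
 * would use it roughly as in this hypothetical C sketch (the buffer and
 * target address are illustrative; addr must be VIS_BLOCKSIZE aligned):
 *
 *	uint32_t buf[VIS_BLOCKSIZE / sizeof (uint32_t)];	// 16 words
 *
 *	if (dtrace_blksuword32(addr, buf, 1) == 0) {
 *		// the block was stored successfully
 *	}
 *	// on a fault with tryagain set, dtrace_blksuword32_err() handles
 *	// the watchpoint case; otherwise -1 is returned
 */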
#if defined(lint)

/*ARGSUSED*/
int
dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
{ return (0); }

#else

	ENTRY(dtrace_blksuword32)
	save	%sp, -SA(MINFRAME + 4), %sp

	rdpr	%pstate, %l1
	andn	%l1, PSTATE_IE, %l2		! disable interrupts to
	wrpr	%g0, %l2, %pstate		! protect our FPU diddling

	rd	%fprs, %l0
	andcc	%l0, FPRS_FEF, %g0
	bz,a,pt	%xcc, 1f			! if the fpu is disabled
	wr	%g0, FPRS_FEF, %fprs		! ... enable the fpu

	st	%f0, [%fp + STACK_BIAS - 4]	! save %f0 to the stack
1:
	set	0f, %l5
	/*
	 * We're about to write a block full of either total garbage
	 * (not kernel data, don't worry) or user floating-point data
	 * (so it only _looks_ like garbage).
	 */
	ld	[%i1], %f0			! modify the block
	membar	#Sync
	stn	%l5, [THREAD_REG + T_LOFAULT]	! set up the lofault handler
	stda	%d0, [%i0]ASI_BLK_COMMIT_S	! store the modified block
	membar	#Sync
	stn	%g0, [THREAD_REG + T_LOFAULT]	! remove the lofault handler

	bz,a,pt	%xcc, 1f
	wr	%g0, %l0, %fprs			! restore %fprs

	ld	[%fp + STACK_BIAS - 4], %f0	! restore %f0
1:

	wrpr	%g0, %l1, %pstate		! restore interrupts

	ret
	restore	%g0, %g0, %o0

0:
	membar	#Sync
	stn	%g0, [THREAD_REG + T_LOFAULT]	! remove the lofault handler

	bz,a,pt	%xcc, 1f
	wr	%g0, %l0, %fprs			! restore %fprs

	ld	[%fp + STACK_BIAS - 4], %f0	! restore %f0
1:

	wrpr	%g0, %l1, %pstate		! restore interrupts

	/*
	 * If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
	 * which deals with watchpoints. Otherwise, just return -1.
	 */
	brnz,pt	%i2, 1f
	nop
	ret
	restore	%g0, -1, %o0
1:
	call	dtrace_blksuword32_err
	restore

	SET_SIZE(dtrace_blksuword32)

#endif /* lint */