xref: /onnv-gate/usr/src/uts/sun4u/vm/mach_sfmmu.c (revision 2241:592fbc504a44)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51772Sjl139090  * Common Development and Distribution License (the "License").
61772Sjl139090  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
221772Sjl139090  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
270Sstevel@tonic-gate 
280Sstevel@tonic-gate #include <sys/types.h>
290Sstevel@tonic-gate #include <vm/hat.h>
300Sstevel@tonic-gate #include <vm/hat_sfmmu.h>
310Sstevel@tonic-gate #include <vm/page.h>
320Sstevel@tonic-gate #include <sys/pte.h>
330Sstevel@tonic-gate #include <sys/systm.h>
340Sstevel@tonic-gate #include <sys/mman.h>
350Sstevel@tonic-gate #include <sys/sysmacros.h>
360Sstevel@tonic-gate #include <sys/machparam.h>
370Sstevel@tonic-gate #include <sys/vtrace.h>
380Sstevel@tonic-gate #include <sys/kmem.h>
390Sstevel@tonic-gate #include <sys/mmu.h>
400Sstevel@tonic-gate #include <sys/cmn_err.h>
410Sstevel@tonic-gate #include <sys/cpu.h>
420Sstevel@tonic-gate #include <sys/cpuvar.h>
430Sstevel@tonic-gate #include <sys/debug.h>
440Sstevel@tonic-gate #include <sys/lgrp.h>
450Sstevel@tonic-gate #include <sys/archsystm.h>
460Sstevel@tonic-gate #include <sys/machsystm.h>
470Sstevel@tonic-gate #include <sys/vmsystm.h>
480Sstevel@tonic-gate #include <sys/bitmap.h>
490Sstevel@tonic-gate #include <vm/rm.h>
500Sstevel@tonic-gate #include <sys/t_lock.h>
510Sstevel@tonic-gate #include <sys/vm_machparam.h>
520Sstevel@tonic-gate #include <sys/promif.h>
530Sstevel@tonic-gate #include <sys/prom_isa.h>
540Sstevel@tonic-gate #include <sys/prom_plat.h>
550Sstevel@tonic-gate #include <sys/prom_debug.h>
560Sstevel@tonic-gate #include <sys/privregs.h>
570Sstevel@tonic-gate #include <sys/bootconf.h>
580Sstevel@tonic-gate #include <sys/memlist.h>
590Sstevel@tonic-gate #include <sys/memlist_plat.h>
600Sstevel@tonic-gate #include <sys/cpu_module.h>
610Sstevel@tonic-gate #include <sys/reboot.h>
620Sstevel@tonic-gate #include <sys/kdi.h>
630Sstevel@tonic-gate #include <sys/fpu/fpusystm.h>
640Sstevel@tonic-gate 
/*
 * External routines and data structures
 */
extern void	sfmmu_cache_flushcolor(int, pfn_t);

/*
 * Static routines
 */
static void	sfmmu_set_tlb(void);

/*
 * Global Data:
 */
/* 4M-aligned base VAs of the remapped kernel text and data segments */
caddr_t	textva, datava;
tte_t	ktext_tte, kdata_tte;		/* ttes for kernel text and data */

/* nonzero enables mapping the kernel TSB with locked 4M ttes */
int	enable_bigktsb = 1;

/* 4M ttes covering the big kernel TSB; filled in by sfmmu_remap_kernel() */
tte_t bigktsb_ttes[MAX_BIGKTSB_TTES];
int bigktsb_nttes = 0;		/* number of valid entries in bigktsb_ttes */


/*
 * Controls the logic which enables the use of the
 * QUAD_LDD_PHYS ASI for TSB accesses.
 */
int	ktsb_phys = 0;
920Sstevel@tonic-gate 
930Sstevel@tonic-gate 
940Sstevel@tonic-gate 
/*
 * This routine remaps the kernel using large ttes.
 * All entries except locked ones will be removed from the tlb.
 * It assumes that both the text and data segments each reside in a separate
 * 4mb virtual and physical contiguous memory chunk.  This routine
 * is only executed by the first cpu.  The remaining cpus execute
 * sfmmu_mp_startup() instead.
 * XXX It assumes that the start of the text segment is KERNELBASE.  It should
 * actually be based on start.
 */
void
sfmmu_remap_kernel(void)
{
	pfn_t	pfn;
	uint_t	attr;
	int	flags;

	extern char end[];		/* first address past kernel data */
	extern struct as kas;		/* kernel address space */

	/* 4M-align the text VA and locate the physical page backing it. */
	textva = (caddr_t)(KERNELBASE & MMU_PAGEMASK4M);
	pfn = va_to_pfn(textva);
	if (pfn == PFN_INVALID)
		prom_panic("can't find kernel text pfn");
	pfn &= TTE_PFNMASK(TTE4M);

	attr = PROC_TEXT | HAT_NOSYNC;
	flags = HAT_LOAD_LOCK | SFMMU_NO_TSBLOAD;
	sfmmu_memtte(&ktext_tte, pfn, attr, TTE4M);
	/*
	 * We set the lock bit in the tte to lock the translation in
	 * the tlb. Note we cannot lock Panther 32M/256M pages into the tlb.
	 * This note is here to make sure that no one tries to remap the
	 * kernel using 32M or 256M tte's on Panther cpus.
	 */
	TTE_SET_LOCKED(&ktext_tte);
	sfmmu_tteload(kas.a_hat, &ktext_tte, textva, NULL, flags);

	/* Same procedure for kernel data: 4M-align downward from 'end'. */
	datava = (caddr_t)((uintptr_t)end & MMU_PAGEMASK4M);
	pfn = va_to_pfn(datava);
	if (pfn == PFN_INVALID)
		prom_panic("can't find kernel data pfn");
	pfn &= TTE_PFNMASK(TTE4M);

	attr = PROC_DATA | HAT_NOSYNC;
	sfmmu_memtte(&kdata_tte, pfn, attr, TTE4M);
	/*
	 * We set the lock bit in the tte to lock the translation in
	 * the tlb.  We also set the mod bit to avoid taking dirty bit
	 * traps on kernel data.
	 */
	TTE_SET_LOCKED(&kdata_tte);
	TTE_SET_LOFLAGS(&kdata_tte, 0, TTE_HWWR_INT);
	sfmmu_tteload(kas.a_hat, &kdata_tte, datava,
	    (struct page *)NULL, flags);

	/*
	 * create bigktsb ttes if necessary.
	 */
	if (enable_bigktsb) {
		int i = 0;
		caddr_t va = ktsb_base;
		size_t tsbsz = ktsb_sz;
		tte_t tte;

		/*
		 * The TSB must sit above kernel data, be at least 4M,
		 * self-aligned (i.e. a power-of-two size), and naturally
		 * aligned on its own size.
		 */
		ASSERT(va >= datava + MMU_PAGESIZE4M);
		ASSERT(tsbsz >= MMU_PAGESIZE4M);
		ASSERT(IS_P2ALIGNED(tsbsz, tsbsz));
		ASSERT(IS_P2ALIGNED(va, tsbsz));
		attr = PROC_DATA | HAT_NOSYNC;
		/* Build and load one 4M tte per 4M chunk of the TSB. */
		while (tsbsz != 0) {
			ASSERT(i < MAX_BIGKTSB_TTES);
			pfn = va_to_pfn(va);
			ASSERT(pfn != PFN_INVALID);
			ASSERT((pfn & ~TTE_PFNMASK(TTE4M)) == 0);
			sfmmu_memtte(&tte, pfn, attr, TTE4M);
			ASSERT(TTE_IS_MOD(&tte));
			/*
			 * No need to lock if we use physical addresses.
			 * Since we invalidate the kernel TSB using virtual
			 * addresses, it's an optimization to load them now
			 * so that we won't have to load them later.
			 */
			if (!ktsb_phys) {
				TTE_SET_LOCKED(&tte);
			}
			sfmmu_tteload(kas.a_hat, &tte, va, NULL, flags);
			/* saved for per-cpu loading in sfmmu_set_tlb() */
			bigktsb_ttes[i] = tte;
			va += MMU_PAGESIZE4M;
			tsbsz -= MMU_PAGESIZE4M;
			i++;
		}
		bigktsb_nttes = i;
	}

	/* Finally, load the locked entries into this (boot) cpu's TLB. */
	sfmmu_set_tlb();
}
1920Sstevel@tonic-gate 
1931772Sjl139090 #ifndef UTSB_PHYS
1940Sstevel@tonic-gate /*
1950Sstevel@tonic-gate  * Unmap all references to user TSBs from the TLB of the current processor.
1960Sstevel@tonic-gate  */
1970Sstevel@tonic-gate static void
sfmmu_clear_user_tsbs()1980Sstevel@tonic-gate sfmmu_clear_user_tsbs()
1990Sstevel@tonic-gate {
2000Sstevel@tonic-gate 	caddr_t va;
2010Sstevel@tonic-gate 	caddr_t end_va;
2020Sstevel@tonic-gate 
2030Sstevel@tonic-gate 	/* Demap all pages in the VA range for the first user TSB */
2040Sstevel@tonic-gate 	va = utsb_vabase;
2050Sstevel@tonic-gate 	end_va = va + tsb_slab_size;
2060Sstevel@tonic-gate 	while (va < end_va) {
207*2241Shuah 		vtag_flushpage(va, (uint64_t)ksfmmup);
2080Sstevel@tonic-gate 		va += MMU_PAGESIZE;
2090Sstevel@tonic-gate 	}
2100Sstevel@tonic-gate 
2110Sstevel@tonic-gate 	/* Demap all pages in the VA range for the second user TSB */
2120Sstevel@tonic-gate 	va = utsb4m_vabase;
2130Sstevel@tonic-gate 	end_va = va + tsb_slab_size;
2140Sstevel@tonic-gate 	while (va < end_va) {
215*2241Shuah 		vtag_flushpage(va, (uint64_t)ksfmmup);
2160Sstevel@tonic-gate 		va += MMU_PAGESIZE;
2170Sstevel@tonic-gate 	}
2180Sstevel@tonic-gate }
2191772Sjl139090 #endif /* UTSB_PHYS */
2200Sstevel@tonic-gate 
/*
 * Setup the kernel's locked tte's.
 * Loads the locked text/data (and, if enabled, big-kernel-TSB) ttes into
 * the highest-numbered TLB entries of the current cpu via the prom, and
 * records which DTLB slots remain reserved.
 */
void
sfmmu_set_tlb(void)
{
	uint_t index;
	struct cpu_node *cpunode;

	cpunode = &cpunodes[getprocessorid()];
	index = cpunode->itlb_size;

	/*
	 * NOTE: the prom will do an explicit unmap of the VAs from the TLBs
	 * in the following functions before loading the new value into the
	 * TLB.  Thus if there was an entry already in the TLB at a different
	 * location, it will get unmapped before we load the entry at the
	 * specified location.
	 */
	/* Kernel text goes into the last ITLB entry. */
	(void) prom_itlb_load(index - 1, *(uint64_t *)&ktext_tte, textva);
	index = cpunode->dtlb_size;
	/* Kernel data and text occupy the last two DTLB entries. */
	(void) prom_dtlb_load(index - 1, *(uint64_t *)&kdata_tte, datava);
	(void) prom_dtlb_load(index - 2, *(uint64_t *)&ktext_tte, textva);
	index -= 3;	/* next free DTLB slot, counting down from the top */

#ifndef UTSB_PHYS
	/* Reserve two DTLB slots for the two user TSB mappings. */
	utsb_dtlb_ttenum = index--;
	utsb4m_dtlb_ttenum = index--;
	sfmmu_clear_user_tsbs();
#endif /* UTSB_PHYS */

	/*
	 * When the kernel TSB is accessed by virtual address, lock its
	 * 4M ttes (built by sfmmu_remap_kernel()) into the DTLB as well.
	 */
	if (!ktsb_phys && enable_bigktsb) {
		int i;
		caddr_t va = ktsb_base;
		uint64_t tte;

		ASSERT(bigktsb_nttes <= MAX_BIGKTSB_TTES);
		for (i = 0; i < bigktsb_nttes; i++) {
			tte = *(uint64_t *)&bigktsb_ttes[i];
			(void) prom_dtlb_load(index, tte, va);
			va += MMU_PAGESIZE4M;
			index--;
		}
	}

	/* Lowest DTLB entry number consumed by the reservations above. */
	dtlb_resv_ttenum = index + 1;
}
2680Sstevel@tonic-gate 
/*
 * This routine is executed by all other cpus except the first one
 * at initialization time.  It is responsible for taking over the
 * mmu from the prom.  We follow these steps.
 * Lock the kernel's ttes in the TLB
 * Initialize the tsb hardware registers
 * Take over the trap table
 * Flush the prom's locked entries from the TLB
 * NOTE: the order of these calls is the contract; do not reorder.
 */
void
sfmmu_mp_startup(void)
{
	/* Load the kernel's locked text/data (and TSB) ttes into this TLB. */
	sfmmu_set_tlb();
	/* Switch to the kernel's register window state. */
	setwstate(WSTATE_KERN);
	/* Hand the kernel trap table to the prom; MMU traps are ours now. */
	prom_set_traptable(&trap_table);
	/* Set up this cpu's TSB registers (see block comment above). */
	install_va_to_tte();
}
2860Sstevel@tonic-gate 
2870Sstevel@tonic-gate void
kdi_tlb_page_lock(caddr_t va,int do_dtlb)2880Sstevel@tonic-gate kdi_tlb_page_lock(caddr_t va, int do_dtlb)
2890Sstevel@tonic-gate {
2900Sstevel@tonic-gate 	tte_t tte;
2910Sstevel@tonic-gate 	pfn_t pfn = va_to_pfn(va);
2920Sstevel@tonic-gate 
2930Sstevel@tonic-gate 	tte.tte_inthi = TTE_VALID_INT | TTE_SZ_INT(TTE8K) | TTE_PFN_INTHI(pfn);
2940Sstevel@tonic-gate 	tte.tte_intlo = TTE_PFN_INTLO(pfn) | TTE_LCK_INT | TTE_CP_INT |
2950Sstevel@tonic-gate 	    TTE_PRIV_INT | TTE_HWWR_INT;
2960Sstevel@tonic-gate 
297*2241Shuah 	vtag_flushpage(va, (uint64_t)ksfmmup);
2980Sstevel@tonic-gate 
299*2241Shuah 	sfmmu_itlb_ld_kva(va, &tte);
3000Sstevel@tonic-gate 	if (do_dtlb)
301*2241Shuah 		sfmmu_dtlb_ld_kva(va, &tte);
3020Sstevel@tonic-gate }
3030Sstevel@tonic-gate 
/*ARGSUSED*/
void
kdi_tlb_page_unlock(caddr_t va, int do_dtlb)
{
	/*
	 * Demap va's translation from this cpu; a single flush presumably
	 * suffices for both I- and D-TLBs, which is why do_dtlb is unused
	 * here (hence ARGSUSED) — verify against vtag_flushpage().
	 */
	vtag_flushpage(va, (uint64_t)ksfmmup);
}
3100Sstevel@tonic-gate 
/* clear user TSB information (applicable to hardware TSB walkers) */
void
sfmmu_clear_utsbinfo()
{
	/* Intentionally empty: no hardware TSB walker state to clear here. */
}
3160Sstevel@tonic-gate 
/*
 * Set up TSB information for sfmmup (applicable to hardware TSB walkers).
 * Intentionally empty on this platform; kept so common code can call it.
 */
/*ARGSUSED*/
void
sfmmu_setup_tsbinfo(sfmmu_t *sfmmup)
{
}
3220Sstevel@tonic-gate 
3230Sstevel@tonic-gate /*
3240Sstevel@tonic-gate  * Invalidate a TSB.  If floating point is enabled we use
3250Sstevel@tonic-gate  * a fast block-store routine, otherwise we use the old method
3260Sstevel@tonic-gate  * of walking the TSB setting each tag to TSBTAG_INVALID.
3270Sstevel@tonic-gate  */
3280Sstevel@tonic-gate void
sfmmu_inv_tsb(caddr_t tsb_base,uint_t tsb_bytes)3290Sstevel@tonic-gate sfmmu_inv_tsb(caddr_t tsb_base, uint_t tsb_bytes)
3300Sstevel@tonic-gate {
3310Sstevel@tonic-gate 	extern void sfmmu_inv_tsb_fast(caddr_t, uint_t);
3320Sstevel@tonic-gate 	struct tsbe *tsbaddr;
3330Sstevel@tonic-gate 
3340Sstevel@tonic-gate 	/* CONSTCOND */
3350Sstevel@tonic-gate 	if (fpu_exists) {
3360Sstevel@tonic-gate 		sfmmu_inv_tsb_fast(tsb_base, tsb_bytes);
3370Sstevel@tonic-gate 		return;
3380Sstevel@tonic-gate 	}
3390Sstevel@tonic-gate 
3400Sstevel@tonic-gate 	for (tsbaddr = (struct tsbe *)tsb_base;
3410Sstevel@tonic-gate 	    (uintptr_t)tsbaddr < (uintptr_t)(tsb_base + tsb_bytes);
3420Sstevel@tonic-gate 	    tsbaddr++) {
3430Sstevel@tonic-gate 		tsbaddr->tte_tag.tag_inthi = TSBTAG_INVALID;
3440Sstevel@tonic-gate 	}
3450Sstevel@tonic-gate 
3460Sstevel@tonic-gate 	if (ktsb_phys && tsb_base == ktsb_base)
3470Sstevel@tonic-gate 		dcache_flushall();
3480Sstevel@tonic-gate }
3490Sstevel@tonic-gate 
3500Sstevel@tonic-gate /*
3510Sstevel@tonic-gate  * Completely flush the D-cache on all cpus.
3520Sstevel@tonic-gate  */
3530Sstevel@tonic-gate void
sfmmu_cache_flushall()3540Sstevel@tonic-gate sfmmu_cache_flushall()
3550Sstevel@tonic-gate {
3560Sstevel@tonic-gate 	int i;
3570Sstevel@tonic-gate 
3580Sstevel@tonic-gate 	for (i = 0; i < CACHE_NUM_COLOR; i++)
3590Sstevel@tonic-gate 		sfmmu_cache_flushcolor(i, 0);
3600Sstevel@tonic-gate }
361