/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/vmsystm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <vm/vm_dep.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kmem.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/cpu_module.h>
#include <sys/sysmacros.h>
#include <sys/panic.h>

/*
 * Note that 'Cheetah PRM' refers to:
 *	SPARC V9 JPS1 Implementation Supplement: Sun UltraSPARC-III
 */

/* Will be set !NULL for Cheetah+ and derivatives. */
extern uchar_t *ctx_pgsz_array;

/*
 * pan_disable_ism_large_pages and pan_disable_large_pages are the Panther-
 * specific versions of disable_ism_large_pages and disable_large_pages,
 * and feed back into those two hat variables at hat initialization time,
 * for Panther-only systems.
 *
 * chjag_disable_ism_large_pages is the Ch/Jaguar-specific version of
 * disable_ism_large_pages.  Ditto for chjag_disable_large_pages.
 */
static int panther_only = 0;

static int pan_disable_ism_large_pages = ((1 << TTE64K) |
    (1 << TTE512K) | (1 << TTE256M));
static int pan_disable_large_pages = (1 << TTE256M);
static int pan_disable_auto_large_pages = (1 << TTE4M) | (1 << TTE256M);

static int chjag_disable_ism_large_pages = ((1 << TTE64K) |
    (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
static int chjag_disable_large_pages = ((1 << TTE32M) | (1 << TTE256M));
static int chjag_disable_auto_large_pages = ((1 << TTE32M) | (1 << TTE256M));

/*
 * This function returns the USIII-IV mmu-specific values for the
 * hat's disable_large_pages and disable_ism_large_pages variables.
 * Currently the hat's disable_large_pages and disable_ism_large_pages
 * already contain the generic sparc 4-page-size info, and the return
 * values are or'd with those values.
 */
int
mmu_large_pages_disabled(uint_t flag)
{
    int pages_disable = 0;

    if (panther_only) {
        if (flag == HAT_LOAD) {
            pages_disable = pan_disable_large_pages;
        } else if (flag == HAT_LOAD_SHARE) {
            pages_disable = pan_disable_ism_large_pages;
        } else if (flag == HAT_LOAD_AUTOLPG) {
            pages_disable = pan_disable_auto_large_pages;
        }
    } else {
        if (flag == HAT_LOAD) {
            pages_disable = chjag_disable_large_pages;
        } else if (flag == HAT_LOAD_SHARE) {
            pages_disable = chjag_disable_ism_large_pages;
        } else if (flag == HAT_LOAD_AUTOLPG) {
            pages_disable = chjag_disable_auto_large_pages;
        }
    }
    return (pages_disable);
}
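
/*
 * Illustrative sketch, not part of the original source: the generic sfmmu
 * hat code is expected to fold the mask returned above into its own
 * disable variables at hat initialization time, roughly as follows (the
 * exact call sites and variable names live in the common hat code and may
 * differ):
 *
 *	disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
 *	disable_ism_large_pages |= mmu_large_pages_disabled(HAT_LOAD_SHARE);
 *
 * with HAT_LOAD_AUTOLPG handled the same way, so the generic sparc
 * restrictions and the cpu-specific ones accumulate into one bit mask
 * per load type.
 */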
#if defined(CPU_IMP_DUAL_PAGESIZE)
/*
 * If a platform is running with only Ch+ or Jaguar, and then someone DR's
 * in a Panther board, the Panther mmu will not like it if one of the already
 * running threads is context switched to the Panther and tries to program
 * a 512K or 4M page into the T512_1.  So make these platforms pay the price
 * and follow the Panther DTLB restrictions by default. :)
 * The mmu_init_mmu_page_sizes code below takes care of heterogeneous
 * platforms that don't support DR, like daktari.
 *
 * The effect of these restrictions is to limit the allowable values in
 * sfmmu_pgsz[0] and sfmmu_pgsz[1], since these hat variables are used in
 * mmu_set_ctx_page_sizes to set up the values in the ctx_pgsz_array that
 * are used at context switch time.  The value in sfmmu_pgsz[0] is used in
 * P_pgsz0 and sfmmu_pgsz[1] is used in P_pgsz1, as per Figure F-1-1
 * IMMU and DMMU Primary Context Register in the Panther Implementation
 * Supplement and Table 15-21 DMMU Primary Context Register in the
 * Cheetah+ Delta PRM.
 */
#ifdef MIXEDCPU_DR_SUPPORTED
int panther_dtlb_restrictions = 1;
#else
int panther_dtlb_restrictions = 0;
#endif /* MIXEDCPU_DR_SUPPORTED */

/*
 * init_mmu_page_sizes is set to one after the bootup-time initialization
 * via mmu_init_mmu_page_sizes, to indicate that mmu_page_sizes has a
 * valid value.
 */
int init_mmu_page_sizes = 0;

/*
 * mmu_init_large_pages is called with the desired ism_pagesize parameter,
 * for Panther-only systems.  It may be called from set_platform_defaults,
 * if some value other than 32M is desired, for Panther-only systems.
 * mmu_ism_pagesize is the tunable.  If it has a bad value, then only warn,
 * since it would be bad form to panic due to a user typo.
 *
 * The function re-initializes the pan_disable_ism_large_pages and
 * pan_disable_large_pages variables, which are closely related.
 * That is, if 32M is the desired [D]ISM page size, then 256M cannot be
 * allowed for non-ISM large-page usage, or a DTLB conflict will occur.
 * Please see the Panther PRM for additional DTLB technical info.
 */
void
mmu_init_large_pages(size_t ism_pagesize)
{
    if (ctx_pgsz_array == NULL) {   /* disable_dual_pgsz flag */
        pan_disable_ism_large_pages = ((1 << TTE64K) |
            (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
        pan_disable_large_pages = ((1 << TTE32M) | (1 << TTE256M));
        auto_lpg_maxszc = TTE4M;
        return;
    }

    switch (ism_pagesize) {
    case MMU_PAGESIZE4M:
        pan_disable_ism_large_pages = ((1 << TTE64K) |
            (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
        pan_disable_large_pages = (1 << TTE256M);
        pan_disable_auto_large_pages = (1 << TTE32M) | (1 << TTE256M);
        auto_lpg_maxszc = TTE4M;
        break;
    case MMU_PAGESIZE32M:
        pan_disable_ism_large_pages = ((1 << TTE64K) |
            (1 << TTE512K) | (1 << TTE256M));
        pan_disable_large_pages = (1 << TTE256M);
        pan_disable_auto_large_pages = (1 << TTE4M) | (1 << TTE256M);
        auto_lpg_maxszc = TTE32M;
        break;
    case MMU_PAGESIZE256M:
        pan_disable_ism_large_pages = ((1 << TTE64K) |
            (1 << TTE512K) | (1 << TTE32M));
        pan_disable_large_pages = (1 << TTE32M);
        pan_disable_auto_large_pages = (1 << TTE4M) | (1 << TTE32M);
        auto_lpg_maxszc = TTE256M;
        break;
    default:
        cmn_err(CE_WARN, "Unrecognized mmu_ism_pagesize value 0x%lx",
            ism_pagesize);
        break;
    }
}
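
/*
 * Illustrative sketch, not part of the original source: a Panther-only
 * platform that prefers 256M [D]ISM pages over the 32M default could do
 * the following from its set_platform_defaults() code (hypothetical
 * caller shown purely for illustration):
 *
 *	mmu_ism_pagesize = MMU_PAGESIZE256M;
 *	mmu_init_large_pages(mmu_ism_pagesize);
 *
 * which re-derives pan_disable_large_pages so that 32M pages are no longer
 * allowed for non-ISM use, avoiding the T512_1 conflict described above.
 */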
/*
 * Re-initialize mmu_page_sizes and friends, for Panther mmu support.
 * Called during very early bootup from check_cpus_set().
 * Can be called to verify that mmu_page_sizes are set up correctly.
 * Note that ncpus is not initialized at this point in the bootup sequence.
 */
int
mmu_init_mmu_page_sizes(int cinfo)
{
    int npanther = cinfo;

    if (!init_mmu_page_sizes) {
        if (npanther == ncpunode) {
            mmu_page_sizes = MMU_PAGE_SIZES;
            mmu_hashcnt = MAX_HASHCNT;
            mmu_ism_pagesize = MMU_PAGESIZE32M;
            mmu_exported_pagesize_mask = (1 << TTE8K) |
                (1 << TTE64K) | (1 << TTE512K) | (1 << TTE4M) |
                (1 << TTE32M) | (1 << TTE256M);
            panther_dtlb_restrictions = 1;
            panther_only = 1;
            auto_lpg_maxszc = TTE32M;
        } else if (npanther > 0) {
            panther_dtlb_restrictions = 1;
        }
        auto_lpg_maxszc = mmu_page_sizes - 1;
        init_mmu_page_sizes = 1;
        return (0);
    }
    return (1);
}

/* Cheetah+ and later worst case DTLB parameters */
#ifndef LOCKED_DTLB_ENTRIES
#define LOCKED_DTLB_ENTRIES 5   /* 2 user TSBs, 2 nucleus, + OBP */
#endif
#define TOTAL_DTLB_ENTRIES  16
#define AVAIL_32M_ENTRIES   0
#define AVAIL_256M_ENTRIES  0
#define AVAIL_DTLB_ENTRIES  (TOTAL_DTLB_ENTRIES - LOCKED_DTLB_ENTRIES)
static uint64_t ttecnt_threshold[MMU_PAGE_SIZES] = {
    AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
    AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
    AVAIL_32M_ENTRIES, AVAIL_256M_ENTRIES };

/*ARGSUSED*/
uint_t
mmu_preferred_pgsz(struct hat *hat, caddr_t addr, size_t len)
{
    sfmmu_t *sfmmup = (sfmmu_t *)hat;
    uint_t pgsz0, pgsz1;
    uint_t szc, maxszc = mmu_page_sizes - 1;
    size_t pgsz;
    extern int disable_large_pages;

    pgsz0 = (uint_t)sfmmup->sfmmu_pgsz[0];
    pgsz1 = (uint_t)sfmmup->sfmmu_pgsz[1];

    /*
     * If either of the TLBs is reprogrammed, choose
     * the largest mapping size as the preferred size,
     * if it fits the size and alignment constraints.
     * Else return the largest mapping size that fits,
     * if neither TLB is reprogrammed.
     */
    if (pgsz0 > TTE8K || pgsz1 > TTE8K) {
        if (pgsz1 > pgsz0) {    /* First try pgsz1 */
            pgsz = hw_page_array[pgsz1].hp_size;
            if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
                return (pgsz1);
        }
        if (pgsz0 > TTE8K) {    /* Then try pgsz0, if !TTE8K */
            pgsz = hw_page_array[pgsz0].hp_size;
            if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
                return (pgsz0);
        }
    } else { /* Otherwise pick best fit if neither TLB is reprogrammed. */
        for (szc = maxszc; szc > TTE8K; szc--) {
            if (disable_large_pages & (1 << szc))
                continue;

            pgsz = hw_page_array[szc].hp_size;
            if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
                return (szc);
        }
    }
    return (TTE8K);
}
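
/*
 * Worked example (illustrative, not part of the original source): if the
 * second T512 has been reprogrammed to 4M (pgsz1 == TTE4M) and the caller
 * asks about a 5MB mapping starting on a 4MB-aligned address, the routine
 * above returns TTE4M.  The same request at an unaligned address, or with
 * len below 4MB, falls through and ends up with TTE8K.  When neither T512
 * is reprogrammed, the loop instead returns the largest enabled page size
 * whose size and alignment constraints are satisfied.
 */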
/*
 * The purpose of this code is to indirectly reorganize the sfmmu_pgsz array
 * in order to handle the Panther mmu DTLB requirements.  Panther only
 * supports the 32M/256M pages in the T512_1 and not in the T16, so the
 * Panther cpu can only support one of the two largest page sizes at a time
 * (efficiently).  Panther only supports 512K and 4M pages in the T512_0,
 * and 32M/256M pages in the T512_1.  So check the sfmmu flags and ttecnt
 * before enabling the T512_1 for 32M or 256M page sizes, and make sure that
 * 512K and 4M requests go to the T512_0.
 *
 * The tmp_pgsz array comes into this routine in sorted order, as it is
 * sorted from largest to smallest #pages per pagesize in use by the hat
 * code, and leaves with the Panther mmu DTLB requirements satisfied.  Note
 * that when the array leaves this function it may not contain all of the
 * page size codes that it had coming into the function.
 *
 * Note that for DISM the flag can be set but the ttecnt can be 0, if we
 * didn't fault any pages in.  This allows the T512_1 to be reprogrammed,
 * because the T16 does not support the two giant page sizes.  ouch.
 */
void
mmu_fixup_large_pages(struct hat *hat, uint64_t *ttecnt, uint8_t *tmp_pgsz)
{
    uint_t pgsz0 = tmp_pgsz[0];
    uint_t pgsz1 = tmp_pgsz[1];
    uint_t spgsz;

    /*
     * Don't program 2nd dtlb for kernel and ism hat
     */
    ASSERT(hat->sfmmu_ismhat == NULL);
    ASSERT(hat != ksfmmup);
    ASSERT(ctx_pgsz_array != NULL);

    ASSERT((!SFMMU_FLAGS_ISSET(hat, HAT_32M_FLAG)) ||
        (!SFMMU_FLAGS_ISSET(hat, HAT_256M_FLAG)));

    if ((SFMMU_FLAGS_ISSET(hat, HAT_32M_FLAG)) || (ttecnt[TTE32M] != 0)) {
        spgsz = pgsz1;
        pgsz1 = TTE32M;
        if (pgsz0 == TTE32M)
            pgsz0 = spgsz;
    } else if ((SFMMU_FLAGS_ISSET(hat, HAT_256M_FLAG)) ||
        (ttecnt[TTE256M] != 0)) {
        spgsz = pgsz1;
        pgsz1 = TTE256M;
        if (pgsz0 == TTE256M)
            pgsz0 = spgsz;
    } else if ((pgsz1 == TTE512K) || (pgsz1 == TTE4M)) {
        if ((pgsz0 != TTE512K) && (pgsz0 != TTE4M)) {
            spgsz = pgsz0;
            pgsz0 = pgsz1;
            pgsz1 = spgsz;
        } else {
            pgsz1 = page_szc(MMU_PAGESIZE);
        }
    }
    /*
     * This implements PAGESIZE programming of the T8s
     * if large TTE counts don't exceed the thresholds.
     */
    if (ttecnt[pgsz0] < ttecnt_threshold[pgsz0])
        pgsz0 = page_szc(MMU_PAGESIZE);
    if (ttecnt[pgsz1] < ttecnt_threshold[pgsz1])
        pgsz1 = page_szc(MMU_PAGESIZE);
    tmp_pgsz[0] = pgsz0;
    tmp_pgsz[1] = pgsz1;
}
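
/*
 * Worked example (illustrative, not part of the original source): suppose
 * a Panther-only process has 32M mappings (HAT_32M_FLAG set, or
 * ttecnt[TTE32M] != 0) and arrives here with tmp_pgsz = { TTE4M, TTE64K }.
 * The routine above forces pgsz1 to TTE32M so the 32M pages can be served
 * by the T512_1, leaves TTE4M in pgsz0 for the T512_0, and drops the 64K
 * entry from the pair.  The final threshold check then knocks pgsz0 back
 * to the base page size unless the 4M ttecnt reaches AVAIL_DTLB_ENTRIES,
 * the number of unlocked T16 entries.
 */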
/*
 * Function to set up the page size values used to reprogram the DTLBs,
 * when page sizes used by a process change significantly.
 */
void
mmu_setup_page_sizes(struct hat *hat, uint64_t *ttecnt, uint8_t *tmp_pgsz)
{
    uint_t pgsz0, pgsz1;

    /*
     * Don't program 2nd dtlb for kernel and ism hat
     */
    ASSERT(hat->sfmmu_ismhat == NULL);
    ASSERT(hat != ksfmmup);

    if (ctx_pgsz_array == NULL) /* disable_dual_pgsz flag */
        return;

    /*
     * hat->sfmmu_pgsz[] is an array whose elements
     * contain a sorted order of page sizes.  Element
     * 0 is the most commonly used page size, followed
     * by element 1, and so on.
     *
     * ttecnt[] is an array of per-page-size page counts
     * mapped into the process.
     *
     * If the HAT's choice for page sizes is unsuitable,
     * we can override it here.  The new values written
     * to the array will be handed back to us later to
     * do the actual programming of the TLB hardware.
     *
     * The policy we use for programming the dual T8s on
     * Cheetah+ and beyond is as follows:
     *
     *   We have two programmable TLBs, so we look at
     *   the two most common page sizes in the array, which
     *   have already been computed for us by the HAT.
     *   If the TTE count of either preferred page size
     *   exceeds the number of unlocked T16 entries,
     *   we reprogram one of the T8s to that page size
     *   to avoid thrashing in the T16.  Else we program
     *   that T8 to the base page size.  Note that we do
     *   not force either T8 to be the base page size if a
     *   process is using more than two page sizes.  Policy
     *   decisions about which page sizes are best to use are
     *   left to the upper layers.
     *
     *   Note that for Panther, 4M and 512K pages need to be
     *   programmed into T512_0, and 32M and 256M into T512_1,
     *   so we don't want to go through the MIN/MAX code.
     *   For partial-Panther systems, we still want to make sure
     *   that 4M and 512K page sizes NEVER get into the T512_1.
     *   Since the DTLB flags are not set up on a per-cpu basis,
     *   Panther rules must be applied for mixed Panther/Cheetah+/
     *   Jaguar configurations.
     */
    if (panther_dtlb_restrictions) {
        if ((tmp_pgsz[1] == TTE512K) || (tmp_pgsz[1] == TTE4M)) {
            if ((tmp_pgsz[0] != TTE512K) &&
                (tmp_pgsz[0] != TTE4M)) {
                pgsz1 = tmp_pgsz[0];
                pgsz0 = tmp_pgsz[1];
            } else {
                pgsz0 = tmp_pgsz[0];
                pgsz1 = page_szc(MMU_PAGESIZE);
            }
        } else {
            pgsz0 = tmp_pgsz[0];
            pgsz1 = tmp_pgsz[1];
        }
    } else {
        pgsz0 = MIN(tmp_pgsz[0], tmp_pgsz[1]);
        pgsz1 = MAX(tmp_pgsz[0], tmp_pgsz[1]);
    }
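
    /*
     * Worked example (illustrative, not part of the original source):
     * with panther_dtlb_restrictions set and tmp_pgsz = { TTE4M, TTE512K },
     * both candidates need the T512_0, so the block above keeps
     * pgsz0 = TTE4M and demotes pgsz1 to the base page size rather than
     * letting 512K pages reach the T512_1.  With tmp_pgsz = { TTE64K,
     * TTE4M } the pair is simply swapped, so the 4M pages go to the
     * T512_0.
     */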
    /*
     * This implements PAGESIZE programming of the T8s
     * if large TTE counts don't exceed the thresholds.
     */
    if (ttecnt[pgsz0] < ttecnt_threshold[pgsz0])
        pgsz0 = page_szc(MMU_PAGESIZE);
    if (ttecnt[pgsz1] < ttecnt_threshold[pgsz1])
        pgsz1 = page_szc(MMU_PAGESIZE);
    tmp_pgsz[0] = pgsz0;
    tmp_pgsz[1] = pgsz1;
}

/*
 * The HAT calls this function when an MMU context is allocated so that we
 * can reprogram the large TLBs appropriately for the new process using
 * the context.
 *
 * The caller must hold the HAT lock.
 */
void
mmu_set_ctx_page_sizes(struct hat *hat)
{
    uint_t pgsz0, pgsz1;
    uint_t new_cext;

    ASSERT(sfmmu_hat_lock_held(hat));
    ASSERT(hat != ksfmmup);

    if (ctx_pgsz_array == NULL) /* disable_dual_pgsz flag */
        return;

    /*
     * If supported, reprogram the TLBs to a larger pagesize.
     */
    pgsz0 = hat->sfmmu_pgsz[0];
    pgsz1 = hat->sfmmu_pgsz[1];
    ASSERT(pgsz0 < mmu_page_sizes);
    ASSERT(pgsz1 < mmu_page_sizes);
#ifdef DEBUG
    if (panther_dtlb_restrictions) {
        ASSERT(pgsz1 != TTE512K);
        ASSERT(pgsz1 != TTE4M);
    }
    if (panther_only) {
        ASSERT(pgsz0 != TTE32M);
        ASSERT(pgsz0 != TTE256M);
    }
#endif /* DEBUG */
    new_cext = TAGACCEXT_MKSZPAIR(pgsz1, pgsz0);
    if (hat->sfmmu_cext != new_cext) {
        hat->sfmmu_cext = new_cext;
    }
    ctx_pgsz_array[hat->sfmmu_cnum] = hat->sfmmu_cext;
    /*
     * sfmmu_setctx_sec() will take care of the
     * rest of the chores reprogramming the ctx_pgsz_array
     * page size values into the DTLBs.
     */
}
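
/*
 * Illustrative call pattern (a sketch, not part of the original source;
 * the real call site lives in the generic sfmmu hat code):
 *
 *	(with the HAT lock held, at MMU context allocation)
 *	mmu_set_ctx_page_sizes(hat);
 *
 * This updates hat->sfmmu_cext and ctx_pgsz_array[hat->sfmmu_cnum];
 * sfmmu_setctx_sec() then loads the new page size pair into the MMU
 * context register the next time the process is switched in.
 */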
/*
 * This function assumes that there are either four or six supported page
 * sizes and at most two programmable TLBs, so we need to decide which
 * page sizes are most important and then adjust the TLB page sizes
 * accordingly (if supported).
 *
 * If these assumptions change, this function will need to be
 * updated to support whatever the new limits are.
 */
void
mmu_check_page_sizes(sfmmu_t *sfmmup, uint64_t *ttecnt)
{
    uint64_t sortcnt[MMU_PAGE_SIZES];
    uint8_t tmp_pgsz[MMU_PAGE_SIZES];
    uint8_t i, j, max;
    uint16_t oldval, newval;

    /*
     * We only consider reprogramming the TLBs if one or more of
     * the two most used page sizes changes and we're using
     * large pages in this process, except for Panther 32M/256M pages,
     * which the Panther T16 does not support.
     */
    if (sfmmup->sfmmu_flags & HAT_LGPG_FLAGS) {
        /* Sort page sizes. */
        for (i = 0; i < mmu_page_sizes; i++) {
            sortcnt[i] = ttecnt[i];
        }
        for (j = 0; j < mmu_page_sizes; j++) {
            for (i = mmu_page_sizes - 1, max = 0; i > 0; i--) {
                if (sortcnt[i] > sortcnt[max])
                    max = i;
            }
            tmp_pgsz[j] = max;
            sortcnt[max] = 0;
        }

        /*
         * Handle Panther page dtlb calcs separately.  The check
         * for actual or potential 32M/256M pages must occur
         * every time due to lack of T16 support for them.
         * The sort works fine for Ch+/Jag, but Panther has
         * pagesize restrictions for both DTLBs.
         */
        oldval = sfmmup->sfmmu_pgsz[0] << 8 | sfmmup->sfmmu_pgsz[1];

        if (panther_only) {
            mmu_fixup_large_pages(sfmmup, ttecnt, tmp_pgsz);
        } else {
            /* Check 2 largest values after the sort. */
            mmu_setup_page_sizes(sfmmup, ttecnt, tmp_pgsz);
        }
        newval = tmp_pgsz[0] << 8 | tmp_pgsz[1];
        if (newval != oldval) {
            sfmmu_steal_context(sfmmup, tmp_pgsz);
        }
    }
}

#endif /* CPU_IMP_DUAL_PAGESIZE */

struct heap_lp_page_size {
    int     impl;
    uint_t  tte;
    int     use_dt512;
};

struct heap_lp_page_size heap_lp_pgsz[] = {

    {CHEETAH_IMPL, TTE8K, 0},           /* default */
    {CHEETAH_IMPL, TTE64K, 0},
    {CHEETAH_IMPL, TTE4M, 0},

    { CHEETAH_PLUS_IMPL, TTE4M,  1 },   /* default */
    { CHEETAH_PLUS_IMPL, TTE4M,  0 },
    { CHEETAH_PLUS_IMPL, TTE64K, 1 },
    { CHEETAH_PLUS_IMPL, TTE64K, 0 },
    { CHEETAH_PLUS_IMPL, TTE8K,  0 },

    { JALAPENO_IMPL, TTE4M,  1 },       /* default */
    { JALAPENO_IMPL, TTE4M,  0 },
    { JALAPENO_IMPL, TTE64K, 1 },
    { JALAPENO_IMPL, TTE64K, 0 },
    { JALAPENO_IMPL, TTE8K,  0 },

    { JAGUAR_IMPL, TTE4M, 1 },          /* default */
    { JAGUAR_IMPL, TTE4M, 0 },
    { JAGUAR_IMPL, TTE64K, 1 },
    { JAGUAR_IMPL, TTE64K, 0 },
    { JAGUAR_IMPL, TTE8K, 0 },

    { SERRANO_IMPL, TTE4M, 1 },         /* default */
    { SERRANO_IMPL, TTE4M, 0 },
    { SERRANO_IMPL, TTE64K, 1 },
    { SERRANO_IMPL, TTE64K, 0 },
    { SERRANO_IMPL, TTE8K, 0 },

    { PANTHER_IMPL, TTE4M, 1 },         /* default */
    { PANTHER_IMPL, TTE4M, 0 },
    { PANTHER_IMPL, TTE64K, 1 },
    { PANTHER_IMPL, TTE64K, 0 },
    { PANTHER_IMPL, TTE8K, 0 }
};

int heaplp_use_dt512 = -1;

void
mmu_init_kernel_pgsz(struct hat *hat)
{
    uint_t tte = page_szc(segkmem_lpsize);
    uchar_t new_cext_primary, new_cext_nucleus;

    if (heaplp_use_dt512 == 0 || tte > TTE4M) {
        /* do not reprogram dt512 tlb */
        tte = TTE8K;
    }

    new_cext_nucleus = TAGACCEXT_MKSZPAIR(tte, TTE8K);
    new_cext_primary = TAGACCEXT_MKSZPAIR(TTE8K, tte);

    if (ctx_pgsz_array)
        ctx_pgsz_array[KCONTEXT] = new_cext_primary;
    hat->sfmmu_cext = new_cext_primary;
    kcontextreg = ((uint64_t)new_cext_nucleus << CTXREG_NEXT_SHIFT) |
        ((uint64_t)new_cext_primary << CTXREG_EXT_SHIFT);
    mmu_init_kcontext();
}

size_t
mmu_get_kernel_lpsize(size_t lpsize)
{
    struct heap_lp_page_size *p_lpgsz, *pend_lpgsz;
    int impl = cpunodes[getprocessorid()].implementation;
    uint_t tte = TTE8K;

    pend_lpgsz = (struct heap_lp_page_size *)
        ((char *)heap_lp_pgsz + sizeof (heap_lp_pgsz));

    /* search for a valid segkmem_lpsize */
    for (p_lpgsz = heap_lp_pgsz; p_lpgsz < pend_lpgsz; p_lpgsz++) {
        if (impl != p_lpgsz->impl)
            continue;

        if (lpsize == 0) {
            /*
             * no setting for segkmem_lpsize in /etc/system
             * use default from the table
             */
            tte = p_lpgsz->tte;
            heaplp_use_dt512 = p_lpgsz->use_dt512;
            break;
        }

        if (lpsize == TTEBYTES(p_lpgsz->tte) &&
            (heaplp_use_dt512 == -1 ||
            heaplp_use_dt512 == p_lpgsz->use_dt512)) {

            tte = p_lpgsz->tte;
            heaplp_use_dt512 = p_lpgsz->use_dt512;

            /* found a match */
            break;
        }
    }

    if (p_lpgsz == pend_lpgsz) {
        /* nothing found: disable large page kernel heap */
        tte = TTE8K;
        heaplp_use_dt512 = 0;
    }

    lpsize = TTEBYTES(tte);

    return (lpsize);
}
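
/*
 * Worked example (illustrative, not part of the original source): with
 * segkmem_lpsize set to 0x400000 (4M) in /etc/system on a Panther system,
 * the loop above matches the { PANTHER_IMPL, TTE4M, 1 } row, so the kernel
 * heap uses 4M pages and heaplp_use_dt512 becomes 1; mmu_init_kernel_pgsz()
 * then programs the kernel context to use the 8K/4M page size pair.  An
 * unsupported value falls off the end of the table and quietly drops back
 * to TTE8K with heaplp_use_dt512 set to 0, disabling the large-page kernel
 * heap.
 */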