/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/vm.h>
#include <sys/exec.h>
#include <sys/cmn_err.h>
#include <sys/cpu_module.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>

#if defined(__sparcv9) && defined(SF_ERRATA_57)
caddr_t errata57_limit;
#endif

uint_t page_colors = 0;
uint_t page_colors_mask = 0;
uint_t page_coloring_shift = 0;
int consistent_coloring;
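
/*
 * MMU page size, hash chain and ISM page size defaults.  Cpu or platform
 * specific startup code may adjust these on hardware that supports
 * additional page sizes or larger ISM pages.
 */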
uint_t mmu_page_sizes = DEFAULT_MMU_PAGE_SIZES;
uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
uint_t mmu_hashcnt = DEFAULT_MAX_HASHCNT;
uint_t max_mmu_hashcnt = MAX_HASHCNT;
size_t mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;

/*
 * The sun4u hardware mapping sizes which will always be supported are
 * 8K, 64K, 512K and 4M.  If sun4u based machines need to support other
 * page sizes, platform or cpu specific routines need to modify the value.
 * The base pagesize (p_szc == 0) must always be supported by the hardware.
 */
int mmu_exported_pagesize_mask = (1 << TTE8K) | (1 << TTE64K) |
        (1 << TTE512K) | (1 << TTE4M);
uint_t mmu_exported_page_sizes;

uint_t szc_2_userszc[MMU_PAGE_SIZES];
uint_t userszc_2_szc[MMU_PAGE_SIZES];

extern uint_t vac_colors_mask;
extern int vac_shift;

hw_pagesize_t hw_page_array[] = {
        {MMU_PAGESIZE, MMU_PAGESHIFT, 0, MMU_PAGESIZE >> MMU_PAGESHIFT},
        {MMU_PAGESIZE64K, MMU_PAGESHIFT64K, 0,
            MMU_PAGESIZE64K >> MMU_PAGESHIFT},
        {MMU_PAGESIZE512K, MMU_PAGESHIFT512K, 0,
            MMU_PAGESIZE512K >> MMU_PAGESHIFT},
        {MMU_PAGESIZE4M, MMU_PAGESHIFT4M, 0, MMU_PAGESIZE4M >> MMU_PAGESHIFT},
        {MMU_PAGESIZE32M, MMU_PAGESHIFT32M, 0,
            MMU_PAGESIZE32M >> MMU_PAGESHIFT},
        {MMU_PAGESIZE256M, MMU_PAGESHIFT256M, 0,
            MMU_PAGESIZE256M >> MMU_PAGESHIFT},
        {0, 0, 0, 0}
};

/*
 * Maximum page size used to map 64-bit memory segment kmem64_base..kmem64_end
 */
int max_bootlp_tteszc = TTE4M;

/*
 * use_text_pgsz64K and use_text_pgsz512K allow the user to turn on these
 * additional text page sizes for USIII-IV+ and OPL by changing the default
 * values via /etc/system.
 */
int use_text_pgsz64K = 0;
int use_text_pgsz512K = 0;

/*
 * Maximum and default segment size tunables for user heap, stack, private
 * and shared anonymous memory, and user text and initialized data.
 */
size_t max_uheap_lpsize = MMU_PAGESIZE4M;
size_t default_uheap_lpsize = MMU_PAGESIZE;
size_t max_ustack_lpsize = MMU_PAGESIZE4M;
size_t default_ustack_lpsize = MMU_PAGESIZE;
size_t max_privmap_lpsize = MMU_PAGESIZE4M;
size_t max_uidata_lpsize = MMU_PAGESIZE;
size_t max_utext_lpsize = MMU_PAGESIZE4M;
size_t max_shm_lpsize = MMU_PAGESIZE4M;

void
adjust_data_maxlpsize(size_t ismpagesize)
{
        if (max_uheap_lpsize == MMU_PAGESIZE4M) {
                max_uheap_lpsize = ismpagesize;
        }
        if (max_ustack_lpsize == MMU_PAGESIZE4M) {
                max_ustack_lpsize = ismpagesize;
        }
        if (max_privmap_lpsize == MMU_PAGESIZE4M) {
                max_privmap_lpsize = ismpagesize;
        }
        if (max_shm_lpsize == MMU_PAGESIZE4M) {
                max_shm_lpsize = ismpagesize;
        }
}
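
/*
 * For example, a platform whose startup code enables a larger ISM page
 * size (say 256M) would call adjust_data_maxlpsize(MMU_PAGESIZE256M) so
 * that the data segment limits above track the larger size.
 */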

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user.  We will pick an address
 * range which is just below the current stack limit.  The
 * algorithm used for cache consistency on machines with virtual
 * address caches is such that offset 0 in the vnode is always
 * on a shm_alignment'ed aligned address.  Unfortunately, this
 * means that vnodes which are demand paged will not be mapped
 * cache consistently with the executable images.  When the
 * cache alignment for a given object is inconsistent, the
 * lower level code must manage the translations so that this
 * is not seen here (at the cost of efficiency, of course).
 *
 * addrp is a value/result parameter.
 *	On input it is a hint from the user to be used in a completely
 *	machine dependent fashion.  For MAP_ALIGN, addrp contains the
 *	minimal alignment.
 *
 *	On output it is NULL if no address can be found in the current
 *	process's address space or else an address that is currently
 *	not mapped for len bytes with a page of red zone on either side.
 *	If vacalign is true, then the selected address will obey the alignment
 *	constraints of a vac machine based on the given off value.
 */
/*ARGSUSED4*/
void
map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
    caddr_t userlimit, struct proc *p, uint_t flags)
{
        struct as *as = p->p_as;
        caddr_t addr;
        caddr_t base;
        size_t slen;
        uintptr_t align_amount;
        int allow_largepage_alignment = 1;

        base = p->p_brkbase;
        if (userlimit < as->a_userlimit) {
                /*
                 * This happens when a program wants to map something in
                 * a range that's accessible to a program in a smaller
                 * address space.  For example, a 64-bit program might
                 * be calling mmap32(2) to guarantee that the returned
                 * address is below 4Gbytes.
                 */
                ASSERT(userlimit > base);
                slen = userlimit - base;
        } else {
                slen = p->p_usrstack - base - (((size_t)rctl_enforced_value(
                    rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p) + PAGEOFFSET)
                    & PAGEMASK);
        }
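
        /* Round the request up to a whole number of pages. */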
        len = (len + PAGEOFFSET) & PAGEMASK;

        /*
         * Redzone for each side of the request. This is done to leave
         * one page unmapped between segments. This is not required, but
         * it's useful for the user because if their program strays across
         * a segment boundary, it will catch a fault immediately making
         * debugging a little easier.
         */
        len += (2 * PAGESIZE);

        /*
         * If the request is larger than the size of a particular
         * mmu level, then we use that level to map the request.
         * But this requires that both the virtual and the physical
         * addresses be aligned with respect to that level, so we
         * do the virtual bit of nastiness here.
         *
         * For 32-bit processes, only those which have specified
         * MAP_ALIGN or an addr will be aligned on a page size > 4MB. Otherwise
         * we can potentially waste up to 256MB of the 4G process address
         * space just for alignment.
         */
        if (p->p_model == DATAMODEL_ILP32 && ((flags & MAP_ALIGN) == 0 ||
            ((uintptr_t)*addrp) != 0)) {
                allow_largepage_alignment = 0;
        }
        if ((mmu_page_sizes == max_mmu_page_sizes) &&
            allow_largepage_alignment &&
            (len >= MMU_PAGESIZE256M)) {        /* 256MB mappings */
                align_amount = MMU_PAGESIZE256M;
        } else if ((mmu_page_sizes == max_mmu_page_sizes) &&
            allow_largepage_alignment &&
            (len >= MMU_PAGESIZE32M)) {         /* 32MB mappings */
                align_amount = MMU_PAGESIZE32M;
        } else if (len >= MMU_PAGESIZE4M) {     /* 4MB mappings */
                align_amount = MMU_PAGESIZE4M;
        } else if (len >= MMU_PAGESIZE512K) {   /* 512KB mappings */
                align_amount = MMU_PAGESIZE512K;
        } else if (len >= MMU_PAGESIZE64K) {    /* 64KB mappings */
                align_amount = MMU_PAGESIZE64K;
        } else {
                /*
                 * Align virtual addresses on a 64K boundary to ensure
                 * that ELF shared libraries are mapped with the appropriate
                 * alignment constraints by the run-time linker.
                 */
                align_amount = ELF_SPARC_MAXPGSZ;
                if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp != 0) &&
                    ((uintptr_t)*addrp < align_amount))
                        align_amount = (uintptr_t)*addrp;
        }

        /*
         * 64-bit processes require 1024K alignment of ELF shared libraries.
         */
        if (p->p_model == DATAMODEL_LP64)
                align_amount = MAX(align_amount, ELF_SPARCV9_MAXPGSZ);
#ifdef VAC
        if (vac && vacalign && (align_amount < shm_alignment))
                align_amount = shm_alignment;
#endif

        if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
                align_amount = (uintptr_t)*addrp;
        }
        len += align_amount;
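
        /*
         * At this point len includes the two redzone pages plus
         * align_amount of slack, so any gap as_gap() finds below can
         * hold a suitably aligned range of the original request.
         */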

        /*
         * Look for a large enough hole starting below the stack limit.
         * After finding it, use the upper part.  Addition of PAGESIZE is
         * for the redzone as described above.
         */
        as_purge(as);
        if (as_gap(as, len, &base, &slen, AH_HI, NULL) == 0) {
                caddr_t as_addr;

                addr = base + slen - len + PAGESIZE;
                as_addr = addr;
                /*
                 * Round address DOWN to the alignment amount,
                 * add the offset, and if this address is less
                 * than the original address, add alignment amount.
                 */
                addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
                addr += (long)(off & (align_amount - 1l));
                if (addr < as_addr) {
                        addr += align_amount;
                }

                ASSERT(addr <= (as_addr + align_amount));
                ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
                    ((uintptr_t)(off & (align_amount - 1l))));
                *addrp = addr;

#if defined(SF_ERRATA_57)
                if (AS_TYPE_64BIT(as) && addr < errata57_limit) {
                        *addrp = NULL;
                }
#endif
        } else {
                *addrp = NULL;  /* no more virtual space */
        }
}

/*
 * Platform-dependent page scrub call.
 */
void
pagescrub(page_t *pp, uint_t off, uint_t len)
{
        /*
         * For now, we rely on the fact that pagezero() will
         * always clear UEs.
         */
        pagezero(pp, off, len);
}

/*ARGSUSED*/
void
sync_data_memory(caddr_t va, size_t len)
{
        cpu_flush_ecache();
}

/*
 * platform specific large pages for kernel heap support
 */
void
mmu_init_kcontext()
{
        extern void set_kcontextreg();

        if (kcontextreg)
                set_kcontextreg();
}

void
contig_mem_init(void)
{
        /* not applicable to sun4u */
}

/*ARGSUSED*/
caddr_t
contig_mem_prealloc(caddr_t alloc_base, pgcnt_t npages)
{
        /* not applicable to sun4u */
        return (alloc_base);
}

/*
 * No slew is applied to the initial user stack pointer on sun4u.
 */
size_t
exec_get_spslew(void)
{
        return (0);
}