/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/vm.h>
#include <sys/exec.h>
#include <sys/cmn_err.h>
#include <sys/cpu_module.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>

#if defined(__sparcv9) && defined(SF_ERRATA_57)
caddr_t errata57_limit;
#endif

uint_t page_colors = 0;
uint_t page_colors_mask = 0;
uint_t page_coloring_shift = 0;
int consistent_coloring;
int update_proc_pgcolorbase_after_fork = 0;

uint_t mmu_page_sizes = DEFAULT_MMU_PAGE_SIZES;
uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
uint_t mmu_hashcnt = DEFAULT_MAX_HASHCNT;
uint_t max_mmu_hashcnt = MAX_HASHCNT;
size_t mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;

/*
 * The sun4u hardware mapping sizes which will always be supported are
 * 8K, 64K, 512K and 4M.  If sun4u based machines need to support other
 * page sizes, platform or cpu specific routines need to modify the value.
 * The base pagesize (p_szc == 0) must always be supported by the hardware.
 */
int mmu_exported_pagesize_mask = (1 << TTE8K) | (1 << TTE64K) |
	(1 << TTE512K) | (1 << TTE4M);
uint_t mmu_exported_page_sizes;
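
/*
 * For example (an illustrative sketch only, not code from this file), a
 * cpu specific startup routine on hardware that also supports 32M and
 * 256M pages could export them by adding the corresponding TTE bits:
 *
 *	mmu_exported_pagesize_mask |= (1 << TTE32M) | (1 << TTE256M);
 */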

uint_t szc_2_userszc[MMU_PAGE_SIZES];
uint_t userszc_2_szc[MMU_PAGE_SIZES];

extern uint_t vac_colors_mask;
extern int vac_shift;

hw_pagesize_t hw_page_array[] = {
	{MMU_PAGESIZE, MMU_PAGESHIFT, 0, MMU_PAGESIZE >> MMU_PAGESHIFT},
	{MMU_PAGESIZE64K, MMU_PAGESHIFT64K, 0,
	    MMU_PAGESIZE64K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE512K, MMU_PAGESHIFT512K, 0,
	    MMU_PAGESIZE512K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE4M, MMU_PAGESHIFT4M, 0, MMU_PAGESIZE4M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE32M, MMU_PAGESHIFT32M, 0,
	    MMU_PAGESIZE32M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE256M, MMU_PAGESHIFT256M, 0,
	    MMU_PAGESIZE256M >> MMU_PAGESHIFT},
	{0, 0, 0, 0}
};

/*
 * Maximum page size used to map 64-bit memory segment kmem64_base..kmem64_end
 */
int max_bootlp_tteszc = TTE4M;

/*
 * use_text_pgsz64K and use_text_pgsz512K allow the user to turn on these
 * additional text page sizes for USIII-IV+ and OPL by changing the default
 * values via /etc/system.
 */
int use_text_pgsz64K = 0;
int use_text_pgsz512K = 0;
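
/*
 * As an illustration (not part of the original file), the standard
 * /etc/system tunable syntax for enabling both text page sizes would be:
 *
 *	set use_text_pgsz64K = 1
 *	set use_text_pgsz512K = 1
 *
 * /etc/system changes take effect on the next reboot.
 */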

/*
 * Maximum and default segment size tunables for user heap, stack, private
 * and shared anonymous memory, and user text and initialized data.
 */
size_t max_uheap_lpsize = MMU_PAGESIZE4M;
size_t default_uheap_lpsize = MMU_PAGESIZE;
size_t max_ustack_lpsize = MMU_PAGESIZE4M;
size_t default_ustack_lpsize = MMU_PAGESIZE;
size_t max_privmap_lpsize = MMU_PAGESIZE4M;
size_t max_uidata_lpsize = MMU_PAGESIZE;
size_t max_utext_lpsize = MMU_PAGESIZE4M;
size_t max_shm_lpsize = MMU_PAGESIZE4M;

void
adjust_data_maxlpsize(size_t ismpagesize)
{
	if (max_uheap_lpsize == MMU_PAGESIZE4M) {
		max_uheap_lpsize = ismpagesize;
	}
	if (max_ustack_lpsize == MMU_PAGESIZE4M) {
		max_ustack_lpsize = ismpagesize;
	}
	if (max_privmap_lpsize == MMU_PAGESIZE4M) {
		max_privmap_lpsize = ismpagesize;
	}
	if (max_shm_lpsize == MMU_PAGESIZE4M) {
		max_shm_lpsize = ismpagesize;
	}
}

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user.  We will pick an address
 * range which is just below the current stack limit.  The
 * algorithm used for cache consistency on machines with virtual
 * address caches is such that offset 0 in the vnode is always
 * on a shm_alignment-aligned address.  Unfortunately, this
 * means that vnodes which are demand paged will not be mapped
 * cache consistently with the executable images.  When the
 * cache alignment for a given object is inconsistent, the
 * lower level code must manage the translations so that this
 * is not seen here (at the cost of efficiency, of course).
 *
 * Every mapping will have a redzone of a single page on either side of
 * the request.  This is done to leave one page unmapped between segments.
 * This is not required, but it's useful for the user because if their
 * program strays across a segment boundary, it will catch a fault
 * immediately, making debugging a little easier.  Currently the redzone
 * is mandatory.
 *
 * addrp is a value/result parameter.
 *	On input it is a hint from the user to be used in a completely
 *	machine dependent fashion.  For MAP_ALIGN, addrp contains the
 *	minimal alignment, which must be some "power of two" multiple of
 *	pagesize.
 *
 *	On output it is NULL if no address can be found in the current
 *	process's address space or else an address that is currently
 *	not mapped for len bytes with a page of redzone on either side.
 *	If vacalign is true, then the selected address will obey the alignment
 *	constraints of a vac machine based on the given off value.
 */
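/*
 * Illustrative only (a hypothetical sketch, not code taken from a real
 * caller): an in-kernel consumer that wants the system to choose the
 * address would pass a NULL hint and test the value/result parameter,
 * since NULL on output means no suitable virtual space was found:
 *
 *	caddr_t addr = NULL;
 *
 *	map_addr_proc(&addr, len, off, 1, p->p_as->a_userlimit, p, 0);
 *	if (addr == NULL)
 *		return (ENOMEM);
 */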
/*ARGSUSED4*/
void
map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
    caddr_t userlimit, struct proc *p, uint_t flags)
{
	struct as *as = p->p_as;
	caddr_t addr;
	caddr_t base;
	size_t slen;
	uintptr_t align_amount;
	int allow_largepage_alignment = 1;

	base = p->p_brkbase;
	if (userlimit < as->a_userlimit) {
		/*
		 * This happens when a program wants to map something in
		 * a range that's accessible to a program in a smaller
		 * address space.  For example, a 64-bit program might
		 * be calling mmap32(2) to guarantee that the returned
		 * address is below 4Gbytes.
		 */
		ASSERT(userlimit > base);
		slen = userlimit - base;
	} else {
		slen = p->p_usrstack - base -
		    ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK);
	}

	/* Make len be a multiple of PAGESIZE */
	len = (len + PAGEOFFSET) & PAGEMASK;

	/*
	 * If the request is larger than the size of a particular
	 * mmu level, then we use that level to map the request.
	 * But this requires that both the virtual and the physical
	 * addresses be aligned with respect to that level, so we
	 * do the virtual bit of nastiness here.
	 *
	 * For 32-bit processes, only those which have specified
	 * MAP_ALIGN or an addr will be aligned on a page size > 4MB.
	 * Otherwise we can potentially waste up to 256MB of the 4G
	 * process address space just for alignment.
	 */
	if (p->p_model == DATAMODEL_ILP32 && ((flags & MAP_ALIGN) == 0 ||
	    ((uintptr_t)*addrp) != 0)) {
		allow_largepage_alignment = 0;
	}
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE256M)) {	/* 256MB mappings */
		align_amount = MMU_PAGESIZE256M;
	} else if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE32M)) {		/* 32MB mappings */
		align_amount = MMU_PAGESIZE32M;
	} else if (len >= MMU_PAGESIZE4M) {	/* 4MB mappings */
		align_amount = MMU_PAGESIZE4M;
	} else if (len >= MMU_PAGESIZE512K) {	/* 512KB mappings */
		align_amount = MMU_PAGESIZE512K;
	} else if (len >= MMU_PAGESIZE64K) {	/* 64KB mappings */
		align_amount = MMU_PAGESIZE64K;
	} else {
		/*
		 * Align virtual addresses on a 64K boundary to ensure
		 * that ELF shared libraries are mapped with the appropriate
		 * alignment constraints by the run-time linker.
		 */
		align_amount = ELF_SPARC_MAXPGSZ;
		if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp != 0) &&
		    ((uintptr_t)*addrp < align_amount))
			align_amount = (uintptr_t)*addrp;
	}

	/*
	 * 64-bit processes require 1024K alignment of ELF shared libraries.
	 */
	if (p->p_model == DATAMODEL_LP64)
		align_amount = MAX(align_amount, ELF_SPARCV9_MAXPGSZ);
#ifdef VAC
	if (vac && vacalign && (align_amount < shm_alignment))
		align_amount = shm_alignment;
#endif

	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
		align_amount = (uintptr_t)*addrp;
	}

	ASSERT(ISP2(align_amount));
	ASSERT(align_amount == 0 || align_amount >= PAGESIZE);

	/*
	 * Look for a large enough hole starting below the stack limit.
	 * After finding it, use the upper part.
	 */
	as_purge(as);
	off = off & (align_amount - 1);
	if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
	    PAGESIZE, off) == 0) {
		caddr_t as_addr;

		/*
		 * addr is the highest possible address to use since we have
		 * a PAGESIZE redzone at the beginning and end.
		 */
		addr = base + slen - (PAGESIZE + len);
		as_addr = addr;
		/*
		 * Round address DOWN to the alignment amount and
		 * add the offset in.
		 * If addr is greater than as_addr, len would not be large
		 * enough to include the redzone, so we must adjust down
		 * by the alignment amount.
		 */
		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
		addr += (long)off;
		if (addr > as_addr) {
			addr -= align_amount;
		}
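		/*
		 * Worked example (illustrative numbers only, not from the
		 * source): with align_amount = 64K (0x10000) and
		 * off = 0xc000, a starting addr of 0xea000 rounds down to
		 * 0xe0000 and becomes 0xec000 after the offset is added.
		 * That exceeds as_addr (0xea000), so 64K is subtracted,
		 * yielding 0xdc000, which is aligned, carries the requested
		 * offset, and still leaves room for the redzone.
		 */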

		ASSERT(addr > base);
		ASSERT(addr + len < base + slen);
		ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
		    ((uintptr_t)(off)));
		*addrp = addr;

#if defined(SF_ERRATA_57)
		if (AS_TYPE_64BIT(as) && addr < errata57_limit) {
			*addrp = NULL;
		}
#endif
	} else {
		*addrp = NULL;	/* no more virtual space */
	}
}

/*
 * Platform-dependent page scrub call.
 */
void
pagescrub(page_t *pp, uint_t off, uint_t len)
{
	/*
	 * For now, we rely on the fact that pagezero() will
	 * always clear UEs.
	 */
	pagezero(pp, off, len);
}

/*ARGSUSED*/
void
sync_data_memory(caddr_t va, size_t len)
{
	cpu_flush_ecache();
}

/*
 * platform specific large pages for kernel heap support
 */
void
mmu_init_kcontext()
{
	extern void set_kcontextreg();

	if (kcontextreg)
		set_kcontextreg();
}

void
contig_mem_init(void)
{
	/* not applicable to sun4u */
}

/*ARGSUSED*/
caddr_t
contig_mem_prealloc(caddr_t alloc_base, pgcnt_t npages)
{
	/* not applicable to sun4u */
	return (alloc_base);
}

size_t
exec_get_spslew(void)
{
	return (0);
}