/* usr/src/uts/sun4u/vm/mach_vm_dep.c */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/*	All Rights Reserved   */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/vm.h>
#include <sys/exec.h>
#include <sys/cmn_err.h>
#include <sys/cpu_module.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>

#if defined(__sparcv9) && defined(SF_ERRATA_57)
caddr_t errata57_limit;
#endif

uint_t page_colors = 0;
uint_t page_colors_mask = 0;
uint_t page_coloring_shift = 0;
int consistent_coloring;
int update_proc_pgcolorbase_after_fork = 0;

uint_t mmu_page_sizes = DEFAULT_MMU_PAGE_SIZES;
uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
uint_t mmu_hashcnt = DEFAULT_MAX_HASHCNT;
uint_t max_mmu_hashcnt = MAX_HASHCNT;
size_t mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;

/*
 * The sun4u hardware mapping sizes that will always be supported are
 * 8K, 64K, 512K and 4M.  If sun4u-based machines need to support other
 * page sizes, platform- or CPU-specific routines need to modify this value.
 * The base pagesize (p_szc == 0) must always be supported by the hardware.
 */
int mmu_exported_pagesize_mask = (1 << TTE8K) | (1 << TTE64K) |
	(1 << TTE512K) | (1 << TTE4M);
uint_t mmu_exported_page_sizes;
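
/*
 * Worked example of the default mask above: with the sfmmu size codes
 * TTE8K == 0, TTE64K == 1, TTE512K == 2 and TTE4M == 3, it evaluates to
 *
 *	(1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) == 0x0f
 *
 * A platform that also supports 32M and 256M pages (TTE32M == 4,
 * TTE256M == 5) would OR in those bits as well, yielding 0x3f.
 */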

uint_t szc_2_userszc[MMU_PAGE_SIZES];
uint_t userszc_2_szc[MMU_PAGE_SIZES];

extern uint_t vac_colors_mask;
extern int vac_shift;

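/*
 * Reading off the initializers below, each hw_page_array[] entry gives:
 * the page size in bytes, the corresponding page shift, a per-size color
 * count that is zero here (presumably initialized later during page
 * coloring setup), and the span of the page in MMU_PAGESIZE (8K) base
 * pages, e.g. MMU_PAGESIZE64K >> MMU_PAGESHIFT == 8.  An all-zero entry
 * terminates the array.
 */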
hw_pagesize_t hw_page_array[] = {
	{MMU_PAGESIZE, MMU_PAGESHIFT, 0, MMU_PAGESIZE >> MMU_PAGESHIFT},
	{MMU_PAGESIZE64K, MMU_PAGESHIFT64K, 0,
	    MMU_PAGESIZE64K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE512K, MMU_PAGESHIFT512K, 0,
	    MMU_PAGESIZE512K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE4M, MMU_PAGESHIFT4M, 0, MMU_PAGESIZE4M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE32M, MMU_PAGESHIFT32M, 0,
	    MMU_PAGESIZE32M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE256M, MMU_PAGESHIFT256M, 0,
	    MMU_PAGESIZE256M >> MMU_PAGESHIFT},
	{0, 0, 0, 0}
};

/*
 * Maximum page size used to map 64-bit memory segment kmem64_base..kmem64_end
 */
int	max_bootlp_tteszc = TTE4M;

/*
 * use_text_pgsz64K and use_text_pgsz512K allow the user to turn on these
 * additional text page sizes for USIII-IV+ and OPL by changing the default
 * values via /etc/system.
 */
int	use_text_pgsz64K = 0;
int	use_text_pgsz512K = 0;

/*
 * Maximum and default segment size tunables for user heap, stack, private
 * and shared anonymous memory, and user text and initialized data.
 */
size_t max_uheap_lpsize = MMU_PAGESIZE4M;
size_t default_uheap_lpsize = MMU_PAGESIZE;
size_t max_ustack_lpsize = MMU_PAGESIZE4M;
size_t default_ustack_lpsize = MMU_PAGESIZE;
size_t max_privmap_lpsize = MMU_PAGESIZE4M;
size_t max_uidata_lpsize = MMU_PAGESIZE;
size_t max_utext_lpsize = MMU_PAGESIZE4M;
size_t max_shm_lpsize = MMU_PAGESIZE4M;
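
/*
 * Illustrative /etc/system settings (hypothetical values, not defaults):
 *
 *	set default_uheap_lpsize = 0x400000
 *	set default_ustack_lpsize = 0x400000
 *
 * would make 4M the preferred large page size for user heap and stack.
 */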

void
adjust_data_maxlpsize(size_t ismpagesize)
{
	if (max_uheap_lpsize == MMU_PAGESIZE4M) {
		max_uheap_lpsize = ismpagesize;
	}
	if (max_ustack_lpsize == MMU_PAGESIZE4M) {
		max_ustack_lpsize = ismpagesize;
	}
	if (max_privmap_lpsize == MMU_PAGESIZE4M) {
		max_privmap_lpsize = ismpagesize;
	}
	if (max_shm_lpsize == MMU_PAGESIZE4M) {
		max_shm_lpsize = ismpagesize;
	}
}
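
/*
 * Hypothetical caller sketch: a platform module that raises the ISM page
 * size beyond 4M would typically follow with
 *
 *	mmu_ism_pagesize = MMU_PAGESIZE32M;
 *	adjust_data_maxlpsize(mmu_ism_pagesize);
 *
 * so that the data segment maximums above track the larger ISM page size
 * (unless an administrator has already overridden the 4M defaults).
 */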

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user.  We will pick an address
 * range which is just below the current stack limit.  The
 * algorithm used for cache consistency on machines with virtual
 * address caches is such that offset 0 in the vnode is always
 * on a shm_alignment-aligned address.  Unfortunately, this
 * means that vnodes which are demand paged will not be mapped
 * cache consistently with the executable images.  When the
 * cache alignment for a given object is inconsistent, the
 * lower level code must manage the translations so that this
 * is not seen here (at the cost of efficiency, of course).
 *
 * Every mapping will have a redzone of a single page on either side of
 * the request.  This is done to leave one page unmapped between segments.
 * This is not required, but it's useful for the user because if their
 * program strays across a segment boundary, it will catch a fault
 * immediately, making debugging a little easier.  Currently the redzone
 * is mandatory.
 *
 * addrp is a value/result parameter.
 *	On input it is a hint from the user to be used in a completely
 *	machine dependent fashion.  For MAP_ALIGN, addrp contains the
 *	minimal alignment, which must be some "power of two" multiple of
 *	pagesize.
 *
 *	On output it is NULL if no address can be found in the current
 *	process's address space or else an address that is currently
 *	not mapped for len bytes with a page of red zone on either side.
 *	If vacalign is true, then the selected address will obey the alignment
 *	constraints of a vac machine based on the given off value.
 */
/*ARGSUSED4*/
void
map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
    caddr_t userlimit, struct proc *p, uint_t flags)
{
	struct as *as = p->p_as;
	caddr_t addr;
	caddr_t base;
	size_t slen;
	uintptr_t align_amount;
	int allow_largepage_alignment = 1;

	base = p->p_brkbase;
	if (userlimit < as->a_userlimit) {
		/*
		 * This happens when a program wants to map something in
		 * a range that's accessible to a program in a smaller
		 * address space.  For example, a 64-bit program might
		 * be calling mmap32(2) to guarantee that the returned
		 * address is below 4Gbytes.
		 */
		ASSERT(userlimit > base);
		slen = userlimit - base;
	} else {
		slen = p->p_usrstack - base - (((size_t)rctl_enforced_value(
		    rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p) + PAGEOFFSET)
		    & PAGEMASK);
	}

	/* Make len be a multiple of PAGESIZE */
	len = (len + PAGEOFFSET) & PAGEMASK;

	/*
	 *  If the request is larger than the size of a particular
	 *  mmu level, then we use that level to map the request.
	 *  But this requires that both the virtual and the physical
	 *  addresses be aligned with respect to that level, so we
	 *  do the virtual bit of nastiness here.
	 *
	 *  For 32-bit processes, only those which have specified
	 *  MAP_ALIGN or an addr will be aligned on a page size > 4MB. Otherwise
	 *  we can potentially waste up to 256MB of the 4G process address
	 *  space just for alignment.
	 */
	if (p->p_model == DATAMODEL_ILP32 && ((flags & MAP_ALIGN) == 0 ||
	    ((uintptr_t)*addrp) != 0)) {
		allow_largepage_alignment = 0;
	}
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE256M)) {	/* 256MB mappings */
		align_amount = MMU_PAGESIZE256M;
	} else if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE32M)) {	/* 32MB mappings */
		align_amount = MMU_PAGESIZE32M;
	} else if (len >= MMU_PAGESIZE4M) {  /* 4MB mappings */
		align_amount = MMU_PAGESIZE4M;
	} else if (len >= MMU_PAGESIZE512K) { /* 512KB mappings */
		align_amount = MMU_PAGESIZE512K;
	} else if (len >= MMU_PAGESIZE64K) { /* 64KB mappings */
		align_amount = MMU_PAGESIZE64K;
	} else  {
		/*
		 * Align virtual addresses on a 64K boundary to ensure
		 * that ELF shared libraries are mapped with the appropriate
		 * alignment constraints by the run-time linker.
		 */
		align_amount = ELF_SPARC_MAXPGSZ;
		if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp != 0) &&
		    ((uintptr_t)*addrp < align_amount))
			align_amount = (uintptr_t)*addrp;
	}

	/*
	 * 64-bit processes require 1024K alignment of ELF shared libraries.
	 */
	if (p->p_model == DATAMODEL_LP64)
		align_amount = MAX(align_amount, ELF_SPARCV9_MAXPGSZ);
#ifdef VAC
	if (vac && vacalign && (align_amount < shm_alignment))
		align_amount = shm_alignment;
#endif

	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
		align_amount = (uintptr_t)*addrp;
	}

	ASSERT(ISP2(align_amount));
	ASSERT(align_amount == 0 || align_amount >= PAGESIZE);

	/*
	 * Look for a large enough hole starting below the stack limit.
	 * After finding it, use the upper part.
	 */
	as_purge(as);
	off = off & (align_amount - 1);
	if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
	    PAGESIZE, off) == 0) {
		caddr_t as_addr;

		/*
		 * addr is the highest possible address to use since we have
		 * a PAGESIZE redzone at the beginning and end.
		 */
		addr = base + slen - (PAGESIZE + len);
		as_addr = addr;
		/*
		 * Round address DOWN to the alignment amount and
		 * add the offset in.
		 * If addr is greater than as_addr, len would not be large
		 * enough to include the redzone, so we must adjust down
		 * by the alignment amount.
		 */
		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
		addr += (long)off;
		if (addr > as_addr) {
			addr -= align_amount;
		}

		ASSERT(addr > base);
		ASSERT(addr + len < base + slen);
		ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
		    ((uintptr_t)(off)));
		*addrp = addr;

#if defined(SF_ERRATA_57)
		if (AS_TYPE_64BIT(as) && addr < errata57_limit) {
			*addrp = NULL;
		}
#endif
	} else {
		*addrp = NULL;	/* no more virtual space */
	}
}
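
/*
 * Worked example of the placement arithmetic above (illustrative numbers
 * only): with an 8K PAGESIZE, suppose the hole found by as_gap_aligned()
 * is base = 0x10000000, slen = 0x100000, and the request has len = 0x6000,
 * align_amount = 0x10000 (64K), off = 0.  Then
 *
 *	addr = base + slen - (PAGESIZE + len)	= 0x100f8000
 *	addr &= ~(align_amount - 1)		= 0x100f0000
 *
 * addr does not exceed as_addr (0x100f8000), so no downward adjustment is
 * needed, and the mapping lands at [0x100f0000, 0x100f6000) with at least
 * one page of redzone on each side inside the hole.
 */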

/*
 * Platform-dependent page scrub call.
 */
void
pagescrub(page_t *pp, uint_t off, uint_t len)
{
	/*
	 * For now, we rely on the fact that pagezero() will
	 * always clear UEs.
	 */
	pagezero(pp, off, len);
}

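/*
 * Push recently written data back toward memory.  On sun4u a full
 * external cache flush suffices, so the va/len arguments go unused
 * (hence the ARGSUSED below).
 */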
/*ARGSUSED*/
void
sync_data_memory(caddr_t va, size_t len)
{
	cpu_flush_ecache();
}

/*
 * Platform-specific large pages for kernel heap support.
 */
void
mmu_init_kcontext()
{
	extern void set_kcontextreg();

	if (kcontextreg)
		set_kcontextreg();
}

void
contig_mem_init(void)
{
	/* not applicable to sun4u */
}

/*ARGSUSED*/
caddr_t
contig_mem_prealloc(caddr_t alloc_base, pgcnt_t npages)
{
	/* not applicable to sun4u */
	return (alloc_base);
}

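/*
 * Slew applied to the initial user stack pointer at exec(2) time (used
 * by some platforms to stagger stack placement); sun4u applies none.
 */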
size_t
exec_get_spslew(void)
{
	return (0);
}