xref: /onnv-gate/usr/src/uts/sun4/vm/vm_dep.c (revision 5668:7066e93e6b89)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/vm.h>
#include <sys/exec.h>

#include <sys/exechdr.h>
#include <vm/seg_kmem.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/kdi.h>
#include <sys/cpu_module.h>

#include <vm/hat_sfmmu.h>

#include <sys/memnode.h>

#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>
#include <vm/page.h>
#include <sys/platform_module.h>

/*
 * These variables are set by module-specific config routines.
 * They are only set by modules which will use physical cache page coloring.
 */
int do_pg_coloring = 0;

/*
 * These variables can be conveniently patched at kernel load time to
 * prevent do_pg_coloring from being enabled by
 * module-specific config routines.
 */

int use_page_coloring = 1;

/*
 * initialized by page_coloring_init()
 */
extern uint_t page_colors;
extern uint_t page_colors_mask;
extern uint_t page_coloring_shift;
int cpu_page_colors;
uint_t vac_colors = 0;
uint_t vac_colors_mask = 0;

/* cpu-specific coloring initialization */
extern void page_coloring_init_cpu();
#pragma weak page_coloring_init_cpu

/*
 * get the ecache setsize for the current cpu.
 */
#define	CPUSETSIZE()	(cpunodes[CPU->cpu_id].ecache_setsize)

plcnt_t		plcnt;		/* page list count */

/*
 * This variable is set by the cpu module to contain the lowest
 * address not affected by the SF_ERRATA_57 workaround.  It should
 * remain 0 if the workaround is not needed.
 */
#if defined(SF_ERRATA_57)
caddr_t errata57_limit;
#endif

extern void page_relocate_hash(page_t *, page_t *);

/*
 * These must be defined in platform-specific areas.
 */
extern void map_addr_proc(caddr_t *, size_t, offset_t, int, caddr_t,
	struct proc *, uint_t);
extern page_t *page_get_freelist(struct vnode *, u_offset_t, struct seg *,
	caddr_t, size_t, uint_t, struct lgrp *);
/*
 * Convert page frame number to an OBMEM page frame number
 * (i.e. put in the type bits -- zero for this implementation)
 */
pfn_t
impl_obmem_pfnum(pfn_t pf)
{
	return (pf);
}

/*
 * Use physmax to determine the highest physical page of DRAM memory.
 * It is assumed that any physical address above physmax is in IO space.
 * We don't bother checking the low end because we assume that memory space
 * begins at physical page frame 0.
 *
 * Return 1 if the page frame is onboard DRAM memory, else 0.
 * Returns 0 for nvram so it won't be cached.
 */
int
pf_is_memory(pfn_t pf)
{
	/* We must be IO space */
	if (pf > physmax)
		return (0);

	/* We must be memory space */
	return (1);
}

/*
 * Handle a pagefault.
 */
faultcode_t
pagefault(caddr_t addr, enum fault_type type, enum seg_rw rw, int iskernel)
{
	struct as *as;
	struct proc *p;
	faultcode_t res;
	caddr_t base;
	size_t len;
	int err;

	if (INVALID_VADDR(addr))
		return (FC_NOMAP);

	if (iskernel) {
		as = &kas;
	} else {
		p = curproc;
		as = p->p_as;
#if defined(SF_ERRATA_57)
		/*
		 * Prevent infinite loops due to a segment driver
		 * setting the execute permissions and the sfmmu hat
		 * silently ignoring them.
		 */
		if (rw == S_EXEC && AS_TYPE_64BIT(as) &&
		    addr < errata57_limit) {
			res = FC_NOMAP;
			goto out;
		}
#endif
	}

	/*
	 * Dispatch pagefault.
	 */
	res = as_fault(as->a_hat, as, addr, 1, type, rw);

	/*
	 * If this isn't a potential unmapped hole in the user's
	 * UNIX data or stack segments, just return status info.
	 */
	if (!(res == FC_NOMAP && iskernel == 0))
		goto out;

	/*
	 * Check to see if we happened to fault on a currently unmapped
	 * part of the UNIX data or stack segments.  If so, create a zfod
	 * mapping there and then try calling the fault routine again.
	 */
	base = p->p_brkbase;
	len = p->p_brksize;

	if (addr < base || addr >= base + len) {		/* data seg? */
		base = (caddr_t)(p->p_usrstack - p->p_stksize);
		len = p->p_stksize;
		if (addr < base || addr >= p->p_usrstack) {	/* stack seg? */
			/* not in either UNIX data or stack segments */
			res = FC_NOMAP;
			goto out;
		}
	}

	/* the rest of this function implements 3.x/4.x/5.x compatibility */
	/* This code is probably not needed anymore */

	/* expand the gap to the page boundaries on each side */
	len = (((uintptr_t)base + len + PAGEOFFSET) & PAGEMASK) -
	    ((uintptr_t)base & PAGEMASK);
	base = (caddr_t)((uintptr_t)base & PAGEMASK);
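
	/*
	 * For illustration (hypothetical values, not taken from any
	 * particular platform): with 8K pages (PAGESIZE 0x2000, PAGEMASK
	 * ~0x1fff), a gap of base = 0x10100, len = 0x2f00 ends at 0x13000.
	 * The arithmetic above rounds the end up to 0x14000 and the base
	 * down to 0x10000, so base/len become 0x10000/0x4000 -- the gap
	 * grown outward to whole page boundaries on both sides.
	 */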

	as_rangelock(as);
	as_purge(as);
	if (as_gap(as, PAGESIZE, &base, &len, AH_CONTAIN, addr) == 0) {
		err = as_map(as, base, len, segvn_create, zfod_argsp);
		as_rangeunlock(as);
		if (err) {
			res = FC_MAKE_ERR(err);
			goto out;
		}
	} else {
		/*
		 * This page is already mapped by another thread after we
		 * returned from as_fault() above.  We just fall through
		 * to as_fault() below.
		 */
		as_rangeunlock(as);
	}

	res = as_fault(as->a_hat, as, addr, 1, F_INVAL, rw);

out:

	return (res);
}

/*
 * This is the routine which defines the address limit implied
 * by the flag '_MAP_LOW32'.  USERLIMIT32 matches the highest
 * mappable address in a 32-bit process on this platform (though
 * perhaps we should make it be UINT32_MAX here?)
 */
void
map_addr(caddr_t *addrp, size_t len, offset_t off, int vacalign, uint_t flags)
{
	struct proc *p = curproc;
	caddr_t userlimit = flags & _MAP_LOW32 ?
	    (caddr_t)USERLIMIT32 : p->p_as->a_userlimit;
	map_addr_proc(addrp, len, off, vacalign, userlimit, p, flags);
}

/*
 * Some V9 CPUs have holes in the middle of the 64-bit virtual address range.
 */
caddr_t	hole_start, hole_end;

/*
 * kpm mapping window
 */
caddr_t kpm_vbase;
size_t  kpm_size;
uchar_t kpm_size_shift;

int valid_va_range_aligned_wraparound;
/*
 * Determine whether [*basep, *basep + *lenp) contains a mappable range of
 * addresses at least "minlen" long, where the base of the range is at "off"
 * phase from an "align" boundary and there is space for a "redzone"-sized
 * redzone on either side of the range.  On success, 1 is returned and *basep
 * and *lenp are adjusted to describe the acceptable range (including
 * the redzone).  On failure, 0 is returned.
 */
int
valid_va_range_aligned(caddr_t *basep, size_t *lenp, size_t minlen, int dir,
    size_t align, size_t redzone, size_t off)
{
	caddr_t hi, lo;
	size_t tot_len;

	ASSERT(align == 0 ? off == 0 : off < align);
	ASSERT(ISP2(align));
	ASSERT(align == 0 || align >= PAGESIZE);

	lo = *basep;
	hi = lo + *lenp;
	tot_len = minlen + 2 * redzone;	/* need at least this much space */

	/* If hi rolled over the top try cutting back. */
	if (hi < lo) {
		*lenp = 0UL - (uintptr_t)lo - 1UL;
		/* Trying to see if this really happens, and then if so, why */
		valid_va_range_aligned_wraparound++;
		hi = lo + *lenp;
	}
	if (*lenp < tot_len) {
		return (0);
	}

	/*
	 * Deal with a possible hole in the address range between
	 * hole_start and hole_end that should never be mapped by the MMU.
	 */

	if (lo < hole_start) {
		if (hi > hole_start)
			if (hi < hole_end)
				hi = hole_start;
			else
				/* lo < hole_start && hi >= hole_end */
				if (dir == AH_LO) {
					/*
					 * prefer lowest range
					 */
					if (hole_start - lo >= tot_len)
						hi = hole_start;
					else if (hi - hole_end >= tot_len)
						lo = hole_end;
					else
						return (0);
				} else {
					/*
					 * prefer highest range
					 */
					if (hi - hole_end >= tot_len)
						lo = hole_end;
					else if (hole_start - lo >= tot_len)
						hi = hole_start;
					else
						return (0);
				}
	} else {
		/* lo >= hole_start */
		if (hi < hole_end)
			return (0);
		if (lo < hole_end)
			lo = hole_end;
	}

	/* Check if remaining length is too small */
	if (hi - lo < tot_len) {
		return (0);
	}
	if (align > 1) {
		caddr_t tlo = lo + redzone;
		caddr_t thi = hi - redzone;
		tlo = (caddr_t)P2PHASEUP((uintptr_t)tlo, align, off);
		if (tlo < lo + redzone) {
			return (0);
		}
		if (thi < tlo || thi - tlo < minlen) {
			return (0);
		}
	}
	*basep = lo;
	*lenp = hi - lo;
	return (1);
}
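
/*
 * A worked example of the alignment logic above (hypothetical numbers):
 * with align = 0x10000 (64K), off = 0x2000, redzone = 0x2000 and
 * minlen = 0x10000, a candidate range starting at lo = 0x25000 gives
 * tlo = lo + redzone = 0x27000.  P2PHASEUP rounds that up to the next
 * address that is 0x2000 past a 64K boundary, i.e. 0x32000.  The caller
 * can then place its 64K mapping at 0x32000 with an 8K redzone below it
 * and, provided thi - tlo >= minlen, another redzone above it.
 */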

/*
 * Determine whether [*basep, *basep + *lenp) contains a mappable range of
 * addresses at least "minlen" long.  On success, 1 is returned and *basep
 * and *lenp are adjusted to describe the acceptable range.  On failure, 0
 * is returned.
 */
int
valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
{
	return (valid_va_range_aligned(basep, lenp, minlen, dir, 0, 0, 0));
}

/*
 * Determine whether [addr, addr+len] with protections `prot' is valid
 * for a user address space.
 */
/*ARGSUSED*/
int
valid_usr_range(caddr_t addr, size_t len, uint_t prot, struct as *as,
    caddr_t userlimit)
{
	caddr_t eaddr = addr + len;

	if (eaddr <= addr || addr >= userlimit || eaddr > userlimit)
		return (RANGE_BADADDR);

	/*
	 * Determine if the address range falls within an illegal
	 * range of the MMU.
	 */
	if (eaddr > hole_start && addr < hole_end)
		return (RANGE_BADADDR);

#if defined(SF_ERRATA_57)
	/*
	 * Make sure USERLIMIT isn't raised too high
	 */
	ASSERT64(addr <= (caddr_t)0xffffffff80000000ul ||
	    errata57_limit == 0);

	if (AS_TYPE_64BIT(as) &&
	    (addr < errata57_limit) &&
	    (prot & PROT_EXEC))
		return (RANGE_BADPROT);
#endif /* SF_ERRATA_57 */
	return (RANGE_OKAY);
}

/*
 * Routine used to check whether an a.out can be executed
 * by the current machine/architecture.
 */
int
chkaout(struct exdata *exp)
{
	if (exp->ux_mach == M_SPARC)
		return (0);
	else
		return (ENOEXEC);
}

/*
 * The following functions return information about an a.out
 * which is used when a program is executed.
 */

/*
 * Return the load memory address for the data segment.
 */
caddr_t
getdmem(struct exec *exp)
{
	/*
	 * XXX - Sparc Reference Hack approaching
	 * Remember that we are loading
	 * 8k executables into a 4k machine
	 * DATA_ALIGN == 2 * PAGESIZE
	 */
	if (exp->a_text)
		return ((caddr_t)(roundup(USRTEXT + exp->a_text, DATA_ALIGN)));
	else
		return ((caddr_t)USRTEXT);
}

/*
 * Return the starting disk address for the data segment.
 */
ulong_t
getdfile(struct exec *exp)
{
	if (exp->a_magic == ZMAGIC)
		return (exp->a_text);
	else
		return (sizeof (struct exec) + exp->a_text);
}

/*
 * Return the load memory address for the text segment.
 */

/*ARGSUSED*/
caddr_t
gettmem(struct exec *exp)
{
	return ((caddr_t)USRTEXT);
}

/*
 * Return the file byte offset for the text segment.
 */
uint_t
gettfile(struct exec *exp)
{
	if (exp->a_magic == ZMAGIC)
		return (0);
	else
		return (sizeof (struct exec));
}

void
getexinfo(
	struct exdata *edp_in,
	struct exdata *edp_out,
	int *pagetext,
	int *pagedata)
{
	*edp_out = *edp_in;	/* structure copy */

	if ((edp_in->ux_mag == ZMAGIC) &&
	    ((edp_in->vp->v_flag & VNOMAP) == 0)) {
		*pagetext = 1;
		*pagedata = 1;
	} else {
		*pagetext = 0;
		*pagedata = 0;
	}
}

/*
 * Return a non-zero value if the address may cause a VAC alias with KPM
 * mappings.  KPM selects an address whose offset modulo shm_alignment
 * matches the file offset, and assumes it can't be in VAC conflict with
 * any larger-than-PAGESIZE mapping.
 */
int
map_addr_vacalign_check(caddr_t addr, u_offset_t off)
{
	if (vac) {
		return (((uintptr_t)addr ^ off) & shm_alignment - 1);
	} else {
		return (0);
	}
}
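
/*
 * Example (hypothetical values): with vac set and shm_alignment = 0x10000,
 * addr = 0x30000 and off = 0x8000 give (addr ^ off) & 0xffff = 0x8000, a
 * non-zero result: the mapping would land on a different virtual color
 * than KPM's and could alias in the cache.  addr = 0x38000 with the same
 * off yields 0, so the colors agree and no alias is possible.
 */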

/*
 * Sanity control.  Don't use large pages regardless of user settings if
 * there's less than privm_lpg_min_physmem or shm_lpg_min_physmem memory
 * installed.  These variables are in units of 8K pages.
 */
pgcnt_t shm_lpg_min_physmem = 131072;			/* 1GB */
pgcnt_t privm_lpg_min_physmem = 131072;			/* 1GB */

static size_t
map_pgszheap(struct proc *p, caddr_t addr, size_t len)
{
	size_t		pgsz = MMU_PAGESIZE;
	int		szc;

	/*
	 * If len is zero, retrieve from proc and don't demote the page size.
	 * Use at least the default pagesize.
	 */
	if (len == 0) {
		len = p->p_brkbase + p->p_brksize - p->p_bssbase;
	}
	len = MAX(len, default_uheap_lpsize);

	for (szc = mmu_page_sizes - 1; szc >= 0; szc--) {
		pgsz = hw_page_array[szc].hp_size;
		if ((disable_auto_data_large_pages & (1 << szc)) ||
		    pgsz > max_uheap_lpsize)
			continue;
		if (len >= pgsz) {
			break;
		}
	}

	/*
	 * If addr == 0 we were called by memcntl() when the
	 * size code is 0.  Don't set pgsz less than current size.
	 */
	if (addr == 0 && (pgsz < hw_page_array[p->p_brkpageszc].hp_size)) {
		pgsz = hw_page_array[p->p_brkpageszc].hp_size;
	}

	return (pgsz);
}

static size_t
map_pgszstk(struct proc *p, caddr_t addr, size_t len)
{
	size_t		pgsz = MMU_PAGESIZE;
	int		szc;

	/*
	 * If len is zero, retrieve from proc and don't demote the page size.
	 * Use at least the default pagesize.
	 */
	if (len == 0) {
		len = p->p_stksize;
	}
	len = MAX(len, default_ustack_lpsize);

	for (szc = mmu_page_sizes - 1; szc >= 0; szc--) {
		pgsz = hw_page_array[szc].hp_size;
		if ((disable_auto_data_large_pages & (1 << szc)) ||
		    pgsz > max_ustack_lpsize)
			continue;
		if (len >= pgsz) {
			break;
		}
	}

	/*
	 * If addr == 0 we were called by memcntl() or exec_args() when the
	 * size code is 0.  Don't set pgsz less than current size.
	 */
	if (addr == 0 && (pgsz < hw_page_array[p->p_stkpageszc].hp_size)) {
		pgsz = hw_page_array[p->p_stkpageszc].hp_size;
	}

	return (pgsz);
}

static size_t
map_pgszism(caddr_t addr, size_t len)
{
	uint_t szc;
	size_t pgsz;

	for (szc = mmu_page_sizes - 1; szc >= TTE4M; szc--) {
		if (disable_ism_large_pages & (1 << szc))
			continue;

		pgsz = hw_page_array[szc].hp_size;
		if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
			return (pgsz);
	}

	return (DEFAULT_ISM_PAGESIZE);
}
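
/*
 * For example (hypothetical request): an ISM segment of len = 8M whose
 * address is 4M-aligned gets a 4M page size when TTE4M is the largest
 * enabled size, since 8M >= 4M and the address is aligned.  A 2M request
 * fails the len check for 4M pages and falls back to
 * DEFAULT_ISM_PAGESIZE.
 */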

/*
 * Suggest a page size to be used to map a segment of type maptype and length
 * len.  Returns a page size (not a size code).
 */
/* ARGSUSED */
size_t
map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len, int memcntl)
{
	size_t	pgsz = MMU_PAGESIZE;

	ASSERT(maptype != MAPPGSZ_VA);

	if (maptype != MAPPGSZ_ISM && physmem < privm_lpg_min_physmem) {
		return (MMU_PAGESIZE);
	}

	switch (maptype) {
	case MAPPGSZ_ISM:
		pgsz = map_pgszism(addr, len);
		break;

	case MAPPGSZ_STK:
		if (max_ustack_lpsize > MMU_PAGESIZE) {
			pgsz = map_pgszstk(p, addr, len);
		}
		break;

	case MAPPGSZ_HEAP:
		if (max_uheap_lpsize > MMU_PAGESIZE) {
			pgsz = map_pgszheap(p, addr, len);
		}
		break;
	}
	return (pgsz);
}


/* assumes TTE8K...TTE4M == szc */

static uint_t
map_szcvec(caddr_t addr, size_t size, uintptr_t off, int disable_lpgs,
    size_t max_lpsize, size_t min_physmem)
{
	caddr_t eaddr = addr + size;
	uint_t szcvec = 0;
	caddr_t raddr;
	caddr_t readdr;
	size_t pgsz;
	int i;

	if (physmem < min_physmem || max_lpsize <= MMU_PAGESIZE) {
		return (0);
	}
	for (i = mmu_page_sizes - 1; i > 0; i--) {
		if (disable_lpgs & (1 << i)) {
			continue;
		}
		pgsz = page_get_pagesize(i);
		if (pgsz > max_lpsize) {
			continue;
		}
		raddr = (caddr_t)P2ROUNDUP((uintptr_t)addr, pgsz);
		readdr = (caddr_t)P2ALIGN((uintptr_t)eaddr, pgsz);
		if (raddr < addr || raddr >= readdr) {
			continue;
		}
		if (P2PHASE((uintptr_t)addr ^ off, pgsz)) {
			continue;
		}
		szcvec |= (1 << i);
		/*
		 * OR in the remaining enabled page sizes.
		 */
		szcvec |= P2PHASE(~disable_lpgs, (1 << i));
		szcvec &= ~1; /* no need to return 8K pagesize */
		break;
	}
	return (szcvec);
}
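
/*
 * Example of the resulting bit vector (hypothetical values): with four
 * page sizes (8K, 64K, 512K, 4M), disable_lpgs = 0, and a 6M region whose
 * start is 512K-aligned but not 4M-aligned, the 4M probe fails the raddr
 * check and the 512K probe succeeds: szcvec gets bit 2, ORs in all
 * enabled smaller sizes, and masks off 8K, ending up as 0x6 -- size codes
 * 1 (64K) and 2 (512K) are offered to the caller.
 */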

/*
 * Return a bit vector of large page size codes that
 * can be used to map the [addr, addr + len) region.
 */
/* ARGSUSED */
uint_t
map_pgszcvec(caddr_t addr, size_t size, uintptr_t off, int flags, int type,
    int memcntl)
{
	if (flags & MAP_TEXT) {
		return (map_szcvec(addr, size, off,
		    disable_auto_text_large_pages,
		    max_utext_lpsize, shm_lpg_min_physmem));

	} else if (flags & MAP_INITDATA) {
		return (map_szcvec(addr, size, off,
		    disable_auto_data_large_pages,
		    max_uidata_lpsize, privm_lpg_min_physmem));

	} else if (type == MAPPGSZC_SHM) {
		return (map_szcvec(addr, size, off,
		    disable_auto_data_large_pages,
		    max_shm_lpsize, shm_lpg_min_physmem));

	} else if (type == MAPPGSZC_HEAP) {
		return (map_szcvec(addr, size, off,
		    disable_auto_data_large_pages,
		    max_uheap_lpsize, privm_lpg_min_physmem));

	} else if (type == MAPPGSZC_STACK) {
		return (map_szcvec(addr, size, off,
		    disable_auto_data_large_pages,
		    max_ustack_lpsize, privm_lpg_min_physmem));

	} else {
		return (map_szcvec(addr, size, off,
		    disable_auto_data_large_pages,
		    max_privmap_lpsize, privm_lpg_min_physmem));
	}
}

/*
 * Anchored in the table below are counters used to keep track
 * of free contiguous physical memory.  Each element of the table contains
 * the array of counters, the size of the array (which is allocated during
 * startup based on physmax) and a shift value used to convert a pagenum
 * into a counter array index or vice versa.  The table has page size
 * for rows and region size for columns:
 *
 *	page_counters[page_size][region_size]
 *
 *	page_size: 	TTE size code of pages on page_size freelist.
 *
 *	region_size:	TTE size code of a candidate larger page made up
 *			of contiguous free page_size pages.
 *
 * As you go across a page_size row increasing region_size, each
 * element keeps track of how many (region_size - 1) size groups
 * made up of page_size free pages can be coalesced into a
 * region_size page.  Yuck!  Let's try an example:
 *
 * 	page_counters[1][3] is the table element used for identifying
 *	candidate 4M pages from contiguous pages off the 64K free list.
 *	Each index in the page_counters[1][3].array spans 4M.  It's the
 *	number of free 512K size (region_size - 1) groups of contiguous
 *	64K free pages.  So when page_counters[1][3].counters[n] == 8
 *	we know we have a candidate 4M page made up of 512K size groups
 *	of 64K free pages.
 */

/*
 * Per page size free lists.  3rd (max_mem_nodes) and 4th (page coloring bins)
 * dimensions are allocated dynamically.
 */
page_t ***page_freelists[MMU_PAGE_SIZES][MAX_MEM_TYPES];

/*
 * For now there is only a single size cache list.
 * Allocated dynamically.
 */
page_t ***page_cachelists[MAX_MEM_TYPES];

kmutex_t *fpc_mutex[NPC_MUTEX];
kmutex_t *cpc_mutex[NPC_MUTEX];

/*
 * Calculate space needed for page freelists and counters
 */
size_t
calc_free_pagelist_sz(void)
{
	int szc;
	size_t alloc_sz, cache_sz, free_sz;

	/*
	 * one cachelist per color, node, and type
	 */
	cache_sz = (page_get_pagecolors(0) * sizeof (page_t *)) +
	    sizeof (page_t **);
	cache_sz *= max_mem_nodes * MAX_MEM_TYPES;

	/*
	 * one freelist per size, color, node, and type
	 */
	free_sz = sizeof (page_t **);
	for (szc = 0; szc < mmu_page_sizes; szc++)
		free_sz += sizeof (page_t *) * page_get_pagecolors(szc);
	free_sz *= max_mem_nodes * MAX_MEM_TYPES;

	alloc_sz = cache_sz + free_sz + page_ctrs_sz();
	return (alloc_sz);
}

caddr_t
alloc_page_freelists(caddr_t alloc_base)
{
	int	mnode, mtype;
	int	szc, clrs;

	/*
	 * We only support small pages in the cachelist.
	 */
	for (mtype = 0; mtype < MAX_MEM_TYPES; mtype++) {
		page_cachelists[mtype] = (page_t ***)alloc_base;
		alloc_base += (max_mem_nodes * sizeof (page_t **));
		for (mnode = 0; mnode < max_mem_nodes; mnode++) {
			page_cachelists[mtype][mnode] = (page_t **)alloc_base;
			alloc_base +=
			    (page_get_pagecolors(0) * sizeof (page_t *));
		}
	}

	/*
	 * Allocate freelist bins for all
	 * supported page sizes.
	 */
	for (szc = 0; szc < mmu_page_sizes; szc++) {
		clrs = page_get_pagecolors(szc);
		for (mtype = 0; mtype < MAX_MEM_TYPES; mtype++) {
			page_freelists[szc][mtype] = (page_t ***)alloc_base;
			alloc_base += (max_mem_nodes * sizeof (page_t **));
			for (mnode = 0; mnode < max_mem_nodes; mnode++) {
				page_freelists[szc][mtype][mnode] =
				    (page_t **)alloc_base;
				alloc_base += (clrs * (sizeof (page_t *)));
			}
		}
	}

	alloc_base = page_ctrs_alloc(alloc_base);
	return (alloc_base);
}

/*
 * Allocate page_freelists locks for a memnode from the nucleus data
 * area.  This is the first time that mmu_page_sizes is used during
 * bootup, so check mmu_page_sizes initialization.
 */
int
ndata_alloc_page_mutexs(struct memlist *ndata)
{
	size_t alloc_sz;
	caddr_t alloc_base;
	int	i;
	void	page_coloring_init();

	page_coloring_init();
	if (&mmu_init_mmu_page_sizes) {
		if (!mmu_init_mmu_page_sizes(0)) {
			cmn_err(CE_PANIC, "mmu_page_sizes %d not initialized",
			    mmu_page_sizes);
		}
	}
	ASSERT(mmu_page_sizes >= DEFAULT_MMU_PAGE_SIZES);

	/* fpc_mutex and cpc_mutex */
	alloc_sz = 2 * NPC_MUTEX * max_mem_nodes * sizeof (kmutex_t);

	alloc_base = ndata_alloc(ndata, alloc_sz, ecache_alignsize);
	if (alloc_base == NULL)
		return (-1);

	ASSERT(((uintptr_t)alloc_base & (ecache_alignsize - 1)) == 0);

	for (i = 0; i < NPC_MUTEX; i++) {
		fpc_mutex[i] = (kmutex_t *)alloc_base;
		alloc_base += (sizeof (kmutex_t) * max_mem_nodes);
		cpc_mutex[i] = (kmutex_t *)alloc_base;
		alloc_base += (sizeof (kmutex_t) * max_mem_nodes);
	}
	return (0);
}

/*
 * To select our starting bin, we stride through the bins with a stride
 * of 337.  Why 337?  It's prime, it's largeish, and it performs well both
 * in simulation and practice for different workloads on varying cache sizes.
 */
uint32_t color_start_current = 0;
uint32_t color_start_stride = 337;
int color_start_random = 0;

/* ARGSUSED */
uint_t
get_color_start(struct as *as)
{
	uint32_t old, new;

	if (consistent_coloring == 2 || color_start_random) {
		return ((uint_t)(((gettick()) << (vac_shift - MMU_PAGESHIFT)) &
		    (hw_page_array[0].hp_colors - 1)));
	}

	do {
		old = color_start_current;
		new = old + (color_start_stride << (vac_shift - MMU_PAGESHIFT));
	} while (cas32(&color_start_current, old, new) != old);

	return ((uint_t)(new));
}
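
/*
 * Illustrative numbers (not from any particular platform): with 8K base
 * pages and a 16K VAC, vac_shift - MMU_PAGESHIFT == 1, so each call
 * advances the shared counter by 337 << 1 == 674.  Successive address
 * spaces thus get starting values 0, 674, 1348, ... which callers reduce
 * modulo the number of colors, scattering start bins across the cache.
 */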

/*
 * Called once at startup from kphysm_init() -- before memialloc()
 * is invoked to do the 1st page_free()/page_freelist_add().
 *
 * initializes page_colors and page_colors_mask based on ecache_setsize.
 *
 * Also initializes the counter locks.
 */
void
page_coloring_init()
{
	int	a, i;
	uint_t colors;

	if (do_pg_coloring == 0) {
		page_colors = 1;
		for (i = 0; i < mmu_page_sizes; i++) {
			colorequivszc[i] = 0;
			hw_page_array[i].hp_colors = 1;
		}
		return;
	}

	/*
	 * Calculate page_colors from ecache_setsize.  ecache_setsize contains
	 * the max ecache setsize of all cpus configured in the system or, for
	 * cheetah+ systems, the max possible ecache setsize for all possible
	 * cheetah+ cpus.
	 */
	page_colors = ecache_setsize / MMU_PAGESIZE;
	page_colors_mask = page_colors - 1;

	vac_colors = vac_size / MMU_PAGESIZE;
	vac_colors_mask = vac_colors - 1;

	page_coloring_shift = 0;
	a = ecache_setsize;
	while (a >>= 1) {
		page_coloring_shift++;
	}

	/* initialize number of colors per page size */
	for (i = 0; i < mmu_page_sizes; i++) {
		hw_page_array[i].hp_colors = (page_colors_mask >>
		    (hw_page_array[i].hp_shift - hw_page_array[0].hp_shift))
		    + 1;
		colorequivszc[i] = 0;
	}

	/*
	 * initialize cpu_page_colors if ecache setsizes are homogeneous.
	 * cpu_page_colors set to -1 during DR operation or during startup
	 * if setsizes are heterogeneous.
	 *
	 * The value of cpu_page_colors determines if additional color bins
	 * need to be checked for a particular color in the page_get routines.
	 */
	if (cpu_setsize > 0 && cpu_page_colors == 0 &&
	    cpu_setsize < ecache_setsize) {
		cpu_page_colors = cpu_setsize / MMU_PAGESIZE;
		a = lowbit(page_colors) - lowbit(cpu_page_colors);
		ASSERT(a > 0);
		ASSERT(a < 16);

		for (i = 0; i < mmu_page_sizes; i++) {
			if ((colors = hw_page_array[i].hp_colors) <= 1) {
				continue;
			}
			while ((colors >> a) == 0)
				a--;
			ASSERT(a >= 0);

			/* upper 4 bits encode the color equiv mask */
			colorequivszc[i] = (a << 4);
		}
	}

	/* do cpu-specific color initialization */
	if (&page_coloring_init_cpu) {
		page_coloring_init_cpu();
	}
}
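
/*
 * Example (hypothetical hardware): an ecache_setsize of 1M with 8K pages
 * yields page_colors = 128 and page_coloring_shift = 20 (log2 of 1M).
 * For a 64K page size (hp_shift 16, base hp_shift 13), hp_colors becomes
 * (127 >> (16 - 13)) + 1 == 16: each 64K page consumes eight consecutive
 * 8K colors, so only 16 distinct 64K colors exist.
 */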

int
bp_color(struct buf *bp)
{
	int color = -1;

	if (vac) {
		if ((bp->b_flags & B_PAGEIO) != 0) {
			color = sfmmu_get_ppvcolor(bp->b_pages);
		} else if (bp->b_un.b_addr != NULL) {
			color = sfmmu_get_addrvcolor(bp->b_un.b_addr);
		}
	}
	return (color < 0 ? 0 : ptob(color));
}

/*
 * Create and initialize the pageout scanner thread.  The thread runs
 * `procedure' in process pp at priority pri.
 */
void
pageout_init(void (*procedure)(), proc_t *pp, pri_t pri)
{
	(void) thread_create(NULL, 0, procedure, NULL, 0, pp, TS_RUN, pri);
}

/*
 * Function for flushing D-cache when performing module relocations
 * to an alternate mapping.  Stubbed out on all platforms except sun4u,
 * at least for now.
 */
void
dcache_flushall()
{
	sfmmu_cache_flushall();
}

static int
kdi_range_overlap(uintptr_t va1, size_t sz1, uintptr_t va2, size_t sz2)
{
	if (va1 < va2 && va1 + sz1 <= va2)
		return (0);

	if (va2 < va1 && va2 + sz2 <= va1)
		return (0);

	return (1);
}

/*
 * Return the number of bytes, relative to the beginning of a given range, that
 * are non-toxic (can be read from and written to with relative impunity).
 */
size_t
kdi_range_is_nontoxic(uintptr_t va, size_t sz, int write)
{
	/* OBP reads are harmless, but we don't want people writing there */
	if (write && kdi_range_overlap(va, sz, OFW_START_ADDR, OFW_END_ADDR -
	    OFW_START_ADDR + 1))
		return (va < OFW_START_ADDR ? OFW_START_ADDR - va : 0);

	if (kdi_range_overlap(va, sz, PIOMAPBASE, PIOMAPSIZE))
		return (va < PIOMAPBASE ? PIOMAPBASE - va : 0);

	return (sz); /* no overlap */
}
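
/*
 * Example (hypothetical addresses): if OBP occupies
 * [0xf0000000, 0xf00fffff] and the debugger asks about a 0x2000-byte
 * write starting at 0xeffff000, the ranges overlap and the function
 * returns 0xf0000000 - 0xeffff000 == 0x1000: only the first 4K of the
 * request may be written safely.
 */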

/*
 * Minimum physmem required for enabling large pages for kernel heap.
 * Currently we do not enable lp for kmem on systems with less
 * than 1GB of memory.  This value can be changed via /etc/system.
 */
size_t segkmem_lpminphysmem = 0x40000000;	/* 1GB */

/*
 * This function chooses the large page size for the kernel heap.
 */
size_t
get_segkmem_lpsize(size_t lpsize)
{
	size_t memtotal = physmem * PAGESIZE;
	size_t mmusz;
	uint_t szc;

	if (memtotal < segkmem_lpminphysmem)
		return (PAGESIZE);

	if (plat_lpkmem_is_supported != NULL &&
	    plat_lpkmem_is_supported() == 0)
		return (PAGESIZE);

	mmusz = mmu_get_kernel_lpsize(lpsize);
	szc = page_szc(mmusz);

	while (szc) {
		if (!(disable_large_pages & (1 << szc)))
			return (page_get_pagesize(szc));
		szc--;
	}
	return (PAGESIZE);
}
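
/*
 * Example (hypothetical tuning): if the MMU reports 4M as the best kernel
 * large page size but disable_large_pages has the 4M bit set, the loop
 * above steps down through the smaller size codes until it finds an
 * enabled size, and returns PAGESIZE (8K) only if every large size is
 * disabled.
 */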