/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/*	All Rights Reserved */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/vm.h>
#include <sys/exec.h>
#include <sys/cmn_err.h>
#include <sys/cpu_module.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>
#include <sys/error.h>
#include <sys/machsystm.h>
#include <vm/seg_kmem.h>
#include <sys/stack.h>
#include <sys/atomic.h>

uint_t page_colors = 0;
uint_t page_colors_mask = 0;
uint_t page_coloring_shift = 0;
int consistent_coloring;

uint_t mmu_page_sizes = MMU_PAGE_SIZES;
uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
uint_t mmu_hashcnt = MAX_HASHCNT;
uint_t max_mmu_hashcnt = MAX_HASHCNT;
size_t mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;

/*
 * A bitmask of the page sizes supported by hardware based upon szc.
 * The base pagesize (p_szc == 0) must always be supported by the hardware.
 */
int mmu_exported_pagesize_mask;
uint_t mmu_exported_page_sizes;

uint_t szc_2_userszc[MMU_PAGE_SIZES];
uint_t userszc_2_szc[MMU_PAGE_SIZES];

extern uint_t vac_colors_mask;
extern int vac_shift;

hw_pagesize_t hw_page_array[] = {
	{MMU_PAGESIZE, MMU_PAGESHIFT, 0, MMU_PAGESIZE >> MMU_PAGESHIFT},
	{MMU_PAGESIZE64K, MMU_PAGESHIFT64K, 0,
	    MMU_PAGESIZE64K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE512K, MMU_PAGESHIFT512K, 0,
	    MMU_PAGESIZE512K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE4M, MMU_PAGESHIFT4M, 0, MMU_PAGESIZE4M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE32M, MMU_PAGESHIFT32M, 0,
	    MMU_PAGESIZE32M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE256M, MMU_PAGESHIFT256M, 0,
	    MMU_PAGESIZE256M >> MMU_PAGESHIFT},
	{0, 0, 0, 0}
};

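/*
 * For reference, a worked example of the page-count expressions above:
 * with the sun4v 8K base page (MMU_PAGESHIFT == 13), the entries
 * evaluate to 1, 8, 64, 512, 4096 and 32768 base pages for the 8K,
 * 64K, 512K, 4M, 32M and 256M sizes respectively, e.g.
 * MMU_PAGESIZE4M >> MMU_PAGESHIFT == 0x400000 >> 13 == 512.
 */
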
/*
 * Maximum page size used to map 64-bit memory segment kmem64_base..kmem64_end
 */
int	max_bootlp_tteszc = TTE256M;

/*
 * Maximum and default segment size tunables for user heap, stack, private
 * and shared anonymous memory, and user text and initialized data.
 */
size_t max_uheap_lpsize = MMU_PAGESIZE64K;
size_t default_uheap_lpsize = MMU_PAGESIZE64K;
size_t max_ustack_lpsize = MMU_PAGESIZE64K;
size_t default_ustack_lpsize = MMU_PAGESIZE64K;
size_t max_privmap_lpsize = MMU_PAGESIZE64K;
size_t max_uidata_lpsize = MMU_PAGESIZE64K;
size_t max_utext_lpsize = MMU_PAGESIZE4M;
size_t max_shm_lpsize = MMU_PAGESIZE4M;

/*
 * Contiguous memory allocator data structures and variables.
 *
 * The sun4v kernel must provide a means to allocate physically
 * contiguous, non-relocatable memory. The contig_mem_arena
 * and contig_mem_slab_arena exist for this purpose. Allocations
 * that require physically contiguous non-relocatable memory should
 * be made using contig_mem_alloc() or contig_mem_alloc_align(),
 * which return memory from contig_mem_arena or contig_mem_reloc_arena.
 * These arenas import memory from the contig_mem_slab_arena one
 * contiguous chunk at a time.
 *
 * When importing slabs, an attempt is made to allocate a large page
 * to use as backing. As a result of the non-relocatable requirement,
 * slabs are allocated from the kernel cage freelists. If the cage does
 * not contain any free contiguous chunks large enough to satisfy the
 * slab allocation, the slab size is downsized and the operation
 * retried. Large slab sizes are tried first to minimize cage
 * fragmentation. If the slab allocation still fails, the slab is
 * allocated from outside the kernel cage. This is undesirable because,
 * until slabs are freed, it results in non-relocatable chunks scattered
 * throughout physical memory.
 *
 * Allocations from the contig_mem_arena are backed by slabs from the
 * cage. Allocations from the contig_mem_reloc_arena are backed by
 * slabs allocated outside the cage. Slabs are left share locked while
 * in use to prevent non-cage slabs from being relocated.
 *
 * Since there is no guarantee that large pages will be available in
 * the kernel cage, contiguous memory is reserved and added to the
 * contig_mem_arena at boot time, making it available for later
 * contiguous memory allocations. This reserve is used to satisfy
 * contig_mem allocations first; only when the reserve is completely
 * allocated do new slabs need to be imported.
 */
static	vmem_t		*contig_mem_slab_arena;
static	vmem_t		*contig_mem_arena;
static	vmem_t		*contig_mem_reloc_arena;
static	kmutex_t	contig_mem_lock;
#define	CONTIG_MEM_ARENA_QUANTUM	64
#define	CONTIG_MEM_SLAB_ARENA_QUANTUM	MMU_PAGESIZE64K

/* contig_mem_arena import slab sizes, in decreasing size order */
static size_t contig_mem_import_sizes[] = {
	MMU_PAGESIZE4M,
	MMU_PAGESIZE512K,
	MMU_PAGESIZE64K
};
#define	NUM_IMPORT_SIZES	\
	(sizeof (contig_mem_import_sizes) / sizeof (size_t))
static size_t contig_mem_import_size_max	= MMU_PAGESIZE4M;
size_t contig_mem_slab_size			= MMU_PAGESIZE4M;

/* Boot-time allocated buffer to pre-populate the contig_mem_arena */
static size_t prealloc_size;
static void *prealloc_buf;

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user.  We will pick an address
 * range which is just below the current stack limit.  The
 * algorithm used for cache consistency on machines with virtual
 * address caches is such that offset 0 in the vnode is always
 * on a shm_alignment-aligned address.  Unfortunately, this
 * means that vnodes which are demand paged will not be mapped
 * cache consistently with the executable images.  When the
 * cache alignment for a given object is inconsistent, the
 * lower level code must manage the translations so that this
 * is not seen here (at the cost of efficiency, of course).
 *
 * addrp is a value/result parameter.
 *	On input it is a hint from the user to be used in a completely
 *	machine dependent fashion.  For MAP_ALIGN, addrp contains the
 *	minimal alignment.
 *
 *	On output it is NULL if no address can be found in the current
 *	process's address space or else an address that is currently
 *	not mapped for len bytes with a page of red zone on either side.
 *	If vacalign is true, then the selected address will obey the alignment
 *	constraints of a vac machine based on the given off value.
 */
/*ARGSUSED3*/
void
map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
    caddr_t userlimit, struct proc *p, uint_t flags)
{
	struct as *as = p->p_as;
	caddr_t addr;
	caddr_t base;
	size_t slen;
	uintptr_t align_amount;
	int allow_largepage_alignment = 1;

	base = p->p_brkbase;
	if (userlimit < as->a_userlimit) {
		/*
		 * This happens when a program wants to map something in
		 * a range that's accessible to a program in a smaller
		 * address space.  For example, a 64-bit program might
		 * be calling mmap32(2) to guarantee that the returned
		 * address is below 4Gbytes.
		 */
		ASSERT(userlimit > base);
		slen = userlimit - base;
	} else {
		slen = p->p_usrstack - base - (((size_t)rctl_enforced_value(
		    rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p) + PAGEOFFSET)
		    & PAGEMASK);
	}
	len = (len + PAGEOFFSET) & PAGEMASK;

	/*
	 * Redzone for each side of the request. This is done to leave
	 * one page unmapped between segments. This is not required, but
	 * it's useful for the user because if their program strays across
	 * a segment boundary, it will catch a fault immediately making
	 * debugging a little easier.
	 */
	len += (2 * PAGESIZE);

	/*
	 *  If the request is larger than the size of a particular
	 *  mmu level, then we use that level to map the request.
	 *  But this requires that both the virtual and the physical
	 *  addresses be aligned with respect to that level, so we
	 *  do the virtual bit of nastiness here.
	 *
	 *  For 32-bit processes, only those which have specified
	 *  MAP_ALIGN or an addr will be aligned on a page size > 4MB. Otherwise
	 *  we can potentially waste up to 256MB of the 4G process address
	 *  space just for alignment.
	 *
	 * XXXQ Should iterate through hw_page_array here to catch
	 * all supported pagesizes
	 */
	if (p->p_model == DATAMODEL_ILP32 && ((flags & MAP_ALIGN) == 0 ||
	    ((uintptr_t)*addrp) != 0)) {
		allow_largepage_alignment = 0;
	}
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE256M)) {	/* 256MB mappings */
		align_amount = MMU_PAGESIZE256M;
	} else if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE32M)) {		/* 32MB mappings */
		align_amount = MMU_PAGESIZE32M;
	} else if (len >= MMU_PAGESIZE4M) {  /* 4MB mappings */
		align_amount = MMU_PAGESIZE4M;
	} else if (len >= MMU_PAGESIZE512K) { /* 512KB mappings */
		align_amount = MMU_PAGESIZE512K;
	} else if (len >= MMU_PAGESIZE64K) { /* 64KB mappings */
		align_amount = MMU_PAGESIZE64K;
	} else {
		/*
		 * Align virtual addresses on a 64K boundary to ensure
		 * that ELF shared libraries are mapped with the appropriate
		 * alignment constraints by the run-time linker.
		 */
		align_amount = ELF_SPARC_MAXPGSZ;
		if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp != 0) &&
		    ((uintptr_t)*addrp < align_amount))
			align_amount = (uintptr_t)*addrp;
	}

	/*
	 * 64-bit processes require 1024K alignment of ELF shared libraries.
	 */
	if (p->p_model == DATAMODEL_LP64)
		align_amount = MAX(align_amount, ELF_SPARCV9_MAXPGSZ);
#ifdef VAC
	if (vac && vacalign && (align_amount < shm_alignment))
		align_amount = shm_alignment;
#endif

	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
		align_amount = (uintptr_t)*addrp;
	}
	len += align_amount;

	/*
	 * Look for a large enough hole starting below the stack limit.
	 * After finding it, use the upper part.  Addition of PAGESIZE is
	 * for the redzone as described above.
	 */
	as_purge(as);
	if (as_gap(as, len, &base, &slen, AH_HI, NULL) == 0) {
		caddr_t as_addr;

		addr = base + slen - len + PAGESIZE;
		as_addr = addr;
		/*
		 * Round address DOWN to the alignment amount,
		 * add the offset, and if this address is less
		 * than the original address, add alignment amount.
		 */
		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
		addr += (long)(off & (align_amount - 1l));
		if (addr < as_addr) {
			addr += align_amount;
		}

		ASSERT(addr <= (as_addr + align_amount));
		ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
		    ((uintptr_t)(off & (align_amount - 1l))));
		*addrp = addr;

	} else {
		*addrp = NULL;	/* no more virtual space */
	}
}

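/*
 * Worked example of the round-down logic above, with illustrative
 * values: given align_amount == 0x10000 (64K), off == 0x3000 and a
 * candidate addr of 0x7fff12345000, rounding down gives
 * 0x7fff12340000 and adding the offset bits gives 0x7fff12343000.
 * That result is below the candidate address, so one align_amount is
 * added back, yielding 0x7fff12353000. The result still fits in the
 * hole because len was previously grown by align_amount.
 */
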
/*
 * Platform-dependent page scrub call.
 * We call hypervisor to scrub the page.
 */
void
pagescrub(page_t *pp, uint_t off, uint_t len)
{
	uint64_t pa, length;

	/*
	 * Shift the pfn to a physical address before adding the byte
	 * offset; without the parentheses the offset would be added
	 * to the shift count.
	 */
	pa = ((uint64_t)pp->p_pagenum << MMU_PAGESHIFT) + off;
	length = (uint64_t)len;

	(void) mem_scrub(pa, length);
}

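/*
 * Worked example of the pa computation above, with illustrative
 * values: given the sun4v 8K base page (MMU_PAGESHIFT == 13), a pfn
 * of 0x12345 and off == 0x100 yield
 * pa = (0x12345 << 13) + 0x100 = 0x2468a100.
 */
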
void
sync_data_memory(caddr_t va, size_t len)
{
	/* Call memory sync function */
	(void) mem_sync(va, len);
}

size_t
mmu_get_kernel_lpsize(size_t lpsize)
{
	extern int mmu_exported_pagesize_mask;
	uint_t tte;

	if (lpsize == 0) {
		/* no setting for segkmem_lpsize in /etc/system: use default */
		if (mmu_exported_pagesize_mask & (1 << TTE256M)) {
			lpsize = MMU_PAGESIZE256M;
		} else if (mmu_exported_pagesize_mask & (1 << TTE4M)) {
			lpsize = MMU_PAGESIZE4M;
		} else if (mmu_exported_pagesize_mask & (1 << TTE64K)) {
			lpsize = MMU_PAGESIZE64K;
		} else {
			lpsize = MMU_PAGESIZE;
		}

		return (lpsize);
	}

	for (tte = TTE8K; tte <= TTE256M; tte++) {

		if ((mmu_exported_pagesize_mask & (1 << tte)) == 0)
			continue;

		if (lpsize == TTEBYTES(tte))
			return (lpsize);
	}

	lpsize = TTEBYTES(TTE8K);
	return (lpsize);
}

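/*
 * For example, if the hypervisor exports only 8K, 64K and 4M pages
 * (mmu_exported_pagesize_mask == (1 << TTE8K) | (1 << TTE64K) |
 * (1 << TTE4M)), then an unset segkmem_lpsize defaults to 4M, a
 * segkmem_lpsize of 4M is accepted as-is, and an unsupported request
 * such as 512K falls through the loop above and is quietly demoted
 * to the 8K base page size.
 */
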
void
mmu_init_kcontext()
{
}

/*ARGSUSED*/
void
mmu_init_kernel_pgsz(struct hat *hat)
{
}

static void *
contig_mem_span_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	page_t *ppl;
	page_t *rootpp;
	caddr_t addr = NULL;
	pgcnt_t npages = btopr(size);
	page_t **ppa;
	int pgflags;
	spgcnt_t i = 0;

	ASSERT(size <= contig_mem_import_size_max);
	ASSERT((size & (size - 1)) == 0);

	if ((addr = vmem_xalloc(vmp, size, size, 0, 0,
	    NULL, NULL, vmflag)) == NULL) {
		return (NULL);
	}

	/* The address should be slab-size aligned. */
	ASSERT(((uintptr_t)addr & (size - 1)) == 0);

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		vmem_xfree(vmp, addr, size);
		return (NULL);
	}

	pgflags = PG_EXCL;
	if (vmflag & VM_NORELOC)
		pgflags |= PG_NORELOC;

	ppl = page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kvseg, addr, NULL);

	if (ppl == NULL) {
		vmem_xfree(vmp, addr, size);
		page_unresv(npages);
		return (NULL);
	}

	rootpp = ppl;
	ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
	while (ppl != NULL) {
		page_t *pp = ppl;
		ppa[i++] = pp;
		page_sub(&ppl, pp);
		ASSERT(page_iolock_assert(pp));
		ASSERT(PAGE_EXCL(pp));
		page_io_unlock(pp);
	}

	/*
	 * Load the locked entry.  It's OK to preload the entry into
	 * the TSB since we now support large mappings in the kernel TSB.
	 */
	hat_memload_array(kas.a_hat, (caddr_t)rootpp->p_offset, size,
	    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC, HAT_LOAD_LOCK);

	ASSERT(i == page_get_pagecnt(ppa[0]->p_szc));
	for (--i; i >= 0; --i) {
		ASSERT(ppa[i]->p_szc == ppa[0]->p_szc);
		ASSERT(page_pptonum(ppa[i]) == page_pptonum(ppa[0]) + i);
		(void) page_pp_lock(ppa[i], 0, 1);
		/*
		 * Leave the page share locked. For non-cage pages,
		 * this would prevent memory DR if it were supported
		 * on sun4v.
		 */
		page_downgrade(ppa[i]);
	}

	kmem_free(ppa, npages * sizeof (page_t *));
	return (addr);
}

/*
 * Allocates a slab by first trying to use the largest slab size
 * in contig_mem_import_sizes and then falling back to smaller slab
 * sizes still large enough for the allocation. The sizep argument
 * is a pointer to the requested size. When a slab is successfully
 * allocated, the slab size, which must be >= *sizep and <=
 * contig_mem_import_size_max, is returned in the *sizep argument.
 * Returns the virtual address of the new slab.
 */
static void *
span_alloc_downsize(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
	int i;

	ASSERT(*sizep <= contig_mem_import_size_max);

	for (i = 0; i < NUM_IMPORT_SIZES; i++) {
		size_t page_size = contig_mem_import_sizes[i];

		/*
		 * Check that the alignment is also less than the
		 * import (large page) size. In the case where the
		 * alignment is larger than the size, a large page
		 * large enough for the allocation is not necessarily
		 * physical-address aligned to satisfy the requested
		 * alignment. Since alignment is required to be a
		 * power-of-2, any large page >= size && >= align will
		 * suffice.
		 */
		if (*sizep <= page_size && align <= page_size) {
			void *addr;
			addr = contig_mem_span_alloc(vmp, page_size, vmflag);
			if (addr == NULL)
				continue;
			*sizep = page_size;
			return (addr);
		}
		return (NULL);
	}

	return (NULL);
}

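/*
 * For example, a 64K slab request with 64K alignment first attempts
 * a 4M span, then 512K, then 64K, stopping at the first success. A
 * 512K request tries only 4M and 512K: once the loop reaches an
 * import size smaller than the request, no remaining size can
 * satisfy it and NULL is returned immediately.
 */
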
static void *
contig_mem_span_xalloc(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
	return (span_alloc_downsize(vmp, sizep, align, vmflag | VM_NORELOC));
}

static void *
contig_mem_reloc_span_xalloc(vmem_t *vmp, size_t *sizep, size_t align,
    int vmflag)
{
	ASSERT((vmflag & VM_NORELOC) == 0);
	return (span_alloc_downsize(vmp, sizep, align, vmflag));
}

/*
 * Free a span, which is always exactly one large page.
 */
static void
contig_mem_span_free(vmem_t *vmp, void *inaddr, size_t size)
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);
	page_t *rootpp = NULL;

	ASSERT(size <= contig_mem_import_size_max);
	/* All slabs should be size aligned */
	ASSERT(((uintptr_t)addr & (size - 1)) == 0);

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pp = page_find(&kvp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL) {
			panic("contig_mem_span_free: page not found");
		}
		if (!page_tryupgrade(pp)) {
			page_unlock(pp);
			pp = page_lookup(&kvp,
			    (u_offset_t)(uintptr_t)addr, SE_EXCL);
			if (pp == NULL)
				panic("contig_mem_span_free: page not found");
		}

		ASSERT(PAGE_EXCL(pp));
		ASSERT(size == page_get_pagesize(pp->p_szc));
		ASSERT(rootpp == NULL || rootpp->p_szc == pp->p_szc);
		ASSERT(rootpp == NULL || (page_pptonum(rootpp) +
		    (pgcnt_t)btop(addr - (caddr_t)inaddr) == page_pptonum(pp)));

		page_pp_unlock(pp, 0, 1);

		if (rootpp == NULL)
			rootpp = pp;
	}
	page_destroy_pages(rootpp);
	page_unresv(npages);

	if (vmp != NULL)
		vmem_xfree(vmp, inaddr, size);
}

static void *
contig_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t *sizep, size_t align,
    int vmflag)
{
	ASSERT((align & (align - 1)) == 0);
	return (vmem_xalloc(vmp, *sizep, align, 0, 0, NULL, NULL, vmflag));
}

/*
 * contig_mem_alloc, contig_mem_alloc_align
 *
 * Caution: contig_mem_alloc and contig_mem_alloc_align should be
 * used only when physically contiguous non-relocatable memory is
 * required. Furthermore, use of these allocation routines should be
 * minimized, as should the allocation size. As described in the
 * contig_mem_arena comment block above, slab allocations fall back to
 * being outside of the cage. Therefore, overuse of these allocation
 * routines can lead to non-relocatable large pages being allocated
 * outside the cage, and such pages prevent a larger page from later
 * being allocated over the same range. This can impact performance for
 * applications that utilize e.g. 256M large pages.
 */

/*
 * Allocates size-aligned contiguous memory up to contig_mem_import_size_max.
 * Size must be a power of 2.
 */
void *
contig_mem_alloc(size_t size)
{
	ASSERT((size & (size - 1)) == 0);
	return (contig_mem_alloc_align(size, size));
}

/*
 * contig_mem_alloc_align allocates real contiguous memory with the specified
 * alignment up to contig_mem_import_size_max. The alignment must be a
 * power of 2 and no greater than contig_mem_import_size_max. We assert
 * that the alignment is a power of 2; in non-debug kernels, vmem_xalloc
 * will panic on a non-power-of-2 alignment.
 */
void *
contig_mem_alloc_align(size_t size, size_t align)
{
	void *buf;

	ASSERT(size <= contig_mem_import_size_max);
	ASSERT(align <= contig_mem_import_size_max);
	ASSERT((align & (align - 1)) == 0);

	if (align < CONTIG_MEM_ARENA_QUANTUM)
		align = CONTIG_MEM_ARENA_QUANTUM;

	/*
	 * We take the lock here to serialize span allocations.
	 * We do not lose concurrency for the common case, since
	 * allocations that don't require new span allocations
	 * are serialized by vmem_xalloc. Serializing span
	 * allocations also prevents us from trying to allocate
	 * more spans than necessary.
	 */
	mutex_enter(&contig_mem_lock);

	buf = vmem_xalloc(contig_mem_arena, size, align, 0, 0,
	    NULL, NULL, VM_NOSLEEP | VM_NORELOC);

	if ((buf == NULL) && (size <= MMU_PAGESIZE)) {
		mutex_exit(&contig_mem_lock);
		return (vmem_xalloc(static_alloc_arena, size, align, 0, 0,
		    NULL, NULL, VM_NOSLEEP));
	}

	if (buf == NULL) {
		buf = vmem_xalloc(contig_mem_reloc_arena, size, align, 0, 0,
		    NULL, NULL, VM_NOSLEEP);
	}

	mutex_exit(&contig_mem_lock);

	return (buf);
}

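/*
 * Example usage, as an illustrative sketch only ("my_desc_t" and the
 * error handling are hypothetical, not part of this interface):
 *
 *	my_desc_t *descp;
 *
 *	descp = contig_mem_alloc(MMU_PAGESIZE64K);
 *	if (descp == NULL)
 *		return (ENOMEM);
 *	... hand the physically contiguous buffer to the device ...
 *	contig_mem_free(descp, MMU_PAGESIZE64K);
 *
 * The size passed to contig_mem_free() must match the size passed to
 * contig_mem_alloc(), since contig_mem_free() uses it both to select
 * the owning arena and as the vmem free size.
 */
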
void
contig_mem_free(void *vaddr, size_t size)
{
	if (vmem_contains(contig_mem_arena, vaddr, size)) {
		vmem_xfree(contig_mem_arena, vaddr, size);
	} else if (size > MMU_PAGESIZE) {
		vmem_xfree(contig_mem_reloc_arena, vaddr, size);
	} else {
		vmem_xfree(static_alloc_arena, vaddr, size);
	}
}

/*
 * We create a set of stacked vmem arenas to enable us to
 * allocate large >PAGESIZE chunks of contiguous Real Address space.
 * The vmem_xcreate interface is used to create the contig_mem_arena,
 * allowing the import routine to downsize the requested slab size
 * and return a smaller slab.
 */
void
contig_mem_init(void)
{
	mutex_init(&contig_mem_lock, NULL, MUTEX_DEFAULT, NULL);

	contig_mem_slab_arena = vmem_xcreate("contig_mem_slab_arena", NULL, 0,
	    CONTIG_MEM_SLAB_ARENA_QUANTUM, contig_vmem_xalloc_aligned_wrapper,
	    vmem_xfree, heap_arena, 0, VM_SLEEP | VMC_XALIGN);

	contig_mem_arena = vmem_xcreate("contig_mem_arena", NULL, 0,
	    CONTIG_MEM_ARENA_QUANTUM, contig_mem_span_xalloc,
	    contig_mem_span_free, contig_mem_slab_arena, 0,
	    VM_SLEEP | VM_BESTFIT | VMC_XALIGN);

	contig_mem_reloc_arena = vmem_xcreate("contig_mem_reloc_arena", NULL, 0,
	    CONTIG_MEM_ARENA_QUANTUM, contig_mem_reloc_span_xalloc,
	    contig_mem_span_free, contig_mem_slab_arena, 0,
	    VM_SLEEP | VM_BESTFIT | VMC_XALIGN);

	if (vmem_add(contig_mem_arena, prealloc_buf, prealloc_size,
	    VM_SLEEP) == NULL)
		cmn_err(CE_PANIC, "Failed to pre-populate contig_mem_arena");
}

/*
 * In calculating how much memory to pre-allocate, we include a small
 * amount per-CPU to account for per-CPU buffers in line with measured
 * values for different size systems. contig_mem_prealloc_base is the
 * base fixed amount to be preallocated before considering per-CPU
 * requirements and memory size. We take the minimum of the per-CPU
 * requirement plus contig_mem_prealloc_base and a small percentage
 * of physical memory to prevent allocating too much on smaller systems.
 */
#define	PREALLOC_PER_CPU	(256 * 1024)		/* 256K */
#define	PREALLOC_PERCENT	(4)			/* 4% */
#define	PREALLOC_MIN		(16 * 1024 * 1024)	/* 16M */
size_t contig_mem_prealloc_base = 0;

/*
 * Called at boot time to allow pre-allocation of contiguous memory.
 * The argument 'alloc_base' is the requested base address for the
 * allocation and originates in startup_memlist.
 */
caddr_t
contig_mem_prealloc(caddr_t alloc_base, pgcnt_t npages)
{
	prealloc_size = MIN((PREALLOC_PER_CPU * ncpu_guest_max) +
	    contig_mem_prealloc_base, (ptob(npages) * PREALLOC_PERCENT) / 100);
	prealloc_size = MAX(prealloc_size, PREALLOC_MIN);
	prealloc_size = P2ROUNDUP(prealloc_size, MMU_PAGESIZE4M);

	alloc_base = (caddr_t)roundup((uintptr_t)alloc_base, MMU_PAGESIZE4M);
	prealloc_buf = alloc_base;
	alloc_base += prealloc_size;

	return (alloc_base);
}

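/*
 * Worked example of the sizing above, with illustrative values: on a
 * machine with ncpu_guest_max == 64 and 16G of memory (npages ==
 * 2097152 8K pages), with contig_mem_prealloc_base left at 0, the
 * per-CPU term is 64 * 256K == 16M and 4% of memory is ~655M, so
 * prealloc_size is MIN(16M, ~655M) == 16M. That already meets
 * PREALLOC_MIN and is a 4M multiple, so 16M is reserved.
 */
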
static uint_t sp_color_stride = 16;
static uint_t sp_color_mask = 0x1f;
static uint_t sp_current_color = (uint_t)-1;

size_t
exec_get_spslew(void)
{
	uint_t spcolor = atomic_inc_32_nv(&sp_current_color);
	return ((size_t)((spcolor & sp_color_mask) * SA(sp_color_stride)));
}
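
/*
 * Example: assuming the sparcv9 stack alignment makes SA(16) == 16,
 * successive execs see stack slews of 0, 16, 32, ... up to
 * 31 * 16 == 496 bytes, after which the 32-entry color cycle
 * (sp_color_mask == 0x1f) wraps back to 0. Staggering initial stack
 * pointers this way spreads similar processes' stacks across cache
 * colors.
 */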