xref: /onnv-gate/usr/src/uts/common/vm/seg_kmem.c (revision 3290:256464cbb73c)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/vm.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dumphdr.h>
#include <sys/bootconf.h>
#include <sys/lgrp.h>
#include <vm/seg_kmem.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/vm_dep.h>
#include <vm/faultcode.h>
#include <sys/promif.h>
#include <vm/seg_kp.h>
#include <sys/bitmap.h>
#include <sys/mem_cage.h>

/*
 * seg_kmem is the primary kernel memory segment driver.  It
 * maps the kernel heap [kernelheap, ekernelheap), module text,
 * and all memory which was allocated before the VM was initialized
 * into kas.
 *
 * Pages which belong to seg_kmem are hashed into &kvp vnode at
 * an offset equal to (u_offset_t)virt_addr, and have p_lckcnt >= 1.
 * They must never be paged out since segkmem_fault() is a no-op to
 * prevent recursive faults.
 *
 * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
 * __x86 and are unlocked (p_sharelock == 0) on __sparc.  Once __x86
 * supports relocation the #ifdef kludges can be removed.
 *
 * seg_kmem pages may be subject to relocation by page_relocate(),
 * provided that the HAT supports it; if this is so, segkmem_reloc
 * will be set to a nonzero value.  All boot time allocated memory as
 * well as static memory is considered off limits to relocation.
 * Pages are "relocatable" if p_state does not have P_NORELOC set, so
 * we request P_NORELOC pages for memory that isn't safe to relocate.
 *
 * The kernel heap is logically divided up into four pieces:
 *
 *   heap32_arena is for allocations that require 32-bit absolute
 *   virtual addresses (e.g. code that uses 32-bit pointers/offsets).
 *
 *   heap_core is for allocations that require 2GB *relative*
 *   offsets; in other words all memory from heap_core is within
 *   2GB of all other memory from the same arena.  This is a requirement
 *   of the addressing modes of some processors in supervisor code.
 *
 *   heap_arena is the general heap arena.
 *
 *   static_arena is the static memory arena.  Allocations from it
 *   are not subject to relocation so it is safe to use the memory
 *   physical address as well as the virtual address (e.g. the VA to
 *   PA translations are static).  Caches may import from static_arena;
 *   all other static memory allocations should use static_alloc_arena.
 *
 * On some platforms which have limited virtual address space, seg_kmem
 * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
 * segkp_bitmap is non-NULL, and each bit represents a page of virtual
 * address space which is actually seg_kp mapped.
 */
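
/*
 * Illustrative sketch (editor's note, not from the original source):
 * choosing an arena under the rules above.  Ordinary kernel memory
 * comes from heap_arena (usually indirectly, via kmem_alloc()); a
 * buffer whose physical address will be handed to hardware must come
 * from static memory so its VA -> PA translation can never change:
 *
 *	void *buf = vmem_alloc(static_alloc_arena, PAGESIZE, VM_SLEEP);
 *	...
 *	vmem_free(static_alloc_arena, buf, PAGESIZE);
 *
 * The arena names and vmem calls are the ones declared in this file;
 * the device scenario itself is only an assumed example.
 */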

extern ulong_t *segkp_bitmap;	/* Is set if segkp is from the kernel heap */

char *kernelheap;		/* start of primary kernel heap */
char *ekernelheap;		/* end of primary kernel heap */
struct seg kvseg;		/* primary kernel heap segment */
struct seg kvseg_core;		/* "core" kernel heap segment */
struct seg kzioseg;		/* Segment for zio mappings */
vmem_t *heap_arena;		/* primary kernel heap arena */
vmem_t *heap_core_arena;	/* core kernel heap arena */
char *heap_core_base;		/* start of core kernel heap arena */
char *heap_lp_base;		/* start of kernel large page heap arena */
char *heap_lp_end;		/* end of kernel large page heap arena */
vmem_t *hat_memload_arena;	/* HAT translation data */
struct seg kvseg32;		/* 32-bit kernel heap segment */
vmem_t *heap32_arena;		/* 32-bit kernel heap arena */
vmem_t *heaptext_arena;		/* heaptext arena */
struct as kas;			/* kernel address space */
struct vnode kvp;		/* vnode for all segkmem pages */
struct vnode zvp;		/* vnode for zfs pages */
int segkmem_reloc;		/* enable/disable relocatable segkmem pages */
vmem_t *static_arena;		/* arena for caches to import static memory */
vmem_t *static_alloc_arena;	/* arena for allocating static memory */
vmem_t *zio_arena = NULL;	/* arena for allocating zio memory */
vmem_t *zio_alloc_arena = NULL;	/* arena for allocating zio memory */

/*
 * The seg_kmem driver can map part of the kernel heap with large pages.
 * Currently this functionality is implemented for sparc platforms only.
 *
 * The large page size "segkmem_lpsize" for the kernel heap is selected
 * in the platform-specific code.  It can also be modified via the
 * /etc/system file.  Setting segkmem_lpsize to PAGESIZE in /etc/system
 * disables usage of large pages for the kernel heap.  "segkmem_lpshift"
 * is adjusted appropriately to match segkmem_lpsize.
 *
 * At boot time we carve from the kernel heap arena a range of virtual
 * addresses that will be used for large page mappings.  This range
 * [heap_lp_base, heap_lp_end) is set up as a separate vmem arena -
 * "heap_lp_arena".  We also create "kmem_lp_arena" that caches memory
 * already backed by large pages.  kmem_lp_arena imports virtual
 * segments from heap_lp_arena.
 */
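
/*
 * Illustrative sketch (assumed values, not from this file): on a sparc
 * machine with 8K base pages, this /etc/system line disables the large
 * page kernel heap by forcing segkmem_lpsize to PAGESIZE:
 *
 *	set segkmem_lpsize = 0x2000
 */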

size_t	segkmem_lpsize;
static	uint_t	segkmem_lpshift = PAGESHIFT;

size_t	segkmem_kmemlp_quantum = 0x400000;	/* 4MB */
size_t	segkmem_heaplp_quantum;
vmem_t *heap_lp_arena;
static	vmem_t *kmem_lp_arena;
static	vmem_t *segkmem_ppa_arena;
static	segkmem_lpcb_t segkmem_lpcb;

/*
 * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
 * consumed by the large page heap.  By default this parameter is set to 1/8
 * of physmem, but it can be adjusted through /etc/system either directly or
 * indirectly by setting "segkmem_kmemlp_pcnt" to the percentage of physmem
 * we allow for the large page heap.
 */
size_t	segkmem_kmemlp_max;
static	uint_t	segkmem_kmemlp_pcnt;
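
/*
 * Illustrative sketch (assumed value): either tunable can be set from
 * /etc/system; for example, to let the large page heap consume up to a
 * quarter of physmem:
 *
 *	set segkmem_kmemlp_pcnt = 25
 */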

/*
 * Getting large pages for the kernel heap can be problematic due to
 * physical memory fragmentation.  That's why we allow preallocating
 * "segkmem_kmemlp_min" bytes at boot time.
 */
static	size_t	segkmem_kmemlp_min;

/*
 * Throttling is used to avoid expensive tries to allocate large pages
 * for the kernel heap when many successive attempts to do so fail.
 */
static	ulong_t segkmem_lpthrottle_max = 0x400000;
static	ulong_t segkmem_lpthrottle_start = 0x40;
static	ulong_t segkmem_use_lpthrottle = 1;

/*
 * Freed pages accumulate on a garbage list until segkmem is ready,
 * at which point we call segkmem_gc() to free it all.
 */
typedef struct segkmem_gc_list {
	struct segkmem_gc_list	*gc_next;
	vmem_t			*gc_arena;
	size_t			gc_size;
} segkmem_gc_list_t;

static segkmem_gc_list_t *segkmem_gc_list;

/*
 * Allocations from the hat_memload arena add VM_MEMLOAD to their
 * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
 * to take steps to prevent infinite recursion.  HAT allocations also
 * must be non-relocatable to prevent recursive page faults.
 */
static void *
hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
{
	flags |= (VM_MEMLOAD | VM_NORELOC);
	return (segkmem_alloc(vmp, size, flags));
}

/*
 * Allocations from the static_arena (or any other arena that uses
 * segkmem_alloc_permanent()) require non-relocatable (permanently
 * wired) memory pages, since these pages are referenced by physical
 * as well as virtual address.
 */
void *
segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
{
	return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
}
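
/*
 * Illustrative sketch (hypothetical arena, not from this file): a
 * subsystem that wants its own wired-down memory could stack an arena
 * on the heap with this function as its import routine, mirroring how
 * static_arena is created in kernelheap_init() below:
 *
 *	vmem_t *my_arena = vmem_create("my_static", NULL, 0, PAGESIZE,
 *	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
 */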

/*
 * Initialize kernel heap boundaries.
 */
void
kernelheap_init(
	void *heap_start,
	void *heap_end,
	char *first_avail,
	void *core_start,
	void *core_end)
{
	uintptr_t textbase;
	size_t core_size;
	size_t heap_size;
	vmem_t *heaptext_parent;
	size_t	heap_lp_size = 0;

	kernelheap = heap_start;
	ekernelheap = heap_end;

#ifdef __sparc
	heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
	heap_lp_base = ekernelheap - heap_lp_size;
	heap_lp_end = heap_lp_base + heap_lp_size;
#endif	/* __sparc */

	/*
	 * If this platform has a 'core' heap area, then the space for
	 * overflow module text should be carved out of the end of that
	 * heap.  Otherwise, it gets carved out of the general purpose
	 * heap.
	 */
	core_size = (uintptr_t)core_end - (uintptr_t)core_start;
	if (core_size > 0) {
		ASSERT(core_size >= HEAPTEXT_SIZE);
		textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
		core_size -= HEAPTEXT_SIZE;
	}
#ifndef __sparc
	else {
		ekernelheap -= HEAPTEXT_SIZE;
		textbase = (uintptr_t)ekernelheap;
	}
#endif

	heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
	heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
	    segkmem_alloc, segkmem_free);

	if (core_size > 0) {
		heap_core_arena = vmem_create("heap_core", core_start,
		    core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
		heap_core_base = core_start;
	} else {
		heap_core_arena = heap_arena;
		heap_core_base = kernelheap;
	}

	/*
	 * Reserve space for the large page heap.  If large pages for the
	 * kernel heap are enabled, the large page heap arena will be created
	 * later in the boot sequence in segkmem_heap_lp_init().  Otherwise
	 * the allocated range will be returned to the heap_arena.
	 */
	if (heap_lp_size) {
		(void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
		    heap_lp_base, heap_lp_end,
		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
	}

	/*
	 * Remove the already-spoken-for memory range [kernelheap, first_avail).
	 */
	(void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
	    0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

#ifdef __sparc
	heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
	    SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
	    NULL, NULL, 0, VM_SLEEP);

	textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
	heaptext_parent = NULL;
#else	/* __sparc */
	heap32_arena = heap_core_arena;
	heaptext_parent = heap_core_arena;
#endif	/* __sparc */

	heaptext_arena = vmem_create("heaptext", (void *)textbase,
	    HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);

	/*
	 * Create a set of arenas for memory with static translations
	 * (e.g. VA -> PA translations cannot change).  Since using
	 * kernel pages by physical address implies it isn't safe to
	 * walk across page boundaries, the static_arena quantum must
	 * be PAGESIZE.  Any kmem caches that require static memory
	 * should source from static_arena, while direct allocations
	 * should only use static_alloc_arena.
	 */
	static_arena = vmem_create("static", NULL, 0, PAGESIZE,
	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
	static_alloc_arena = vmem_create("static_alloc", NULL, 0,
	    sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
	    0, VM_SLEEP);

	/*
	 * Create an arena for translation data (ptes, hmes, or hblks).
	 * We need an arena for this because hat_memload() is essential
	 * to vmem_populate() (see comments in common/os/vmem.c).
	 *
	 * Note: any kmem cache that allocates from hat_memload_arena
	 * must be created as a KMC_NOHASH cache (i.e. no external slab
	 * and bufctl structures to allocate) so that slab creation doesn't
	 * require anything more than a single vmem_alloc().
	 */
	hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
	    hat_memload_alloc, segkmem_free, heap_arena, 0,
	    VM_SLEEP | VMC_POPULATOR);
}
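
/*
 * Illustrative sketch (hypothetical cache, not from this file): per the
 * note above, a kmem cache that sources hat_memload_arena must be
 * created KMC_NOHASH so that slab creation needs nothing more than a
 * single vmem_alloc():
 *
 *	cache = kmem_cache_create("my_hat_cache", sizeof (my_hat_data_t),
 *	    0, NULL, NULL, NULL, NULL, hat_memload_arena, KMC_NOHASH);
 */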

/*
 * Grow kernel heap downward.
 */
void
kernelheap_extend(void *range_start, void *range_end)
{
	size_t len = (uintptr_t)range_end - (uintptr_t)range_start;

	ASSERT(range_start < range_end && range_end == kernelheap);

	if (vmem_add(heap_arena, range_start, len, VM_NOSLEEP) == NULL) {
		cmn_err(CE_WARN, "Could not grow kernel heap below 0x%p",
		    (void *)kernelheap);
	} else {
		kernelheap = range_start;
	}
}

void
boot_mapin(caddr_t addr, size_t size)
{
	caddr_t	 eaddr;
	page_t	*pp;
	pfn_t	 pfnum;

	if (page_resv(btop(size), KM_NOSLEEP) == 0)
		panic("boot_mapin: page_resv failed");

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pfnum = va_to_pfn(addr);
		if ((pp = page_numtopp_nolock(pfnum)) == NULL)
			panic("boot_mapin(): No pp for pfnum = %lx", pfnum);

		/*
		 * Must break up any large pages that may have constituent
		 * pages being utilized for BOP_ALLOC()'s before calling
		 * page_numtopp().  The locking code (i.e. page_reclaim())
		 * can't handle them.
		 */
		if (pp->p_szc != 0)
			page_boot_demote(pp);

		pp = page_numtopp(pfnum, SE_EXCL);
		if (pp == NULL || PP_ISFREE(pp))
			panic("boot_alloc: pp is NULL or free");

		/*
		 * If the cage is on but doesn't yet contain this page,
		 * mark it as non-relocatable.
		 */
		if (kcage_on && !PP_ISNORELOC(pp))
			PP_SETNORELOC(pp);

		(void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		page_unlock(pp);
#endif
	}
}

/*
 * Get pages from boot and hash them into the kernel's vp.
 * Used after page structs have been allocated, but before segkmem is ready.
 */
void *
boot_alloc(void *inaddr, size_t size, uint_t align)
{
	caddr_t addr = inaddr;

	if (bootops == NULL)
		prom_panic("boot_alloc: attempt to allocate memory after "
		    "BOP_GONE");

	size = ptob(btopr(size));
	if (BOP_ALLOC(bootops, addr, size, align) != addr)
		panic("boot_alloc: BOP_ALLOC failed");
	boot_mapin((caddr_t)addr, size);
	return (addr);
}
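
/*
 * Illustrative sketch (hypothetical caller): code running after page_ts
 * exist but before kvseg is ready might obtain wired memory with
 *
 *	va = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP | VM_PANIC);
 *	(void) boot_alloc(va, PAGESIZE, BO_NO_ALIGN);
 *
 * which is the same boot-time path segkmem_alloc() takes below.
 */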

static void
segkmem_badop()
{
	panic("segkmem_badop");
}

#define	SEGKMEM_BADOP(t)	(t(*)())segkmem_badop

/*ARGSUSED*/
static faultcode_t
segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
	enum fault_type type, enum seg_rw rw)
{
	pgcnt_t npages;
	spgcnt_t pg;
	page_t *pp;
	struct vnode *vp = seg->s_data;

	ASSERT(RW_READ_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_fault: bad args");

	if (segkp_bitmap && seg == &kvseg) {
		/*
		 * If it is one of the segkp pages, call segkp_fault.
		 */
		if (BT_TEST(segkp_bitmap,
		    btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));
	}

	if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
		return (FC_NOSUPPORT);

	npages = btopr(size);

	switch (type) {
	case F_SOFTLOCK:	/* lock down already-loaded translations */
		for (pg = 0; pg < npages; pg++) {
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_SHARED);
			if (pp == NULL) {
				/*
				 * Hmm, no page.  Does a kernel mapping
				 * exist for it?
				 */
				if (!hat_probe(kas.a_hat, addr)) {
					addr -= PAGESIZE;
					while (--pg >= 0) {
						pp = page_find(vp,
						    (u_offset_t)(uintptr_t)addr);
						if (pp)
							page_unlock(pp);
						addr -= PAGESIZE;
					}
					return (FC_NOMAP);
				}
			}
			addr += PAGESIZE;
		}
		if (rw == S_OTHER)
			hat_reserve(seg->s_as, addr, size);
		return (0);
	case F_SOFTUNLOCK:
		while (npages--) {
			pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
			if (pp)
				page_unlock(pp);
			addr += PAGESIZE;
		}
		return (0);
	default:
		return (FC_NOSUPPORT);
	}
	/*NOTREACHED*/
}

static int
segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_setprot: bad args");

	if (segkp_bitmap && seg == &kvseg) {

		/*
		 * If it is one of the segkp pages, call segkp.
		 */
		if (BT_TEST(segkp_bitmap,
		    btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_SETPROT(segkp, addr, size, prot));
	}

	if (prot == 0)
		hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
	else
		hat_chgprot(kas.a_hat, addr, size, prot);
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	if (segkp_bitmap && seg == &kvseg) {

		/*
		 * If it is one of the segkp pages, call into segkp.
		 */
		if (BT_TEST(segkp_bitmap,
		    btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_CHECKPROT(segkp, addr, size, prot));
	}
	segkmem_badop();
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	if (segkp_bitmap && seg == &kvseg) {

		/*
		 * If it is one of the segkp pages, call into segkp.
		 */
		if (BT_TEST(segkp_bitmap,
		    btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_KLUSTER(segkp, addr, delta));
	}
	segkmem_badop();
	return (0);
}

static void
segkmem_xdump_range(void *arg, void *start, size_t size)
{
	struct as *as = arg;
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	while (addr < addr_end) {
		pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
		if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
			dump_addpage(as, addr, pfn);
		addr += PAGESIZE;
		dump_timeleft = dump_timeout;
	}
}

static void
segkmem_dump_range(void *arg, void *start, size_t size)
{
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	/*
	 * If we are about to start dumping the range of addresses we
	 * carved out of the kernel heap for the large page heap, walk
	 * heap_lp_arena to find what segments are actually populated.
	 */
	if (SEGKMEM_USE_LARGEPAGES &&
	    addr == heap_lp_base && addr_end == heap_lp_end &&
	    vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
		vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_xdump_range, arg);
	} else {
		segkmem_xdump_range(arg, start, size);
	}
}

static void
segkmem_dump(struct seg *seg)
{
	/*
	 * The kernel's heap_arena (represented by kvseg) is a very large
	 * VA space, most of which is typically unused.  To speed up dumping
	 * we use vmem_walk() to quickly find the pieces of heap_arena that
	 * are actually in use.  We do the same for heap32_arena and
	 * heap_core.
	 *
	 * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
	 * may ultimately need to allocate memory.  Reentrant walks are
	 * necessarily imperfect snapshots.  The kernel heap continues
	 * to change during a live crash dump, for example.  For a normal
	 * crash dump, however, we know that there won't be any other threads
	 * messing with the heap.  Therefore, at worst, we may fail to dump
	 * the pages that get allocated by the act of dumping; but we will
	 * always dump every page that was allocated when the walk began.
	 *
	 * The other segkmem segments are dense (fully populated), so there's
	 * no need to use this technique when dumping them.
	 *
	 * Note: when adding special dump handling for any new sparsely-
	 * populated segments, be sure to add similar handling to the ::kgrep
	 * code in mdb.
	 */
	if (seg == &kvseg) {
		vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
#ifndef __sparc
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
#endif
	} else if (seg == &kvseg_core) {
		vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else if (seg == &kvseg32) {
		vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else if (seg == &kzioseg) {
		/*
		 * We don't want to dump pages attached to kzioseg since they
		 * contain file data from ZFS.  If this page's segment is
		 * kzioseg, return instead of writing it to the dump device.
		 */
		return;
	} else {
		segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
	}
}

/*
 * Lock/unlock kmem pages over a given range [addr, addr + len).
 * Returns a shadow list of pages in ppp.  If there are holes
 * in the range (e.g. some of the kernel mappings do not have
 * underlying page_ts) returns ENOTSUP so that as_pagelock()
 * will handle the range via as_fault(F_SOFTLOCK).
 */
/*ARGSUSED*/
static int
segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
	page_t ***ppp, enum lock_type type, enum seg_rw rw)
{
	page_t **pplist, *pp;
	pgcnt_t npages;
	spgcnt_t pg;
	size_t nb;
	struct vnode *vp = seg->s_data;

	ASSERT(ppp != NULL);

	if (segkp_bitmap && seg == &kvseg) {
		/*
		 * If it is one of the segkp pages, call into segkp.
		 */
		if (BT_TEST(segkp_bitmap,
		    btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_PAGELOCK(segkp, addr, len, ppp,
			    type, rw));
	}

	if (type == L_PAGERECLAIM)
		return (ENOTSUP);

	npages = btopr(len);
	nb = sizeof (page_t *) * npages;

	if (type == L_PAGEUNLOCK) {
		pplist = *ppp;
		ASSERT(pplist != NULL);

		for (pg = 0; pg < npages; pg++) {
			pp = pplist[pg];
			page_unlock(pp);
		}
		kmem_free(pplist, nb);
		return (0);
	}

	ASSERT(type == L_PAGELOCK);

	pplist = kmem_alloc(nb, KM_NOSLEEP);
	if (pplist == NULL) {
		*ppp = NULL;
		return (ENOTSUP);	/* take the slow path */
	}

	for (pg = 0; pg < npages; pg++) {
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
		if (pp == NULL) {
			while (--pg >= 0)
				page_unlock(pplist[pg]);
			kmem_free(pplist, nb);
			*ppp = NULL;
			return (ENOTSUP);
		}
		pplist[pg] = pp;
		addr += PAGESIZE;
	}

	*ppp = pplist;
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	if (segkp_bitmap && seg == &kvseg) {

		/*
		 * If it is one of the segkp pages, call into segkp.
		 */
		if (BT_TEST(segkp_bitmap,
		    btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_GETMEMID(segkp, addr, memidp));
	}
	segkmem_badop();
	return (0);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segkmem_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/*ARGSUSED*/
static int
segkmem_capable(struct seg *seg, segcapability_t capability)
{
	if (capability == S_CAPABILITY_NOMINFLT)
		return (1);
	return (0);
}

static struct seg_ops segkmem_ops = {
	SEGKMEM_BADOP(int),		/* dup */
	SEGKMEM_BADOP(int),		/* unmap */
	SEGKMEM_BADOP(void),		/* free */
	segkmem_fault,
	SEGKMEM_BADOP(faultcode_t),	/* faulta */
	segkmem_setprot,
	segkmem_checkprot,
	segkmem_kluster,
	SEGKMEM_BADOP(size_t),		/* swapout */
	SEGKMEM_BADOP(int),		/* sync */
	SEGKMEM_BADOP(size_t),		/* incore */
	SEGKMEM_BADOP(int),		/* lockop */
	SEGKMEM_BADOP(int),		/* getprot */
	SEGKMEM_BADOP(u_offset_t),	/* getoffset */
	SEGKMEM_BADOP(int),		/* gettype */
	SEGKMEM_BADOP(int),		/* getvp */
	SEGKMEM_BADOP(int),		/* advise */
	segkmem_dump,
	segkmem_pagelock,
	SEGKMEM_BADOP(int),		/* setpgsz */
	segkmem_getmemid,
	segkmem_getpolicy,		/* getpolicy */
	segkmem_capable,		/* capable */
};

int
segkmem_zio_create(struct seg *seg)
{
	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
	seg->s_ops = &segkmem_ops;
	seg->s_data = &zvp;
	kas.a_size += seg->s_size;
	return (0);
}

int
segkmem_create(struct seg *seg)
{
	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
	seg->s_ops = &segkmem_ops;
	seg->s_data = &kvp;
	kas.a_size += seg->s_size;
	return (0);
}

/*ARGSUSED*/
page_t *
segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
{
	struct seg kseg;
	int pgflags;
	struct vnode *vp = arg;

	if (vp == NULL)
		vp = &kvp;

	kseg.s_as = &kas;
	pgflags = PG_EXCL;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if ((vmflag & VM_NOSLEEP) == 0)
		pgflags |= PG_WAIT;
	if (vmflag & VM_PANIC)
		pgflags |= PG_PANIC;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;

	return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kseg, addr));
}

/*
 * Allocate pages to back the virtual address range [addr, addr + size).
 * If addr is NULL, allocate the virtual address space as well.
 */
void *
segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
	page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
{
	page_t *ppl;
	caddr_t addr = inaddr;
	pgcnt_t npages = btopr(size);
	int allocflag;

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		return (NULL);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		return (NULL);
	}

	ppl = page_create_func(addr, size, vmflag, pcarg);
	if (ppl == NULL) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		page_unresv(npages);
		return (NULL);
	}

	/*
	 * Under certain conditions, we need to let the HAT layer know
	 * that it cannot safely allocate memory.  Allocations from
	 * the hat_memload vmem arena always need this, to prevent
	 * infinite recursion.
	 *
	 * In addition, the x86 hat cannot safely do memory
	 * allocations while in vmem_populate(), because there
	 * is no simple bound on its usage.
	 */
	if (vmflag & VM_MEMLOAD)
		allocflag = HAT_NO_KALLOC;
#if defined(__x86)
	else if (vmem_is_populator())
		allocflag = HAT_NO_KALLOC;
#endif
	else
		allocflag = 0;

	while (ppl != NULL) {
		page_t *pp = ppl;
		page_sub(&ppl, pp);
		ASSERT(page_iolock_assert(pp));
		ASSERT(PAGE_EXCL(pp));
		page_io_unlock(pp);
		hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
		    (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK | allocflag);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		if (vmflag & SEGKMEM_SHARELOCKED)
			page_downgrade(pp);
		else
			page_unlock(pp);
#endif
	}

	return (addr);
}

static void *
segkmem_alloc_vn(vmem_t *vmp, size_t size, int vmflag, struct vnode *vp)
{
	void *addr;
	segkmem_gc_list_t *gcp, **prev_gcpp;

	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
#ifndef __sparc
		if (bootops->bsys_alloc == NULL)
			halt("Memory allocation between bop_alloc() and "
			    "kmem_alloc().\n");
#endif

		/*
		 * There's not a lot of memory to go around during boot,
		 * so recycle it if we can.
		 */
		for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
		    prev_gcpp = &gcp->gc_next) {
			if (gcp->gc_arena == vmp && gcp->gc_size == size) {
				*prev_gcpp = gcp->gc_next;
				return (gcp);
			}
		}

		addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
		if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
			panic("segkmem_alloc: boot_alloc failed");
		return (addr);
	}
	return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create, vp));
}

void *
segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
}

void *
segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (segkmem_alloc_vn(vmp, size, vmflag, &zvp));
}

/*
 * Any changes to this routine must also be carried over to
 * devmap_free_pages() in the seg_dev driver.  This is because
 * we currently don't have a special kernel segment for non-paged
 * kernel memory that is exported by drivers to user space.
 */
static void
segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp)
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
		segkmem_gc_list_t *gc = inaddr;
		gc->gc_arena = vmp;
		gc->gc_size = size;
		gc->gc_next = segkmem_gc_list;
		segkmem_gc_list = gc;
		return;
	}

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
#if defined(__x86)
		pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL)
			panic("segkmem_free: page not found");
		if (!page_tryupgrade(pp)) {
			/*
			 * Some other thread has a sharelock.  Wait for
			 * it to drop the lock so we can free this page.
			 */
			page_unlock(pp);
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_EXCL);
		}
#else
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
#endif
		if (pp == NULL)
			panic("segkmem_free: page not found");
		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
		pp->p_lckcnt = 0;
		page_destroy(pp, 0);
	}
	page_unresv(npages);

	if (vmp != NULL)
		vmem_free(vmp, inaddr, size);
}

void
segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
{
	segkmem_free_vn(vmp, inaddr, size, &kvp);
}

void
segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
{
	segkmem_free_vn(vmp, inaddr, size, &zvp);
}

void
segkmem_gc(void)
{
	ASSERT(kvseg.s_base != NULL);
	while (segkmem_gc_list != NULL) {
		segkmem_gc_list_t *gc = segkmem_gc_list;
		segkmem_gc_list = gc->gc_next;
		segkmem_free(gc->gc_arena, gc, gc->gc_size);
	}
}

/*
 * Legacy entry points from here to end of file.
 */
void
segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
    pfn_t pfn, uint_t flags)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
	hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
	    flags | HAT_LOAD_LOCK);
}

void
segkmem_mapout(struct seg *seg, void *addr, size_t size)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
}

void *
kmem_getpages(pgcnt_t npages, int kmflag)
{
	return (kmem_alloc(ptob(npages), kmflag));
}

void
kmem_freepages(void *addr, pgcnt_t npages)
{
	kmem_free(addr, ptob(npages));
}
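
/*
 * Illustrative sketch: given the wrappers above, the legacy pair
 *
 *	void *p = kmem_getpages(2, KM_SLEEP);
 *	kmem_freepages(p, 2);
 *
 * is equivalent to kmem_alloc(ptob(2), KM_SLEEP) followed by a
 * kmem_free() of the same size.
 */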

/*
 * segkmem_page_create_large() allocates a large page to be used for the kmem
 * caches.  If kpr is enabled we ask for a relocatable page unless requested
 * otherwise.  If kpr is disabled we have to ask for a non-reloc page.
 */
static page_t *
segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
{
	int pgflags;

	pgflags = PG_EXCL;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if (!(vmflag & VM_NOSLEEP))
		pgflags |= PG_WAIT;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;

	return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kvseg, addr, arg));
}

/*
 * Allocate a large page to back the virtual address range
 * [addr, addr + size).  If addr is NULL, allocate the virtual address
 * space as well.
 */
static void *
segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
    uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
    void *pcarg)
{
	caddr_t addr = inaddr, pa;
	size_t  lpsize = segkmem_lpsize;
	pgcnt_t npages = btopr(size);
	pgcnt_t nbpages = btop(lpsize);
	pgcnt_t nlpages = size >> segkmem_lpshift;
	size_t  ppasize = nbpages * sizeof (page_t *);
	page_t *pp, *rootpp, **ppa, *pplist = NULL;
	int i;

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		return (NULL);
	}

	/*
	 * Allocate an array we need for hat_memload_array.
	 * We use a separate arena to avoid recursion.
	 * We will not need this array when hat_memload_array learns pp++.
	 */
	if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
		goto fail_array_alloc;
	}

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		goto fail_vmem_alloc;

	ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);

	/* create all the pages */
	for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
		if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
			goto fail_page_create;
		page_list_concat(&pplist, &pp);
	}

	/* at this point we have all the resources to complete the request */
	while ((rootpp = pplist) != NULL) {
		for (i = 0; i < nbpages; i++) {
			ASSERT(pplist != NULL);
			pp = pplist;
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
			ppa[i] = pp;
		}
		/*
		 * Load the locked entry.  It's OK to preload the entry into the
		 * TSB since we now support large mappings in the kernel TSB.
		 */
		hat_memload_array(kas.a_hat,
		    (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
		    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK);

		for (--i; i >= 0; --i) {
			ppa[i]->p_lckcnt = 1;
			page_unlock(ppa[i]);
		}
	}

	vmem_free(segkmem_ppa_arena, ppa, ppasize);
	return (addr);

fail_page_create:
	while ((rootpp = pplist) != NULL) {
		for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
			ASSERT(pp != NULL);
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
		}
		page_destroy_pages(rootpp);
	}

	if (inaddr == NULL)
		vmem_free(vmp, addr, size);

fail_vmem_alloc:
	vmem_free(segkmem_ppa_arena, ppa, ppasize);

fail_array_alloc:
	page_unresv(npages);

	return (NULL);
}
12020Sstevel@tonic-gate 
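/*
 * Worked example of the bookkeeping in segkmem_xalloc_lp() (numbers are
 * illustrative, assuming an 8K base page and a 4M large page): for a
 * 16M request, npages = 2048 base pages are reserved, nlpages = 4 large
 * pages are created, and each hat_memload_array() call covers
 * nbpages = 512 constituent pages, so ppa[] holds 512 page_t pointers.
 */
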
static void
segkmem_free_one_lp(caddr_t addr, size_t size)
{
	page_t		*pp, *rootpp = NULL;
	pgcnt_t		pgs_left = btopr(size);

	ASSERT(size == segkmem_lpsize);

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
		if (pp == NULL)
			panic("segkmem_free_one_lp: page not found");
		ASSERT(PAGE_EXCL(pp));
		pp->p_lckcnt = 0;
		if (rootpp == NULL)
			rootpp = pp;
	}
	ASSERT(rootpp != NULL);
	page_destroy_pages(rootpp);

	/* page_unresv() is done by the caller */
}

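/*
 * Note on segkmem_free_one_lp(): every constituent base page of the large
 * page is looked up and exclusively locked, but only the first (root)
 * page is remembered, because page_destroy_pages() tears down the entire
 * large page through its root.
 */
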
/*
 * This function is called to import new spans into vmem arenas like
 * kmem_default_arena and kmem_oversize_arena. It first tries to import
 * spans from the large page arena, kmem_lp_arena. To do that it may have
 * to "upgrade" the requested size to the kmem_lp_arena quantum. If the
 * upgraded request cannot be satisfied, it falls back to regular
 * segkmem_alloc(), which satisfies the request by importing from the
 * "*vmp" arena.
 */
void *
segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, int vmflag)
{
	size_t size;
	kthread_t *t = curthread;
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;

	ASSERT(sizep != NULL);

	size = *sizep;

	if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
	    !(vmflag & SEGKMEM_SHARELOCKED)) {

		size_t kmemlp_qnt = segkmem_kmemlp_quantum;
		size_t asize = P2ROUNDUP(size, kmemlp_qnt);
		void  *addr = NULL;
		ulong_t *lpthrtp = &lpcb->lp_throttle;
		ulong_t lpthrt = *lpthrtp;
		int	dowakeup = 0;
		int	doalloc = 1;

		ASSERT(kmem_lp_arena != NULL);
		ASSERT(asize >= size);

		if (lpthrt != 0) {
			/* try to update the throttle value */
			lpthrt = atomic_add_long_nv(lpthrtp, 1);
			if (lpthrt >= segkmem_lpthrottle_max) {
				lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
				    segkmem_lpthrottle_max / 4);
			}

			/*
			 * Once we get above the throttle start, back off
			 * exponentially on large page attempts and on
			 * reaping.
			 */
			if (lpthrt > segkmem_lpthrottle_start &&
			    (lpthrt & (lpthrt - 1))) {
				lpcb->allocs_throttled++;
				lpthrt--;
				if ((lpthrt & (lpthrt - 1)) == 0)
					kmem_reap();
				return (segkmem_alloc(vmp, size, vmflag));
			}
		}

		if (!(vmflag & VM_NOSLEEP) &&
		    segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
		    vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
		    asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {

			/*
			 * We are low on free memory in kmem_lp_arena, so
			 * let only one thread allocate a heap_lp quantum
			 * sized chunk that everybody is then going to
			 * share.
			 */
			mutex_enter(&lpcb->lp_lock);

			if (lpcb->lp_wait) {

				/* we are not the first one - wait */
				cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
				if (vmem_size(kmem_lp_arena, VMEM_FREE) <
				    kmemlp_qnt) {
					doalloc = 0;
				}
			} else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
			    kmemlp_qnt) {

				/*
				 * we are the first one, make sure we import
				 * a large page
				 */
				if (asize == kmemlp_qnt)
					asize += kmemlp_qnt;
				dowakeup = 1;
				lpcb->lp_wait = 1;
			}

			mutex_exit(&lpcb->lp_lock);
		}

		/*
		 * The VM_ABORT flag prevents sleeps in vmem_xalloc when
		 * large pages are not available. In that case this allocation
		 * attempt will fail and we will retry allocation with small
		 * pages. We also do not want to panic if this allocation
		 * fails because we are going to retry.
		 */
		if (doalloc) {
			addr = vmem_alloc(kmem_lp_arena, asize,
			    (vmflag | VM_ABORT) & ~VM_PANIC);

			if (dowakeup) {
				mutex_enter(&lpcb->lp_lock);
				ASSERT(lpcb->lp_wait != 0);
				lpcb->lp_wait = 0;
				cv_broadcast(&lpcb->lp_cv);
				mutex_exit(&lpcb->lp_lock);
			}
		}

		if (addr != NULL) {
			*sizep = asize;
			*lpthrtp = 0;
			return (addr);
		}

		if (vmflag & VM_NOSLEEP)
			lpcb->nosleep_allocs_failed++;
		else
			lpcb->sleep_allocs_failed++;
		lpcb->alloc_bytes_failed += size;

		/* start large page throttling if it has not started yet */
		if (segkmem_use_lpthrottle && lpthrt == 0) {
			lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
		}
	}
	return (segkmem_alloc(vmp, size, vmflag));
}

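/*
 * Sketch of the throttle arithmetic above (illustrative only, guarded
 * out of real builds): (lpthrt & (lpthrt - 1)) == 0 holds exactly when
 * lpthrt is a power of two, so once throttling is in effect kmem_reap()
 * fires only at counts 2, 4, 8, 16, ..., i.e. with exponential backoff.
 */
#ifdef SEGKMEM_EXAMPLE
static int
segkmem_example_reap_point(ulong_t lpthrt)
{
	/* nonzero power of two? */
	return (lpthrt != 0 && (lpthrt & (lpthrt - 1)) == 0);
}
#endif	/* SEGKMEM_EXAMPLE */
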
void
segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
{
	if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
		segkmem_free(vmp, inaddr, size);
	} else {
		vmem_free(kmem_lp_arena, inaddr, size);
	}
}

/*
 * segkmem_alloc_lpi() imports virtual memory from the large page heap
 * arena into the kmem_lp arena. In the process it maps the imported
 * segment with large pages.
 */
static void *
segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	void  *addr;

	ASSERT(size != 0);
	ASSERT(vmp == heap_lp_arena);

	/* do not allow the large page heap to grow beyond its limit */
	if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
		lpcb->allocs_limited++;
		return (NULL);
	}

	addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create_large, NULL);
	return (addr);
}

/*
 * segkmem_free_lpi() returns virtual memory back into the large page heap
 * arena from the kmem_lp arena. Before doing this it unmaps the segment
 * and frees the large pages used to map it.
 */
static void
segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
{
	pgcnt_t		nlpages = size >> segkmem_lpshift;
	size_t		lpsize = segkmem_lpsize;
	caddr_t		addr = inaddr;
	pgcnt_t		npages = btopr(size);
	int		i;

	ASSERT(vmp == heap_lp_arena);
	ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
	ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);

	for (i = 0; i < nlpages; i++) {
		segkmem_free_one_lp(addr, lpsize);
		addr += lpsize;
	}

	page_unresv(npages);

	vmem_free(vmp, inaddr, size);
}

/*
 * This function is called at system boot time by kmem_init, right after
 * the /etc/system file has been read. Based on the hardware configuration
 * and the /etc/system settings, it decides whether the system is going to
 * use large pages. The initialization necessary to actually start using
 * large pages happens later in the process, after segkmem_heap_lp_init()
 * is called.
 */
int
segkmem_lpsetup()
{
	int use_large_pages = 0;

#ifdef __sparc

	size_t memtotal = physmem * PAGESIZE;

	if (heap_lp_base == NULL) {
		segkmem_lpsize = PAGESIZE;
		return (0);
	}

	/* get a platform dependent value of large page size for kernel heap */
	segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);

	if (segkmem_lpsize <= PAGESIZE) {
		/*
		 * put the virtual space reserved for the large page kernel
		 * heap back into the regular heap
		 */
		vmem_xfree(heap_arena, heap_lp_base,
		    heap_lp_end - heap_lp_base);
		heap_lp_base = NULL;
		heap_lp_end = NULL;
		segkmem_lpsize = PAGESIZE;
		return (0);
	}

	/* set heap_lp quantum if necessary */
	if (segkmem_heaplp_quantum == 0 ||
	    (segkmem_heaplp_quantum & (segkmem_heaplp_quantum - 1)) ||
	    P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
		segkmem_heaplp_quantum = segkmem_lpsize;
	}

	/* set kmem_lp quantum if necessary */
	if (segkmem_kmemlp_quantum == 0 ||
	    (segkmem_kmemlp_quantum & (segkmem_kmemlp_quantum - 1)) ||
	    segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
		segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
	}

	/* set total amount of memory allowed for large page kernel heap */
	if (segkmem_kmemlp_max == 0) {
		if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
			segkmem_kmemlp_pcnt = 12;
		segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
	}
	segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
	    segkmem_heaplp_quantum);

	/* fix the lp kmem preallocation request if necessary */
	if (segkmem_kmemlp_min) {
		segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
		    segkmem_heaplp_quantum);
		if (segkmem_kmemlp_min > segkmem_kmemlp_max)
			segkmem_kmemlp_min = segkmem_kmemlp_max;
	}

	use_large_pages = 1;
	segkmem_lpshift = page_get_shift(page_szc(segkmem_lpsize));

#endif
	return (use_large_pages);
}

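/*
 * Worked example of the sizing above (numbers are illustrative): on a
 * system with 16G of physical memory and the default segkmem_kmemlp_pcnt
 * of 12, segkmem_kmemlp_max starts at about 1.92G and is then rounded up
 * to a multiple of segkmem_heaplp_quantum. The (q & (q - 1)) tests simply
 * reject any quantum that is not a power of two.
 */
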
void
segkmem_zio_init(void *zio_mem_base, size_t zio_mem_size)
{
	ASSERT(zio_mem_base != NULL);
	ASSERT(zio_mem_size != 0);

	zio_arena = vmem_create("zio", zio_mem_base, zio_mem_size, PAGESIZE,
	    NULL, NULL, NULL, 0, VM_SLEEP);

	zio_alloc_arena = vmem_create("zio_buf", NULL, 0, PAGESIZE,
	    segkmem_zio_alloc, segkmem_zio_free, zio_arena, 0, VM_SLEEP);

	ASSERT(zio_arena != NULL);
	ASSERT(zio_alloc_arena != NULL);
}

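/*
 * Illustrative sketch only (hypothetical caller, guarded out of real
 * builds): once segkmem_zio_init() has run, ZIO buffers can be carved
 * out of zio_alloc_arena, which imports page-backed spans from zio_arena
 * through segkmem_zio_alloc()/segkmem_zio_free().
 */
#ifdef SEGKMEM_EXAMPLE
static void *
segkmem_example_zio_buf(size_t size)
{
	return (vmem_alloc(zio_alloc_arena, size, VM_SLEEP));
}
#endif	/* SEGKMEM_EXAMPLE */
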
#ifdef __sparc

static void *
segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
{
	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
	void   *addr;

	if (ppaquantum <= PAGESIZE)
		return (segkmem_alloc(vmp, size, vmflag));

	ASSERT((size & (ppaquantum - 1)) == 0);

	addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
	if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
	    segkmem_page_create, NULL) == NULL) {
		vmem_xfree(vmp, addr, size);
		addr = NULL;
	}

	return (addr);
}

static void
segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
{
	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);

	ASSERT(addr != NULL);

	if (ppaquantum <= PAGESIZE) {
		segkmem_free(vmp, addr, size);
	} else {
		segkmem_free(NULL, addr, size);
		vmem_xfree(vmp, addr, size);
	}
}

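/*
 * Worked example for ppaquantum (illustrative): with an 8K base page, a
 * 4M large page needs btopr(4M) = 512 page_t pointers, i.e. a 4K array,
 * so ppaquantum <= PAGESIZE and the plain segkmem paths above are used;
 * with a 256M large page the array is 256K, and the aligned
 * vmem_xalloc()/segkmem_xalloc() path is taken instead.
 */
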
void
segkmem_heap_lp_init()
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	size_t heap_lp_size = heap_lp_end - heap_lp_base;
	size_t lpsize = segkmem_lpsize;
	size_t ppaquantum;
	void   *addr;

	if (segkmem_lpsize <= PAGESIZE) {
		ASSERT(heap_lp_base == NULL);
		ASSERT(heap_lp_end == NULL);
		return;
	}

	ASSERT(segkmem_heaplp_quantum >= lpsize);
	ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
	ASSERT(lpcb->lp_uselp == 0);
	ASSERT(heap_lp_base != NULL);
	ASSERT(heap_lp_end != NULL);
	ASSERT(heap_lp_base < heap_lp_end);
	ASSERT(heap_lp_arena == NULL);
	ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
	ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);

	/* create the large page heap arena */
	heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
	    segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);

	ASSERT(heap_lp_arena != NULL);

	/* This arena caches memory already mapped by large pages */
	kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
	    segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);

	ASSERT(kmem_lp_arena != NULL);

	mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * This arena is used for the arrays of page_t pointers necessary
	 * to call hat_memload_array().
	 */
	ppaquantum = btopr(lpsize) * sizeof (page_t *);
	segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
	    segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,
	    VM_SLEEP);

	ASSERT(segkmem_ppa_arena != NULL);

	/* preallocate some memory for the lp kernel heap */
	if (segkmem_kmemlp_min) {

		ASSERT(P2PHASE(segkmem_kmemlp_min,
		    segkmem_heaplp_quantum) == 0);

		if ((addr = segkmem_alloc_lpi(heap_lp_arena,
		    segkmem_kmemlp_min, VM_SLEEP)) != NULL) {

			addr = vmem_add(kmem_lp_arena, addr,
			    segkmem_kmemlp_min, VM_SLEEP);
			ASSERT(addr != NULL);
		}
	}

	lpcb->lp_uselp = 1;
}

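/*
 * Summary of the arena topology set up above (descriptive only):
 *
 *	kmem_lp_arena --(segkmem_alloc_lpi/segkmem_free_lpi)--> heap_lp_arena
 *	segkmem_ppa_arena --(segkmem_alloc_ppa/segkmem_free_ppa)--> heap_arena
 *
 * Kernel caches import mapped large-page spans from kmem_lp_arena via
 * segkmem_alloc_lp(), while segkmem_ppa_arena supplies the page_t
 * pointer arrays used while loading those mappings.
 */
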
#endif