xref: /onnv-gate/usr/src/uts/common/vm/seg_kmem.c (revision 0:68f95e015346)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/vm.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dumphdr.h>
#include <sys/bootconf.h>
#include <sys/lgrp.h>
#include <vm/seg_kmem.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/vm_dep.h>
#include <vm/faultcode.h>
#include <sys/promif.h>
#include <vm/seg_kp.h>
#include <sys/bitmap.h>
#include <sys/mem_cage.h>

/*
 * seg_kmem is the primary kernel memory segment driver.  It
 * maps the kernel heap [kernelheap, ekernelheap), module text,
 * and all memory which was allocated before the VM was initialized
 * into kas.
 *
 * Pages which belong to seg_kmem are hashed into the &kvp vnode at
 * an offset equal to (u_offset_t)virt_addr, and have p_lckcnt >= 1.
 * They must never be paged out since segkmem_fault() is a no-op to
 * prevent recursive faults.
 *
 * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
 * __x86 and are unlocked (p_sharelock == 0) on __sparc.  Once __x86
 * supports relocation the #ifdef kludges can be removed.
 *
 * seg_kmem pages may be subject to relocation by page_relocate(),
 * provided that the HAT supports it; if this is so, segkmem_reloc
 * will be set to a nonzero value.  All boot-time allocated memory as
 * well as static memory is considered off-limits to relocation.
 * Pages are "relocatable" if p_state does not have P_NORELOC set, so
 * we request P_NORELOC pages for memory that isn't safe to relocate.
 *
 * The kernel heap is logically divided up into four pieces:
 *
 *   heap32_arena is for allocations that require 32-bit absolute
 *   virtual addresses (e.g. code that uses 32-bit pointers/offsets).
 *
 *   heap_core is for allocations that require 2GB *relative*
 *   offsets; in other words all memory from heap_core is within
 *   2GB of all other memory from the same arena. This is a requirement
 *   of the addressing modes of some processors in supervisor code.
 *
 *   heap_arena is the general heap arena.
 *
 *   static_arena is the static memory arena.  Allocations from it
 *   are not subject to relocation so it is safe to use the memory
 *   physical address as well as the virtual address (e.g. the VA to
 *   PA translations are static).  Caches may import from static_arena;
 *   all other static memory allocations should use static_alloc_arena.
 *
 * On some platforms which have limited virtual address space, seg_kmem
 * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
 * segkp_bitmap is non-NULL, and each bit represents a page of virtual
 * address space which is actually seg_kp mapped.
 */

extern ulong_t *segkp_bitmap;   /* Set if segkp is from the kernel heap */

char *kernelheap;		/* start of primary kernel heap */
char *ekernelheap;		/* end of primary kernel heap */
struct seg kvseg;		/* primary kernel heap segment */
struct seg kvseg_core;		/* "core" kernel heap segment */
vmem_t *heap_arena;		/* primary kernel heap arena */
vmem_t *heap_core_arena;	/* core kernel heap arena */
char *heap_core_base;		/* start of core kernel heap arena */
char *heap_lp_base;		/* start of kernel large page heap arena */
char *heap_lp_end;		/* end of kernel large page heap arena */
vmem_t *hat_memload_arena;	/* HAT translation data */
struct seg kvseg32;		/* 32-bit kernel heap segment */
vmem_t *heap32_arena;		/* 32-bit kernel heap arena */
vmem_t *heaptext_arena;		/* heaptext arena */
struct as kas;			/* kernel address space */
struct vnode kvp;		/* vnode for all segkmem pages */
int segkmem_reloc;		/* enable/disable relocatable segkmem pages */
vmem_t *static_arena;		/* arena for caches to import static memory */
vmem_t *static_alloc_arena;	/* arena for allocating static memory */

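/*
 * Illustrative sketch (not part of the original source): how a caller
 * might choose among the arenas declared above, per the block comment
 * at the top of this file.  The function name and the SEGKMEM_EXAMPLES
 * guard are hypothetical; vmem_alloc()/vmem_free() and the arena
 * semantics are as described above.
 */
#ifdef SEGKMEM_EXAMPLES
static void
segkmem_example_arena_usage(void)
{
	/* General-purpose wired kernel memory from the primary heap. */
	void *buf = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);

	/*
	 * Memory whose physical address will be used directly (e.g.
	 * handed to a device): static_alloc_arena sources VM_NORELOC
	 * pages, so the VA -> PA translation never changes.
	 */
	void *desc = vmem_alloc(static_alloc_arena, 64, VM_SLEEP);

	vmem_free(static_alloc_arena, desc, 64);
	vmem_free(heap_arena, buf, PAGESIZE);
}
#endif	/* SEGKMEM_EXAMPLES */
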
/*
 * The seg_kmem driver can map part of the kernel heap with large pages.
 * Currently this functionality is implemented for sparc platforms only.
 *
 * The large page size "segkmem_lpsize" for the kernel heap is selected
 * in platform-specific code.  It can also be modified via the /etc/system
 * file.  Setting segkmem_lpsize to PAGESIZE in /etc/system disables the
 * use of large pages for the kernel heap.  "segkmem_lpshift" is adjusted
 * appropriately to match segkmem_lpsize.
 *
 * At boot time we carve from the kernel heap arena a range of virtual
 * addresses that will be used for large page mappings.  This range,
 * [heap_lp_base, heap_lp_end), is set up as a separate vmem arena,
 * "heap_lp_arena".  We also create "kmem_lp_arena", which caches memory
 * already backed by large pages.  kmem_lp_arena imports virtual segments
 * from heap_lp_arena.
 */

size_t	segkmem_lpsize;
static  uint_t	segkmem_lpshift = PAGESHIFT;

size_t  segkmem_kmemlp_quantum = 0x400000;	/* 4MB */
size_t  segkmem_heaplp_quantum;
static	vmem_t *heap_lp_arena;
static  vmem_t *kmem_lp_arena;
static  vmem_t *segkmem_ppa_arena;
static	segkmem_lpcb_t segkmem_lpcb;

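/*
 * Illustrative sketch (not part of the original source): the arena
 * relationship described above, expressed with vmem_create().  The
 * child arena starts empty and imports spans of at least `quantum'
 * bytes from `parent' on demand.  The real setup happens later in boot
 * in segkmem_heap_lp_init() (not shown in this excerpt) using
 * segkmem-specific import functions; the names below are hypothetical.
 */
#ifdef SEGKMEM_EXAMPLES
static vmem_t *
segkmem_example_lp_arena(vmem_t *parent, size_t quantum)
{
	return (vmem_create("example_lp", NULL, 0, quantum,
	    vmem_alloc, vmem_free, parent, 0, VM_SLEEP));
}
#endif	/* SEGKMEM_EXAMPLES */
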
/*
 * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
 * consumed by the large page heap. By default this parameter is set to 1/4
 * of physmem but can be adjusted through /etc/system either directly or
 * indirectly by setting "segkmem_kmemlp_pcnt" to the percentage of physmem
 * allowed for the large page heap.
 */
size_t  segkmem_kmemlp_max;
static  uint_t  segkmem_kmemlp_pcnt;

/*
 * Getting large pages for the kernel heap could be problematic due to
 * physical memory fragmentation. That's why we allow preallocating
 * "segkmem_kmemlp_min" bytes at boot time.
 */
static  size_t	segkmem_kmemlp_min;

/*
 * Throttling is used to avoid expensive attempts to allocate large pages
 * for the kernel heap when many successive attempts to do so fail.
 */
static  ulong_t segkmem_lpthrottle_max = 0x400000;
static  ulong_t segkmem_lpthrottle_start = 0x40;
static  ulong_t segkmem_use_lpthrottle = 1;

/*
 * Freed pages accumulate on a garbage list until segkmem is ready,
 * at which point we call segkmem_gc() to free it all.
 */
typedef struct segkmem_gc_list {
	struct segkmem_gc_list	*gc_next;
	vmem_t			*gc_arena;
	size_t			gc_size;
} segkmem_gc_list_t;

static segkmem_gc_list_t *segkmem_gc_list;

/*
 * Allocations from the hat_memload arena add VM_MEMLOAD to their
 * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
 * to take steps to prevent infinite recursion.  HAT allocations also
 * must be non-relocatable to prevent recursive page faults.
 */
static void *
hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
{
	flags |= (VM_MEMLOAD | VM_NORELOC);
	return (segkmem_alloc(vmp, size, flags));
}

/*
 * Allocations from static_arena (or any other arena that uses
 * segkmem_alloc_permanent()) require non-relocatable (permanently
 * wired) memory pages, since these pages are referenced by physical
 * as well as virtual address.
 */
void *
segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
{
	return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
}

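/*
 * Illustrative sketch (not part of the original source): a kmem cache
 * whose buffers need stable VA -> PA translations can source from
 * static_arena, as the comment above suggests.  The cache name, buffer
 * size, and guard are hypothetical.
 */
#ifdef SEGKMEM_EXAMPLES
static kmem_cache_t *
segkmem_example_static_cache(void)
{
	/* Buffers come from static_arena, hence permanently wired. */
	return (kmem_cache_create("example_static", 128, 0,
	    NULL, NULL, NULL, NULL, static_arena, 0));
}
#endif	/* SEGKMEM_EXAMPLES */
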
/*
 * Initialize kernel heap boundaries.
 */
void
kernelheap_init(
	void *heap_start,
	void *heap_end,
	char *first_avail,
	void *core_start,
	void *core_end)
{
	uintptr_t textbase;
	size_t core_size;
	size_t heap_size;
	vmem_t *heaptext_parent;
	size_t	heap_lp_size = 0;

	kernelheap = heap_start;
	ekernelheap = heap_end;

#ifdef __sparc
	heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
	heap_lp_base = ekernelheap - heap_lp_size;
	heap_lp_end = heap_lp_base + heap_lp_size;
#endif	/* __sparc */

	/*
	 * If this platform has a 'core' heap area, then the space for
	 * overflow module text should be carved out of the end of that
	 * heap.  Otherwise, it gets carved out of the general purpose
	 * heap.
	 */
	core_size = (uintptr_t)core_end - (uintptr_t)core_start;
	if (core_size > 0) {
		ASSERT(core_size >= HEAPTEXT_SIZE);
		textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
		core_size -= HEAPTEXT_SIZE;
	}
#ifndef __sparc
	else {
		ekernelheap -= HEAPTEXT_SIZE;
		textbase = (uintptr_t)ekernelheap;
	}
#endif

	heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
	heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
	    segkmem_alloc, segkmem_free);

	if (core_size > 0) {
		heap_core_arena = vmem_create("heap_core", core_start,
		    core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
		heap_core_base = core_start;
	} else {
		heap_core_arena = heap_arena;
		heap_core_base = kernelheap;
	}

	/*
	 * Reserve space for the large page heap. If large pages for the
	 * kernel heap are enabled, the large page heap arena will be created
	 * later in the boot sequence in segkmem_heap_lp_init(). Otherwise
	 * the allocated range will be returned to heap_arena.
	 */
	if (heap_lp_size) {
		(void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
		    heap_lp_base, heap_lp_end,
		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
	}

	/*
	 * Remove the already-spoken-for memory range [kernelheap, first_avail).
	 */
	(void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
	    0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

#ifdef __sparc
	heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
	    SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
	    NULL, NULL, 0, VM_SLEEP);

	textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
	heaptext_parent = NULL;
#else	/* __sparc */
	heap32_arena = heap_core_arena;
	heaptext_parent = heap_core_arena;
#endif	/* __sparc */

	heaptext_arena = vmem_create("heaptext", (void *)textbase,
	    HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);

	/*
	 * Create a set of arenas for memory with static translations
	 * (e.g. VA -> PA translations cannot change).  Since using
	 * kernel pages by physical address implies it isn't safe to
	 * walk across page boundaries, the static_arena quantum must
	 * be PAGESIZE.  Any kmem caches that require static memory
	 * should source from static_arena, while direct allocations
	 * should only use static_alloc_arena.
	 */
	static_arena = vmem_create("static", NULL, 0, PAGESIZE,
	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
	static_alloc_arena = vmem_create("static_alloc", NULL, 0,
	    sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
	    0, VM_SLEEP);

	/*
	 * Create an arena for translation data (ptes, hmes, or hblks).
	 * We need an arena for this because hat_memload() is essential
	 * to vmem_populate() (see comments in common/os/vmem.c).
	 *
	 * Note: any kmem cache that allocates from hat_memload_arena
	 * must be created as a KMC_NOHASH cache (i.e. no external slab
	 * and bufctl structures to allocate) so that slab creation doesn't
	 * require anything more than a single vmem_alloc().
	 */
	hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
	    hat_memload_alloc, segkmem_free, heap_arena, 0,
	    VM_SLEEP | VMC_POPULATOR);
}

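/*
 * Illustrative sketch (not part of the original source): how platform
 * startup code might invoke kernelheap_init().  The addresses are
 * invented for the example; real values come from the platform's
 * startup code.  Passing an empty [core_start, core_end) range makes
 * core_size zero, so on non-sparc platforms the overflow text area is
 * carved from the general heap instead.
 */
#ifdef SEGKMEM_EXAMPLES
#define	EX_HEAP_START	((void *)0x70000000)	/* hypothetical */
#define	EX_HEAP_END	((void *)0xe0000000)	/* hypothetical */
#define	EX_FIRST_AVAIL	((char *)0x70400000)	/* hypothetical */

static void
segkmem_example_heap_init(void)
{
	kernelheap_init(EX_HEAP_START, EX_HEAP_END, EX_FIRST_AVAIL,
	    NULL, NULL);
}
#endif	/* SEGKMEM_EXAMPLES */
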
/*
 * Grow kernel heap downward.
 */
void
kernelheap_extend(void *range_start, void *range_end)
{
	size_t len = (uintptr_t)range_end - (uintptr_t)range_start;

	ASSERT(range_start < range_end && range_end == kernelheap);

	if (vmem_add(heap_arena, range_start, len, VM_NOSLEEP) == NULL) {
		cmn_err(CE_WARN, "Could not grow kernel heap below 0x%p",
		    (void *)kernelheap);
	} else {
		kernelheap = range_start;
	}
}

void
boot_mapin(caddr_t addr, size_t size)
{
	caddr_t	 eaddr;
	page_t	*pp;
	pfn_t	 pfnum;

	if (page_resv(btop(size), KM_NOSLEEP) == 0)
		panic("boot_mapin: page_resv failed");

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pfnum = va_to_pfn(addr);
		if ((pp = page_numtopp_nolock(pfnum)) == NULL)
			panic("boot_mapin(): No pp for pfnum = %lx", pfnum);

		/*
		 * We must break up any large pages that may have
		 * constituent pages being utilized for BOP_ALLOC()s
		 * before calling page_numtopp().  The locking code
		 * (i.e. page_reclaim()) can't handle them.
		 */
		if (pp->p_szc != 0)
			page_boot_demote(pp);

		pp = page_numtopp(pfnum, SE_EXCL);
		if (pp == NULL || PP_ISFREE(pp))
			panic("boot_mapin: pp is NULL or free");

		/*
		 * If the cage is on but doesn't yet contain this page,
		 * mark it as non-relocatable.
		 */
		if (kcage_on && !PP_ISNORELOC(pp))
			PP_SETNORELOC(pp);

		(void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		page_unlock(pp);
#endif
	}
}

/*
 * Get pages from boot and hash them into the kernel's vp.
 * Used after page structs have been allocated, but before segkmem is ready.
 */
void *
boot_alloc(void *inaddr, size_t size, uint_t align)
{
	caddr_t addr = inaddr;

	if (bootops == NULL)
		prom_panic("boot_alloc: attempt to allocate memory after "
		    "BOP_GONE");

	size = ptob(btopr(size));
	if (BOP_ALLOC(bootops, addr, size, align) != addr)
		panic("boot_alloc: BOP_ALLOC failed");
	boot_mapin((caddr_t)addr, size);
	return (addr);
}

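/*
 * Illustrative sketch (not part of the original source): a boot-time
 * caller can carve virtual space from the heap arena and then have the
 * boot loader back it with pages, mirroring the boot path taken by
 * segkmem_alloc() below.  The function name and guard are hypothetical.
 */
#ifdef SEGKMEM_EXAMPLES
static void *
segkmem_example_boot_table(size_t nbytes)
{
	void *va = vmem_alloc(heap_arena, ptob(btopr(nbytes)), VM_SLEEP);

	return (boot_alloc(va, nbytes, BO_NO_ALIGN));
}
#endif	/* SEGKMEM_EXAMPLES */
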
static void
segkmem_badop()
{
	panic("segkmem_badop");
}

#define	SEGKMEM_BADOP(t)	(t(*)())segkmem_badop

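/*
 * Illustrative sketch (not part of the original source): the macro
 * above casts segkmem_badop to a function pointer returning type `t',
 * so any unimplemented slot in the seg_ops vector below panics if it
 * is ever called.  A hypothetical stand-alone use would look like:
 */
#ifdef SEGKMEM_EXAMPLES
/* Expands to: (int (*)())segkmem_badop */
static int (*segkmem_example_badop)() = SEGKMEM_BADOP(int);
#endif	/* SEGKMEM_EXAMPLES */
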
/*ARGSUSED*/
static faultcode_t
segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
	enum fault_type type, enum seg_rw rw)
{
	ASSERT(RW_READ_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_fault: bad args");

	if (segkp_bitmap && seg == &kvseg) {

		/*
		 * If it is one of the segkp pages, call segkp_fault.
		 */
		if (BT_TEST(segkp_bitmap,
			btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));
	}

	switch (type) {
	case F_SOFTLOCK:	/* lock down already-loaded translations */
		if (rw == S_OTHER) {
			hat_reserve(seg->s_as, addr, size);
			return (0);
		}
		/*FALLTHROUGH*/
	case F_SOFTUNLOCK:
		if (rw == S_READ || rw == S_WRITE)
			return (0);
		/*FALLTHROUGH*/
	default:
		break;
	}
	return (FC_NOSUPPORT);
}

static int
segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_setprot: bad args");

	if (segkp_bitmap && seg == &kvseg) {

		/*
		 * If it is one of the segkp pages, call into segkp.
		 */
		if (BT_TEST(segkp_bitmap,
			btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_SETPROT(segkp, addr, size, prot));
	}

	if (prot == 0)
		hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
	else
		hat_chgprot(kas.a_hat, addr, size, prot);
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	if (segkp_bitmap && seg == &kvseg) {

		/*
		 * If it is one of the segkp pages, call into segkp.
		 */
		if (BT_TEST(segkp_bitmap,
			btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_CHECKPROT(segkp, addr, size, prot));
	}
	segkmem_badop();
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	if (segkp_bitmap && seg == &kvseg) {

		/*
		 * If it is one of the segkp pages, call into segkp.
		 */
		if (BT_TEST(segkp_bitmap,
			btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_KLUSTER(segkp, addr, delta));
	}
	segkmem_badop();
	return (0);
}

static void
segkmem_xdump_range(void *arg, void *start, size_t size)
{
	struct as *as = arg;
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	while (addr < addr_end) {
		pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
		if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
			dump_addpage(as, addr, pfn);
		addr += PAGESIZE;
		dump_timeleft = dump_timeout;
	}
}

static void
segkmem_dump_range(void *arg, void *start, size_t size)
{
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	/*
	 * If we are about to start dumping the range of addresses we
	 * carved out of the kernel heap for the large page heap, walk
	 * heap_lp_arena to find which segments are actually populated.
	 */
	if (SEGKMEM_USE_LARGEPAGES &&
	    addr == heap_lp_base && addr_end == heap_lp_end &&
	    vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
		vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_xdump_range, arg);
	} else {
		segkmem_xdump_range(arg, start, size);
	}
}

static void
segkmem_dump(struct seg *seg)
{
	/*
	 * The kernel's heap_arena (represented by kvseg) is a very large
	 * VA space, most of which is typically unused.  To speed up dumping
	 * we use vmem_walk() to quickly find the pieces of heap_arena that
	 * are actually in use.  We do the same for heap32_arena and
	 * heap_core.
	 *
	 * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
	 * may ultimately need to allocate memory.  Reentrant walks are
	 * necessarily imperfect snapshots.  The kernel heap continues
	 * to change during a live crash dump, for example.  For a normal
	 * crash dump, however, we know that there won't be any other threads
	 * messing with the heap.  Therefore, at worst, we may fail to dump
	 * the pages that get allocated by the act of dumping; but we will
	 * always dump every page that was allocated when the walk began.
	 *
	 * The other segkmem segments are dense (fully populated), so there's
	 * no need to use this technique when dumping them.
	 *
	 * Note: when adding special dump handling for any new sparsely-
	 * populated segments, be sure to add similar handling to the ::kgrep
	 * code in mdb.
	 */
	if (seg == &kvseg) {
		vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
#ifndef __sparc
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
#endif
	} else if (seg == &kvseg_core) {
		vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else if (seg == &kvseg32) {
		vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else {
		segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
	}
}

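/*
 * Illustrative sketch (not part of the original source): the
 * vmem_walk() pattern used by segkmem_dump() above.  The callback is
 * invoked once per allocated segment; VMEM_REENTRANT permits the arena
 * to change mid-walk at the cost of an imperfect snapshot.  The
 * function names and guard are hypothetical.
 */
#ifdef SEGKMEM_EXAMPLES
static void
segkmem_example_count(void *arg, void *base, size_t size)
{
	*(size_t *)arg += size;		/* accumulate allocated bytes */
}

static size_t
segkmem_example_heap_inuse(void)
{
	size_t total = 0;

	vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
	    segkmem_example_count, &total);
	return (total);
}
#endif	/* SEGKMEM_EXAMPLES */
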
/*
 * Lock/unlock kmem pages over a given range [addr, addr + len).
 * Returns a shadow list of pages in ppp if ppp is not NULL
 * and memory can be allocated to hold the shadow list.
 */
/*ARGSUSED*/
static int
segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
	page_t ***ppp, enum lock_type type, enum seg_rw rw)
{
	page_t **pplist, *pp;
	pgcnt_t npages;
	size_t nb;

	if (segkp_bitmap && seg == &kvseg) {
		/*
		 * If it is one of the segkp pages, call into segkp.
		 */
		if (BT_TEST(segkp_bitmap,
			btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_PAGELOCK(segkp, addr, len, ppp,
						type, rw));
	}

	if (type == L_PAGERECLAIM)
		return (ENOTSUP);

	npages = btopr(len);
	nb = sizeof (page_t *) * npages;

	if (type == L_PAGEUNLOCK) {
		if ((pplist = *ppp) == NULL) {
			/*
			 * No shadow list.  Iterate over the range
			 * using page_find() and unlock the pages
			 * that we encounter.
			 */
			while (npages--) {
				pp = page_find(&kvp,
				    (u_offset_t)(uintptr_t)addr);
				if (pp)
					page_unlock(pp);
				addr += PAGESIZE;
			}
			return (0);
		}

		while (npages--) {
			pp = *pplist++;
			if (pp)
				page_unlock(pp);
		}
		kmem_free(*ppp, nb);
		return (0);
	}

	ASSERT(type == L_PAGELOCK);

	pplist = NULL;
	if (ppp != NULL)
		*ppp = pplist = kmem_alloc(nb, KM_NOSLEEP);

	while (npages--) {
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
		/*
		 * We'd like to ASSERT(pp != NULL) here, but we can't
		 * because there are legitimate cases where the address
		 * isn't really mapped -- for instance, attaching a
		 * kernel debugger and poking at a non-existent address.
		 */
		if (pplist)
			*pplist++ = pp;
		addr += PAGESIZE;
	}
	return (0);
}

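/*
 * Illustrative sketch (not part of the original source): the expected
 * pairing of L_PAGELOCK and L_PAGEUNLOCK through the segment ops
 * vector.  If the shadow list could not be allocated, the unlock side
 * falls back to page_find(), as implemented above.  The function name
 * and guard are hypothetical.
 */
#ifdef SEGKMEM_EXAMPLES
static void
segkmem_example_lock_range(struct seg *seg, caddr_t addr, size_t len)
{
	page_t **shadow;

	(void) SEGOP_PAGELOCK(seg, addr, len, &shadow, L_PAGELOCK, S_READ);
	/* ... inspect the locked pages via the shadow list ... */
	(void) SEGOP_PAGELOCK(seg, addr, len, &shadow, L_PAGEUNLOCK, S_READ);
}
#endif	/* SEGKMEM_EXAMPLES */
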
/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	if (segkp_bitmap && seg == &kvseg) {

		/*
		 * If it is one of the segkp pages, call into segkp.
		 */
		if (BT_TEST(segkp_bitmap,
			btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_GETMEMID(segkp, addr, memidp));
	}
	segkmem_badop();
	return (0);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segkmem_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}


static struct seg_ops segkmem_ops = {
	SEGKMEM_BADOP(int),		/* dup */
	SEGKMEM_BADOP(int),		/* unmap */
	SEGKMEM_BADOP(void),		/* free */
	segkmem_fault,
	SEGKMEM_BADOP(faultcode_t),	/* faulta */
	segkmem_setprot,
	segkmem_checkprot,
	segkmem_kluster,
	SEGKMEM_BADOP(size_t),		/* swapout */
	SEGKMEM_BADOP(int),		/* sync */
	SEGKMEM_BADOP(size_t),		/* incore */
	SEGKMEM_BADOP(int),		/* lockop */
	SEGKMEM_BADOP(int),		/* getprot */
	SEGKMEM_BADOP(u_offset_t),	/* getoffset */
	SEGKMEM_BADOP(int),		/* gettype */
	SEGKMEM_BADOP(int),		/* getvp */
	SEGKMEM_BADOP(int),		/* advise */
	segkmem_dump,
	segkmem_pagelock,
	SEGKMEM_BADOP(int),		/* setpgsz */
	segkmem_getmemid,
	segkmem_getpolicy,		/* getpolicy */
};

int
segkmem_create(struct seg *seg)
{
	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
	seg->s_ops = &segkmem_ops;
	seg->s_data = NULL;
	kas.a_size += seg->s_size;
	return (0);
}

/*ARGSUSED*/
page_t *
segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
{
	struct seg kseg;
	int pgflags;

	kseg.s_as = &kas;
	pgflags = PG_EXCL;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if ((vmflag & VM_NOSLEEP) == 0)
		pgflags |= PG_WAIT;
	if (vmflag & VM_PANIC)
		pgflags |= PG_PANIC;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;

	return (page_create_va(&kvp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kseg, addr));
}

/*
 * Allocate pages to back the virtual address range [addr, addr + size).
 * If addr is NULL, allocate the virtual address space as well.
 */
void *
segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
	page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
{
	page_t *ppl;
	caddr_t addr = inaddr;
	pgcnt_t npages = btopr(size);
	int allocflag;

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		return (NULL);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		return (NULL);
	}

	ppl = page_create_func(addr, size, vmflag, pcarg);
	if (ppl == NULL) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		page_unresv(npages);
		return (NULL);
	}

	/*
	 * Under certain conditions, we need to let the HAT layer know
	 * that it cannot safely allocate memory.  Allocations from
	 * the hat_memload vmem arena always need this, to prevent
	 * infinite recursion.
	 *
	 * In addition, the x86 hat cannot safely do memory
	 * allocations while in vmem_populate(), because there
	 * is no simple bound on its usage.
	 */
	if (vmflag & VM_MEMLOAD)
		allocflag = HAT_NO_KALLOC;
#if defined(__x86)
	else if (vmem_is_populator())
		allocflag = HAT_NO_KALLOC;
#endif
	else
		allocflag = 0;

	while (ppl != NULL) {
		page_t *pp = ppl;
		page_sub(&ppl, pp);
		ASSERT(page_iolock_assert(pp));
		ASSERT(PAGE_EXCL(pp));
		page_io_unlock(pp);
		hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
		    (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK | allocflag);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		if (vmflag & SEGKMEM_SHARELOCKED)
			page_downgrade(pp);
		else
			page_unlock(pp);
#endif
	}

	return (addr);
}

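/*
 * Illustrative sketch (not part of the original source): because
 * segkmem_xalloc() is parameterized by a page-creation callback
 * (segkmem_alloc() below passes segkmem_page_create; the large-page
 * code at the end of this file passes segkmem_page_create_large), a
 * caller with special requirements can supply its own.  The names
 * below are hypothetical.
 */
#ifdef SEGKMEM_EXAMPLES
static page_t *
segkmem_example_pc_noreloc(void *addr, size_t size, int vmflag, void *arg)
{
	/* Force non-relocatable pages regardless of the caller's flags. */
	return (segkmem_page_create(addr, size, vmflag | VM_NORELOC, arg));
}

static void *
segkmem_example_alloc_noreloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
	    segkmem_example_pc_noreloc, NULL));
}
#endif	/* SEGKMEM_EXAMPLES */
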
void *
segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	void *addr;
	segkmem_gc_list_t *gcp, **prev_gcpp;

	if (kvseg.s_base == NULL) {
#ifndef __sparc
		if (bootops->bsys_alloc == NULL)
			halt("Memory allocation between bop_alloc() and "
			    "kmem_alloc().\n");
#endif

		/*
		 * There's not a lot of memory to go around during boot,
		 * so recycle it if we can.
		 */
		for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
		    prev_gcpp = &gcp->gc_next) {
			if (gcp->gc_arena == vmp && gcp->gc_size == size) {
				*prev_gcpp = gcp->gc_next;
				return (gcp);
			}
		}

		addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
		if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
			panic("segkmem_alloc: boot_alloc failed");
		return (addr);
	}
	return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create, NULL));
}

/*
 * Any changes to this routine must also be carried over to
 * devmap_free_pages() in the seg_dev driver. This is because
 * we currently don't have a special kernel segment for non-paged
 * kernel memory that is exported by drivers to user space.
 */
void
segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	if (kvseg.s_base == NULL) {
		segkmem_gc_list_t *gc = inaddr;
		gc->gc_arena = vmp;
		gc->gc_size = size;
		gc->gc_next = segkmem_gc_list;
		segkmem_gc_list = gc;
		return;
	}

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
#if defined(__x86)
		pp = page_find(&kvp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL)
			panic("segkmem_free: page not found");
		if (!page_tryupgrade(pp)) {
			/*
			 * Some other thread has a sharelock. Wait for
			 * it to drop the lock so we can free this page.
			 */
			page_unlock(pp);
			pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr,
			    SE_EXCL);
		}
#else
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
#endif
		if (pp == NULL)
			panic("segkmem_free: page not found");
		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
		pp->p_lckcnt = 0;
		page_destroy(pp, 0);
	}
	page_unresv(npages);

	if (vmp != NULL)
		vmem_free(vmp, inaddr, size);
}

void
segkmem_gc(void)
{
	ASSERT(kvseg.s_base != NULL);
	while (segkmem_gc_list != NULL) {
		segkmem_gc_list_t *gc = segkmem_gc_list;
		segkmem_gc_list = gc->gc_next;
		segkmem_free(gc->gc_arena, gc, gc->gc_size);
	}
}

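/*
 * Illustrative sketch (not part of the original source): the boot-time
 * lifecycle the gc list supports.  A buffer freed before kvseg exists
 * is parked on segkmem_gc_list by segkmem_free(), may be recycled by a
 * later same-arena, same-size segkmem_alloc(), and whatever remains is
 * destroyed for real once startup calls segkmem_gc().  The function
 * name and guard are hypothetical.
 */
#ifdef SEGKMEM_EXAMPLES
static void
segkmem_example_boot_cycle(void)
{
	void *p = segkmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);

	segkmem_free(heap_arena, p, PAGESIZE);	/* parked, not destroyed */
	p = segkmem_alloc(heap_arena, PAGESIZE, VM_SLEEP); /* recycled */
	segkmem_free(heap_arena, p, PAGESIZE);
	/* Later, once kvseg.s_base != NULL, startup invokes segkmem_gc(). */
}
#endif	/* SEGKMEM_EXAMPLES */
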
965*0Sstevel@tonic-gate /*
966*0Sstevel@tonic-gate  * Legacy entry points from here to end of file.
967*0Sstevel@tonic-gate  */
968*0Sstevel@tonic-gate void
969*0Sstevel@tonic-gate segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
970*0Sstevel@tonic-gate     pfn_t pfn, uint_t flags)
971*0Sstevel@tonic-gate {
972*0Sstevel@tonic-gate 	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
973*0Sstevel@tonic-gate 	hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
974*0Sstevel@tonic-gate 	    flags | HAT_LOAD_LOCK);
975*0Sstevel@tonic-gate }
976*0Sstevel@tonic-gate 
977*0Sstevel@tonic-gate void
978*0Sstevel@tonic-gate segkmem_mapout(struct seg *seg, void *addr, size_t size)
979*0Sstevel@tonic-gate {
980*0Sstevel@tonic-gate 	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
981*0Sstevel@tonic-gate }
982*0Sstevel@tonic-gate 
983*0Sstevel@tonic-gate void *
984*0Sstevel@tonic-gate kmem_getpages(pgcnt_t npages, int kmflag)
985*0Sstevel@tonic-gate {
986*0Sstevel@tonic-gate 	return (kmem_alloc(ptob(npages), kmflag));
987*0Sstevel@tonic-gate }
988*0Sstevel@tonic-gate 
989*0Sstevel@tonic-gate void
990*0Sstevel@tonic-gate kmem_freepages(void *addr, pgcnt_t npages)
991*0Sstevel@tonic-gate {
992*0Sstevel@tonic-gate 	kmem_free(addr, ptob(npages));
993*0Sstevel@tonic-gate }
994*0Sstevel@tonic-gate 
995*0Sstevel@tonic-gate /*
996*0Sstevel@tonic-gate  * segkmem_page_create_large() allocates a large page to be used for the kmem
997*0Sstevel@tonic-gate  * caches. If kpr is enabled we ask for a relocatable page unless requested
998*0Sstevel@tonic-gate  * otherwise. If kpr is disabled we have to ask for a non-reloc page
999*0Sstevel@tonic-gate  */
1000*0Sstevel@tonic-gate static page_t *
1001*0Sstevel@tonic-gate segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
1002*0Sstevel@tonic-gate {
1003*0Sstevel@tonic-gate 	int pgflags;
1004*0Sstevel@tonic-gate 
1005*0Sstevel@tonic-gate 	pgflags = PG_EXCL;
1006*0Sstevel@tonic-gate 
1007*0Sstevel@tonic-gate 	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
1008*0Sstevel@tonic-gate 		pgflags |= PG_NORELOC;
1009*0Sstevel@tonic-gate 	if (!(vmflag & VM_NOSLEEP))
1010*0Sstevel@tonic-gate 		pgflags |= PG_WAIT;
1011*0Sstevel@tonic-gate 	if (vmflag & VM_PUSHPAGE)
1012*0Sstevel@tonic-gate 		pgflags |= PG_PUSHPAGE;
1013*0Sstevel@tonic-gate 
1014*0Sstevel@tonic-gate 	return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
1015*0Sstevel@tonic-gate 	    pgflags, &kvseg, addr, arg));
1016*0Sstevel@tonic-gate }
1017*0Sstevel@tonic-gate 
1018*0Sstevel@tonic-gate /*
1019*0Sstevel@tonic-gate  * Allocate a large page to back the virtual address range
1020*0Sstevel@tonic-gate  * [addr, addr + size).  If addr is NULL, allocate the virtual address
1021*0Sstevel@tonic-gate  * space as well.
1022*0Sstevel@tonic-gate  */
1023*0Sstevel@tonic-gate static void *
1024*0Sstevel@tonic-gate segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
1025*0Sstevel@tonic-gate     uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
1026*0Sstevel@tonic-gate     void *pcarg)
1027*0Sstevel@tonic-gate {
1028*0Sstevel@tonic-gate 	caddr_t addr = inaddr, pa;
1029*0Sstevel@tonic-gate 	size_t  lpsize = segkmem_lpsize;
1030*0Sstevel@tonic-gate 	pgcnt_t npages = btopr(size);
1031*0Sstevel@tonic-gate 	pgcnt_t nbpages = btop(lpsize);
1032*0Sstevel@tonic-gate 	pgcnt_t nlpages = size >> segkmem_lpshift;
1033*0Sstevel@tonic-gate 	size_t  ppasize = nbpages * sizeof (page_t *);
1034*0Sstevel@tonic-gate 	page_t *pp, *rootpp, **ppa, *pplist = NULL;
1035*0Sstevel@tonic-gate 	int i;
1036*0Sstevel@tonic-gate 
1037*0Sstevel@tonic-gate 	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
1038*0Sstevel@tonic-gate 		return (NULL);
1039*0Sstevel@tonic-gate 	}
1040*0Sstevel@tonic-gate 
1041*0Sstevel@tonic-gate 	/*
1042*0Sstevel@tonic-gate 	 * allocate an array we need for hat_memload_array.
1043*0Sstevel@tonic-gate 	 * we use a separate arena to avoid recursion.
1044*0Sstevel@tonic-gate 	 * we will not need this array when hat_memload_array learns pp++
1045*0Sstevel@tonic-gate 	 */
1046*0Sstevel@tonic-gate 	if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
1047*0Sstevel@tonic-gate 		goto fail_array_alloc;
1048*0Sstevel@tonic-gate 	}
1049*0Sstevel@tonic-gate 
1050*0Sstevel@tonic-gate 	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
1051*0Sstevel@tonic-gate 		goto fail_vmem_alloc;
1052*0Sstevel@tonic-gate 
1053*0Sstevel@tonic-gate 	ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);
1054*0Sstevel@tonic-gate 
1055*0Sstevel@tonic-gate 	/* create all the pages */
1056*0Sstevel@tonic-gate 	for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
1057*0Sstevel@tonic-gate 		if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
1058*0Sstevel@tonic-gate 			goto fail_page_create;
1059*0Sstevel@tonic-gate 		page_list_concat(&pplist, &pp);
1060*0Sstevel@tonic-gate 	}
1061*0Sstevel@tonic-gate 
1062*0Sstevel@tonic-gate 	/* at this point we have all the resource to complete the request */
1063*0Sstevel@tonic-gate 	while ((rootpp = pplist) != NULL) {
1064*0Sstevel@tonic-gate 		for (i = 0; i < nbpages; i++) {
1065*0Sstevel@tonic-gate 			ASSERT(pplist != NULL);
1066*0Sstevel@tonic-gate 			pp = pplist;
1067*0Sstevel@tonic-gate 			page_sub(&pplist, pp);
1068*0Sstevel@tonic-gate 			ASSERT(page_iolock_assert(pp));
1069*0Sstevel@tonic-gate 			page_io_unlock(pp);
1070*0Sstevel@tonic-gate 			ppa[i] = pp;
1071*0Sstevel@tonic-gate 		}
1072*0Sstevel@tonic-gate 		/*
1073*0Sstevel@tonic-gate 		 * Load the locked entry. It's OK to preload the entry into the
1074*0Sstevel@tonic-gate 		 * TSB since we now support large mappings in the kernel TSB.
1075*0Sstevel@tonic-gate 		 */
1076*0Sstevel@tonic-gate 		hat_memload_array(kas.a_hat,
1077*0Sstevel@tonic-gate 		    (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
1078*0Sstevel@tonic-gate 		    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
1079*0Sstevel@tonic-gate 		    HAT_LOAD_LOCK);
1080*0Sstevel@tonic-gate 
1081*0Sstevel@tonic-gate 		for (--i; i >= 0; --i) {
1082*0Sstevel@tonic-gate 			ppa[i]->p_lckcnt = 1;
1083*0Sstevel@tonic-gate 			page_unlock(ppa[i]);
1084*0Sstevel@tonic-gate 		}
1085*0Sstevel@tonic-gate 	}
1086*0Sstevel@tonic-gate 
1087*0Sstevel@tonic-gate 	vmem_free(segkmem_ppa_arena, ppa, ppasize);
1088*0Sstevel@tonic-gate 	return (addr);
1089*0Sstevel@tonic-gate 
1090*0Sstevel@tonic-gate fail_page_create:
1091*0Sstevel@tonic-gate 	while ((rootpp = pplist) != NULL) {
1092*0Sstevel@tonic-gate 		for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
1093*0Sstevel@tonic-gate 			ASSERT(pp != NULL);
1094*0Sstevel@tonic-gate 			page_sub(&pplist, pp);
1095*0Sstevel@tonic-gate 			ASSERT(page_iolock_assert(pp));
1096*0Sstevel@tonic-gate 			page_io_unlock(pp);
1097*0Sstevel@tonic-gate 		}
1098*0Sstevel@tonic-gate 		page_destroy_pages(rootpp);
1099*0Sstevel@tonic-gate 	}
1100*0Sstevel@tonic-gate 
1101*0Sstevel@tonic-gate 	if (inaddr == NULL)
1102*0Sstevel@tonic-gate 		vmem_free(vmp, addr, size);
1103*0Sstevel@tonic-gate 
1104*0Sstevel@tonic-gate fail_vmem_alloc:
1105*0Sstevel@tonic-gate 	vmem_free(segkmem_ppa_arena, ppa, ppasize);
1106*0Sstevel@tonic-gate 
1107*0Sstevel@tonic-gate fail_array_alloc:
1108*0Sstevel@tonic-gate 	page_unresv(npages);
1109*0Sstevel@tonic-gate 
1110*0Sstevel@tonic-gate 	return (NULL);
1111*0Sstevel@tonic-gate }
1112*0Sstevel@tonic-gate 
1113*0Sstevel@tonic-gate static void
1114*0Sstevel@tonic-gate segkmem_free_one_lp(caddr_t addr, size_t size)
1115*0Sstevel@tonic-gate {
1116*0Sstevel@tonic-gate 	page_t		*pp, *rootpp = NULL;
1117*0Sstevel@tonic-gate 	pgcnt_t 	pgs_left = btopr(size);
1118*0Sstevel@tonic-gate 
1119*0Sstevel@tonic-gate 	ASSERT(size == segkmem_lpsize);
1120*0Sstevel@tonic-gate 
1121*0Sstevel@tonic-gate 	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1122*0Sstevel@tonic-gate 
1123*0Sstevel@tonic-gate 	for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
1124*0Sstevel@tonic-gate 		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
1125*0Sstevel@tonic-gate 		if (pp == NULL)
1126*0Sstevel@tonic-gate 			panic("segkmem_free_one_lp: page not found");
1127*0Sstevel@tonic-gate 		ASSERT(PAGE_EXCL(pp));
1128*0Sstevel@tonic-gate 		pp->p_lckcnt = 0;
1129*0Sstevel@tonic-gate 		if (rootpp == NULL)
1130*0Sstevel@tonic-gate 			rootpp = pp;
1131*0Sstevel@tonic-gate 	}
1132*0Sstevel@tonic-gate 	ASSERT(rootpp != NULL);
1133*0Sstevel@tonic-gate 	page_destroy_pages(rootpp);
1134*0Sstevel@tonic-gate 
1135*0Sstevel@tonic-gate 	/* page_unresv() is done by the caller */
1136*0Sstevel@tonic-gate }
1137*0Sstevel@tonic-gate 
1138*0Sstevel@tonic-gate /*
1139*0Sstevel@tonic-gate  * This function is called to import new spans into vmem arenas such as
1140*0Sstevel@tonic-gate  * kmem_default_arena and kmem_oversize_arena. It first tries to import
1141*0Sstevel@tonic-gate  * spans from the large page arena, kmem_lp_arena. To do so it may have to
1142*0Sstevel@tonic-gate  * "upgrade" the requested size to the kmem_lp_arena quantum. If the
1143*0Sstevel@tonic-gate  * upgraded request cannot be satisfied, it falls back to regular
1144*0Sstevel@tonic-gate  * segkmem_alloc(), which imports directly from the "*vmp" arena.
1145*0Sstevel@tonic-gate  */
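/*
 * A sketch of the size "upgrade" (illustrative numbers): with a
 * segkmem_kmemlp_quantum of 4 MB, a 1 MB import request becomes
 *
 *	asize = P2ROUNDUP(1 MB, 4 MB) = 4 MB
 *
 * and on success *sizep is set to asize, so the importing arena knows it
 * received the whole 4 MB span.
 */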
1146*0Sstevel@tonic-gate void *
1147*0Sstevel@tonic-gate segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, int vmflag)
1148*0Sstevel@tonic-gate {
1149*0Sstevel@tonic-gate 	size_t size;
1150*0Sstevel@tonic-gate 	kthread_t *t = curthread;
1151*0Sstevel@tonic-gate 	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1152*0Sstevel@tonic-gate 
1153*0Sstevel@tonic-gate 	ASSERT(sizep != NULL);
1154*0Sstevel@tonic-gate 
1155*0Sstevel@tonic-gate 	size = *sizep;
1156*0Sstevel@tonic-gate 
1157*0Sstevel@tonic-gate 	if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
1158*0Sstevel@tonic-gate 	    !(vmflag & SEGKMEM_SHARELOCKED)) {
1159*0Sstevel@tonic-gate 
1160*0Sstevel@tonic-gate 		size_t kmemlp_qnt = segkmem_kmemlp_quantum;
1161*0Sstevel@tonic-gate 		size_t asize = P2ROUNDUP(size, kmemlp_qnt);
1162*0Sstevel@tonic-gate 		void  *addr = NULL;
1163*0Sstevel@tonic-gate 		ulong_t *lpthrtp = &lpcb->lp_throttle;
1164*0Sstevel@tonic-gate 		ulong_t lpthrt = *lpthrtp;
1165*0Sstevel@tonic-gate 		int	dowakeup = 0;
1166*0Sstevel@tonic-gate 		int	doalloc = 1;
1167*0Sstevel@tonic-gate 
1168*0Sstevel@tonic-gate 		ASSERT(kmem_lp_arena != NULL);
1169*0Sstevel@tonic-gate 		ASSERT(asize >= size);
1170*0Sstevel@tonic-gate 
1171*0Sstevel@tonic-gate 		if (lpthrt != 0) {
1172*0Sstevel@tonic-gate 			/* try to update the throttle value */
1173*0Sstevel@tonic-gate 			lpthrt = atomic_add_long_nv(lpthrtp, 1);
1174*0Sstevel@tonic-gate 			if (lpthrt >= segkmem_lpthrottle_max) {
1175*0Sstevel@tonic-gate 				lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
1176*0Sstevel@tonic-gate 				    segkmem_lpthrottle_max / 4);
1177*0Sstevel@tonic-gate 			}
1178*0Sstevel@tonic-gate 
1179*0Sstevel@tonic-gate 			/*
1180*0Sstevel@tonic-gate 			 * Once above the throttle start, back off
1181*0Sstevel@tonic-gate 			 * exponentially from large page attempts and reaping.
1182*0Sstevel@tonic-gate 			 */
1183*0Sstevel@tonic-gate 			if (lpthrt > segkmem_lpthrottle_start &&
1184*0Sstevel@tonic-gate 			    (lpthrt & (lpthrt - 1))) {
1185*0Sstevel@tonic-gate 				atomic_add_64(&lpcb->allocs_throttled, 1L);
1186*0Sstevel@tonic-gate 				lpthrt--;
1187*0Sstevel@tonic-gate 				if ((lpthrt & (lpthrt - 1)) == 0)
1188*0Sstevel@tonic-gate 					kmem_reap();
1189*0Sstevel@tonic-gate 				return (segkmem_alloc(vmp, size, vmflag));
1190*0Sstevel@tonic-gate 			}
1191*0Sstevel@tonic-gate 		}
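		/*
		 * Illustrative throttle trace (hypothetical
		 * segkmem_lpthrottle_start of 4): counts 6, 7, 10..15 fall
		 * back to small pages immediately; counts 5, 9, 17, ...
		 * decrement to a power of two and so call kmem_reap() before
		 * falling back; counts 8, 16, 32, ... fail the
		 * (lpthrt & (lpthrt - 1)) test and fall through to retry the
		 * large page allocation below.
		 */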
1192*0Sstevel@tonic-gate 
1193*0Sstevel@tonic-gate 		if (!(vmflag & VM_NOSLEEP) &&
1194*0Sstevel@tonic-gate 		    segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
1195*0Sstevel@tonic-gate 		    vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
1196*0Sstevel@tonic-gate 		    asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {
1197*0Sstevel@tonic-gate 
1198*0Sstevel@tonic-gate 			/*
1199*0Sstevel@tonic-gate 			 * We are low on free memory in kmem_lp_arena;
1200*0Sstevel@tonic-gate 			 * let only one thread allocate a heap_lp-quantum-
1201*0Sstevel@tonic-gate 			 * sized chunk that everyone else will then share.
1203*0Sstevel@tonic-gate 			 */
1204*0Sstevel@tonic-gate 			mutex_enter(&lpcb->lp_lock);
1205*0Sstevel@tonic-gate 
1206*0Sstevel@tonic-gate 			if (lpcb->lp_wait) {
1207*0Sstevel@tonic-gate 
1208*0Sstevel@tonic-gate 				/* we are not the first one - wait */
1209*0Sstevel@tonic-gate 				cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
1210*0Sstevel@tonic-gate 				if (vmem_size(kmem_lp_arena, VMEM_FREE) <
1211*0Sstevel@tonic-gate 				    kmemlp_qnt)  {
1212*0Sstevel@tonic-gate 					doalloc = 0;
1213*0Sstevel@tonic-gate 				}
1214*0Sstevel@tonic-gate 			} else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
1215*0Sstevel@tonic-gate 			    kmemlp_qnt) {
1216*0Sstevel@tonic-gate 
1217*0Sstevel@tonic-gate 				/*
1218*0Sstevel@tonic-gate 				 * we are the first one; make sure we
1219*0Sstevel@tonic-gate 				 * import a large page
1220*0Sstevel@tonic-gate 				 */
1221*0Sstevel@tonic-gate 				if (asize == kmemlp_qnt)
1222*0Sstevel@tonic-gate 					asize += kmemlp_qnt;
1223*0Sstevel@tonic-gate 				dowakeup = 1;
1224*0Sstevel@tonic-gate 				lpcb->lp_wait = 1;
1225*0Sstevel@tonic-gate 			}
1226*0Sstevel@tonic-gate 
1227*0Sstevel@tonic-gate 			mutex_exit(&lpcb->lp_lock);
1228*0Sstevel@tonic-gate 		}
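		/*
		 * Sketch of the single-importer protocol above: the first
		 * thread to find kmem_lp_arena nearly empty sets lp_wait,
		 * grows its request by an extra quantum, and later wakes the
		 * others; threads that cv_wait() on lp_cv clear doalloc if
		 * the arena is still low when they wake (the import failed)
		 * and fall back to small pages below.
		 */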
1229*0Sstevel@tonic-gate 
1230*0Sstevel@tonic-gate 		/*
1231*0Sstevel@tonic-gate 		 * VM_ABORT flag prevents sleeps in vmem_xalloc when
1232*0Sstevel@tonic-gate 		 * large pages are not available. In that case this allocation
1233*0Sstevel@tonic-gate 		 * attempt will fail and we will retry allocation with small
1234*0Sstevel@tonic-gate 		 * pages. We also do not want to panic if this allocation fails
1235*0Sstevel@tonic-gate 		 * because we are going to retry.
1236*0Sstevel@tonic-gate 		 */
1237*0Sstevel@tonic-gate 		if (doalloc) {
1238*0Sstevel@tonic-gate 			addr = vmem_alloc(kmem_lp_arena, asize,
1239*0Sstevel@tonic-gate 			    (vmflag | VM_ABORT) & ~VM_PANIC);
1240*0Sstevel@tonic-gate 
1241*0Sstevel@tonic-gate 			if (dowakeup) {
1242*0Sstevel@tonic-gate 				mutex_enter(&lpcb->lp_lock);
1243*0Sstevel@tonic-gate 				ASSERT(lpcb->lp_wait != 0);
1244*0Sstevel@tonic-gate 				lpcb->lp_wait = 0;
1245*0Sstevel@tonic-gate 				cv_broadcast(&lpcb->lp_cv);
1246*0Sstevel@tonic-gate 				mutex_exit(&lpcb->lp_lock);
1247*0Sstevel@tonic-gate 			}
1248*0Sstevel@tonic-gate 		}
1249*0Sstevel@tonic-gate 
1250*0Sstevel@tonic-gate 		if (addr != NULL) {
1251*0Sstevel@tonic-gate 			*sizep = asize;
1252*0Sstevel@tonic-gate 			*lpthrtp = 0;
1253*0Sstevel@tonic-gate 			return (addr);
1254*0Sstevel@tonic-gate 		}
1255*0Sstevel@tonic-gate 
1256*0Sstevel@tonic-gate 		if (vmflag & VM_NOSLEEP)
1257*0Sstevel@tonic-gate 			atomic_add_64(&lpcb->nosleep_allocs_failed, 1L);
1258*0Sstevel@tonic-gate 		else
1259*0Sstevel@tonic-gate 			atomic_add_64(&lpcb->sleep_allocs_failed, 1L);
1260*0Sstevel@tonic-gate 		atomic_add_64(&lpcb->alloc_bytes_failed, size);
1261*0Sstevel@tonic-gate 
1262*0Sstevel@tonic-gate 		/* if large page throttling has not started yet, start it */
1263*0Sstevel@tonic-gate 		if (segkmem_use_lpthrottle && lpthrt == 0) {
1264*0Sstevel@tonic-gate 			lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
1265*0Sstevel@tonic-gate 		}
1266*0Sstevel@tonic-gate 	}
1267*0Sstevel@tonic-gate 	return (segkmem_alloc(vmp, size, vmflag));
1268*0Sstevel@tonic-gate }
1269*0Sstevel@tonic-gate 
1270*0Sstevel@tonic-gate void
1271*0Sstevel@tonic-gate segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
1272*0Sstevel@tonic-gate {
1273*0Sstevel@tonic-gate 	if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
1274*0Sstevel@tonic-gate 		segkmem_free(vmp, inaddr, size);
1275*0Sstevel@tonic-gate 	} else {
1276*0Sstevel@tonic-gate 		vmem_free(kmem_lp_arena, inaddr, size);
1277*0Sstevel@tonic-gate 	}
1278*0Sstevel@tonic-gate }
1279*0Sstevel@tonic-gate 
1280*0Sstevel@tonic-gate /*
1281*0Sstevel@tonic-gate  * segkmem_alloc_lpi() imports virtual memory from the large page heap
1282*0Sstevel@tonic-gate  * arena into the kmem_lp arena, mapping the imported segment with large
1283*0Sstevel@tonic-gate  * pages in the process.
1284*0Sstevel@tonic-gate  */
1285*0Sstevel@tonic-gate static void *
1286*0Sstevel@tonic-gate segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
1287*0Sstevel@tonic-gate {
1288*0Sstevel@tonic-gate 	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1289*0Sstevel@tonic-gate 	void  *addr;
1290*0Sstevel@tonic-gate 
1291*0Sstevel@tonic-gate 	ASSERT(size != 0);
1292*0Sstevel@tonic-gate 	ASSERT(vmp == heap_lp_arena);
1293*0Sstevel@tonic-gate 
1294*0Sstevel@tonic-gate 	/* do not allow the large page heap to grow beyond its limit */
1295*0Sstevel@tonic-gate 	if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
1296*0Sstevel@tonic-gate 		atomic_add_64(&lpcb->allocs_limited, 1);
1297*0Sstevel@tonic-gate 		return (NULL);
1298*0Sstevel@tonic-gate 	}
1299*0Sstevel@tonic-gate 
1300*0Sstevel@tonic-gate 	addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
1301*0Sstevel@tonic-gate 	    segkmem_page_create_large, NULL);
1302*0Sstevel@tonic-gate 	return (addr);
1303*0Sstevel@tonic-gate }
1304*0Sstevel@tonic-gate 
1305*0Sstevel@tonic-gate /*
1306*0Sstevel@tonic-gate  * segkmem_free_lpi() returns virtual memory from the kmem_lp arena back
1307*0Sstevel@tonic-gate  * into the large page heap arena. Before doing so it unmaps the segment
1308*0Sstevel@tonic-gate  * and frees the large pages used to map it.
1309*0Sstevel@tonic-gate  */
1310*0Sstevel@tonic-gate static void
1311*0Sstevel@tonic-gate segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
1312*0Sstevel@tonic-gate {
1313*0Sstevel@tonic-gate 	pgcnt_t		nlpages = size >> segkmem_lpshift;
1314*0Sstevel@tonic-gate 	size_t		lpsize = segkmem_lpsize;
1315*0Sstevel@tonic-gate 	caddr_t		addr = inaddr;
1316*0Sstevel@tonic-gate 	pgcnt_t 	npages = btopr(size);
1317*0Sstevel@tonic-gate 	int		i;
1318*0Sstevel@tonic-gate 
1319*0Sstevel@tonic-gate 	ASSERT(vmp == heap_lp_arena);
1320*0Sstevel@tonic-gate 	ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
1321*0Sstevel@tonic-gate 	ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);
1322*0Sstevel@tonic-gate 
1323*0Sstevel@tonic-gate 	for (i = 0; i < nlpages; i++) {
1324*0Sstevel@tonic-gate 		segkmem_free_one_lp(addr, lpsize);
1325*0Sstevel@tonic-gate 		addr += lpsize;
1326*0Sstevel@tonic-gate 	}
1327*0Sstevel@tonic-gate 
1328*0Sstevel@tonic-gate 	page_unresv(npages);
1329*0Sstevel@tonic-gate 
1330*0Sstevel@tonic-gate 	vmem_free(vmp, inaddr, size);
1331*0Sstevel@tonic-gate }
1332*0Sstevel@tonic-gate 
1333*0Sstevel@tonic-gate /*
1334*0Sstevel@tonic-gate  * This function is called at system boot time by kmem_init(), right after
1335*0Sstevel@tonic-gate  * the /etc/system file has been read. Based on the hardware configuration
1336*0Sstevel@tonic-gate  * and /etc/system settings, it decides whether the system will use large
1337*0Sstevel@tonic-gate  * pages. The initialization necessary to actually start using large pages
1338*0Sstevel@tonic-gate  * happens later in boot, after segkmem_heap_lp_init() is called.
1339*0Sstevel@tonic-gate  */
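/*
 * For example, the tunables consulted below can be set from /etc/system
 * (illustrative values only):
 *
 *	set segkmem_lpsize = 0x400000
 *	set segkmem_kmemlp_pcnt = 25
 */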
1340*0Sstevel@tonic-gate int
1341*0Sstevel@tonic-gate segkmem_lpsetup()
1342*0Sstevel@tonic-gate {
1343*0Sstevel@tonic-gate 	int use_large_pages = 0;
1344*0Sstevel@tonic-gate 
1345*0Sstevel@tonic-gate #ifdef __sparc
1346*0Sstevel@tonic-gate 
1347*0Sstevel@tonic-gate 	size_t memtotal = physmem * PAGESIZE;
1348*0Sstevel@tonic-gate 
1349*0Sstevel@tonic-gate 	if (heap_lp_base == NULL) {
1350*0Sstevel@tonic-gate 		segkmem_lpsize = PAGESIZE;
1351*0Sstevel@tonic-gate 		return (0);
1352*0Sstevel@tonic-gate 	}
1353*0Sstevel@tonic-gate 
1354*0Sstevel@tonic-gate 	/* get the platform-dependent large page size for the kernel heap */
1355*0Sstevel@tonic-gate 	segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);
1356*0Sstevel@tonic-gate 
1357*0Sstevel@tonic-gate 	if (segkmem_lpsize <= PAGESIZE) {
1358*0Sstevel@tonic-gate 		/*
1359*0Sstevel@tonic-gate 		 * put the virtual address space reserved for the large
1360*0Sstevel@tonic-gate 		 * page kernel heap back into the regular heap
1361*0Sstevel@tonic-gate 		 */
1362*0Sstevel@tonic-gate 		vmem_xfree(heap_arena, heap_lp_base,
1363*0Sstevel@tonic-gate 		    heap_lp_end - heap_lp_base);
1364*0Sstevel@tonic-gate 		heap_lp_base = NULL;
1365*0Sstevel@tonic-gate 		heap_lp_end = NULL;
1366*0Sstevel@tonic-gate 		segkmem_lpsize = PAGESIZE;
1367*0Sstevel@tonic-gate 		return (0);
1368*0Sstevel@tonic-gate 	}
1369*0Sstevel@tonic-gate 
1370*0Sstevel@tonic-gate 	/* set heap_lp quantum if necessary */
1371*0Sstevel@tonic-gate 	if (segkmem_heaplp_quantum == 0 ||
1372*0Sstevel@tonic-gate 	    (segkmem_heaplp_quantum & (segkmem_heaplp_quantum - 1)) ||
1373*0Sstevel@tonic-gate 	    P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
1374*0Sstevel@tonic-gate 		segkmem_heaplp_quantum = segkmem_lpsize;
1375*0Sstevel@tonic-gate 	}
1376*0Sstevel@tonic-gate 
1377*0Sstevel@tonic-gate 	/* set kmem_lp quantum if necessary */
1378*0Sstevel@tonic-gate 	if (segkmem_kmemlp_quantum == 0 ||
1379*0Sstevel@tonic-gate 	    (segkmem_kmemlp_quantum & (segkmem_kmemlp_quantum - 1)) ||
1380*0Sstevel@tonic-gate 	    segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
1381*0Sstevel@tonic-gate 		segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
1382*0Sstevel@tonic-gate 	}
1383*0Sstevel@tonic-gate 
1384*0Sstevel@tonic-gate 	/* set total amount of memory allowed for large page kernel heap */
1385*0Sstevel@tonic-gate 	if (segkmem_kmemlp_max == 0) {
1386*0Sstevel@tonic-gate 		if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
1387*0Sstevel@tonic-gate 			segkmem_kmemlp_pcnt = 25;
1388*0Sstevel@tonic-gate 		segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
1389*0Sstevel@tonic-gate 	}
1390*0Sstevel@tonic-gate 	segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
1391*0Sstevel@tonic-gate 	    segkmem_heaplp_quantum);
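	/*
	 * Worked example with assumed values: memtotal = 16 GB and the
	 * default segkmem_kmemlp_pcnt of 25 give a segkmem_kmemlp_max of
	 * 4 GB, rounded up here to the heap_lp quantum.
	 */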
1392*0Sstevel@tonic-gate 
1393*0Sstevel@tonic-gate 	/* fix the lp kmem preallocation request if necessary */
1394*0Sstevel@tonic-gate 	if (segkmem_kmemlp_min) {
1395*0Sstevel@tonic-gate 		segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
1396*0Sstevel@tonic-gate 		    segkmem_heaplp_quantum);
1397*0Sstevel@tonic-gate 		if (segkmem_kmemlp_min > segkmem_kmemlp_max)
1398*0Sstevel@tonic-gate 			segkmem_kmemlp_min = segkmem_kmemlp_max;
1399*0Sstevel@tonic-gate 	}
1400*0Sstevel@tonic-gate 
1401*0Sstevel@tonic-gate 	use_large_pages = 1;
1402*0Sstevel@tonic-gate 	segkmem_lpshift = page_get_shift(page_szc(segkmem_lpsize));
1403*0Sstevel@tonic-gate 
1404*0Sstevel@tonic-gate #endif
1405*0Sstevel@tonic-gate 	return (use_large_pages);
1406*0Sstevel@tonic-gate }
1407*0Sstevel@tonic-gate 
1408*0Sstevel@tonic-gate #ifdef __sparc
1409*0Sstevel@tonic-gate 
1410*0Sstevel@tonic-gate 
1411*0Sstevel@tonic-gate static void *
1412*0Sstevel@tonic-gate segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
1413*0Sstevel@tonic-gate {
1414*0Sstevel@tonic-gate 	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
1415*0Sstevel@tonic-gate 	void   *addr;
1416*0Sstevel@tonic-gate 
1417*0Sstevel@tonic-gate 	if (ppaquantum <= PAGESIZE)
1418*0Sstevel@tonic-gate 		return (segkmem_alloc(vmp, size, vmflag));
1419*0Sstevel@tonic-gate 
1420*0Sstevel@tonic-gate 	ASSERT((size & (ppaquantum - 1)) == 0);
1421*0Sstevel@tonic-gate 
1422*0Sstevel@tonic-gate 	addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
1423*0Sstevel@tonic-gate 	if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
1424*0Sstevel@tonic-gate 	    segkmem_page_create, NULL) == NULL) {
1425*0Sstevel@tonic-gate 		vmem_xfree(vmp, addr, size);
1426*0Sstevel@tonic-gate 		addr = NULL;
1427*0Sstevel@tonic-gate 	}
1428*0Sstevel@tonic-gate 
1429*0Sstevel@tonic-gate 	return (addr);
1430*0Sstevel@tonic-gate }
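/*
 * For illustration (hypothetical geometry): with an 8 KB base page,
 * 64-bit pointers and segkmem_lpsize = 4 MB, ppaquantum is
 * 512 * sizeof (page_t *) = 4 KB <= PAGESIZE, so the plain
 * segkmem_alloc() path is taken; a 256 MB large page instead gives
 * ppaquantum = 256 KB and forces the ppaquantum-aligned vmem_xalloc()
 * path above.
 */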
1431*0Sstevel@tonic-gate 
1432*0Sstevel@tonic-gate static void
1433*0Sstevel@tonic-gate segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
1434*0Sstevel@tonic-gate {
1435*0Sstevel@tonic-gate 	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
1436*0Sstevel@tonic-gate 
1437*0Sstevel@tonic-gate 	ASSERT(addr != NULL);
1438*0Sstevel@tonic-gate 
1439*0Sstevel@tonic-gate 	if (ppaquantum <= PAGESIZE) {
1440*0Sstevel@tonic-gate 		segkmem_free(vmp, addr, size);
1441*0Sstevel@tonic-gate 	} else {
1442*0Sstevel@tonic-gate 		segkmem_free(NULL, addr, size);
1443*0Sstevel@tonic-gate 		vmem_xfree(vmp, addr, size);
1444*0Sstevel@tonic-gate 	}
1445*0Sstevel@tonic-gate }
1446*0Sstevel@tonic-gate 
1447*0Sstevel@tonic-gate void
1448*0Sstevel@tonic-gate segkmem_heap_lp_init()
1449*0Sstevel@tonic-gate {
1450*0Sstevel@tonic-gate 	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1451*0Sstevel@tonic-gate 	size_t heap_lp_size = heap_lp_end - heap_lp_base;
1452*0Sstevel@tonic-gate 	size_t lpsize = segkmem_lpsize;
1453*0Sstevel@tonic-gate 	size_t ppaquantum;
1454*0Sstevel@tonic-gate 	void   *addr;
1455*0Sstevel@tonic-gate 
1456*0Sstevel@tonic-gate 	if (segkmem_lpsize <= PAGESIZE) {
1457*0Sstevel@tonic-gate 		ASSERT(heap_lp_base == NULL);
1458*0Sstevel@tonic-gate 		ASSERT(heap_lp_end == NULL);
1459*0Sstevel@tonic-gate 		return;
1460*0Sstevel@tonic-gate 	}
1461*0Sstevel@tonic-gate 
1462*0Sstevel@tonic-gate 	ASSERT(segkmem_heaplp_quantum >= lpsize);
1463*0Sstevel@tonic-gate 	ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
1464*0Sstevel@tonic-gate 	ASSERT(lpcb->lp_uselp == 0);
1465*0Sstevel@tonic-gate 	ASSERT(heap_lp_base != NULL);
1466*0Sstevel@tonic-gate 	ASSERT(heap_lp_end != NULL);
1467*0Sstevel@tonic-gate 	ASSERT(heap_lp_base < heap_lp_end);
1468*0Sstevel@tonic-gate 	ASSERT(heap_lp_arena == NULL);
1469*0Sstevel@tonic-gate 	ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
1470*0Sstevel@tonic-gate 	ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);
1471*0Sstevel@tonic-gate 
1472*0Sstevel@tonic-gate 	/* create large page heap arena */
1473*0Sstevel@tonic-gate 	heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
1474*0Sstevel@tonic-gate 	    segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);
1475*0Sstevel@tonic-gate 
1476*0Sstevel@tonic-gate 	ASSERT(heap_lp_arena != NULL);
1477*0Sstevel@tonic-gate 
1478*0Sstevel@tonic-gate 	/* This arena caches memory already mapped by large pages */
1479*0Sstevel@tonic-gate 	kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
1480*0Sstevel@tonic-gate 	    segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);
1481*0Sstevel@tonic-gate 
1482*0Sstevel@tonic-gate 	ASSERT(kmem_lp_arena != NULL);
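	/*
	 * Sketch of the import chain set up by the two vmem_create()
	 * calls above:
	 *
	 *	kmem_default_arena, kmem_oversize_arena, ...
	 *		| segkmem_alloc_lp()
	 *		v
	 *	kmem_lp_arena		(spans already mapped with large pages)
	 *		| segkmem_alloc_lpi() / segkmem_free_lpi()
	 *		v
	 *	heap_lp_arena		[heap_lp_base, heap_lp_end)
	 */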
1483*0Sstevel@tonic-gate 
1484*0Sstevel@tonic-gate 	mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
1485*0Sstevel@tonic-gate 	cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);
1486*0Sstevel@tonic-gate 
1487*0Sstevel@tonic-gate 	/*
1488*0Sstevel@tonic-gate 	 * This arena is used for the array of page_t pointers necessary
1489*0Sstevel@tonic-gate 	 * to call hat_memload_array().
1490*0Sstevel@tonic-gate 	 */
1491*0Sstevel@tonic-gate 	ppaquantum = btopr(lpsize) * sizeof (page_t *);
1492*0Sstevel@tonic-gate 	segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
1493*0Sstevel@tonic-gate 	    segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,
1494*0Sstevel@tonic-gate 	    VM_SLEEP);
1495*0Sstevel@tonic-gate 
1496*0Sstevel@tonic-gate 	ASSERT(segkmem_ppa_arena != NULL);
1497*0Sstevel@tonic-gate 
1498*0Sstevel@tonic-gate 	/* preallocate some memory for the lp kernel heap */
1499*0Sstevel@tonic-gate 	if (segkmem_kmemlp_min) {
1500*0Sstevel@tonic-gate 
1501*0Sstevel@tonic-gate 		ASSERT(P2PHASE(segkmem_kmemlp_min,
1502*0Sstevel@tonic-gate 		    segkmem_heaplp_quantum) == 0);
1503*0Sstevel@tonic-gate 
1504*0Sstevel@tonic-gate 		if ((addr = segkmem_alloc_lpi(heap_lp_arena,
1505*0Sstevel@tonic-gate 		    segkmem_kmemlp_min, VM_SLEEP)) != NULL) {
1506*0Sstevel@tonic-gate 
1507*0Sstevel@tonic-gate 			addr = vmem_add(kmem_lp_arena, addr,
1508*0Sstevel@tonic-gate 			    segkmem_kmemlp_min, VM_SLEEP);
1509*0Sstevel@tonic-gate 			ASSERT(addr != NULL);
1510*0Sstevel@tonic-gate 		}
1511*0Sstevel@tonic-gate 	}
1512*0Sstevel@tonic-gate 
1513*0Sstevel@tonic-gate 	lpcb->lp_uselp = 1;
1514*0Sstevel@tonic-gate }
1515*0Sstevel@tonic-gate 
1516*0Sstevel@tonic-gate #endif