/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/vm.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dumphdr.h>
#include <sys/bootconf.h>
#include <sys/lgrp.h>
#include <vm/seg_kmem.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/vm_dep.h>
#include <vm/faultcode.h>
#include <sys/promif.h>
#include <vm/seg_kp.h>
#include <sys/bitmap.h>
#include <sys/mem_cage.h>

#ifdef __sparc
#include <sys/ivintr.h>
#include <sys/panic.h>
#endif

/*
 * seg_kmem is the primary kernel memory segment driver.  It
 * maps the kernel heap [kernelheap, ekernelheap), module text,
 * and all memory which was allocated before the VM was initialized
 * into kas.
 *
 * Pages which belong to seg_kmem are hashed into &kvp vnode at
 * an offset equal to (u_offset_t)virt_addr, and have p_lckcnt >= 1.
 * They must never be paged out since segkmem_fault() is a no-op to
 * prevent recursive faults.
 *
 * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
 * __x86 and are unlocked (p_sharelock == 0) on __sparc.  Once __x86
 * supports relocation the #ifdef kludges can be removed.
 *
 * seg_kmem pages may be subject to relocation by page_relocate(),
 * provided that the HAT supports it; if this is so, segkmem_reloc
 * will be set to a nonzero value.  All boot time allocated memory as
 * well as static memory is considered off limits to relocation.
 * Pages are "relocatable" if p_state does not have P_NORELOC set, so
 * we request P_NORELOC pages for memory that isn't safe to relocate.
 *
 * The kernel heap is logically divided up into four pieces:
 *
 *   heap32_arena is for allocations that require 32-bit absolute
 *   virtual addresses (e.g. code that uses 32-bit pointers/offsets).
 *
 *   heap_core is for allocations that require 2GB *relative*
 *   offsets; in other words all memory from heap_core is within
 *   2GB of all other memory from the same arena.  This is a requirement
 *   of the addressing modes of some processors in supervisor code.
 *
 *   heap_arena is the general heap arena.
 *
 *   static_arena is the static memory arena.  Allocations from it
 *   are not subject to relocation so it is safe to use the memory
 *   physical address as well as the virtual address (e.g. the VA to
 *   PA translations are static).  Caches may import from static_arena;
 *   all other static memory allocations should use static_alloc_arena.
 *
 * On some platforms which have limited virtual address space, seg_kmem
 * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
 * segkp_bitmap is non-NULL, and each bit represents a page of virtual
 * address space which is actually seg_kp mapped.
 */

extern ulong_t *segkp_bitmap;	/* Is set if segkp is from the kernel heap */

char *kernelheap;		/* start of primary kernel heap */
char *ekernelheap;		/* end of primary kernel heap */
struct seg kvseg;		/* primary kernel heap segment */
struct seg kvseg_core;		/* "core" kernel heap segment */
struct seg kzioseg;		/* Segment for zio mappings */
vmem_t *heap_arena;		/* primary kernel heap arena */
vmem_t *heap_core_arena;	/* core kernel heap arena */
char *heap_core_base;		/* start of core kernel heap arena */
char *heap_lp_base;		/* start of kernel large page heap arena */
char *heap_lp_end;		/* end of kernel large page heap arena */
vmem_t *hat_memload_arena;	/* HAT translation data */
struct seg kvseg32;		/* 32-bit kernel heap segment */
vmem_t *heap32_arena;		/* 32-bit kernel heap arena */
vmem_t *heaptext_arena;		/* heaptext arena */
struct as kas;			/* kernel address space */
int segkmem_reloc;		/* enable/disable relocatable segkmem pages */
vmem_t *static_arena;		/* arena for caches to import static memory */
vmem_t *static_alloc_arena;	/* arena for allocating static memory */
vmem_t *zio_arena = NULL;	/* arena for allocating zio memory */
vmem_t *zio_alloc_arena = NULL;	/* arena for allocating zio memory */

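/*
 * Illustrative sketch (not part of the original file; compiled out):
 * the arena split described above comes down to whether a caller may
 * rely on a buffer's physical address staying valid.  The guard macro
 * and function below are hypothetical, shown only to demonstrate the
 * intended use of static_alloc_arena versus the general heap.
 */
#ifdef SEGKMEM_EXAMPLES		/* hypothetical guard; never defined */
static void *
example_static_buf(size_t size)
{
	/* VA -> PA translation of this buffer will never change */
	return (vmem_alloc(static_alloc_arena, size, VM_SLEEP));
}
#endif
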
/*
 * The seg_kmem driver can map part of the kernel heap with large pages.
 * Currently this functionality is implemented for sparc platforms only.
 *
 * The large page size "segkmem_lpsize" for the kernel heap is selected
 * in the platform specific code.  It can also be modified via the
 * /etc/system file.  Setting segkmem_lpsize to PAGESIZE in /etc/system
 * disables usage of large pages for the kernel heap.  "segkmem_lpshift"
 * is adjusted appropriately to match segkmem_lpsize.
 *
 * At boot time we carve from the kernel heap arena a range of virtual
 * addresses that will be used for large page mappings.  This range
 * [heap_lp_base, heap_lp_end) is set up as a separate vmem arena -
 * "heap_lp_arena".  We also create "kmem_lp_arena" that caches memory
 * already backed by large pages.  kmem_lp_arena imports virtual
 * segments from heap_lp_arena.
 */
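
/*
 * For example (illustrative; the exact value is an assumption for a
 * sparc machine where PAGESIZE is 0x2000), large kernel heap pages can
 * be disabled from /etc/system by forcing the large page size down to
 * a single page:
 *
 *	set segkmem_lpsize = 0x2000
 */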

size_t	segkmem_lpsize;
static uint_t	segkmem_lpshift = PAGESHIFT;
int	segkmem_lpszc = 0;

size_t	segkmem_kmemlp_quantum = 0x400000;	/* 4MB */
size_t	segkmem_heaplp_quantum;
vmem_t *heap_lp_arena;
static vmem_t *kmem_lp_arena;
static vmem_t *segkmem_ppa_arena;
static segkmem_lpcb_t segkmem_lpcb;

/*
 * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
 * consumed by the large page heap.  By default this parameter is set to 1/8
 * of physmem but can be adjusted through /etc/system either directly or
 * indirectly by setting "segkmem_kmemlp_pcnt" to the percent of physmem
 * we allow for the large page heap.
 */
size_t	segkmem_kmemlp_max;
static uint_t	segkmem_kmemlp_pcnt;

/*
 * Getting large pages for the kernel heap could be problematic due to
 * physical memory fragmentation.  That is why we allow preallocating
 * "segkmem_kmemlp_min" bytes at boot time.
 */
static size_t	segkmem_kmemlp_min;

/*
 * Throttling is used to avoid expensive tries to allocate large pages
 * for the kernel heap when a lot of successive attempts to do so fail.
 */
static ulong_t segkmem_lpthrottle_max = 0x400000;
static ulong_t segkmem_lpthrottle_start = 0x40;
static ulong_t segkmem_use_lpthrottle = 1;

/*
 * Freed pages accumulate on a garbage list until segkmem is ready,
 * at which point we call segkmem_gc() to free it all.
 */
typedef struct segkmem_gc_list {
	struct segkmem_gc_list	*gc_next;
	vmem_t			*gc_arena;
	size_t			gc_size;
} segkmem_gc_list_t;

static segkmem_gc_list_t *segkmem_gc_list;

/*
 * Allocations from the hat_memload arena add VM_MEMLOAD to their
 * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
 * to take steps to prevent infinite recursion.  HAT allocations also
 * must be non-relocatable to prevent recursive page faults.
 */
static void *
hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
{
	flags |= (VM_MEMLOAD | VM_NORELOC);
	return (segkmem_alloc(vmp, size, flags));
}

/*
 * Allocations from static_arena arena (or any other arena that uses
 * segkmem_alloc_permanent()) require non-relocatable (permanently
 * wired) memory pages, since these pages are referenced by physical
 * as well as virtual address.
 */
void *
segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
{
	return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
}
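
/*
 * Illustrative sketch (not part of the original file; compiled out):
 * a caller that needs one permanently wired, non-relocatable page
 * could combine segkmem_alloc_permanent() with static_arena as below.
 * The guard macro and function are hypothetical.
 */
#ifdef SEGKMEM_EXAMPLES		/* hypothetical guard; never defined */
static void *
example_permanent_page(void)
{
	return (segkmem_alloc_permanent(static_arena, PAGESIZE, VM_SLEEP));
}
#endif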

/*
 * Initialize kernel heap boundaries.
 */
void
kernelheap_init(
	void *heap_start,
	void *heap_end,
	char *first_avail,
	void *core_start,
	void *core_end)
{
	uintptr_t textbase;
	size_t core_size;
	size_t heap_size;
	vmem_t *heaptext_parent;
	size_t	heap_lp_size = 0;
#ifdef __sparc
	size_t kmem64_sz = kmem64_aligned_end - kmem64_base;
#endif	/* __sparc */

	kernelheap = heap_start;
	ekernelheap = heap_end;

#ifdef __sparc
	heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
	/*
	 * Bias heap_lp start address by kmem64_sz to reduce collisions
	 * in 4M kernel TSB between kmem64 area and heap_lp
	 */
	kmem64_sz = P2ROUNDUP(kmem64_sz, MMU_PAGESIZE256M);
	if (kmem64_sz <= heap_lp_size / 2)
		heap_lp_size -= kmem64_sz;
	heap_lp_base = ekernelheap - heap_lp_size;
	heap_lp_end = heap_lp_base + heap_lp_size;
#endif	/* __sparc */

	/*
	 * If this platform has a 'core' heap area, then the space for
	 * overflow module text should be carved out of the end of that
	 * heap.  Otherwise, it gets carved out of the general purpose
	 * heap.
	 */
	core_size = (uintptr_t)core_end - (uintptr_t)core_start;
	if (core_size > 0) {
		ASSERT(core_size >= HEAPTEXT_SIZE);
		textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
		core_size -= HEAPTEXT_SIZE;
	}
#ifndef __sparc
	else {
		ekernelheap -= HEAPTEXT_SIZE;
		textbase = (uintptr_t)ekernelheap;
	}
#endif

	heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
	heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
	    segkmem_alloc, segkmem_free);

	if (core_size > 0) {
		heap_core_arena = vmem_create("heap_core", core_start,
		    core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
		heap_core_base = core_start;
	} else {
		heap_core_arena = heap_arena;
		heap_core_base = kernelheap;
	}

	/*
	 * Reserve space for the large page heap.  If large pages for the
	 * kernel heap are enabled, the large page heap arena will be
	 * created later in the boot sequence in segkmem_heap_lp_init().
	 * Otherwise the allocated range will be returned back to the
	 * heap_arena.
	 */
	if (heap_lp_size) {
		(void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
		    heap_lp_base, heap_lp_end,
		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
	}

	/*
	 * Remove the already-spoken-for memory range
	 * [kernelheap, first_avail).
	 */
	(void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
	    0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

#ifdef __sparc
	heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
	    SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
	    NULL, NULL, 0, VM_SLEEP);
	/*
	 * The prom claims the physical and virtual resources used by
	 * panicbuf and intr_vec_table.  So reserve the space for panicbuf,
	 * intr_vec_table, and the reserved interrupt vector data structures
	 * from the 32-bit heap.
	 */
	(void) vmem_xalloc(heap32_arena, PANICBUFSIZE, PAGESIZE, 0, 0,
	    panicbuf, panicbuf + PANICBUFSIZE,
	    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

	(void) vmem_xalloc(heap32_arena, IVSIZE, PAGESIZE, 0, 0,
	    intr_vec_table, (caddr_t)intr_vec_table + IVSIZE,
	    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

	textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
	heaptext_parent = NULL;
#else	/* __sparc */
	heap32_arena = heap_core_arena;
	heaptext_parent = heap_core_arena;
#endif	/* __sparc */

	heaptext_arena = vmem_create("heaptext", (void *)textbase,
	    HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);

	/*
	 * Create a set of arenas for memory with static translations
	 * (e.g. VA -> PA translations cannot change).  Since using
	 * kernel pages by physical address implies it isn't safe to
	 * walk across page boundaries, the static_arena quantum must
	 * be PAGESIZE.  Any kmem caches that require static memory
	 * should source from static_arena, while direct allocations
	 * should only use static_alloc_arena.
	 */
	static_arena = vmem_create("static", NULL, 0, PAGESIZE,
	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
	static_alloc_arena = vmem_create("static_alloc", NULL, 0,
	    sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
	    0, VM_SLEEP);

	/*
	 * Create an arena for translation data (ptes, hmes, or hblks).
	 * We need an arena for this because hat_memload() is essential
	 * to vmem_populate() (see comments in common/os/vmem.c).
	 *
	 * Note: any kmem cache that allocates from hat_memload_arena
	 * must be created as a KMC_NOHASH cache (i.e. no external slab
	 * and bufctl structures to allocate) so that slab creation doesn't
	 * require anything more than a single vmem_alloc().
	 */
	hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
	    hat_memload_alloc, segkmem_free, heap_arena, 0,
	    VM_SLEEP | VMC_POPULATOR | VMC_DUMPSAFE);
}
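
/*
 * Illustrative sketch (not part of the original file; compiled out):
 * per the note above, a cache sourcing from hat_memload_arena must be
 * created KMC_NOHASH so that slab creation needs only one vmem_alloc().
 * The guard macro, cache name, and hme_size parameter are hypothetical.
 */
#ifdef SEGKMEM_EXAMPLES		/* hypothetical guard; never defined */
static kmem_cache_t *example_hme_cache;

static void
example_hme_cache_init(size_t hme_size)
{
	example_hme_cache = kmem_cache_create("example_hme_cache",
	    hme_size, 0, NULL, NULL, NULL, NULL, hat_memload_arena,
	    KMC_NOHASH);
}
#endif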

void
boot_mapin(caddr_t addr, size_t size)
{
	caddr_t eaddr;
	page_t *pp;
	pfn_t pfnum;

	if (page_resv(btop(size), KM_NOSLEEP) == 0)
		panic("boot_mapin: page_resv failed");

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pfnum = va_to_pfn(addr);
		if (pfnum == PFN_INVALID)
			continue;
		if ((pp = page_numtopp_nolock(pfnum)) == NULL)
			panic("boot_mapin(): No pp for pfnum = %lx", pfnum);

		/*
		 * must break up any large pages that may have constituent
		 * pages being utilized for BOP_ALLOC()'s before calling
		 * page_numtopp().  The locking code (i.e. page_reclaim())
		 * can't handle them.
		 */
		if (pp->p_szc != 0)
			page_boot_demote(pp);

		pp = page_numtopp(pfnum, SE_EXCL);
		if (pp == NULL || PP_ISFREE(pp))
			panic("boot_alloc: pp is NULL or free");

		/*
		 * If the cage is on but doesn't yet contain this page,
		 * mark it as non-relocatable.
		 */
		if (kcage_on && !PP_ISNORELOC(pp)) {
			PP_SETNORELOC(pp);
			PLCNT_XFER_NORELOC(pp);
		}

		(void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		page_unlock(pp);
#endif
	}
}

/*
 * Get pages from boot and hash them into the kernel's vp.
 * Used after page structs have been allocated, but before segkmem is ready.
 */
void *
boot_alloc(void *inaddr, size_t size, uint_t align)
{
	caddr_t addr = inaddr;

	if (bootops == NULL)
		prom_panic("boot_alloc: attempt to allocate memory after "
		    "BOP_GONE");

	size = ptob(btopr(size));
#ifdef __sparc
	if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
		panic("boot_alloc: bop_alloc_chunk failed");
#else
	if (BOP_ALLOC(bootops, addr, size, align) != addr)
		panic("boot_alloc: BOP_ALLOC failed");
#endif
	boot_mapin((caddr_t)addr, size);
	return (addr);
}

static void
segkmem_badop()
{
	panic("segkmem_badop");
}

#define	SEGKMEM_BADOP(t)	(t(*)())segkmem_badop

/*ARGSUSED*/
static faultcode_t
segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
    enum fault_type type, enum seg_rw rw)
{
	pgcnt_t npages;
	spgcnt_t pg;
	page_t *pp;
	struct vnode *vp = seg->s_data;

	ASSERT(RW_READ_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_fault: bad args");

	/*
	 * If it is one of segkp pages, call segkp_fault.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));

	if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
		return (FC_NOSUPPORT);

	npages = btopr(size);

	switch (type) {
	case F_SOFTLOCK:	/* lock down already-loaded translations */
		for (pg = 0; pg < npages; pg++) {
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_SHARED);
			if (pp == NULL) {
				/*
				 * Hmm, no page.  Does a kernel mapping
				 * exist for it?
				 */
				if (!hat_probe(kas.a_hat, addr)) {
					addr -= PAGESIZE;
					while (--pg >= 0) {
						pp = page_find(vp, (u_offset_t)
						    (uintptr_t)addr);
						if (pp)
							page_unlock(pp);
						addr -= PAGESIZE;
					}
					return (FC_NOMAP);
				}
			}
			addr += PAGESIZE;
		}
		if (rw == S_OTHER)
			hat_reserve(seg->s_as, addr, size);
		return (0);
	case F_SOFTUNLOCK:
		while (npages--) {
			pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
			if (pp)
				page_unlock(pp);
			addr += PAGESIZE;
		}
		return (0);
	default:
		return (FC_NOSUPPORT);
	}
	/*NOTREACHED*/
}

static int
segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_setprot: bad args");

	/*
	 * If it is one of segkp pages, call segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_SETPROT(segkp, addr, size, prot));

	if (prot == 0)
		hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
	else
		hat_chgprot(kas.a_hat, addr, size, prot);
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_CHECKPROT(segkp, addr, size, prot));

	segkmem_badop();
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_KLUSTER(segkp, addr, delta));

	segkmem_badop();
	return (0);
}

static void
segkmem_xdump_range(void *arg, void *start, size_t size)
{
	struct as *as = arg;
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	while (addr < addr_end) {
		pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
		if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
			dump_addpage(as, addr, pfn);
		addr += PAGESIZE;
		dump_timeleft = dump_timeout;
	}
}

static void
segkmem_dump_range(void *arg, void *start, size_t size)
{
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	/*
	 * If we are about to start dumping the range of addresses we
	 * carved out of the kernel heap for the large page heap, walk
	 * heap_lp_arena to find what segments are actually populated.
	 */
	if (SEGKMEM_USE_LARGEPAGES &&
	    addr == heap_lp_base && addr_end == heap_lp_end &&
	    vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
		vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_xdump_range, arg);
	} else {
		segkmem_xdump_range(arg, start, size);
	}
}

static void
segkmem_dump(struct seg *seg)
{
	/*
	 * The kernel's heap_arena (represented by kvseg) is a very large
	 * VA space, most of which is typically unused.  To speed up dumping
	 * we use vmem_walk() to quickly find the pieces of heap_arena that
	 * are actually in use.  We do the same for heap32_arena and
	 * heap_core.
	 *
	 * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
	 * may ultimately need to allocate memory.  Reentrant walks are
	 * necessarily imperfect snapshots.  The kernel heap continues
	 * to change during a live crash dump, for example.  For a normal
	 * crash dump, however, we know that there won't be any other threads
	 * messing with the heap.  Therefore, at worst, we may fail to dump
	 * the pages that get allocated by the act of dumping; but we will
	 * always dump every page that was allocated when the walk began.
	 *
	 * The other segkmem segments are dense (fully populated), so there's
	 * no need to use this technique when dumping them.
	 *
	 * Note: when adding special dump handling for any new sparsely-
	 * populated segments, be sure to add similar handling to the ::kgrep
	 * code in mdb.
	 */
	if (seg == &kvseg) {
		vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
#ifndef __sparc
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
#endif
	} else if (seg == &kvseg_core) {
		vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else if (seg == &kvseg32) {
		vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else if (seg == &kzioseg) {
		/*
		 * We don't want to dump pages attached to kzioseg since they
		 * contain file data from ZFS.  If this page's segment is
		 * kzioseg return instead of writing it to the dump device.
		 */
		return;
	} else {
		segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
	}
}

/*
 * lock/unlock kmem pages over a given range [addr, addr+len).
 * Returns a shadow list of pages in ppp.  If there are holes
 * in the range (e.g. some of the kernel mappings do not have
 * underlying page_ts) returns ENOTSUP so that as_pagelock()
 * will handle the range via as_fault(F_SOFTLOCK).
 */
/*ARGSUSED*/
static int
segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
    page_t ***ppp, enum lock_type type, enum seg_rw rw)
{
	page_t **pplist, *pp;
	pgcnt_t npages;
	spgcnt_t pg;
	size_t nb;
	struct vnode *vp = seg->s_data;

	ASSERT(ppp != NULL);

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_PAGELOCK(segkp, addr, len, ppp, type, rw));

	npages = btopr(len);
	nb = sizeof (page_t *) * npages;

	if (type == L_PAGEUNLOCK) {
		pplist = *ppp;
		ASSERT(pplist != NULL);

		for (pg = 0; pg < npages; pg++) {
			pp = pplist[pg];
			page_unlock(pp);
		}
		kmem_free(pplist, nb);
		return (0);
	}

	ASSERT(type == L_PAGELOCK);

	pplist = kmem_alloc(nb, KM_NOSLEEP);
	if (pplist == NULL) {
		*ppp = NULL;
		return (ENOTSUP);	/* take the slow path */
	}

	for (pg = 0; pg < npages; pg++) {
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
		if (pp == NULL) {
			while (--pg >= 0)
				page_unlock(pplist[pg]);
			kmem_free(pplist, nb);
			*ppp = NULL;
			return (ENOTSUP);
		}
		pplist[pg] = pp;
		addr += PAGESIZE;
	}

	*ppp = pplist;
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_GETMEMID(segkp, addr, memidp));

	segkmem_badop();
	return (0);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segkmem_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/*ARGSUSED*/
static int
segkmem_capable(struct seg *seg, segcapability_t capability)
{
	if (capability == S_CAPABILITY_NOMINFLT)
		return (1);
	return (0);
}

static struct seg_ops segkmem_ops = {
	SEGKMEM_BADOP(int),		/* dup */
	SEGKMEM_BADOP(int),		/* unmap */
	SEGKMEM_BADOP(void),		/* free */
	segkmem_fault,
	SEGKMEM_BADOP(faultcode_t),	/* faulta */
	segkmem_setprot,
	segkmem_checkprot,
	segkmem_kluster,
	SEGKMEM_BADOP(size_t),		/* swapout */
	SEGKMEM_BADOP(int),		/* sync */
	SEGKMEM_BADOP(size_t),		/* incore */
	SEGKMEM_BADOP(int),		/* lockop */
	SEGKMEM_BADOP(int),		/* getprot */
	SEGKMEM_BADOP(u_offset_t),	/* getoffset */
	SEGKMEM_BADOP(int),		/* gettype */
	SEGKMEM_BADOP(int),		/* getvp */
	SEGKMEM_BADOP(int),		/* advise */
	segkmem_dump,
	segkmem_pagelock,
	SEGKMEM_BADOP(int),		/* setpgsz */
	segkmem_getmemid,
	segkmem_getpolicy,		/* getpolicy */
	segkmem_capable,		/* capable */
};

int
segkmem_zio_create(struct seg *seg)
{
	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
	seg->s_ops = &segkmem_ops;
	seg->s_data = &zvp;
	kas.a_size += seg->s_size;
	return (0);
}

int
segkmem_create(struct seg *seg)
{
	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
	seg->s_ops = &segkmem_ops;
	seg->s_data = &kvp;
	kas.a_size += seg->s_size;
	return (0);
}

/*ARGSUSED*/
page_t *
segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
{
	struct seg kseg;
	int pgflags;
	struct vnode *vp = arg;

	if (vp == NULL)
		vp = &kvp;

	kseg.s_as = &kas;
	pgflags = PG_EXCL;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if ((vmflag & VM_NOSLEEP) == 0)
		pgflags |= PG_WAIT;
	if (vmflag & VM_PANIC)
		pgflags |= PG_PANIC;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;
	if (vmflag & VM_NORMALPRI) {
		ASSERT(vmflag & VM_NOSLEEP);
		pgflags |= PG_NORMALPRI;
	}

	return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kseg, addr));
}

/*
 * Allocate pages to back the virtual address range [addr, addr + size).
 * If addr is NULL, allocate the virtual address space as well.
 */
void *
segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
    page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
{
	page_t *ppl;
	caddr_t addr = inaddr;
	pgcnt_t npages = btopr(size);
	int allocflag;

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		return (NULL);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		return (NULL);
	}

	ppl = page_create_func(addr, size, vmflag, pcarg);
	if (ppl == NULL) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		page_unresv(npages);
		return (NULL);
	}

	/*
	 * Under certain conditions, we need to let the HAT layer know
	 * that it cannot safely allocate memory.  Allocations from
	 * the hat_memload vmem arena always need this, to prevent
	 * infinite recursion.
	 *
	 * In addition, the x86 hat cannot safely do memory
	 * allocations while in vmem_populate(), because there
	 * is no simple bound on its usage.
	 */
	if (vmflag & VM_MEMLOAD)
		allocflag = HAT_NO_KALLOC;
#if defined(__x86)
	else if (vmem_is_populator())
		allocflag = HAT_NO_KALLOC;
#endif
	else
		allocflag = 0;

	while (ppl != NULL) {
		page_t *pp = ppl;
		page_sub(&ppl, pp);
		ASSERT(page_iolock_assert(pp));
		ASSERT(PAGE_EXCL(pp));
		page_io_unlock(pp);
		hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
		    (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK | allocflag);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		if (vmflag & SEGKMEM_SHARELOCKED)
			page_downgrade(pp);
		else
			page_unlock(pp);
#endif
	}

	return (addr);
}
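
/*
 * Illustrative sketch (not part of the original file; compiled out):
 * segkmem_alloc_vn() below is essentially this call, with the physical
 * backing supplied by segkmem_page_create() and the pages hashed into
 * the vnode passed as pcarg.  The guard macro and function are
 * hypothetical.
 */
#ifdef SEGKMEM_EXAMPLES		/* hypothetical guard; never defined */
static void *
example_wired_alloc(size_t size)
{
	return (segkmem_xalloc(heap_arena, NULL, size, VM_SLEEP, 0,
	    segkmem_page_create, &kvp));
}
#endif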

static void *
segkmem_alloc_vn(vmem_t *vmp, size_t size, int vmflag, struct vnode *vp)
{
	void *addr;
	segkmem_gc_list_t *gcp, **prev_gcpp;

	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
#ifndef __sparc
		if (bootops->bsys_alloc == NULL)
			halt("Memory allocation between bop_alloc() and "
			    "kmem_alloc().\n");
#endif

		/*
		 * There's not a lot of memory to go around during boot,
		 * so recycle it if we can.
		 */
		for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
		    prev_gcpp = &gcp->gc_next) {
			if (gcp->gc_arena == vmp && gcp->gc_size == size) {
				*prev_gcpp = gcp->gc_next;
				return (gcp);
			}
		}

		addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
		if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
			panic("segkmem_alloc: boot_alloc failed");
		return (addr);
	}
	return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create, vp));
}

void *
segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
}

void *
segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (segkmem_alloc_vn(vmp, size, vmflag, &zvp));
}

/*
 * Any changes to this routine must also be carried over to
 * devmap_free_pages() in the seg_dev driver.  This is because
 * we currently don't have a special kernel segment for non-paged
 * kernel memory that is exported by drivers to user space.
 */
static void
segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
    void (*func)(page_t *))
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
		segkmem_gc_list_t *gc = inaddr;
		gc->gc_arena = vmp;
		gc->gc_size = size;
		gc->gc_next = segkmem_gc_list;
		segkmem_gc_list = gc;
		return;
	}

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
#if defined(__x86)
		pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL)
			panic("segkmem_free: page not found");
		if (!page_tryupgrade(pp)) {
			/*
			 * Some other thread has a sharelock.  Wait for
			 * it to drop the lock so we can free this page.
			 */
			page_unlock(pp);
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_EXCL);
		}
#else
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
#endif
		if (pp == NULL)
			panic("segkmem_free: page not found");
		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
		pp->p_lckcnt = 0;
		if (func)
			func(pp);
		else
			page_destroy(pp, 0);
	}
	if (func == NULL)
		page_unresv(npages);

	if (vmp != NULL)
		vmem_free(vmp, inaddr, size);
}

void
segkmem_xfree(vmem_t *vmp, void *inaddr, size_t size, void (*func)(page_t *))
{
	segkmem_free_vn(vmp, inaddr, size, &kvp, func);
}

void
segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
{
	segkmem_free_vn(vmp, inaddr, size, &kvp, NULL);
}

void
segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
{
	segkmem_free_vn(vmp, inaddr, size, &zvp, NULL);
}

void
segkmem_gc(void)
{
	ASSERT(kvseg.s_base != NULL);
	while (segkmem_gc_list != NULL) {
		segkmem_gc_list_t *gc = segkmem_gc_list;
		segkmem_gc_list = gc->gc_next;
		segkmem_free(gc->gc_arena, gc, gc->gc_size);
	}
}

/*
 * Legacy entry points from here to end of file.
 */
void
segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
    pfn_t pfn, uint_t flags)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
	hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
	    flags | HAT_LOAD_LOCK);
}

void
segkmem_mapout(struct seg *seg, void *addr, size_t size)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
}

void *
kmem_getpages(pgcnt_t npages, int kmflag)
{
	return (kmem_alloc(ptob(npages), kmflag));
}

void
kmem_freepages(void *addr, pgcnt_t npages)
{
	kmem_free(addr, ptob(npages));
}
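
/*
 * Illustrative sketch (not part of the original file; compiled out):
 * the legacy page-count interfaces above are thin wrappers, so callers
 * can treat them exactly like kmem_alloc()/kmem_free().  The guard
 * macro and function are hypothetical.
 */
#ifdef SEGKMEM_EXAMPLES		/* hypothetical guard; never defined */
static void
example_legacy_pages(void)
{
	void *buf = kmem_getpages(2, KM_SLEEP);	/* two pages; may sleep */

	/* ... use buf ... */
	kmem_freepages(buf, 2);
}
#endif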

/*
 * segkmem_page_create_large() allocates a large page to be used for the kmem
 * caches.  If kpr is enabled we ask for a relocatable page unless requested
 * otherwise.  If kpr is disabled we have to ask for a non-reloc page.
 */
static page_t *
segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
{
	int pgflags;

	pgflags = PG_EXCL;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if (!(vmflag & VM_NOSLEEP))
		pgflags |= PG_WAIT;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;
	if (vmflag & VM_NORMALPRI)
		pgflags |= PG_NORMALPRI;

	return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kvseg, addr, arg));
}
11210Sstevel@tonic-gate
/*
 * Allocate a large page to back the virtual address range
 * [addr, addr + size). If addr is NULL, allocate the virtual address
 * space as well.
 */
static void *
segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
    uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
    void *pcarg)
{
	caddr_t addr = inaddr, pa;
	size_t lpsize = segkmem_lpsize;
	pgcnt_t npages = btopr(size);
	pgcnt_t nbpages = btop(lpsize);
	pgcnt_t nlpages = size >> segkmem_lpshift;
	size_t ppasize = nbpages * sizeof (page_t *);
	page_t *pp, *rootpp, **ppa, *pplist = NULL;
	int i;

	vmflag |= VM_NOSLEEP;

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		return (NULL);
	}

	/*
	 * allocate an array we need for hat_memload_array.
	 * we use a separate arena to avoid recursion.
	 * we will not need this array when hat_memload_array learns pp++
	 */
	if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
		goto fail_array_alloc;
	}

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		goto fail_vmem_alloc;

	ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);

	/* create all the pages */
	for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
		if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
			goto fail_page_create;
		page_list_concat(&pplist, &pp);
	}

	/* at this point we have all the resources to complete the request */
	while ((rootpp = pplist) != NULL) {
		for (i = 0; i < nbpages; i++) {
			ASSERT(pplist != NULL);
			pp = pplist;
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
			ppa[i] = pp;
		}
		/*
		 * Load the locked entry. It's OK to preload the entry into the
		 * TSB since we now support large mappings in the kernel TSB.
		 */
		hat_memload_array(kas.a_hat,
		    (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
		    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK);

		for (--i; i >= 0; --i) {
			ppa[i]->p_lckcnt = 1;
			page_unlock(ppa[i]);
		}
	}

	vmem_free(segkmem_ppa_arena, ppa, ppasize);
	return (addr);

fail_page_create:
	while ((rootpp = pplist) != NULL) {
		for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
			ASSERT(pp != NULL);
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
		}
		page_destroy_pages(rootpp);
	}

	if (inaddr == NULL)
		vmem_free(vmp, addr, size);

fail_vmem_alloc:
	vmem_free(segkmem_ppa_arena, ppa, ppasize);

fail_array_alloc:
	page_unresv(npages);

	return (NULL);
}

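/*
 * The labels above form the usual goto-ladder unwind: each failure
 * jumps past the cleanup of resources that were never acquired, so
 * teardown runs in reverse order of acquisition. A minimal sketch of
 * the idiom (hypothetical resources, not code from this file):
 *
 *	if ((a = get_a()) == NULL)
 *		goto fail_a;
 *	if ((b = get_b()) == NULL)
 *		goto fail_b;
 *	return (ok);
 * fail_b:
 *	put_a(a);
 * fail_a:
 *	return (err);
 */
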
static void
segkmem_free_one_lp(caddr_t addr, size_t size)
{
	page_t *pp, *rootpp = NULL;
	pgcnt_t pgs_left = btopr(size);

	ASSERT(size == segkmem_lpsize);

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
		if (pp == NULL)
			panic("segkmem_free_one_lp: page not found");
		ASSERT(PAGE_EXCL(pp));
		pp->p_lckcnt = 0;
		if (rootpp == NULL)
			rootpp = pp;
	}
	ASSERT(rootpp != NULL);
	page_destroy_pages(rootpp);

	/* page_unresv() is done by the caller */
}

/*
 * This function is called to import new spans into vmem arenas like
 * kmem_default_arena and kmem_oversize_arena. It first tries to import
 * spans from the large page arena, kmem_lp_arena. To do this it may
 * have to round the requested size up to the kmem_lp_arena quantum. If
 * it cannot satisfy the rounded-up request, it falls back to regular
 * segkmem_alloc(), which satisfies the request by importing from the
 * "*vmp" arena.
 */
/*ARGSUSED*/
void *
segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
	size_t size;
	kthread_t *t = curthread;
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;

	ASSERT(sizep != NULL);

	size = *sizep;

	if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
	    !(vmflag & SEGKMEM_SHARELOCKED)) {

		size_t kmemlp_qnt = segkmem_kmemlp_quantum;
		size_t asize = P2ROUNDUP(size, kmemlp_qnt);
		void *addr = NULL;
		ulong_t *lpthrtp = &lpcb->lp_throttle;
		ulong_t lpthrt = *lpthrtp;
		int dowakeup = 0;
		int doalloc = 1;

		ASSERT(kmem_lp_arena != NULL);
		ASSERT(asize >= size);

		if (lpthrt != 0) {
			/* try to update the throttle value */
			lpthrt = atomic_add_long_nv(lpthrtp, 1);
			if (lpthrt >= segkmem_lpthrottle_max) {
				lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
				    segkmem_lpthrottle_max / 4);
			}

			/*
			 * once above the throttle start, back off
			 * exponentially from large page attempts and
			 * reaping: retry them only when the throttle
			 * count is an exact power of two
			 */
			if (lpthrt > segkmem_lpthrottle_start &&
			    (lpthrt & (lpthrt - 1))) {
				lpcb->allocs_throttled++;
				lpthrt--;
				if ((lpthrt & (lpthrt - 1)) == 0)
					kmem_reap();
				return (segkmem_alloc(vmp, size, vmflag));
			}
		}

		if (!(vmflag & VM_NOSLEEP) &&
		    segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
		    vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
		    asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {

			/*
			 * we are low on free memory in kmem_lp_arena;
			 * let only one thread allocate the heap_lp
			 * quantum-size chunk that everybody is going
			 * to share
			 */
			mutex_enter(&lpcb->lp_lock);

			if (lpcb->lp_wait) {

				/* we are not the first one - wait */
				cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
				if (vmem_size(kmem_lp_arena, VMEM_FREE) <
				    kmemlp_qnt) {
					doalloc = 0;
				}
			} else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
			    kmemlp_qnt) {

				/*
				 * we are the first one, make sure we import
				 * a large page
				 */
				if (asize == kmemlp_qnt)
					asize += kmemlp_qnt;
				dowakeup = 1;
				lpcb->lp_wait = 1;
			}

			mutex_exit(&lpcb->lp_lock);
		}

		/*
		 * VM_ABORT flag prevents sleeps in vmem_xalloc when
		 * large pages are not available. In that case this allocation
		 * attempt will fail and we will retry allocation with small
		 * pages. We also do not want to panic if this allocation fails
		 * because we are going to retry.
		 */
		if (doalloc) {
			addr = vmem_alloc(kmem_lp_arena, asize,
			    (vmflag | VM_ABORT) & ~VM_PANIC);

			if (dowakeup) {
				mutex_enter(&lpcb->lp_lock);
				ASSERT(lpcb->lp_wait != 0);
				lpcb->lp_wait = 0;
				cv_broadcast(&lpcb->lp_cv);
				mutex_exit(&lpcb->lp_lock);
			}
		}

		if (addr != NULL) {
			*sizep = asize;
			*lpthrtp = 0;
			return (addr);
		}

		if (vmflag & VM_NOSLEEP)
			lpcb->nosleep_allocs_failed++;
		else
			lpcb->sleep_allocs_failed++;
		lpcb->alloc_bytes_failed += size;

		/* if large page throttling is not started yet do it */
		if (segkmem_use_lpthrottle && lpthrt == 0) {
			lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
		}
	}
	return (segkmem_alloc(vmp, size, vmflag));
}

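/*
 * Worked example of the throttle above (illustrative numbers, assuming
 * segkmem_lpthrottle_start is 16): every failed large page allocation
 * bumps lp_throttle, and only counts that are exact powers of two fall
 * through to another large page attempt:
 *
 *	lpthrt == 33:	33 & 32 != 0	-> throttled, use small pages
 *	lpthrt == 64:	64 & 63 == 0	-> retry large pages
 *
 * so retries happen at 32, 64, 128, ... -- exponential backoff.
 */
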
void
segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
{
	if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
		segkmem_free(vmp, inaddr, size);
	} else {
		vmem_free(kmem_lp_arena, inaddr, size);
	}
}

/*
 * segkmem_alloc_lpi() imports virtual memory from the large page heap
 * arena into the kmem_lp arena. In the process it maps the imported
 * segment with large pages.
 */
static void *
segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	void *addr;

	ASSERT(size != 0);
	ASSERT(vmp == heap_lp_arena);

	/* do not allow the large page heap to grow beyond its limit */
	if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
		lpcb->allocs_limited++;
		return (NULL);
	}

	addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create_large, NULL);
	return (addr);
}

/*
 * segkmem_free_lpi() returns virtual memory back into the large page
 * heap arena from the kmem_lp arena. Before doing this it unmaps the
 * segment and frees the large pages used to map it.
 */
static void
segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
{
	pgcnt_t nlpages = size >> segkmem_lpshift;
	size_t lpsize = segkmem_lpsize;
	caddr_t addr = inaddr;
	pgcnt_t npages = btopr(size);
	int i;

	ASSERT(vmp == heap_lp_arena);
	ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
	ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);

	for (i = 0; i < nlpages; i++) {
		segkmem_free_one_lp(addr, lpsize);
		addr += lpsize;
	}

	page_unresv(npages);

	vmem_free(vmp, inaddr, size);
}

/*
 * This function is called at system boot time by kmem_init right after
 * the /etc/system file has been read. Based on the hardware
 * configuration and /etc/system settings, it decides whether the system
 * is going to use large pages. The initialization necessary to actually
 * start using large pages happens later in the process, after
 * segkmem_heap_lp_init() is called.
 */
int
segkmem_lpsetup()
{
	int use_large_pages = 0;

#ifdef __sparc

	size_t memtotal = physmem * PAGESIZE;

	if (heap_lp_base == NULL) {
		segkmem_lpsize = PAGESIZE;
		return (0);
	}

	/* get a platform dependent value of large page size for kernel heap */
	segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);

	if (segkmem_lpsize <= PAGESIZE) {
		/*
		 * put virtual space reserved for the large page kernel
		 * heap back to the regular heap
		 */
		vmem_xfree(heap_arena, heap_lp_base,
		    heap_lp_end - heap_lp_base);
		heap_lp_base = NULL;
		heap_lp_end = NULL;
		segkmem_lpsize = PAGESIZE;
		return (0);
	}

	/* set heap_lp quantum if necessary */
	if (segkmem_heaplp_quantum == 0 ||
	    (segkmem_heaplp_quantum & (segkmem_heaplp_quantum - 1)) ||
	    P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
		segkmem_heaplp_quantum = segkmem_lpsize;
	}

	/* set kmem_lp quantum if necessary */
	if (segkmem_kmemlp_quantum == 0 ||
	    (segkmem_kmemlp_quantum & (segkmem_kmemlp_quantum - 1)) ||
	    segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
		segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
	}

	/* set total amount of memory allowed for large page kernel heap */
	if (segkmem_kmemlp_max == 0) {
		if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
			segkmem_kmemlp_pcnt = 12;
		segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
	}
	segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
	    segkmem_heaplp_quantum);

	/* fix lp kmem preallocation request if necessary */
	if (segkmem_kmemlp_min) {
		segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
		    segkmem_heaplp_quantum);
		if (segkmem_kmemlp_min > segkmem_kmemlp_max)
			segkmem_kmemlp_min = segkmem_kmemlp_max;
	}

	use_large_pages = 1;
	segkmem_lpszc = page_szc(segkmem_lpsize);
	segkmem_lpshift = page_get_shift(segkmem_lpszc);

#endif
	return (use_large_pages);
}

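/*
 * The quantum checks above rely on the standard power-of-two test:
 * (x & (x - 1)) == 0 exactly when x is a power of two (or zero).
 * A sketch with hypothetical values: a 4M (0x400000) quantum passes,
 * while a 3M (0x300000) one fails the test and is reset to
 * segkmem_lpsize:
 *
 *	0x400000 & 0x3fffff == 0	-> accepted
 *	0x300000 & 0x2fffff != 0	-> rejected, reset
 */
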
void
segkmem_zio_init(void *zio_mem_base, size_t zio_mem_size)
{
	ASSERT(zio_mem_base != NULL);
	ASSERT(zio_mem_size != 0);

	/*
	 * To reduce VA space fragmentation, we set up quantum caches for the
	 * smaller sizes; we chose 32k because that translates to 128k VA
	 * slabs, which matches nicely with the common 128k zio_data bufs.
	 */
	zio_arena = vmem_create("zfs_file_data", zio_mem_base, zio_mem_size,
	    PAGESIZE, NULL, NULL, NULL, 32 * 1024, VM_SLEEP);

	zio_alloc_arena = vmem_create("zfs_file_data_buf", NULL, 0, PAGESIZE,
	    segkmem_zio_alloc, segkmem_zio_free, zio_arena, 0, VM_SLEEP);

	ASSERT(zio_arena != NULL);
	ASSERT(zio_alloc_arena != NULL);
}

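/*
 * With the 32k qcache_max passed to vmem_create() above, small
 * allocations from zio_arena are served from per-size quantum caches
 * rather than fragmenting the arena directly; a hypothetical caller
 * doing
 *
 *	void *buf = vmem_alloc(zio_arena, 16 * 1024, VM_SLEEP);
 *
 * gets its 16k from a quantum cache, while a 128k zio_data buffer
 * exceeds qcache_max and is carved straight out of the arena.
 */
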
#ifdef __sparc

static void *
segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
{
	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
	void *addr;

	if (ppaquantum <= PAGESIZE)
		return (segkmem_alloc(vmp, size, vmflag));

	ASSERT((size & (ppaquantum - 1)) == 0);

	addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
	if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
	    segkmem_page_create, NULL) == NULL) {
		vmem_xfree(vmp, addr, size);
		addr = NULL;
	}

	return (addr);
}

static void
segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
{
	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);

	ASSERT(addr != NULL);

	if (ppaquantum <= PAGESIZE) {
		segkmem_free(vmp, addr, size);
	} else {
		segkmem_free(NULL, addr, size);
		vmem_xfree(vmp, addr, size);
	}
}

void
segkmem_heap_lp_init()
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	size_t heap_lp_size = heap_lp_end - heap_lp_base;
	size_t lpsize = segkmem_lpsize;
	size_t ppaquantum;
	void *addr;

	if (segkmem_lpsize <= PAGESIZE) {
		ASSERT(heap_lp_base == NULL);
		ASSERT(heap_lp_end == NULL);
		return;
	}

	ASSERT(segkmem_heaplp_quantum >= lpsize);
	ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
	ASSERT(lpcb->lp_uselp == 0);
	ASSERT(heap_lp_base != NULL);
	ASSERT(heap_lp_end != NULL);
	ASSERT(heap_lp_base < heap_lp_end);
	ASSERT(heap_lp_arena == NULL);
	ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
	ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);

	/* create large page heap arena */
	heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
	    segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);

	ASSERT(heap_lp_arena != NULL);

	/* This arena caches memory already mapped by large pages */
	kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
	    segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);

	ASSERT(kmem_lp_arena != NULL);

	mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * this arena is used for the array of page_t pointers necessary
	 * to call hat_memload_array
	 */
	ppaquantum = btopr(lpsize) * sizeof (page_t *);
	segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
	    segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,
	    VM_SLEEP);

	ASSERT(segkmem_ppa_arena != NULL);

	/* preallocate some memory for the lp kernel heap */
	if (segkmem_kmemlp_min) {

		ASSERT(P2PHASE(segkmem_kmemlp_min,
		    segkmem_heaplp_quantum) == 0);

		if ((addr = segkmem_alloc_lpi(heap_lp_arena,
		    segkmem_kmemlp_min, VM_SLEEP)) != NULL) {

			addr = vmem_add(kmem_lp_arena, addr,
			    segkmem_kmemlp_min, VM_SLEEP);
			ASSERT(addr != NULL);
		}
	}

	lpcb->lp_uselp = 1;
}

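/*
 * The resulting arena stack (a sketch of the code above):
 *
 *	kmem_default_arena, kmem_oversize_arena
 *	    |  import via segkmem_alloc_lp()/segkmem_free_lp()
 *	kmem_lp_arena		-- caches spans already mapped with
 *	    |			   large pages
 *	    |  import via segkmem_alloc_lpi()/segkmem_free_lpi()
 *	heap_lp_arena		-- carves virtual addresses out of
 *				   [heap_lp_base, heap_lp_end)
 */
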
#endif