/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/vm.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dumphdr.h>
#include <sys/bootconf.h>
#include <sys/lgrp.h>
#include <vm/seg_kmem.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/vm_dep.h>
#include <vm/faultcode.h>
#include <sys/promif.h>
#include <vm/seg_kp.h>
#include <sys/bitmap.h>
#include <sys/mem_cage.h>

/*
 * seg_kmem is the primary kernel memory segment driver.  It
 * maps the kernel heap [kernelheap, ekernelheap), module text,
 * and all memory which was allocated before the VM was initialized
 * into kas.
 *
 * Pages which belong to seg_kmem are hashed into &kvp vnode at
 * an offset equal to (u_offset_t)virt_addr, and have p_lckcnt >= 1.
 * They must never be paged out since segkmem_fault() is a no-op to
 * prevent recursive faults.
 *
 * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
 * __x86 and are unlocked (p_sharelock == 0) on __sparc.  Once __x86
 * supports relocation the #ifdef kludges can be removed.
 *
 * seg_kmem pages may be subject to relocation by page_relocate(),
 * provided that the HAT supports it; if this is so, segkmem_reloc
 * will be set to a nonzero value.  All boot time allocated memory as
 * well as static memory is considered off limits to relocation.
 * Pages are "relocatable" if p_state does not have P_NORELOC set, so
 * we request P_NORELOC pages for memory that isn't safe to relocate.
 *
 * The kernel heap is logically divided up into four pieces:
 *
 *   heap32_arena is for allocations that require 32-bit absolute
 *   virtual addresses (e.g. code that uses 32-bit pointers/offsets).
 *
 *   heap_core is for allocations that require 2GB *relative*
 *   offsets; in other words all memory from heap_core is within
 *   2GB of all other memory from the same arena.  This is a requirement
 *   of the addressing modes of some processors in supervisor code.
 *
 *   heap_arena is the general heap arena.
 *
 *   static_arena is the static memory arena.  Allocations from it
 *   are not subject to relocation so it is safe to use the memory
 *   physical address as well as the virtual address (e.g. the VA to
 *   PA translations are static).  Caches may import from static_arena;
 *   all other static memory allocations should use static_alloc_arena.
 *
 * On some platforms which have limited virtual address space, seg_kmem
 * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
 * segkp_bitmap is non-NULL, and each bit represents a page of virtual
 * address space which is actually seg_kp mapped.
 */
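
/*
 * As an illustration of the &kvp hashing described above, the page backing
 * a given seg_kmem virtual address can be found with (as done by
 * segkmem_pagelock() below):
 *
 *      pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
 */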

extern ulong_t *segkp_bitmap;   /* Is set if segkp is from the kernel heap */

char *kernelheap;               /* start of primary kernel heap */
char *ekernelheap;              /* end of primary kernel heap */
struct seg kvseg;               /* primary kernel heap segment */
struct seg kvseg_core;          /* "core" kernel heap segment */
vmem_t *heap_arena;             /* primary kernel heap arena */
vmem_t *heap_core_arena;        /* core kernel heap arena */
char *heap_core_base;           /* start of core kernel heap arena */
char *heap_lp_base;             /* start of kernel large page heap arena */
char *heap_lp_end;              /* end of kernel large page heap arena */
vmem_t *hat_memload_arena;      /* HAT translation data */
struct seg kvseg32;             /* 32-bit kernel heap segment */
vmem_t *heap32_arena;           /* 32-bit kernel heap arena */
vmem_t *heaptext_arena;         /* heaptext arena */
struct as kas;                  /* kernel address space */
struct vnode kvp;               /* vnode for all segkmem pages */
int segkmem_reloc;              /* enable/disable relocatable segkmem pages */
vmem_t *static_arena;           /* arena for caches to import static memory */
vmem_t *static_alloc_arena;     /* arena for allocating static memory */

/*
 * The seg_kmem driver can map part of the kernel heap with large pages.
 * Currently this functionality is implemented for sparc platforms only.
 *
 * The large page size "segkmem_lpsize" for the kernel heap is selected in
 * platform specific code.  It can also be modified via the /etc/system file.
 * Setting segkmem_lpsize to PAGESIZE in /etc/system disables usage of large
 * pages for the kernel heap.  "segkmem_lpshift" is adjusted appropriately
 * to match segkmem_lpsize.
 *
 * At boot time we carve from the kernel heap arena a range of virtual
 * addresses that will be used for large page mappings.  This range
 * [heap_lp_base, heap_lp_end) is set up as a separate vmem arena,
 * "heap_lp_arena".  We also create "kmem_lp_arena" that caches memory
 * already backed by large pages.  kmem_lp_arena imports virtual segments
 * from heap_lp_arena.
 */

size_t  segkmem_lpsize;
static uint_t   segkmem_lpshift = PAGESHIFT;

size_t  segkmem_kmemlp_quantum = 0x400000;      /* 4MB */
size_t  segkmem_heaplp_quantum;
vmem_t *heap_lp_arena;
static vmem_t *kmem_lp_arena;
static vmem_t *segkmem_ppa_arena;
static segkmem_lpcb_t segkmem_lpcb;
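
/*
 * Illustrative /etc/system fragment (not part of this file): per the
 * comment above, large kernel heap pages can be disabled by forcing the
 * large page size down to the base page size, e.g. on sparc
 * (PAGESIZE == 0x2000):
 *
 *      set segkmem_lpsize = 0x2000
 */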

/*
 * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
 * consumed by the large page heap.  By default this parameter is set to
 * 1/8 of physmem but it can be adjusted through /etc/system either directly
 * or indirectly by setting "segkmem_kmemlp_pcnt" to the percent of physmem
 * we allow for the large page heap.
 */
size_t  segkmem_kmemlp_max;
static uint_t   segkmem_kmemlp_pcnt;

/*
 * Getting large pages for the kernel heap could be problematic due to
 * physical memory fragmentation.  That's why we allow preallocating
 * "segkmem_kmemlp_min" bytes at boot time.
 */
static size_t   segkmem_kmemlp_min;

/*
 * Throttling is used to avoid expensive attempts to allocate large pages
 * for the kernel heap when many successive attempts to do so have failed.
 */
static ulong_t segkmem_lpthrottle_max = 0x400000;
static ulong_t segkmem_lpthrottle_start = 0x40;
static ulong_t segkmem_use_lpthrottle = 1;

/*
 * Freed pages accumulate on a garbage list until segkmem is ready,
 * at which point we call segkmem_gc() to free it all.
 */
typedef struct segkmem_gc_list {
        struct segkmem_gc_list  *gc_next;
        vmem_t                  *gc_arena;
        size_t                  gc_size;
} segkmem_gc_list_t;

static segkmem_gc_list_t *segkmem_gc_list;

/*
 * Allocations from the hat_memload arena add VM_MEMLOAD to their
 * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
 * to take steps to prevent infinite recursion.  HAT allocations also
 * must be non-relocatable to prevent recursive page faults.
 */
static void *
hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
{
        flags |= (VM_MEMLOAD | VM_NORELOC);
        return (segkmem_alloc(vmp, size, flags));
}

/*
 * Allocations from static_arena (or any other arena that uses
 * segkmem_alloc_permanent()) require non-relocatable (permanently
 * wired) memory pages, since these pages are referenced by physical
 * as well as virtual address.
 */
void *
segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
{
        return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
}
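
/*
 * Direct (non-cache) allocations of permanently wired memory are expected
 * to come from static_alloc_arena once the arenas exist; a hypothetical
 * usage sketch:
 *
 *      buf = vmem_alloc(static_alloc_arena, bufsize, VM_SLEEP);
 *      ...
 *      vmem_free(static_alloc_arena, buf, bufsize);
 */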

/*
 * Initialize kernel heap boundaries.
 */
void
kernelheap_init(
        void *heap_start,
        void *heap_end,
        char *first_avail,
        void *core_start,
        void *core_end)
{
        uintptr_t textbase;
        size_t core_size;
        size_t heap_size;
        vmem_t *heaptext_parent;
        size_t heap_lp_size = 0;

        kernelheap = heap_start;
        ekernelheap = heap_end;

#ifdef __sparc
        heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
        heap_lp_base = ekernelheap - heap_lp_size;
        heap_lp_end = heap_lp_base + heap_lp_size;
#endif  /* __sparc */

        /*
         * If this platform has a 'core' heap area, then the space for
         * overflow module text should be carved out of the end of that
         * heap.  Otherwise, it gets carved out of the general purpose
         * heap.
         */
        core_size = (uintptr_t)core_end - (uintptr_t)core_start;
        if (core_size > 0) {
                ASSERT(core_size >= HEAPTEXT_SIZE);
                textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
                core_size -= HEAPTEXT_SIZE;
        }
#ifndef __sparc
        else {
                ekernelheap -= HEAPTEXT_SIZE;
                textbase = (uintptr_t)ekernelheap;
        }
#endif

        heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
        heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
            segkmem_alloc, segkmem_free);

        if (core_size > 0) {
                heap_core_arena = vmem_create("heap_core", core_start,
                    core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
                heap_core_base = core_start;
        } else {
                heap_core_arena = heap_arena;
                heap_core_base = kernelheap;
        }

        /*
         * Reserve space for the large page heap.  If large pages for the
         * kernel heap are enabled, the large page heap arena will be
         * created later in the boot sequence in segkmem_heap_lp_init().
         * Otherwise the allocated range will be returned to heap_arena.
         */
        if (heap_lp_size) {
                (void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
                    heap_lp_base, heap_lp_end,
                    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
        }

        /*
         * Remove the already-spoken-for memory range
         * [kernelheap, first_avail).
         */
        (void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
            0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

#ifdef __sparc
        heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
            SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
            NULL, NULL, 0, VM_SLEEP);

        textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
        heaptext_parent = NULL;
#else   /* __sparc */
        heap32_arena = heap_core_arena;
        heaptext_parent = heap_core_arena;
#endif  /* __sparc */

        heaptext_arena = vmem_create("heaptext", (void *)textbase,
            HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);

        /*
         * Create a set of arenas for memory with static translations
         * (e.g. VA -> PA translations cannot change).  Since using
         * kernel pages by physical address implies it isn't safe to
         * walk across page boundaries, the static_arena quantum must
         * be PAGESIZE.  Any kmem caches that require static memory
         * should source from static_arena, while direct allocations
         * should only use static_alloc_arena.
         */
        static_arena = vmem_create("static", NULL, 0, PAGESIZE,
            segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
        static_alloc_arena = vmem_create("static_alloc", NULL, 0,
            sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
            0, VM_SLEEP);

        /*
         * Create an arena for translation data (ptes, hmes, or hblks).
         * We need an arena for this because hat_memload() is essential
         * to vmem_populate() (see comments in common/os/vmem.c).
         *
         * Note: any kmem cache that allocates from hat_memload_arena
         * must be created as a KMC_NOHASH cache (i.e. no external slab
         * and bufctl structures to allocate) so that slab creation doesn't
         * require anything more than a single vmem_alloc().  An
         * illustrative sketch follows this function.
         */
        hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
            hat_memload_alloc, segkmem_free, heap_arena, 0,
            VM_SLEEP | VMC_POPULATOR);
}
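
/*
 * To illustrate the KMC_NOHASH note above, a cache sourcing its slabs from
 * hat_memload_arena would be created roughly as follows (hypothetical
 * names; a sketch, not code from this codebase):
 *
 *      cache = kmem_cache_create("example_hme_cache", sizeof (ex_hme_t), 0,
 *          NULL, NULL, NULL, NULL, hat_memload_arena, KMC_NOHASH);
 */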

/*
 * Grow kernel heap downward.
 */
void
kernelheap_extend(void *range_start, void *range_end)
{
        size_t len = (uintptr_t)range_end - (uintptr_t)range_start;

        ASSERT(range_start < range_end && range_end == kernelheap);

        if (vmem_add(heap_arena, range_start, len, VM_NOSLEEP) == NULL) {
                cmn_err(CE_WARN, "Could not grow kernel heap below 0x%p",
                    (void *)kernelheap);
        } else {
                kernelheap = range_start;
        }
}

void
boot_mapin(caddr_t addr, size_t size)
{
        caddr_t eaddr;
        page_t  *pp;
        pfn_t   pfnum;

        if (page_resv(btop(size), KM_NOSLEEP) == 0)
                panic("boot_mapin: page_resv failed");

        for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
                pfnum = va_to_pfn(addr);
                if ((pp = page_numtopp_nolock(pfnum)) == NULL)
                        panic("boot_mapin(): No pp for pfnum = %lx", pfnum);

                /*
                 * Must break up any large pages that may have constituent
                 * pages being utilized for BOP_ALLOC()'s before calling
                 * page_numtopp().  The locking code (i.e. page_reclaim())
                 * can't handle them.
                 */
                if (pp->p_szc != 0)
                        page_boot_demote(pp);

                pp = page_numtopp(pfnum, SE_EXCL);
                if (pp == NULL || PP_ISFREE(pp))
                        panic("boot_alloc: pp is NULL or free");

                /*
                 * If the cage is on but doesn't yet contain this page,
                 * mark it as non-relocatable.
                 */
                if (kcage_on && !PP_ISNORELOC(pp))
                        PP_SETNORELOC(pp);

                (void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
                pp->p_lckcnt = 1;
#if defined(__x86)
                page_downgrade(pp);
#else
                page_unlock(pp);
#endif
        }
}

/*
 * Get pages from boot and hash them into the kernel's vp.
 * Used after page structs have been allocated, but before segkmem is ready.
 */
void *
boot_alloc(void *inaddr, size_t size, uint_t align)
{
        caddr_t addr = inaddr;

        if (bootops == NULL)
                prom_panic("boot_alloc: attempt to allocate memory after "
                    "BOP_GONE");

        size = ptob(btopr(size));
        if (BOP_ALLOC(bootops, addr, size, align) != addr)
                panic("boot_alloc: BOP_ALLOC failed");
        boot_mapin((caddr_t)addr, size);
        return (addr);
}

static void
segkmem_badop()
{
        panic("segkmem_badop");
}

/*
 * Cast segkmem_badop() to a function pointer with return type t, so that
 * unsupported seg_ops entries all panic; e.g. SEGKMEM_BADOP(int) expands
 * to (int (*)())segkmem_badop.
 */
#define SEGKMEM_BADOP(t)        (t(*)())segkmem_badop

/*ARGSUSED*/
static faultcode_t
segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
        enum fault_type type, enum seg_rw rw)
{
        ASSERT(RW_READ_HELD(&seg->s_as->a_lock));

        if (seg->s_as != &kas || size > seg->s_size ||
            addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
                panic("segkmem_fault: bad args");

        if (segkp_bitmap && seg == &kvseg) {

                /*
                 * If it is one of segkp pages, call segkp_fault.
                 */
                if (BT_TEST(segkp_bitmap,
                    btop((uintptr_t)(addr - seg->s_base))))
                        return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));
        }

        switch (type) {
        case F_SOFTLOCK:        /* lock down already-loaded translations */
                if (rw == S_OTHER) {
                        hat_reserve(seg->s_as, addr, size);
                        return (0);
                }
                /*FALLTHROUGH*/
        case F_SOFTUNLOCK:
                if (rw == S_READ || rw == S_WRITE)
                        return (0);
                /*FALLTHROUGH*/
        default:
                break;
        }
        return (FC_NOSUPPORT);
}

static int
segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
        ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

        if (seg->s_as != &kas || size > seg->s_size ||
            addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
                panic("segkmem_setprot: bad args");

        if (segkp_bitmap && seg == &kvseg) {

                /*
                 * If it is one of segkp pages, call segkp.
                 */
                if (BT_TEST(segkp_bitmap,
                    btop((uintptr_t)(addr - seg->s_base))))
                        return (SEGOP_SETPROT(segkp, addr, size, prot));
        }

        if (prot == 0)
                hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
        else
                hat_chgprot(kas.a_hat, addr, size, prot);
        return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
        ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

        if (seg->s_as != &kas)
                segkmem_badop();

        if (segkp_bitmap && seg == &kvseg) {

                /*
                 * If it is one of segkp pages, call into segkp.
                 */
                if (BT_TEST(segkp_bitmap,
                    btop((uintptr_t)(addr - seg->s_base))))
                        return (SEGOP_CHECKPROT(segkp, addr, size, prot));
        }
        segkmem_badop();
        return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
        ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

        if (seg->s_as != &kas)
                segkmem_badop();

        if (segkp_bitmap && seg == &kvseg) {

                /*
                 * If it is one of segkp pages, call into segkp.
                 */
                if (BT_TEST(segkp_bitmap,
                    btop((uintptr_t)(addr - seg->s_base))))
                        return (SEGOP_KLUSTER(segkp, addr, delta));
        }
        segkmem_badop();
        return (0);
}

static void
segkmem_xdump_range(void *arg, void *start, size_t size)
{
        struct as *as = arg;
        caddr_t addr = start;
        caddr_t addr_end = addr + size;

        while (addr < addr_end) {
                pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
                if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
                        dump_addpage(as, addr, pfn);
                addr += PAGESIZE;
                dump_timeleft = dump_timeout;
        }
}

static void
segkmem_dump_range(void *arg, void *start, size_t size)
{
        caddr_t addr = start;
        caddr_t addr_end = addr + size;

        /*
         * If we are about to start dumping the range of addresses we
         * carved out of the kernel heap for the large page heap, walk
         * heap_lp_arena to find what segments are actually populated.
         */
        if (SEGKMEM_USE_LARGEPAGES &&
            addr == heap_lp_base && addr_end == heap_lp_end &&
            vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
                vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
                    segkmem_xdump_range, arg);
        } else {
                segkmem_xdump_range(arg, start, size);
        }
}

static void
segkmem_dump(struct seg *seg)
{
        /*
         * The kernel's heap_arena (represented by kvseg) is a very large
         * VA space, most of which is typically unused.  To speed up dumping
         * we use vmem_walk() to quickly find the pieces of heap_arena that
         * are actually in use.  We do the same for heap32_arena and
         * heap_core.
         *
         * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
         * may ultimately need to allocate memory.  Reentrant walks are
         * necessarily imperfect snapshots.  The kernel heap continues
         * to change during a live crash dump, for example.  For a normal
         * crash dump, however, we know that there won't be any other threads
         * messing with the heap.  Therefore, at worst, we may fail to dump
         * the pages that get allocated by the act of dumping; but we will
         * always dump every page that was allocated when the walk began.
         *
         * The other segkmem segments are dense (fully populated), so there's
         * no need to use this technique when dumping them.
         *
         * Note: when adding special dump handling for any new sparsely-
         * populated segments, be sure to add similar handling to the ::kgrep
         * code in mdb.
         */
        if (seg == &kvseg) {
                vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
                    segkmem_dump_range, seg->s_as);
#ifndef __sparc
                vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
                    segkmem_dump_range, seg->s_as);
#endif
        } else if (seg == &kvseg_core) {
                vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
                    segkmem_dump_range, seg->s_as);
        } else if (seg == &kvseg32) {
                vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
                    segkmem_dump_range, seg->s_as);
                vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
                    segkmem_dump_range, seg->s_as);
        } else {
                segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
        }
}

/*
 * Lock/unlock kmem pages over a given range [addr, addr + len).
 * Returns a shadow list of pages in ppp if *ppp is not NULL
 * and memory can be allocated to hold the shadow list.
 */
/*ARGSUSED*/
static int
segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
        page_t ***ppp, enum lock_type type, enum seg_rw rw)
{
        page_t **pplist, *pp;
        pgcnt_t npages;
        size_t nb;

        if (segkp_bitmap && seg == &kvseg) {
                /*
                 * If it is one of segkp pages, call into segkp.
                 */
                if (BT_TEST(segkp_bitmap,
                    btop((uintptr_t)(addr - seg->s_base))))
                        return (SEGOP_PAGELOCK(segkp, addr, len, ppp,
                            type, rw));
        }

        if (type == L_PAGERECLAIM)
                return (ENOTSUP);

        npages = btopr(len);
        nb = sizeof (page_t *) * npages;

        if (type == L_PAGEUNLOCK) {
                if ((pplist = *ppp) == NULL) {
                        /*
                         * No shadow list.  Iterate over the range
                         * using page_find() and unlock the pages
                         * that we encounter.
                         */
                        while (npages--) {
                                pp = page_find(&kvp,
                                    (u_offset_t)(uintptr_t)addr);
                                if (pp)
                                        page_unlock(pp);
                                addr += PAGESIZE;
                        }
                        return (0);
                }

                while (npages--) {
                        pp = *pplist++;
                        if (pp)
                                page_unlock(pp);
                }
                kmem_free(*ppp, nb);
                return (0);
        }

        ASSERT(type == L_PAGELOCK);

        pplist = NULL;
        if (ppp != NULL)
                *ppp = pplist = kmem_alloc(nb, KM_NOSLEEP);

        while (npages--) {
                pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
                /*
                 * We'd like to ASSERT(pp != NULL) here, but we can't
                 * because there are legitimate cases where the address
                 * isn't really mapped -- for instance, attaching a
                 * kernel debugger and poking at a non-existent address.
                 */
                if (pplist)
                        *pplist++ = pp;
                addr += PAGESIZE;
        }
        return (0);
}
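
/*
 * A sketch of the expected lock/unlock pairing for the pagelock operation
 * above (hypothetical caller; error handling elided):
 *
 *      page_t **pplist;
 *
 *      if (SEGOP_PAGELOCK(seg, addr, len, &pplist, L_PAGELOCK,
 *          S_READ) == 0) {
 *              ... use the pages; pplist is the shadow list, if any ...
 *              (void) SEGOP_PAGELOCK(seg, addr, len, &pplist,
 *                  L_PAGEUNLOCK, S_READ);
 *      }
 */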

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
        ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

        if (seg->s_as != &kas)
                segkmem_badop();

        if (segkp_bitmap && seg == &kvseg) {

                /*
                 * If it is one of segkp pages, call into segkp.
                 */
                if (BT_TEST(segkp_bitmap,
                    btop((uintptr_t)(addr - seg->s_base))))
                        return (SEGOP_GETMEMID(segkp, addr, memidp));
        }
        segkmem_badop();
        return (0);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segkmem_getpolicy(struct seg *seg, caddr_t addr)
{
        return (NULL);
}

static struct seg_ops segkmem_ops = {
        SEGKMEM_BADOP(int),             /* dup */
        SEGKMEM_BADOP(int),             /* unmap */
        SEGKMEM_BADOP(void),            /* free */
        segkmem_fault,
        SEGKMEM_BADOP(faultcode_t),     /* faulta */
        segkmem_setprot,
        segkmem_checkprot,
        segkmem_kluster,
        SEGKMEM_BADOP(size_t),          /* swapout */
        SEGKMEM_BADOP(int),             /* sync */
        SEGKMEM_BADOP(size_t),          /* incore */
        SEGKMEM_BADOP(int),             /* lockop */
        SEGKMEM_BADOP(int),             /* getprot */
        SEGKMEM_BADOP(u_offset_t),      /* getoffset */
        SEGKMEM_BADOP(int),             /* gettype */
        SEGKMEM_BADOP(int),             /* getvp */
        SEGKMEM_BADOP(int),             /* advise */
        segkmem_dump,
        segkmem_pagelock,
        SEGKMEM_BADOP(int),             /* setpgsz */
        segkmem_getmemid,
        segkmem_getpolicy,              /* getpolicy */
};

int
segkmem_create(struct seg *seg)
{
        ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
        seg->s_ops = &segkmem_ops;
        seg->s_data = NULL;
        kas.a_size += seg->s_size;
        return (0);
}
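
/*
 * A minimal sketch of how platform startup code might attach the primary
 * heap segment, assuming the usual seg_attach() interface (the real call
 * lives in platform startup code, not in this file):
 *
 *      rw_enter(&kas.a_lock, RW_WRITER);
 *      (void) seg_attach(&kas, kernelheap,
 *          ekernelheap - kernelheap, &kvseg);
 *      (void) segkmem_create(&kvseg);
 *      rw_exit(&kas.a_lock);
 */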

/*ARGSUSED*/
page_t *
segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
{
        struct seg kseg;
        int pgflags;

        kseg.s_as = &kas;
        pgflags = PG_EXCL;

        if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
                pgflags |= PG_NORELOC;
        if ((vmflag & VM_NOSLEEP) == 0)
                pgflags |= PG_WAIT;
        if (vmflag & VM_PANIC)
                pgflags |= PG_PANIC;
        if (vmflag & VM_PUSHPAGE)
                pgflags |= PG_PUSHPAGE;

        return (page_create_va(&kvp, (u_offset_t)(uintptr_t)addr, size,
            pgflags, &kseg, addr));
}

/*
 * Allocate pages to back the virtual address range [addr, addr + size).
 * If addr is NULL, allocate the virtual address space as well.
 */
void *
segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
        page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
{
        page_t *ppl;
        caddr_t addr = inaddr;
        pgcnt_t npages = btopr(size);
        int allocflag;

        if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
                return (NULL);

        ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

        if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
                if (inaddr == NULL)
                        vmem_free(vmp, addr, size);
                return (NULL);
        }

        ppl = page_create_func(addr, size, vmflag, pcarg);
        if (ppl == NULL) {
                if (inaddr == NULL)
                        vmem_free(vmp, addr, size);
                page_unresv(npages);
                return (NULL);
        }

        /*
         * Under certain conditions, we need to let the HAT layer know
         * that it cannot safely allocate memory.  Allocations from
         * the hat_memload vmem arena always need this, to prevent
         * infinite recursion.
         *
         * In addition, the x86 hat cannot safely do memory
         * allocations while in vmem_populate(), because there
         * is no simple bound on its usage.
         */
        if (vmflag & VM_MEMLOAD)
                allocflag = HAT_NO_KALLOC;
#if defined(__x86)
        else if (vmem_is_populator())
                allocflag = HAT_NO_KALLOC;
#endif
        else
                allocflag = 0;

        while (ppl != NULL) {
                page_t *pp = ppl;
                page_sub(&ppl, pp);
                ASSERT(page_iolock_assert(pp));
                ASSERT(PAGE_EXCL(pp));
                page_io_unlock(pp);
                hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
                    (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
                    HAT_LOAD_LOCK | allocflag);
                pp->p_lckcnt = 1;
#if defined(__x86)
                page_downgrade(pp);
#else
                if (vmflag & SEGKMEM_SHARELOCKED)
                        page_downgrade(pp);
                else
                        page_unlock(pp);
#endif
        }

        return (addr);
}

void *
segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
        void *addr;
        segkmem_gc_list_t *gcp, **prev_gcpp;

        if (kvseg.s_base == NULL) {
#ifndef __sparc
                if (bootops->bsys_alloc == NULL)
                        halt("Memory allocation between bop_alloc() and "
                            "kmem_alloc().\n");
#endif

                /*
                 * There's not a lot of memory to go around during boot,
                 * so recycle it if we can.
                 */
                for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
                    prev_gcpp = &gcp->gc_next) {
                        if (gcp->gc_arena == vmp && gcp->gc_size == size) {
                                *prev_gcpp = gcp->gc_next;
                                return (gcp);
                        }
                }

                addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
                if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
                        panic("segkmem_alloc: boot_alloc failed");
                return (addr);
        }
        return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
            segkmem_page_create, NULL));
}
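
/*
 * segkmem_alloc()/segkmem_free() form the usual backend pair for vmem
 * arenas that import page-backed kernel heap, as heap_arena itself does
 * in kernelheap_init() above; a hypothetical client arena:
 *
 *      arena = vmem_create("example_arena", NULL, 0, PAGESIZE,
 *          segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
 */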

/*
 * Any changes to this routine must also be carried over to
 * devmap_free_pages() in the seg_dev driver.  This is because
 * we currently don't have a special kernel segment for non-paged
 * kernel memory that is exported by drivers to user space.
 */
void
segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
{
        page_t *pp;
        caddr_t addr = inaddr;
        caddr_t eaddr;
        pgcnt_t npages = btopr(size);

        ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

        if (kvseg.s_base == NULL) {
                segkmem_gc_list_t *gc = inaddr;
                gc->gc_arena = vmp;
                gc->gc_size = size;
                gc->gc_next = segkmem_gc_list;
                segkmem_gc_list = gc;
                return;
        }

        hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

        for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
#if defined(__x86)
                pp = page_find(&kvp, (u_offset_t)(uintptr_t)addr);
                if (pp == NULL)
                        panic("segkmem_free: page not found");
                if (!page_tryupgrade(pp)) {
                        /*
                         * Some other thread has a sharelock.  Wait for
                         * it to drop the lock so we can free this page.
                         */
                        page_unlock(pp);
                        pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr,
                            SE_EXCL);
                }
#else
                pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
#endif
                if (pp == NULL)
                        panic("segkmem_free: page not found");
                /* Clear p_lckcnt so page_destroy() doesn't update availrmem */
                pp->p_lckcnt = 0;
                page_destroy(pp, 0);
        }
        page_unresv(npages);

        if (vmp != NULL)
                vmem_free(vmp, inaddr, size);
}

void
segkmem_gc(void)
{
        ASSERT(kvseg.s_base != NULL);
        while (segkmem_gc_list != NULL) {
                segkmem_gc_list_t *gc = segkmem_gc_list;
                segkmem_gc_list = gc->gc_next;
                segkmem_free(gc->gc_arena, gc, gc->gc_size);
        }
}

/*
 * Legacy entry points from here to end of file.
 */
void
segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
    pfn_t pfn, uint_t flags)
{
        hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
        hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
            flags | HAT_LOAD_LOCK);
}

void
segkmem_mapout(struct seg *seg, void *addr, size_t size)
{
        hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
}

void *
kmem_getpages(pgcnt_t npages, int kmflag)
{
        return (kmem_alloc(ptob(npages), kmflag));
}

void
kmem_freepages(void *addr, pgcnt_t npages)
{
        kmem_free(addr, ptob(npages));
}
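
/*
 * Usage sketch for the legacy page-unit wrappers above, e.g. to allocate
 * and later release two pages of kernel heap:
 *
 *      buf = kmem_getpages(2, KM_SLEEP);
 *      ...
 *      kmem_freepages(buf, 2);
 */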

/*
 * segkmem_page_create_large() allocates a large page to be used for the kmem
 * caches.  If kpr is enabled we ask for a relocatable page unless requested
 * otherwise.  If kpr is disabled we have to ask for a non-reloc page.
 */
static page_t *
segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
{
        int pgflags;

        pgflags = PG_EXCL;

        if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
                pgflags |= PG_NORELOC;
        if (!(vmflag & VM_NOSLEEP))
                pgflags |= PG_WAIT;
        if (vmflag & VM_PUSHPAGE)
                pgflags |= PG_PUSHPAGE;

        return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
            pgflags, &kvseg, addr, arg));
}

/*
 * Allocate a large page to back the virtual address range
 * [addr, addr + size).  If addr is NULL, allocate the virtual address
 * space as well.
 */
static void *
segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
    uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
    void *pcarg)
{
        caddr_t addr = inaddr, pa;
        size_t  lpsize = segkmem_lpsize;
        pgcnt_t npages = btopr(size);
        pgcnt_t nbpages = btop(lpsize);
        pgcnt_t nlpages = size >> segkmem_lpshift;
        size_t  ppasize = nbpages * sizeof (page_t *);
        page_t *pp, *rootpp, **ppa, *pplist = NULL;
        int i;

        if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
                return (NULL);
        }

        /*
         * Allocate an array we need for hat_memload_array.
         * We use a separate arena to avoid recursion.
         * We will not need this array when hat_memload_array learns pp++.
         */
        if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
                goto fail_array_alloc;
        }

        if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
                goto fail_vmem_alloc;

        ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);

        /* create all the pages */
        for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
                if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
                        goto fail_page_create;
                page_list_concat(&pplist, &pp);
        }

        /* at this point we have all the resources to complete the request */
        while ((rootpp = pplist) != NULL) {
                for (i = 0; i < nbpages; i++) {
                        ASSERT(pplist != NULL);
                        pp = pplist;
                        page_sub(&pplist, pp);
                        ASSERT(page_iolock_assert(pp));
                        page_io_unlock(pp);
                        ppa[i] = pp;
                }
                /*
                 * Load the locked entry.  It's OK to preload the entry into
                 * the TSB since we now support large mappings in the kernel
                 * TSB.
                 */
                hat_memload_array(kas.a_hat,
                    (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
                    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
                    HAT_LOAD_LOCK);

                for (--i; i >= 0; --i) {
                        ppa[i]->p_lckcnt = 1;
                        page_unlock(ppa[i]);
                }
        }

        vmem_free(segkmem_ppa_arena, ppa, ppasize);
        return (addr);

fail_page_create:
        while ((rootpp = pplist) != NULL) {
                for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
                        ASSERT(pp != NULL);
                        page_sub(&pplist, pp);
                        ASSERT(page_iolock_assert(pp));
                        page_io_unlock(pp);
                }
                page_destroy_pages(rootpp);
        }

        if (inaddr == NULL)
                vmem_free(vmp, addr, size);

fail_vmem_alloc:
        vmem_free(segkmem_ppa_arena, ppa, ppasize);

fail_array_alloc:
        page_unresv(npages);

        return (NULL);
}

static void
segkmem_free_one_lp(caddr_t addr, size_t size)
{
        page_t *pp, *rootpp = NULL;
        pgcnt_t pgs_left = btopr(size);

        ASSERT(size == segkmem_lpsize);

        hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

        for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
                pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
                if (pp == NULL)
                        panic("segkmem_free_one_lp: page not found");
                ASSERT(PAGE_EXCL(pp));
                pp->p_lckcnt = 0;
                if (rootpp == NULL)
                        rootpp = pp;
        }
        ASSERT(rootpp != NULL);
        page_destroy_pages(rootpp);

        /* page_unresv() is done by the caller */
}

/*
 * This function is called to import new spans into vmem arenas like
 * kmem_default_arena and kmem_oversize_arena.  It first tries to import
 * spans from the large page arena, kmem_lp_arena.  In order to do this it
 * might have to "upgrade the requested size" to the kmem_lp_arena quantum.
 * If it was not able to satisfy the upgraded request, it then calls the
 * regular segkmem_alloc(), which satisfies the request by importing from
 * the "*vmp" arena.  Worked examples of the size upgrade and of the
 * throttling policy follow this function.
 */
void *
segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, int vmflag)
{
        size_t size;
        kthread_t *t = curthread;
        segkmem_lpcb_t *lpcb = &segkmem_lpcb;

        ASSERT(sizep != NULL);

        size = *sizep;

        if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
            !(vmflag & SEGKMEM_SHARELOCKED)) {

                size_t kmemlp_qnt = segkmem_kmemlp_quantum;
                size_t asize = P2ROUNDUP(size, kmemlp_qnt);
                void *addr = NULL;
                ulong_t *lpthrtp = &lpcb->lp_throttle;
                ulong_t lpthrt = *lpthrtp;
                int dowakeup = 0;
                int doalloc = 1;

                ASSERT(kmem_lp_arena != NULL);
                ASSERT(asize >= size);

                if (lpthrt != 0) {
                        /* try to update the throttle value */
                        lpthrt = atomic_add_long_nv(lpthrtp, 1);
                        if (lpthrt >= segkmem_lpthrottle_max) {
                                lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
                                    segkmem_lpthrottle_max / 4);
                        }

                        /*
                         * When we get above the throttle start, do an
                         * exponential backoff at trying large pages and
                         * reaping.
                         */
                        if (lpthrt > segkmem_lpthrottle_start &&
                            (lpthrt & (lpthrt - 1))) {
                                lpcb->allocs_throttled++;
                                lpthrt--;
                                if ((lpthrt & (lpthrt - 1)) == 0)
                                        kmem_reap();
                                return (segkmem_alloc(vmp, size, vmflag));
                        }
                }

                if (!(vmflag & VM_NOSLEEP) &&
                    segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
                    vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
                    asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {

                        /*
                         * We are low on free memory in kmem_lp_arena.
                         * Let only one thread allocate a heap_lp
                         * quantum-sized chunk that everybody is going
                         * to share.
                         */
                        mutex_enter(&lpcb->lp_lock);

                        if (lpcb->lp_wait) {

                                /* we are not the first one - wait */
                                cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
                                if (vmem_size(kmem_lp_arena, VMEM_FREE) <
                                    kmemlp_qnt) {
                                        doalloc = 0;
                                }
                        } else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
                            kmemlp_qnt) {

                                /*
                                 * We are the first one; make sure we import
                                 * a large page.
                                 */
                                if (asize == kmemlp_qnt)
                                        asize += kmemlp_qnt;
                                dowakeup = 1;
                                lpcb->lp_wait = 1;
                        }

                        mutex_exit(&lpcb->lp_lock);
                }

                /*
                 * The VM_ABORT flag prevents sleeps in vmem_xalloc when
                 * large pages are not available.  In that case this
                 * allocation attempt will fail and we will retry the
                 * allocation with small pages.  We also do not want to
                 * panic if this allocation fails because we are going
                 * to retry.
                 */
                if (doalloc) {
                        addr = vmem_alloc(kmem_lp_arena, asize,
                            (vmflag | VM_ABORT) & ~VM_PANIC);

                        if (dowakeup) {
                                mutex_enter(&lpcb->lp_lock);
                                ASSERT(lpcb->lp_wait != 0);
                                lpcb->lp_wait = 0;
                                cv_broadcast(&lpcb->lp_cv);
                                mutex_exit(&lpcb->lp_lock);
                        }
                }

                if (addr != NULL) {
                        *sizep = asize;
                        *lpthrtp = 0;
                        return (addr);
                }

                if (vmflag & VM_NOSLEEP)
                        lpcb->nosleep_allocs_failed++;
                else
                        lpcb->sleep_allocs_failed++;
                lpcb->alloc_bytes_failed += size;

                /* if large page throttling is not started yet, start it */
                if (segkmem_use_lpthrottle && lpthrt == 0) {
                        lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
                }
        }
        return (segkmem_alloc(vmp, size, vmflag));
}
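
/*
 * Two worked examples of the policies implemented above:
 *
 * Size upgrade: with the default segkmem_kmemlp_quantum of 4MB, a 5MB
 * request is upgraded to asize = P2ROUNDUP(5MB, 4MB) = 8MB before the
 * kmem_lp_arena import is attempted; on success, *sizep is updated to
 * the upgraded size.
 *
 * Throttling: once lp_throttle exceeds segkmem_lpthrottle_start (0x40),
 * only counter values that are powers of two (0x80, 0x100, 0x200, ...)
 * fall through to a real large page attempt; every other call is diverted
 * straight to small pages, which yields an exponential backoff.
 */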
void
segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
{
	if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
		segkmem_free(vmp, inaddr, size);
	} else {
		vmem_free(kmem_lp_arena, inaddr, size);
	}
}
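#if 0
/*
 * Illustrative sketch, not part of the original source:
 * IS_KMEM_VA_LARGEPAGE() is supplied by platform headers; conceptually
 * it is just a range test against the large page heap, along the lines
 * of the hypothetical macro below.
 */
#define	EXAMPLE_IS_LP_VA(vaddr)	\
	((caddr_t)(vaddr) >= heap_lp_base && (caddr_t)(vaddr) < heap_lp_end)
#endif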
/*
 * segkmem_alloc_lpi() imports virtual memory from the large page heap
 * arena into the kmem_lp arena.  In the process it maps the imported
 * segment with large pages.
 */
static void *
segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	void  *addr;

	ASSERT(size != 0);
	ASSERT(vmp == heap_lp_arena);

	/* do not allow the large page heap to grow beyond its limit */
	if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
		lpcb->allocs_limited++;
		return (NULL);
	}

	addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create_large, NULL);
	return (addr);
}

/*
 * segkmem_free_lpi() returns virtual memory back into the large page
 * heap arena from the kmem_lp arena.  Before doing this it unmaps the
 * segment and frees the large pages used to map it.
 */
static void
segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
{
	pgcnt_t		nlpages = size >> segkmem_lpshift;
	size_t		lpsize = segkmem_lpsize;
	caddr_t		addr = inaddr;
	pgcnt_t		npages = btopr(size);
	int		i;

	ASSERT(vmp == heap_lp_arena);
	ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
	ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);

	for (i = 0; i < nlpages; i++) {
		segkmem_free_one_lp(addr, lpsize);
		addr += lpsize;
	}

	page_unresv(npages);

	vmem_free(vmp, inaddr, size);
}
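#if 0
/*
 * Illustrative sketch, not part of the original source: the growth cap
 * in segkmem_alloc_lpi() is plain arena accounting.  vmem_size() with
 * VMEM_ALLOC reports the bytes currently allocated from an arena, and
 * with VMEM_FREE the bytes free.  A hypothetical helper restating the
 * check:
 */
static int
example_lp_heap_full(void)
{
	return (vmem_size(heap_lp_arena, VMEM_ALLOC) >= segkmem_kmemlp_max);
}
#endif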
/*
 * This function is called at system boot time by kmem_init right after
 * the /etc/system file has been read.  Based on hardware configuration
 * and /etc/system settings it decides whether the system is going to
 * use large pages.  The initialization necessary to actually start
 * using large pages happens later in the boot process, after
 * segkmem_heap_lp_init() is called.
 */
int
segkmem_lpsetup()
{
	int use_large_pages = 0;

#ifdef __sparc

	size_t memtotal = physmem * PAGESIZE;

	if (heap_lp_base == NULL) {
		segkmem_lpsize = PAGESIZE;
		return (0);
	}

	/* get a platform-dependent large page size for the kernel heap */
	segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);

	if (segkmem_lpsize <= PAGESIZE) {
		/*
		 * put the virtual space reserved for the large page
		 * kernel heap back into the regular heap
		 */
		vmem_xfree(heap_arena, heap_lp_base,
		    heap_lp_end - heap_lp_base);
		heap_lp_base = NULL;
		heap_lp_end = NULL;
		segkmem_lpsize = PAGESIZE;
		return (0);
	}

	/* set heap_lp quantum if necessary */
	if (segkmem_heaplp_quantum == 0 ||
	    (segkmem_heaplp_quantum & (segkmem_heaplp_quantum - 1)) ||
	    P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
		segkmem_heaplp_quantum = segkmem_lpsize;
	}

	/* set kmem_lp quantum if necessary */
	if (segkmem_kmemlp_quantum == 0 ||
	    (segkmem_kmemlp_quantum & (segkmem_kmemlp_quantum - 1)) ||
	    segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
		segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
	}

	/* set total amount of memory allowed for large page kernel heap */
	if (segkmem_kmemlp_max == 0) {
		if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
			segkmem_kmemlp_pcnt = 12;
		segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
	}
	segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
	    segkmem_heaplp_quantum);

	/* fix the lp kmem preallocation request if necessary */
	if (segkmem_kmemlp_min) {
		segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
		    segkmem_heaplp_quantum);
		if (segkmem_kmemlp_min > segkmem_kmemlp_max)
			segkmem_kmemlp_min = segkmem_kmemlp_max;
	}

	use_large_pages = 1;
	segkmem_lpshift = page_get_shift(page_szc(segkmem_lpsize));

#endif
	return (use_large_pages);
}
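#if 0
/*
 * Illustrative check, not part of the original source: the quantum
 * fixups above enforce "nonzero, a power of two, and (for heap_lp) a
 * multiple of the large page size".  (x & (x - 1)) == 0 tests for a
 * power of two, and P2PHASE(x, align) is x's offset within an
 * align-sized boundary.  A hypothetical combined predicate:
 */
static int
example_valid_quantum(size_t q, size_t lpsize)
{
	return (q != 0 && (q & (q - 1)) == 0 && P2PHASE(q, lpsize) == 0);
}
#endif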
#ifdef __sparc

static void *
segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
{
	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
	void   *addr;

	if (ppaquantum <= PAGESIZE)
		return (segkmem_alloc(vmp, size, vmflag));

	ASSERT((size & (ppaquantum - 1)) == 0);

	addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
	if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
	    segkmem_page_create, NULL) == NULL) {
		vmem_xfree(vmp, addr, size);
		addr = NULL;
	}

	return (addr);
}

static void
segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
{
	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);

	ASSERT(addr != NULL);

	if (ppaquantum <= PAGESIZE) {
		segkmem_free(vmp, addr, size);
	} else {
		segkmem_free(NULL, addr, size);
		vmem_xfree(vmp, addr, size);
	}
}
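#if 0
/*
 * Illustrative arithmetic, not part of the original source: a mapping
 * of one large page needs one page_t pointer per constituent base
 * page, which is why the ppa arena's natural allocation unit is the
 * quantity computed by the hypothetical helper below.  For example,
 * with a 4MB large page and 8K base pages this is
 * 512 * sizeof (page_t *).
 */
static size_t
example_ppa_bytes(size_t lpsize)
{
	return (btopr(lpsize) * sizeof (page_t *));
}
#endif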
void
segkmem_heap_lp_init()
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	size_t heap_lp_size = heap_lp_end - heap_lp_base;
	size_t lpsize = segkmem_lpsize;
	size_t ppaquantum;
	void  *addr;

	if (segkmem_lpsize <= PAGESIZE) {
		ASSERT(heap_lp_base == NULL);
		ASSERT(heap_lp_end == NULL);
		return;
	}

	ASSERT(segkmem_heaplp_quantum >= lpsize);
	ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
	ASSERT(lpcb->lp_uselp == 0);
	ASSERT(heap_lp_base != NULL);
	ASSERT(heap_lp_end != NULL);
	ASSERT(heap_lp_base < heap_lp_end);
	ASSERT(heap_lp_arena == NULL);
	ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
	ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);

	/* create the large page heap arena */
	heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
	    segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);

	ASSERT(heap_lp_arena != NULL);

	/* this arena caches memory already mapped by large pages */
	kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
	    segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);

	ASSERT(kmem_lp_arena != NULL);

	mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * this arena is used for the array of page_t pointers necessary
	 * to call hat_memload_array
	 */
	ppaquantum = btopr(lpsize) * sizeof (page_t *);
	segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
	    segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,
	    VM_SLEEP);

	ASSERT(segkmem_ppa_arena != NULL);

	/* preallocate some memory for the lp kernel heap */
	if (segkmem_kmemlp_min) {

		ASSERT(P2PHASE(segkmem_kmemlp_min,
		    segkmem_heaplp_quantum) == 0);

		if ((addr = segkmem_alloc_lpi(heap_lp_arena,
		    segkmem_kmemlp_min, VM_SLEEP)) != NULL) {

			addr = vmem_add(kmem_lp_arena, addr,
			    segkmem_kmemlp_min, VM_SLEEP);
			ASSERT(addr != NULL);
		}
	}

	lpcb->lp_uselp = 1;
}

#endif
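#if 0
/*
 * Illustrative sketch, not part of the original source: the arena stack
 * built above follows the generic vmem pattern of a caching arena with
 * no initial span that imports quantum-sized spans from a source arena
 * through an alloc/free pair.  The names below are hypothetical; the
 * real import functions assert which source arena they serve.
 */
static vmem_t *
example_make_cache_arena(const char *name, vmem_t *source, size_t quantum,
    vmem_alloc_t *import, vmem_free_t *release)
{
	return (vmem_create(name, NULL, 0, quantum,
	    import, release, source, 0, VM_SLEEP));
}
#endif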