/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/vm.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dumphdr.h>
#include <sys/bootconf.h>
#include <sys/lgrp.h>
#include <vm/seg_kmem.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/vm_dep.h>
#include <vm/faultcode.h>
#include <sys/promif.h>
#include <vm/seg_kp.h>
#include <sys/bitmap.h>
#include <sys/mem_cage.h>

/*
 * seg_kmem is the primary kernel memory segment driver.  It
 * maps the kernel heap [kernelheap, ekernelheap), module text,
 * and all memory which was allocated before the VM was initialized
 * into kas.
 *
 * Pages which belong to seg_kmem are hashed into &kvp vnode at
 * an offset equal to (u_offset_t)virt_addr, and have p_lckcnt >= 1.
 * They must never be paged out since segkmem_fault() is a no-op to
 * prevent recursive faults.
 *
 * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
 * __x86 and are unlocked (p_sharelock == 0) on __sparc.  Once __x86
 * supports relocation the #ifdef kludges can be removed.
 *
 * seg_kmem pages may be subject to relocation by page_relocate(),
 * provided that the HAT supports it; if this is so, segkmem_reloc
 * will be set to a nonzero value.  All boot time allocated memory as
 * well as static memory is considered off limits to relocation.
 * Pages are "relocatable" if p_state does not have P_NORELOC set, so
 * we request P_NORELOC pages for memory that isn't safe to relocate.
 *
 * The kernel heap is logically divided up into four pieces:
 *
 * heap32_arena is for allocations that require 32-bit absolute
 * virtual addresses (e.g. code that uses 32-bit pointers/offsets).
 *
 * heap_core is for allocations that require 2GB *relative*
 * offsets; in other words all memory from heap_core is within
 * 2GB of all other memory from the same arena.  This is a requirement
 * of the addressing modes of some processors in supervisor code.
 *
 * heap_arena is the general heap arena.
 *
 * static_arena is the static memory arena.  Allocations from it
 * are not subject to relocation so it is safe to use the memory
 * physical address as well as the virtual address (e.g. the VA to
 * PA translations are static).  Caches may import from static_arena;
 * all other static memory allocations should use static_alloc_arena.
 *
 * On some platforms which have limited virtual address space, seg_kmem
 * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
 * segkp_bitmap is non-NULL, and each bit represents a page of virtual
 * address space which is actually seg_kp mapped.
 */

extern ulong_t *segkp_bitmap;	/* Is set if segkp is from the kernel heap */

char *kernelheap;		/* start of primary kernel heap */
char *ekernelheap;		/* end of primary kernel heap */
struct seg kvseg;		/* primary kernel heap segment */
struct seg kvseg_core;		/* "core" kernel heap segment */
struct seg kzioseg;		/* segment for zio mappings */
vmem_t *heap_arena;		/* primary kernel heap arena */
vmem_t *heap_core_arena;	/* core kernel heap arena */
char *heap_core_base;		/* start of core kernel heap arena */
char *heap_lp_base;		/* start of kernel large page heap arena */
char *heap_lp_end;		/* end of kernel large page heap arena */
vmem_t *hat_memload_arena;	/* HAT translation data */
struct seg kvseg32;		/* 32-bit kernel heap segment */
vmem_t *heap32_arena;		/* 32-bit kernel heap arena */
vmem_t *heaptext_arena;		/* heaptext arena */
struct as kas;			/* kernel address space */
struct vnode kvp;		/* vnode for all segkmem pages */
struct vnode zvp;		/* vnode for zfs pages */
int segkmem_reloc;		/* enable/disable relocatable segkmem pages */
vmem_t *static_arena;		/* arena for caches to import static memory */
vmem_t *static_alloc_arena;	/* arena for allocating static memory */
vmem_t *zio_arena = NULL;	/* arena for allocating zio memory */
vmem_t *zio_alloc_arena = NULL;	/* arena for allocating zio memory */

/*
 * The seg_kmem driver can map part of the kernel heap with large pages.
 * Currently this functionality is implemented for sparc platforms only.
 *
 * The large page size "segkmem_lpsize" for the kernel heap is selected in
 * platform specific code.  It can also be modified via the /etc/system file.
 * Setting segkmem_lpsize to PAGESIZE in /etc/system disables usage of large
 * pages for the kernel heap.  "segkmem_lpshift" is adjusted appropriately to
 * match segkmem_lpsize.
 *
 * At boot time we carve from the kernel heap arena a range of virtual
 * addresses that will be used for large page mappings.  This range
 * [heap_lp_base, heap_lp_end) is set up as a separate vmem arena -
 * "heap_lp_arena".  We also create "kmem_lp_arena" that caches memory already
 * backed by large pages.  kmem_lp_arena imports virtual segments from
 * heap_lp_arena.
 */

size_t	segkmem_lpsize;
static	uint_t	segkmem_lpshift = PAGESHIFT;
int	segkmem_lpszc = 0;

size_t	segkmem_kmemlp_quantum = 0x400000;	/* 4MB */
size_t	segkmem_heaplp_quantum;
vmem_t *heap_lp_arena;
static	vmem_t *kmem_lp_arena;
static	vmem_t *segkmem_ppa_arena;
static	segkmem_lpcb_t segkmem_lpcb;

/*
 * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
 * consumed by the large page heap.  By default this parameter is set to 1/8
 * of physmem but can be adjusted through /etc/system either directly or
 * indirectly by setting "segkmem_kmemlp_pcnt" to the percentage of physmem
 * we allow for the large page heap.
 */
size_t	segkmem_kmemlp_max;
static	uint_t	segkmem_kmemlp_pcnt;

/*
 * Getting large pages for the kernel heap could be problematic due to
 * physical memory fragmentation.  That's why we allow "segkmem_kmemlp_min"
 * bytes to be preallocated at boot time.
 */
static	size_t	segkmem_kmemlp_min;

/*
 * Throttling is used to avoid expensive attempts to allocate large pages
 * for the kernel heap when many successive attempts to do so fail.
 */
static ulong_t segkmem_lpthrottle_max = 0x400000;
static ulong_t segkmem_lpthrottle_start = 0x40;
static ulong_t segkmem_use_lpthrottle = 1;

/*
 * Freed pages accumulate on a garbage list until segkmem is ready,
 * at which point we call segkmem_gc() to free it all.
 */
typedef struct segkmem_gc_list {
	struct segkmem_gc_list	*gc_next;
	vmem_t			*gc_arena;
	size_t			gc_size;
} segkmem_gc_list_t;

static segkmem_gc_list_t *segkmem_gc_list;

/*
 * Allocations from the hat_memload arena add VM_MEMLOAD to their
 * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
 * to take steps to prevent infinite recursion.  HAT allocations also
 * must be non-relocatable to prevent recursive page faults.
 */
static void *
hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
{
	flags |= (VM_MEMLOAD | VM_NORELOC);
	return (segkmem_alloc(vmp, size, flags));
}

/*
 * Allocations from the static_arena (or any other arena that uses
 * segkmem_alloc_permanent()) require non-relocatable (permanently
 * wired) memory pages, since these pages are referenced by physical
 * as well as virtual address.
 */
void *
segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
{
	return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
}

/*
 * Initialize kernel heap boundaries.
 */
void
kernelheap_init(
	void *heap_start,
	void *heap_end,
	char *first_avail,
	void *core_start,
	void *core_end)
{
	uintptr_t textbase;
	size_t core_size;
	size_t heap_size;
	vmem_t *heaptext_parent;
	size_t	heap_lp_size = 0;
#ifdef __sparc
	size_t kmem64_sz = kmem64_aligned_end - kmem64_base;
#endif	/* __sparc */

	kernelheap = heap_start;
	ekernelheap = heap_end;

#ifdef __sparc
	heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
	/*
	 * Bias the heap_lp start address by kmem64_sz to reduce collisions
	 * in the 4M kernel TSB between the kmem64 area and heap_lp.
	 */
	kmem64_sz = P2ROUNDUP(kmem64_sz, MMU_PAGESIZE256M);
	if (kmem64_sz <= heap_lp_size / 2)
		heap_lp_size -= kmem64_sz;
	heap_lp_base = ekernelheap - heap_lp_size;
	heap_lp_end = heap_lp_base + heap_lp_size;
#endif	/* __sparc */

	/*
	 * If this platform has a 'core' heap area, then the space for
	 * overflow module text should be carved out of the end of that
	 * heap.  Otherwise, it gets carved out of the general purpose
	 * heap.
	 */
	core_size = (uintptr_t)core_end - (uintptr_t)core_start;
	if (core_size > 0) {
		ASSERT(core_size >= HEAPTEXT_SIZE);
		textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
		core_size -= HEAPTEXT_SIZE;
	}
#ifndef __sparc
	else {
		ekernelheap -= HEAPTEXT_SIZE;
		textbase = (uintptr_t)ekernelheap;
	}
#endif

	heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
	heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
	    segkmem_alloc, segkmem_free);

	if (core_size > 0) {
		heap_core_arena = vmem_create("heap_core", core_start,
		    core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
		heap_core_base = core_start;
	} else {
		heap_core_arena = heap_arena;
		heap_core_base = kernelheap;
	}

	/*
	 * Reserve space for the large page heap.  If large pages for the
	 * kernel heap are enabled, the large page heap arena will be
	 * created later in the boot sequence in segkmem_heap_lp_init().
	 * Otherwise the reserved range will be returned to the heap_arena.
	 */
	if (heap_lp_size) {
		(void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
		    heap_lp_base, heap_lp_end,
		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
	}

	/*
	 * Remove the already-spoken-for memory range
	 * [kernelheap, first_avail).
	 */
	(void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
	    0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

#ifdef __sparc
	heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
	    SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
	    NULL, NULL, 0, VM_SLEEP);

	textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
	heaptext_parent = NULL;
#else	/* __sparc */
	heap32_arena = heap_core_arena;
	heaptext_parent = heap_core_arena;
#endif	/* __sparc */

	heaptext_arena = vmem_create("heaptext", (void *)textbase,
	    HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);

	/*
	 * Create a set of arenas for memory with static translations
	 * (e.g. VA -> PA translations cannot change).  Since using
	 * kernel pages by physical address implies it isn't safe to
	 * walk across page boundaries, the static_arena quantum must
	 * be PAGESIZE.
	 * Any kmem caches that require static memory
	 * should source from static_arena, while direct allocations
	 * should only use static_alloc_arena.
	 */
	static_arena = vmem_create("static", NULL, 0, PAGESIZE,
	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
	static_alloc_arena = vmem_create("static_alloc", NULL, 0,
	    sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
	    0, VM_SLEEP);

	/*
	 * Create an arena for translation data (ptes, hmes, or hblks).
	 * We need an arena for this because hat_memload() is essential
	 * to vmem_populate() (see comments in common/os/vmem.c).
	 *
	 * Note: any kmem cache that allocates from hat_memload_arena
	 * must be created as a KMC_NOHASH cache (i.e. no external slab
	 * and bufctl structures to allocate) so that slab creation doesn't
	 * require anything more than a single vmem_alloc().
	 */
	hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
	    hat_memload_alloc, segkmem_free, heap_arena, 0,
	    VM_SLEEP | VMC_POPULATOR);
}

/*
 * Grow kernel heap downward.
 */
void
kernelheap_extend(void *range_start, void *range_end)
{
	size_t len = (uintptr_t)range_end - (uintptr_t)range_start;

	ASSERT(range_start < range_end && range_end == kernelheap);

	if (vmem_add(heap_arena, range_start, len, VM_NOSLEEP) == NULL) {
		cmn_err(CE_WARN, "Could not grow kernel heap below 0x%p",
		    (void *)kernelheap);
	} else {
		kernelheap = range_start;
	}
}
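
/*
 * Map in pages that were handed out by boot: reserve them, hash each
 * page into &kvp at an offset equal to its virtual address, and leave
 * it locked with p_lckcnt set to 1.  Large boot pages are demoted
 * first since the page locking code can't handle them.
 */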
void
boot_mapin(caddr_t addr, size_t size)
{
	caddr_t eaddr;
	page_t	*pp;
	pfn_t	 pfnum;

	if (page_resv(btop(size), KM_NOSLEEP) == 0)
		panic("boot_mapin: page_resv failed");

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pfnum = va_to_pfn(addr);
		if ((pp = page_numtopp_nolock(pfnum)) == NULL)
			panic("boot_mapin(): No pp for pfnum = %lx", pfnum);

		/*
		 * Must break up any large pages that may have constituent
		 * pages being utilized for BOP_ALLOC()'s before calling
		 * page_numtopp().  The locking code (i.e. page_reclaim())
		 * can't handle them.
		 */
		if (pp->p_szc != 0)
			page_boot_demote(pp);

		pp = page_numtopp(pfnum, SE_EXCL);
		if (pp == NULL || PP_ISFREE(pp))
			panic("boot_alloc: pp is NULL or free");

		/*
		 * If the cage is on but doesn't yet contain this page,
		 * mark it as non-relocatable.
		 */
		if (kcage_on && !PP_ISNORELOC(pp))
			PP_SETNORELOC(pp);

		(void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		page_unlock(pp);
#endif
	}
}

/*
 * Get pages from boot and hash them into the kernel's vp.
 * Used after page structs have been allocated, but before segkmem is ready.
 */
void *
boot_alloc(void *inaddr, size_t size, uint_t align)
{
	caddr_t addr = inaddr;

	if (bootops == NULL)
		prom_panic("boot_alloc: attempt to allocate memory after "
		    "BOP_GONE");

	size = ptob(btopr(size));
	if (BOP_ALLOC(bootops, addr, size, align) != addr)
		panic("boot_alloc: BOP_ALLOC failed");
	boot_mapin((caddr_t)addr, size);
	return (addr);
}

static void
segkmem_badop()
{
	panic("segkmem_badop");
}

#define	SEGKMEM_BADOP(t)	(t(*)())segkmem_badop
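
/*
 * Fault handler for kernel segments.  Only F_SOFTLOCK and F_SOFTUNLOCK
 * are supported, by share-locking and unlocking the underlying pages;
 * everything else fails with FC_NOSUPPORT so that kernel mappings can
 * never fault recursively.  Ranges that actually belong to segkp are
 * redirected to segkp_fault.
 */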
/*ARGSUSED*/
static faultcode_t
segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
	enum fault_type type, enum seg_rw rw)
{
	pgcnt_t npages;
	spgcnt_t pg;
	page_t *pp;
	struct vnode *vp = seg->s_data;

	ASSERT(RW_READ_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_fault: bad args");

	if (segkp_bitmap && seg == &kvseg) {
		/*
		 * If it is one of segkp pages, call segkp_fault.
		 */
		if (BT_TEST(segkp_bitmap,
		    btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));
	}

	if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
		return (FC_NOSUPPORT);

	npages = btopr(size);

	switch (type) {
	case F_SOFTLOCK:	/* lock down already-loaded translations */
		for (pg = 0; pg < npages; pg++) {
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_SHARED);
			if (pp == NULL) {
				/*
				 * Hmm, no page.  Does a kernel mapping
				 * exist for it?
				 */
				if (!hat_probe(kas.a_hat, addr)) {
					addr -= PAGESIZE;
					while (--pg >= 0) {
						pp = page_find(vp,
						    (u_offset_t)(uintptr_t)addr);
						if (pp)
							page_unlock(pp);
						addr -= PAGESIZE;
					}
					return (FC_NOMAP);
				}
			}
			addr += PAGESIZE;
		}
		if (rw == S_OTHER)
			hat_reserve(seg->s_as, addr, size);
		return (0);
	case F_SOFTUNLOCK:
		while (npages--) {
			pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
			if (pp)
				page_unlock(pp);
			addr += PAGESIZE;
		}
		return (0);
	default:
		return (FC_NOSUPPORT);
	}
	/*NOTREACHED*/
}
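
/*
 * Change the protections of a range of kernel mappings, or unload the
 * range entirely when prot is 0.  Ranges that actually belong to segkp
 * are redirected to segkp.
 */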
static int
segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_setprot: bad args");

	if (segkp_bitmap && seg == &kvseg) {

		/*
		 * If it is one of segkp pages, call segkp.
		 */
		if (BT_TEST(segkp_bitmap,
		    btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_SETPROT(segkp, addr, size, prot));
	}

	if (prot == 0)
		hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
	else
		hat_chgprot(kas.a_hat, addr, size, prot);
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	if (segkp_bitmap && seg == &kvseg) {

		/*
		 * If it is one of segkp pages, call into segkp.
		 */
		if (BT_TEST(segkp_bitmap,
		    btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_CHECKPROT(segkp, addr, size, prot));
	}
	segkmem_badop();
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	if (segkp_bitmap && seg == &kvseg) {

		/*
		 * If it is one of segkp pages, call into segkp.
		 */
		if (BT_TEST(segkp_bitmap,
		    btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_KLUSTER(segkp, addr, delta));
	}
	segkmem_badop();
	return (0);
}
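
/*
 * Crash dump support: segkmem_xdump_range() adds every backed page in
 * [start, start + size) to the dump, and segkmem_dump_range() walks
 * heap_lp_arena instead when it is handed the large page heap range,
 * so that only the populated portions are dumped.
 */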
static void
segkmem_xdump_range(void *arg, void *start, size_t size)
{
	struct as *as = arg;
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	while (addr < addr_end) {
		pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
		if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
			dump_addpage(as, addr, pfn);
		addr += PAGESIZE;
		dump_timeleft = dump_timeout;
	}
}

static void
segkmem_dump_range(void *arg, void *start, size_t size)
{
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	/*
	 * If we are about to start dumping the range of addresses we
	 * carved out of the kernel heap for the large page heap, walk
	 * heap_lp_arena to find what segments are actually populated.
	 */
	if (SEGKMEM_USE_LARGEPAGES &&
	    addr == heap_lp_base && addr_end == heap_lp_end &&
	    vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
		vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_xdump_range, arg);
	} else {
		segkmem_xdump_range(arg, start, size);
	}
}

static void
segkmem_dump(struct seg *seg)
{
	/*
	 * The kernel's heap_arena (represented by kvseg) is a very large
	 * VA space, most of which is typically unused.  To speed up dumping
	 * we use vmem_walk() to quickly find the pieces of heap_arena that
	 * are actually in use.  We do the same for heap32_arena and
	 * heap_core.
	 *
	 * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
	 * may ultimately need to allocate memory.  Reentrant walks are
	 * necessarily imperfect snapshots.  The kernel heap continues
	 * to change during a live crash dump, for example.  For a normal
	 * crash dump, however, we know that there won't be any other threads
	 * messing with the heap.  Therefore, at worst, we may fail to dump
	 * the pages that get allocated by the act of dumping; but we will
	 * always dump every page that was allocated when the walk began.
	 *
	 * The other segkmem segments are dense (fully populated), so there's
	 * no need to use this technique when dumping them.
	 *
	 * Note: when adding special dump handling for any new sparsely-
	 * populated segments, be sure to add similar handling to the ::kgrep
	 * code in mdb.
	 */
	if (seg == &kvseg) {
		vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
#ifndef __sparc
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
#endif
	} else if (seg == &kvseg_core) {
		vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else if (seg == &kvseg32) {
		vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else if (seg == &kzioseg) {
		/*
		 * We don't want to dump pages attached to kzioseg since they
		 * contain file data from ZFS.  If this page's segment is
		 * kzioseg return instead of writing it to the dump device.
		 */
		return;
	} else {
		segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
	}
}

/*
 * lock/unlock kmem pages over a given range [addr, addr+len).
 * Returns a shadow list of pages in ppp.  If there are holes
 * in the range (e.g. some of the kernel mappings do not have
 * underlying page_ts) returns ENOTSUP so that as_pagelock()
 * will handle the range via as_fault(F_SOFTLOCK).
 */
/*ARGSUSED*/
static int
segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
	page_t ***ppp, enum lock_type type, enum seg_rw rw)
{
	page_t **pplist, *pp;
	pgcnt_t npages;
	spgcnt_t pg;
	size_t nb;
	struct vnode *vp = seg->s_data;

	ASSERT(ppp != NULL);

	if (segkp_bitmap && seg == &kvseg) {
		/*
		 * If it is one of segkp pages, call into segkp.
		 */
		if (BT_TEST(segkp_bitmap,
		    btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_PAGELOCK(segkp, addr, len, ppp,
			    type, rw));
	}

	if (type == L_PAGERECLAIM)
		return (ENOTSUP);

	npages = btopr(len);
	nb = sizeof (page_t *) * npages;

	if (type == L_PAGEUNLOCK) {
		pplist = *ppp;
		ASSERT(pplist != NULL);

		for (pg = 0; pg < npages; pg++) {
			pp = pplist[pg];
			page_unlock(pp);
		}
		kmem_free(pplist, nb);
		return (0);
	}

	ASSERT(type == L_PAGELOCK);

	pplist = kmem_alloc(nb, KM_NOSLEEP);
	if (pplist == NULL) {
		*ppp = NULL;
		return (ENOTSUP);	/* take the slow path */
	}

	for (pg = 0; pg < npages; pg++) {
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
		if (pp == NULL) {
			while (--pg >= 0)
				page_unlock(pplist[pg]);
			kmem_free(pplist, nb);
			*ppp = NULL;
			return (ENOTSUP);
		}
		pplist[pg] = pp;
		addr += PAGESIZE;
	}

	*ppp = pplist;
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	if (segkp_bitmap && seg == &kvseg) {

		/*
		 * If it is one of segkp pages, call into segkp.
		 */
		if (BT_TEST(segkp_bitmap,
		    btop((uintptr_t)(addr - seg->s_base))))
			return (SEGOP_GETMEMID(segkp, addr, memidp));
	}
	segkmem_badop();
	return (0);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segkmem_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}
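
/*
 * seg_kmem mappings are locked into the HAT, so they never take minor
 * faults; advertise the "no minor fault" capability and deny all others.
 */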
/*ARGSUSED*/
static int
segkmem_capable(struct seg *seg, segcapability_t capability)
{
	if (capability == S_CAPABILITY_NOMINFLT)
		return (1);
	return (0);
}

static struct seg_ops segkmem_ops = {
	SEGKMEM_BADOP(int),		/* dup */
	SEGKMEM_BADOP(int),		/* unmap */
	SEGKMEM_BADOP(void),		/* free */
	segkmem_fault,
	SEGKMEM_BADOP(faultcode_t),	/* faulta */
	segkmem_setprot,
	segkmem_checkprot,
	segkmem_kluster,
	SEGKMEM_BADOP(size_t),		/* swapout */
	SEGKMEM_BADOP(int),		/* sync */
	SEGKMEM_BADOP(size_t),		/* incore */
	SEGKMEM_BADOP(int),		/* lockop */
	SEGKMEM_BADOP(int),		/* getprot */
	SEGKMEM_BADOP(u_offset_t),	/* getoffset */
	SEGKMEM_BADOP(int),		/* gettype */
	SEGKMEM_BADOP(int),		/* getvp */
	SEGKMEM_BADOP(int),		/* advise */
	segkmem_dump,
	segkmem_pagelock,
	SEGKMEM_BADOP(int),		/* setpgsz */
	segkmem_getmemid,
	segkmem_getpolicy,		/* getpolicy */
	segkmem_capable,		/* capable */
};

int
segkmem_zio_create(struct seg *seg)
{
	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
	seg->s_ops = &segkmem_ops;
	seg->s_data = &zvp;
	kas.a_size += seg->s_size;
	return (0);
}

int
segkmem_create(struct seg *seg)
{
	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
	seg->s_ops = &segkmem_ops;
	seg->s_data = &kvp;
	kas.a_size += seg->s_size;
	return (0);
}
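
/*
 * Back-end page allocator used by segkmem_xalloc(): creates pages
 * named by the given vnode (arg, defaulting to &kvp) at the given
 * kernel virtual address, translating VM_* allocation flags into
 * the corresponding PG_* page_create flags.
 */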
/*ARGSUSED*/
page_t *
segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
{
	struct seg kseg;
	int pgflags;
	struct vnode *vp = arg;

	if (vp == NULL)
		vp = &kvp;

	kseg.s_as = &kas;
	pgflags = PG_EXCL;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if ((vmflag & VM_NOSLEEP) == 0)
		pgflags |= PG_WAIT;
	if (vmflag & VM_PANIC)
		pgflags |= PG_PANIC;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;

	return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kseg, addr));
}

/*
 * Allocate pages to back the virtual address range [addr, addr + size).
 * If addr is NULL, allocate the virtual address space as well.
 */
void *
segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
	page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
{
	page_t *ppl;
	caddr_t addr = inaddr;
	pgcnt_t npages = btopr(size);
	int allocflag;

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		return (NULL);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		return (NULL);
	}

	ppl = page_create_func(addr, size, vmflag, pcarg);
	if (ppl == NULL) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		page_unresv(npages);
		return (NULL);
	}

	/*
	 * Under certain conditions, we need to let the HAT layer know
	 * that it cannot safely allocate memory.  Allocations from
	 * the hat_memload vmem arena always need this, to prevent
	 * infinite recursion.
	 *
	 * In addition, the x86 hat cannot safely do memory
	 * allocations while in vmem_populate(), because there
	 * is no simple bound on its usage.
	 */
	if (vmflag & VM_MEMLOAD)
		allocflag = HAT_NO_KALLOC;
#if defined(__x86)
	else if (vmem_is_populator())
		allocflag = HAT_NO_KALLOC;
#endif
	else
		allocflag = 0;

	while (ppl != NULL) {
		page_t *pp = ppl;
		page_sub(&ppl, pp);
		ASSERT(page_iolock_assert(pp));
		ASSERT(PAGE_EXCL(pp));
		page_io_unlock(pp);
		hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
		    (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK | allocflag);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		if (vmflag & SEGKMEM_SHARELOCKED)
			page_downgrade(pp);
		else
			page_unlock(pp);
#endif
	}

	return (addr);
}
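
/*
 * Common allocator behind segkmem_alloc() and segkmem_zio_alloc(); the
 * vnode distinguishes ordinary kernel pages (&kvp) from zio pages
 * (&zvp).  Before kvseg exists we allocate from boot, recycling freed
 * memory from the segkmem_gc_list where possible.
 */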
static void *
segkmem_alloc_vn(vmem_t *vmp, size_t size, int vmflag, struct vnode *vp)
{
	void *addr;
	segkmem_gc_list_t *gcp, **prev_gcpp;

	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
#ifndef __sparc
		if (bootops->bsys_alloc == NULL)
			halt("Memory allocation between bop_alloc() and "
			    "kmem_alloc().\n");
#endif

		/*
		 * There's not a lot of memory to go around during boot,
		 * so recycle it if we can.
		 */
		for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
		    prev_gcpp = &gcp->gc_next) {
			if (gcp->gc_arena == vmp && gcp->gc_size == size) {
				*prev_gcpp = gcp->gc_next;
				return (gcp);
			}
		}

		addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
		if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
			panic("segkmem_alloc: boot_alloc failed");
		return (addr);
	}
	return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create, vp));
}

void *
segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
}

void *
segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (segkmem_alloc_vn(vmp, size, vmflag, &zvp));
}

/*
 * Any changes to this routine must also be carried over to
 * devmap_free_pages() in the seg_dev driver.  This is because
 * we currently don't have a special kernel segment for non-paged
 * kernel memory that is exported by drivers to user space.
 */
static void
segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp)
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
		segkmem_gc_list_t *gc = inaddr;
		gc->gc_arena = vmp;
		gc->gc_size = size;
		gc->gc_next = segkmem_gc_list;
		segkmem_gc_list = gc;
		return;
	}

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
#if defined(__x86)
		pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL)
			panic("segkmem_free: page not found");
		if (!page_tryupgrade(pp)) {
			/*
			 * Some other thread has a sharelock.  Wait for
			 * it to drop the lock so we can free this page.
			 */
			page_unlock(pp);
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_EXCL);
		}
#else
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
#endif
		if (pp == NULL)
			panic("segkmem_free: page not found");
		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
		pp->p_lckcnt = 0;
		page_destroy(pp, 0);
	}
	page_unresv(npages);

	if (vmp != NULL)
		vmem_free(vmp, inaddr, size);
}

void
segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
{
	segkmem_free_vn(vmp, inaddr, size, &kvp);
}

void
segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
{
	segkmem_free_vn(vmp, inaddr, size, &zvp);
}
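
/*
 * Called once segkmem is fully initialized to free everything that
 * accumulated on the boot-time garbage list.
 */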
void
segkmem_gc(void)
{
	ASSERT(kvseg.s_base != NULL);
	while (segkmem_gc_list != NULL) {
		segkmem_gc_list_t *gc = segkmem_gc_list;
		segkmem_gc_list = gc->gc_next;
		segkmem_free(gc->gc_arena, gc, gc->gc_size);
	}
}

/*
 * Legacy entry points from here to end of file.
 */
void
segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
    pfn_t pfn, uint_t flags)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
	hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
	    flags | HAT_LOAD_LOCK);
}

void
segkmem_mapout(struct seg *seg, void *addr, size_t size)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
}

void *
kmem_getpages(pgcnt_t npages, int kmflag)
{
	return (kmem_alloc(ptob(npages), kmflag));
}

void
kmem_freepages(void *addr, pgcnt_t npages)
{
	kmem_free(addr, ptob(npages));
}

/*
 * segkmem_page_create_large() allocates a large page to be used for the kmem
 * caches.  If kpr is enabled we ask for a relocatable page unless requested
 * otherwise.  If kpr is disabled we have to ask for a non-reloc page.
 */
static page_t *
segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
{
	int pgflags;

	pgflags = PG_EXCL;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if (!(vmflag & VM_NOSLEEP))
		pgflags |= PG_WAIT;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;

	return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kvseg, addr, arg));
}

/*
 * Allocate a large page to back the virtual address range
 * [addr, addr + size).  If addr is NULL, allocate the virtual address
 * space as well.
 */
static void *
segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
    uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
    void *pcarg)
{
	caddr_t addr = inaddr, pa;
	size_t  lpsize = segkmem_lpsize;
	pgcnt_t npages = btopr(size);
	pgcnt_t nbpages = btop(lpsize);
	pgcnt_t nlpages = size >> segkmem_lpshift;
	size_t  ppasize = nbpages * sizeof (page_t *);
	page_t *pp, *rootpp, **ppa, *pplist = NULL;
	int i;

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		return (NULL);
	}

	/*
	 * Allocate an array we need for hat_memload_array().
	 * We use a separate arena to avoid recursion.
	 * We will not need this array when hat_memload_array() learns pp++.
	 */
	if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
		goto fail_array_alloc;
	}

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		goto fail_vmem_alloc;

	ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);

	/* create all the pages */
	for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
		if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
			goto fail_page_create;
		page_list_concat(&pplist, &pp);
	}

	/* at this point we have all the resources to complete the request */
	while ((rootpp = pplist) != NULL) {
		for (i = 0; i < nbpages; i++) {
			ASSERT(pplist != NULL);
			pp = pplist;
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
			ppa[i] = pp;
		}
		/*
		 * Load the locked entry.  It's OK to preload the entry into
		 * the TSB since we now support large mappings in the kernel
		 * TSB.
		 */
		hat_memload_array(kas.a_hat,
		    (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
		    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK);

		for (--i; i >= 0; --i) {
			ppa[i]->p_lckcnt = 1;
			page_unlock(ppa[i]);
		}
	}

	vmem_free(segkmem_ppa_arena, ppa, ppasize);
	return (addr);

fail_page_create:
	while ((rootpp = pplist) != NULL) {
		for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
			ASSERT(pp != NULL);
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
		}
		page_destroy_pages(rootpp);
	}

	if (inaddr == NULL)
		vmem_free(vmp, addr, size);

fail_vmem_alloc:
	vmem_free(segkmem_ppa_arena, ppa, ppasize);

fail_array_alloc:
	page_unresv(npages);

	return (NULL);
}
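
/*
 * Free the mappings and pages for a single large page; the matching
 * page_unresv() is left to the caller.
 */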
static void
segkmem_free_one_lp(caddr_t addr, size_t size)
{
	page_t *pp, *rootpp = NULL;
	pgcnt_t pgs_left = btopr(size);

	ASSERT(size == segkmem_lpsize);
	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
		if (pp == NULL)
			panic("segkmem_free_one_lp: page not found");
		ASSERT(PAGE_EXCL(pp));
		pp->p_lckcnt = 0;
		if (rootpp == NULL)
			rootpp = pp;
	}
	ASSERT(rootpp != NULL);
	page_destroy_pages(rootpp);

	/* page_unresv() is done by the caller */
}

/*
 * This function is called to import new spans into vmem arenas such as
 * kmem_default_arena and kmem_oversize_arena. It first tries to import
 * spans from the large page arena, kmem_lp_arena. To do so it may have
 * to "upgrade" the requested size to the kmem_lp_arena quantum. If the
 * upgraded request cannot be satisfied, it falls back to the regular
 * segkmem_alloc(), which satisfies the request by importing from the
 * "*vmp" arena.
 */
/*ARGSUSED*/
void *
segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
	size_t size;
	kthread_t *t = curthread;
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;

	ASSERT(sizep != NULL);

	size = *sizep;

	if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
	    !(vmflag & SEGKMEM_SHARELOCKED)) {

		size_t kmemlp_qnt = segkmem_kmemlp_quantum;
		size_t asize = P2ROUNDUP(size, kmemlp_qnt);
		void *addr = NULL;
		ulong_t *lpthrtp = &lpcb->lp_throttle;
		ulong_t lpthrt = *lpthrtp;
		int dowakeup = 0;
		int doalloc = 1;

		ASSERT(kmem_lp_arena != NULL);
		ASSERT(asize >= size);

		if (lpthrt != 0) {
			/* try to update the throttle value */
			lpthrt = atomic_add_long_nv(lpthrtp, 1);
			if (lpthrt >= segkmem_lpthrottle_max) {
				lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
				    segkmem_lpthrottle_max / 4);
			}

			/*
			 * Once we get above the throttle start, back off
			 * exponentially from large page allocation attempts
			 * and from reaping.
			 */
			if (lpthrt > segkmem_lpthrottle_start &&
			    (lpthrt & (lpthrt - 1))) {
				lpcb->allocs_throttled++;
				lpthrt--;
				if ((lpthrt & (lpthrt - 1)) == 0)
					kmem_reap();
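				/*
				 * (x & (x - 1)) == 0 is the standard
				 * power-of-two test: the outer condition
				 * skips large page attempts whenever the
				 * throttle count is not a power of two,
				 * and the test above reaps only when it
				 * is, i.e. at counts 2, 4, 8, 16, ...
				 * Throttled requests are satisfied with
				 * small pages below.
				 */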
				return (segkmem_alloc(vmp, size, vmflag));
			}
		}

		if (!(vmflag & VM_NOSLEEP) &&
		    segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
		    vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
		    asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {

			/*
			 * We are low on free memory in kmem_lp_arena, so
			 * let only one thread allocate a heap_lp
			 * quantum-sized chunk that everyone will then
			 * share.
			 */
			mutex_enter(&lpcb->lp_lock);

			if (lpcb->lp_wait) {

				/* we are not the first one - wait */
				cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
				if (vmem_size(kmem_lp_arena, VMEM_FREE) <
				    kmemlp_qnt) {
					doalloc = 0;
				}
			} else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
			    kmemlp_qnt) {

				/*
				 * we are the first one, make sure we import
				 * a large page
				 */
				if (asize == kmemlp_qnt)
					asize += kmemlp_qnt;
				dowakeup = 1;
				lpcb->lp_wait = 1;
			}

			mutex_exit(&lpcb->lp_lock);
		}

		/*
		 * The VM_ABORT flag prevents sleeps in vmem_xalloc when
		 * large pages are not available. In that case this
		 * allocation attempt fails and we retry the allocation
		 * with small pages. We also do not want to panic if the
		 * allocation fails, because we are going to retry.
		 */
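		/*
		 * If we parked other threads above (dowakeup is set), we
		 * must wake them below whether or not the import succeeds;
		 * they re-check kmem_lp_arena's free space when they resume.
		 */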
		if (doalloc) {
			addr = vmem_alloc(kmem_lp_arena, asize,
			    (vmflag | VM_ABORT) & ~VM_PANIC);

			if (dowakeup) {
				mutex_enter(&lpcb->lp_lock);
				ASSERT(lpcb->lp_wait != 0);
				lpcb->lp_wait = 0;
				cv_broadcast(&lpcb->lp_cv);
				mutex_exit(&lpcb->lp_lock);
			}
		}

		if (addr != NULL) {
			*sizep = asize;
			*lpthrtp = 0;
			return (addr);
		}

		if (vmflag & VM_NOSLEEP)
			lpcb->nosleep_allocs_failed++;
		else
			lpcb->sleep_allocs_failed++;
		lpcb->alloc_bytes_failed += size;

		/* if large page throttling is not started yet, start it */
		if (segkmem_use_lpthrottle && lpthrt == 0) {
			lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
		}
	}
	return (segkmem_alloc(vmp, size, vmflag));
}

void
segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
{
	if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
		segkmem_free(vmp, inaddr, size);
	} else {
		vmem_free(kmem_lp_arena, inaddr, size);
	}
}

/*
 * segkmem_alloc_lpi() imports virtual memory from the large page heap
 * arena into the kmem_lp arena. In the process it maps the imported
 * segment with large pages.
 */
static void *
segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	void *addr;

	ASSERT(size != 0);
	ASSERT(vmp == heap_lp_arena);

	/* do not allow the large page heap to grow beyond its limit */
	if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
		lpcb->allocs_limited++;
		return (NULL);
	}

	addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create_large, NULL);
	return (addr);
}
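
/*
 * For orientation, the import chain set up in segkmem_heap_lp_init()
 * below is roughly:
 *
 *	kmem_default_arena / kmem_oversize_arena
 *	    -> segkmem_alloc_lp()  -> kmem_lp_arena
 *	    -> segkmem_alloc_lpi() -> heap_lp_arena
 *
 * so segkmem_alloc_lpi() and segkmem_free_lpi() only ever see
 * heap_lp_arena as their source arena, which their ASSERTs enforce.
 */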
/*
 * segkmem_free_lpi() returns virtual memory back into the large page
 * heap arena from the kmem_lp arena. Before doing this it unmaps the
 * segment and frees the large pages used to map it.
 */
static void
segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
{
	pgcnt_t nlpages = size >> segkmem_lpshift;
	size_t lpsize = segkmem_lpsize;
	caddr_t addr = inaddr;
	pgcnt_t npages = btopr(size);
	int i;

	ASSERT(vmp == heap_lp_arena);
	ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
	ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);

	for (i = 0; i < nlpages; i++) {
		segkmem_free_one_lp(addr, lpsize);
		addr += lpsize;
	}

	page_unresv(npages);

	vmem_free(vmp, inaddr, size);
}

/*
 * This function is called at system boot time by kmem_init, right after
 * the /etc/system file has been read. Based on the hardware configuration
 * and the /etc/system settings it decides whether the system is going to
 * use large pages. The initialization necessary to actually start using
 * large pages happens later, after segkmem_heap_lp_init() is called.
 */
int
segkmem_lpsetup()
{
	int use_large_pages = 0;

#ifdef __sparc

	size_t memtotal = physmem * PAGESIZE;

	if (heap_lp_base == NULL) {
		segkmem_lpsize = PAGESIZE;
		return (0);
	}

	/* get a platform dependent value of large page size for kernel heap */
	segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);

	if (segkmem_lpsize <= PAGESIZE) {
		/*
		 * put virtual space reserved for the large page kernel
		 * heap back to the regular heap
		 */
		vmem_xfree(heap_arena, heap_lp_base,
		    heap_lp_end - heap_lp_base);
		heap_lp_base = NULL;
		heap_lp_end = NULL;
		segkmem_lpsize = PAGESIZE;
		return (0);
	}

	/* set heap_lp quantum if necessary */
	if (segkmem_heaplp_quantum == 0 ||
	    (segkmem_heaplp_quantum & (segkmem_heaplp_quantum - 1)) ||
	    P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
		segkmem_heaplp_quantum = segkmem_lpsize;
	}
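	/*
	 * For illustration: with a 4MB segkmem_lpsize, an /etc/system
	 * setting of segkmem_heaplp_quantum = 6MB fails both the
	 * power-of-two test and P2PHASE(6MB, 4MB) == 2MB above, so the
	 * quantum would be reset to 4MB.
	 */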
	/* set kmem_lp quantum if necessary */
	if (segkmem_kmemlp_quantum == 0 ||
	    (segkmem_kmemlp_quantum & (segkmem_kmemlp_quantum - 1)) ||
	    segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
		segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
	}

	/* set the total amount of memory allowed for the large page heap */
	if (segkmem_kmemlp_max == 0) {
		if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
			segkmem_kmemlp_pcnt = 12;
		segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
	}
	segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
	    segkmem_heaplp_quantum);

	/* fix the lp kmem preallocation request if necessary */
	if (segkmem_kmemlp_min) {
		segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
		    segkmem_heaplp_quantum);
		if (segkmem_kmemlp_min > segkmem_kmemlp_max)
			segkmem_kmemlp_min = segkmem_kmemlp_max;
	}

	use_large_pages = 1;
	segkmem_lpszc = page_szc(segkmem_lpsize);
	segkmem_lpshift = page_get_shift(segkmem_lpszc);

#endif
	return (use_large_pages);
}

void
segkmem_zio_init(void *zio_mem_base, size_t zio_mem_size)
{
	ASSERT(zio_mem_base != NULL);
	ASSERT(zio_mem_size != 0);

	zio_arena = vmem_create("zio", zio_mem_base, zio_mem_size, PAGESIZE,
	    NULL, NULL, NULL, 0, VM_SLEEP);

	zio_alloc_arena = vmem_create("zio_buf", NULL, 0, PAGESIZE,
	    segkmem_zio_alloc, segkmem_zio_free, zio_arena, 0, VM_SLEEP);

	ASSERT(zio_arena != NULL);
	ASSERT(zio_alloc_arena != NULL);
}

#ifdef __sparc

static void *
segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
{
	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
	void *addr;

	if (ppaquantum <= PAGESIZE)
		return (segkmem_alloc(vmp, size, vmflag));

	ASSERT((size & (ppaquantum - 1)) == 0);

	addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
	if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
	    segkmem_page_create, NULL) == NULL) {
		vmem_xfree(vmp, addr, size);
		addr = NULL;
	}

	return (addr);
}
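
/*
 * For illustration (assuming 8K base pages and 64-bit page_t pointers):
 * a 4MB large page needs btopr(4MB) = 512 pointers, so ppaquantum is
 * 4K <= PAGESIZE and the plain segkmem_alloc()/segkmem_free() path is
 * taken; a 256MB large page needs 32768 pointers, so ppaquantum is 256K
 * and the ppaquantum-aligned vmem_xalloc()/vmem_xfree() path is used
 * instead.
 */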
static void
segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
{
	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);

	ASSERT(addr != NULL);

	if (ppaquantum <= PAGESIZE) {
		segkmem_free(vmp, addr, size);
	} else {
		segkmem_free(NULL, addr, size);
		vmem_xfree(vmp, addr, size);
	}
}

void
segkmem_heap_lp_init()
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	size_t heap_lp_size = heap_lp_end - heap_lp_base;
	size_t lpsize = segkmem_lpsize;
	size_t ppaquantum;
	void *addr;

	if (segkmem_lpsize <= PAGESIZE) {
		ASSERT(heap_lp_base == NULL);
		ASSERT(heap_lp_end == NULL);
		return;
	}

	ASSERT(segkmem_heaplp_quantum >= lpsize);
	ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
	ASSERT(lpcb->lp_uselp == 0);
	ASSERT(heap_lp_base != NULL);
	ASSERT(heap_lp_end != NULL);
	ASSERT(heap_lp_base < heap_lp_end);
	ASSERT(heap_lp_arena == NULL);
	ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
	ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);

	/* create the large page heap arena */
	heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
	    segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);

	ASSERT(heap_lp_arena != NULL);

	/* This arena caches memory already mapped by large pages */
	kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
	    segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);

	ASSERT(kmem_lp_arena != NULL);

	mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * this arena is used for the arrays of page_t pointers necessary
	 * to call hat_memload_array
	 */
	ppaquantum = btopr(lpsize) * sizeof (page_t *);
	segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
	    segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,
	    VM_SLEEP);

	ASSERT(segkmem_ppa_arena != NULL);

	/* preallocate some memory for the lp kernel heap */
	if (segkmem_kmemlp_min) {

		ASSERT(P2PHASE(segkmem_kmemlp_min,
		    segkmem_heaplp_quantum) == 0);

		if ((addr = segkmem_alloc_lpi(heap_lp_arena,
		    segkmem_kmemlp_min, VM_SLEEP)) != NULL) {

			addr = vmem_add(kmem_lp_arena, addr,
			    segkmem_kmemlp_min, VM_SLEEP);
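
			/*
			 * vmem_add() donates the span to kmem_lp_arena
			 * directly, so the preallocated memory becomes
			 * available without another trip through the
			 * segkmem_alloc_lpi() import path.
			 */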
			ASSERT(addr != NULL);
		}
	}

	lpcb->lp_uselp = 1;
}

#endif