/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Big Theory Statement for the virtual memory allocator.
 *
 * For a more complete description of the main ideas, see:
 *
 *	Jeff Bonwick and Jonathan Adams,
 *
 *	Magazines and vmem: Extending the Slab Allocator to Many CPUs and
 *	Arbitrary Resources.
 *
 *	Proceedings of the 2001 Usenix Conference.
 *	Available as http://www.usenix.org/event/usenix01/bonwick.html
 *
 *
 * 1. General Concepts
 * -------------------
 *
 * 1.1 Overview
 * ------------
 * We divide the kernel address space into a number of logically distinct
 * pieces, or *arenas*: text, data, heap, stack, and so on.  Within these
 * arenas we often subdivide further; for example, we use heap addresses
 * not only for the kernel heap (kmem_alloc() space), but also for DVMA,
 * bp_mapin(), /dev/kmem, and even some device mappings like the TOD chip.
 * The kernel address space, therefore, is most accurately described as
 * a tree of arenas in which each node of the tree *imports* some subset
 * of its parent.  The virtual memory allocator manages these arenas and
 * supports their natural hierarchical structure.
 *
 * 1.2 Arenas
 * ----------
 * An arena is nothing more than a set of integers.  These integers most
 * commonly represent virtual addresses, but in fact they can represent
 * anything at all.  For example, we could use an arena containing the
 * integers minpid through maxpid to allocate process IDs.  vmem_create()
 * and vmem_destroy() create and destroy vmem arenas.  In order to
 * differentiate between arenas used for addresses and arenas used for
 * identifiers, the VMC_IDENTIFIER flag is passed to vmem_create().  This
 * prevents identifier exhaustion from being diagnosed as general memory
 * failure.
 *
 * 1.3 Spans
 * ---------
 * We represent the integers in an arena as a collection of *spans*, or
 * contiguous ranges of integers.  For example, the kernel heap consists
 * of just one span: [kernelheap, ekernelheap).  Spans can be added to an
 * arena in two ways: explicitly, by vmem_add(), or implicitly, by
 * importing, as described in Section 1.5 below.
 *
 * 1.4 Segments
 * ------------
 * Spans are subdivided into *segments*, each of which is either allocated
 * or free.  A segment, like a span, is a contiguous range of integers.
 * Each allocated segment [addr, addr + size) represents exactly one
 * vmem_alloc(size) that returned addr.  Free segments represent the space
 * between allocated segments.  If two free segments are adjacent, we
 * coalesce them into one larger segment; that is, if segments [a, b) and
 * [b, c) are both free, we merge them into a single segment [a, c).
 * The segments within a span are linked together in increasing-address order
 * so we can easily determine whether coalescing is possible.
 *
 * Segments never cross span boundaries.  When all segments within
 * an imported span become free, we return the span to its source.
 *
 * 1.5 Imported Memory
 * -------------------
 * As mentioned in the overview, some arenas are logical subsets of
 * other arenas.  For example, kmem_va_arena (a virtual address cache
 * that satisfies most kmem_slab_create() requests) is just a subset
 * of heap_arena (the kernel heap) that provides caching for the most
 * common slab sizes.  When kmem_va_arena runs out of virtual memory,
 * it *imports* more from the heap; we say that heap_arena is the
 * *vmem source* for kmem_va_arena.  vmem_create() allows you to
 * specify any existing vmem arena as the source for your new arena.
 * Topologically, since every arena is a child of at most one source,
 * the set of all arenas forms a collection of trees.
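 *
 * As a sketch of the interfaces above (the arena names and argument
 * values here are illustrative, not taken from this file), an identifier
 * arena and a child arena that imports from a parent might be created as:
 *
 *	id_arena = vmem_create("ids", (void *)1, 100000, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
 *
 *	sub_arena = vmem_create("sub", NULL, 0, PAGESIZE,
 *	    vmem_alloc, vmem_free, parent_arena, 0, VM_SLEEP);
 *
 * where the (afunc, ffunc, source) triple makes parent_arena the
 * vmem source for sub_arena, as described in Section 1.5.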
 *
 * 1.6 Constrained Allocations
 * ---------------------------
 * Some vmem clients are quite picky about the kind of address they want.
 * For example, the DVMA code may need an address that is at a particular
 * phase with respect to some alignment (to get good cache coloring), or
 * that lies within certain limits (the addressable range of a device),
 * or that doesn't cross some boundary (a DMA counter restriction) --
 * or all of the above.  vmem_xalloc() allows the client to specify any
 * or all of these constraints.
 *
 * 1.7 The Vmem Quantum
 * --------------------
 * Every arena has a notion of 'quantum', specified at vmem_create() time,
 * that defines the arena's minimum unit of currency.  Most commonly the
 * quantum is either 1 or PAGESIZE, but any power of 2 is legal.
 * All vmem allocations are guaranteed to be quantum-aligned.
 *
 * 1.8 Quantum Caching
 * -------------------
 * A vmem arena may be so hot (frequently used) that the scalability of vmem
 * allocation is a significant concern.  We address this by allowing the most
 * common allocation sizes to be serviced by the kernel memory allocator,
 * which provides low-latency per-cpu caching.  The qcache_max argument to
 * vmem_create() specifies the largest allocation size to cache.
 *
 * 1.9 Relationship to Kernel Memory Allocator
 * -------------------------------------------
 * Every kmem cache has a vmem arena as its slab supplier.  The kernel memory
 * allocator uses vmem_alloc() and vmem_free() to create and destroy slabs.
 *
 *
 * 2. Implementation
 * -----------------
 *
 * 2.1 Segment lists and markers
 * -----------------------------
 * The segment structure (vmem_seg_t) contains two doubly-linked lists.
 *
 * The arena list (vs_anext/vs_aprev) links all segments in the arena.
 * In addition to the allocated and free segments, the arena contains
 * special marker segments at span boundaries.  Span markers simplify
 * coalescing and importing logic by making it easy to tell both when
 * we're at a span boundary (so we don't coalesce across it), and when
 * a span is completely free (its neighbors will both be span markers).
 *
 * Imported spans will have vs_import set.
 *
 * The next-of-kin list (vs_knext/vs_kprev) links segments of the same type:
 * (1) for allocated segments, vs_knext is the hash chain linkage;
 * (2) for free segments, vs_knext is the freelist linkage;
 * (3) for span marker segments, vs_knext is the next span marker.
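 *
 * As a sketch (not drawn from any particular arena), the arena list
 * for one span holding two allocations might look like:
 *
 *	... <-> SPAN <-> ALLOC <-> FREE <-> ALLOC <-> SPAN <-> ...
 *
 * so a segment coalesces with a neighbor only if that neighbor is
 * VMEM_FREE, and a span is completely free exactly when a single
 * free segment sits between two span markers.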
 *
 * 2.2 Allocation hashing
 * ----------------------
 * We maintain a hash table of all allocated segments, hashed by address.
 * This allows vmem_free() to discover the target segment in constant time.
 * vmem_update() periodically resizes hash tables to keep hash chains short.
 *
 * 2.3 Freelist management
 * -----------------------
 * We maintain power-of-2 freelists for free segments, i.e. free segments
 * of size >= 2^n reside in vmp->vm_freelist[n].  To ensure constant-time
 * allocation, vmem_xalloc() looks not in the first freelist that *might*
 * satisfy the allocation, but in the first freelist that *definitely*
 * satisfies the allocation (unless VM_BESTFIT is specified, or all larger
 * freelists are empty).  For example, a 1000-byte allocation will be
 * satisfied not from the 512..1023-byte freelist, whose members *might*
 * contain a 1000-byte segment, but from a 1024-byte or larger freelist,
 * the first member of which will *definitely* satisfy the allocation.
 * This ensures that vmem_xalloc() works in constant time.
 *
 * We maintain a bit map to determine quickly which freelists are non-empty.
 * vmp->vm_freemap & (1 << n) is non-zero iff vmp->vm_freelist[n] is non-empty.
 *
 * The different freelists are linked together into one large freelist,
 * with the freelist heads serving as markers.  Freelist markers simplify
 * the maintenance of vm_freemap by making it easy to tell when we're taking
 * the last member of a freelist (both of its neighbors will be markers).
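 *
 * A worked instance of the 1000-byte example above, in terms of the
 * primitives the implementation uses (see vmem_xalloc() below):
 *
 *	hb = highbit(1000);	(hb == 10, since 2^9 <= 1000 < 2^10)
 *	flist = lowbit(P2ALIGN(vm_freemap, 1UL << hb));
 *
 * P2ALIGN() discards the bits for freelists 0..9, and lowbit() then
 * selects the smallest non-empty freelist whose members are all at
 * least 1024 bytes -- a constant-time computation.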
 *
 * 2.4 Vmem Locking
 * ----------------
 * For simplicity, all arena state is protected by a per-arena lock.
 * For very hot arenas, use quantum caching for scalability.
 *
 * 2.5 Vmem Population
 * -------------------
 * Any internal vmem routine that might need to allocate new segment
 * structures must prepare in advance by calling vmem_populate(), which
 * will preallocate enough vmem_seg_t's to get it through the entire
 * operation without dropping the arena lock.
 *
 * 2.6 Auditing
 * ------------
 * If KMF_AUDIT is set in kmem_flags, we audit vmem allocations as well.
 * Since virtual addresses cannot be scribbled on, there is no equivalent
 * in vmem to redzone checking, deadbeef, or other kmem debugging features.
 * Moreover, we do not audit frees because segment coalescing destroys the
 * association between an address and its segment structure.  Auditing is
 * thus intended primarily to keep track of who's consuming the arena.
 * Debugging support could certainly be extended in the future if it proves
 * necessary, but we do so much live checking via the allocation hash table
 * that even non-DEBUG systems get quite a bit of sanity checking already.
 */

#include <sys/vmem_impl.h>
#include <sys/kmem.h>
#include <sys/kstat.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/panic.h>

#define	VMEM_INITIAL		10	/* early vmem arenas */
#define	VMEM_SEG_INITIAL	200	/* early segments */

/*
 * Adding a new span to an arena requires two segment structures: one to
 * represent the span, and one to represent the free segment it contains.
 */
#define	VMEM_SEGS_PER_SPAN_CREATE	2

/*
 * Allocating a piece of an existing segment requires 0-2 segment structures
 * depending on how much of the segment we're allocating.
 *
 * To allocate the entire segment, no new segment structures are needed; we
 * simply move the existing segment structure from the freelist to the
 * allocation hash table.
 *
 * To allocate a piece from the left or right end of the segment, we must
 * split the segment into two pieces (allocated part and remainder), so we
 * need one new segment structure to represent the remainder.
 *
 * To allocate from the middle of a segment, we need two new segment
 * structures to represent the remainders on either side of the allocated
 * part.
 */
#define	VMEM_SEGS_PER_EXACT_ALLOC	0
#define	VMEM_SEGS_PER_LEFT_ALLOC	1
#define	VMEM_SEGS_PER_RIGHT_ALLOC	1
#define	VMEM_SEGS_PER_MIDDLE_ALLOC	2

/*
 * vmem_populate() preallocates segment structures for vmem to do its work.
 * It must preallocate enough for the worst case, which is when we must import
 * a new span and then allocate from the middle of it.
 */
#define	VMEM_SEGS_PER_ALLOC_MAX		\
	(VMEM_SEGS_PER_SPAN_CREATE + VMEM_SEGS_PER_MIDDLE_ALLOC)
/*
 * The segment structures themselves are allocated from vmem_seg_arena, so
 * we have a recursion problem when vmem_seg_arena needs to populate itself.
 * We address this by working out the maximum number of segment structures
 * this act will require, and multiplying by the maximum number of threads
 * that we'll allow to do it simultaneously.
 *
 * The worst-case segment consumption to populate vmem_seg_arena is as
 * follows (depicted as a stack trace to indicate why events are occurring):
 *
 * (In order to lower the fragmentation in the heap_arena, we specify a
 * minimum import size for the vmem_metadata_arena which is the same size
 * as the kmem_va quantum cache allocations.  This causes the worst-case
 * allocation from the vmem_metadata_arena to be 3 segments.)
 *
 * vmem_alloc(vmem_seg_arena)		-> 2 segs (span create + exact alloc)
 *  segkmem_alloc(vmem_metadata_arena)
 *   vmem_alloc(vmem_metadata_arena)	-> 3 segs (span create + left alloc)
 *    vmem_alloc(heap_arena)		-> 1 seg (left alloc)
 *   page_create()
 *   hat_memload()
 *    kmem_cache_alloc()
 *     kmem_slab_create()
 *	vmem_alloc(hat_memload_arena)	-> 2 segs (span create + exact alloc)
 *	 segkmem_alloc(heap_arena)
 *	  vmem_alloc(heap_arena)	-> 1 seg (left alloc)
 *	 page_create()
 *	 hat_memload()		-> (hat layer won't recurse further)
 *
 * The worst-case consumption for each arena is 3 segment structures.
 * Of course, a 3-seg reserve could easily be blown by multiple threads.
 * Therefore, we serialize all allocations from vmem_seg_arena (which is OK
 * because they're rare).  We cannot allow a non-blocking allocation to get
 * tied up behind a blocking allocation, however, so we use separate locks
 * for VM_SLEEP and VM_NOSLEEP allocations.  Similarly, VM_PUSHPAGE
 * allocations must not block behind ordinary VM_SLEEPs.  In addition, if
 * the system is panicking then we must keep enough resources for
 * panic_thread to do its work.  Thus we have at most four threads trying
 * to allocate from vmem_seg_arena, and each thread consumes at most three
 * segment structures, so we must maintain a 12-seg reserve.
 */
#define	VMEM_POPULATE_RESERVE	12

/*
 * vmem_populate() ensures that each arena has VMEM_MINFREE seg structures
 * so that it can satisfy the worst-case allocation *and* participate in
 * worst-case allocation from vmem_seg_arena.
 */
#define	VMEM_MINFREE	(VMEM_POPULATE_RESERVE + VMEM_SEGS_PER_ALLOC_MAX)
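
/*
 * To make the arithmetic above concrete: four serialized populators
 * (VM_SLEEP, VM_NOSLEEP, VM_PUSHPAGE, panic) times a worst case of
 * three segment structures each gives VMEM_POPULATE_RESERVE = 12, and
 * VMEM_MINFREE = 12 + VMEM_SEGS_PER_ALLOC_MAX = 12 + 4 = 16.
 */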

static vmem_t vmem0[VMEM_INITIAL];
static vmem_t *vmem_populator[VMEM_INITIAL];
static uint32_t vmem_id;
static uint32_t vmem_populators;
static vmem_seg_t vmem_seg0[VMEM_SEG_INITIAL];
static vmem_seg_t *vmem_segfree;
static kmutex_t vmem_list_lock;
static kmutex_t vmem_segfree_lock;
static kmutex_t vmem_sleep_lock;
static kmutex_t vmem_nosleep_lock;
static kmutex_t vmem_pushpage_lock;
static kmutex_t vmem_panic_lock;
static vmem_t *vmem_list;
static vmem_t *vmem_metadata_arena;
static vmem_t *vmem_seg_arena;
static vmem_t *vmem_hash_arena;
static vmem_t *vmem_vmem_arena;
static long vmem_update_interval = 15;	/* vmem_update() every 15 seconds */
uint32_t vmem_mtbf;		/* mean time between failures [default: off] */
size_t vmem_seg_size = sizeof (vmem_seg_t);

static vmem_kstat_t vmem_kstat_template = {
	{ "mem_inuse",		KSTAT_DATA_UINT64 },
	{ "mem_import",		KSTAT_DATA_UINT64 },
	{ "mem_total",		KSTAT_DATA_UINT64 },
	{ "vmem_source",	KSTAT_DATA_UINT32 },
	{ "alloc",		KSTAT_DATA_UINT64 },
	{ "free",		KSTAT_DATA_UINT64 },
	{ "wait",		KSTAT_DATA_UINT64 },
	{ "fail",		KSTAT_DATA_UINT64 },
	{ "lookup",		KSTAT_DATA_UINT64 },
	{ "search",		KSTAT_DATA_UINT64 },
	{ "populate_wait",	KSTAT_DATA_UINT64 },
	{ "populate_fail",	KSTAT_DATA_UINT64 },
	{ "contains",		KSTAT_DATA_UINT64 },
	{ "contains_search",	KSTAT_DATA_UINT64 },
};

/*
 * Insert/delete from arena list (type 'a') or next-of-kin list (type 'k').
 */
#define	VMEM_INSERT(vprev, vsp, type)					\
{									\
	vmem_seg_t *vnext = (vprev)->vs_##type##next;			\
	(vsp)->vs_##type##next = (vnext);				\
	(vsp)->vs_##type##prev = (vprev);				\
	(vprev)->vs_##type##next = (vsp);				\
	(vnext)->vs_##type##prev = (vsp);				\
}

#define	VMEM_DELETE(vsp, type)						\
{									\
	vmem_seg_t *vprev = (vsp)->vs_##type##prev;			\
	vmem_seg_t *vnext = (vsp)->vs_##type##next;			\
	(vprev)->vs_##type##next = (vnext);				\
	(vnext)->vs_##type##prev = (vprev);				\
}
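
/*
 * For example, VMEM_INSERT(vprev, vsp, k) pastes 'k' into the member
 * names and expands to the following, splicing vsp in after vprev on
 * the next-of-kin list:
 *
 *	vmem_seg_t *vnext = (vprev)->vs_knext;
 *	(vsp)->vs_knext = (vnext);
 *	(vsp)->vs_kprev = (vprev);
 *	(vprev)->vs_knext = (vsp);
 *	(vnext)->vs_kprev = (vsp);
 */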

/*
 * Get a vmem_seg_t from the global segfree list.
 */
static vmem_seg_t *
vmem_getseg_global(void)
{
	vmem_seg_t *vsp;

	mutex_enter(&vmem_segfree_lock);
	if ((vsp = vmem_segfree) != NULL)
		vmem_segfree = vsp->vs_knext;
	mutex_exit(&vmem_segfree_lock);

	return (vsp);
}

/*
 * Put a vmem_seg_t on the global segfree list.
 */
static void
vmem_putseg_global(vmem_seg_t *vsp)
{
	mutex_enter(&vmem_segfree_lock);
	vsp->vs_knext = vmem_segfree;
	vmem_segfree = vsp;
	mutex_exit(&vmem_segfree_lock);
}

/*
 * Get a vmem_seg_t from vmp's segfree list.
 */
static vmem_seg_t *
vmem_getseg(vmem_t *vmp)
{
	vmem_seg_t *vsp;

	ASSERT(vmp->vm_nsegfree > 0);

	vsp = vmp->vm_segfree;
	vmp->vm_segfree = vsp->vs_knext;
	vmp->vm_nsegfree--;

	return (vsp);
}

/*
 * Put a vmem_seg_t on vmp's segfree list.
 */
static void
vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
{
	vsp->vs_knext = vmp->vm_segfree;
	vmp->vm_segfree = vsp;
	vmp->vm_nsegfree++;
}

/*
 * Add vsp to the appropriate freelist.
 */
static void
vmem_freelist_insert(vmem_t *vmp, vmem_seg_t *vsp)
{
	vmem_seg_t *vprev;

	ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);

	vprev = (vmem_seg_t *)&vmp->vm_freelist[highbit(VS_SIZE(vsp)) - 1];
	vsp->vs_type = VMEM_FREE;
	vmp->vm_freemap |= VS_SIZE(vprev);
	VMEM_INSERT(vprev, vsp, k);

	cv_broadcast(&vmp->vm_cv);
}

/*
 * Take vsp from the freelist.
 */
static void
vmem_freelist_delete(vmem_t *vmp, vmem_seg_t *vsp)
{
	ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
	ASSERT(vsp->vs_type == VMEM_FREE);

	if (vsp->vs_knext->vs_start == 0 && vsp->vs_kprev->vs_start == 0) {
		/*
		 * The segments on both sides of 'vsp' are freelist heads,
		 * so taking vsp leaves the freelist at vsp->vs_kprev empty.
		 */
		ASSERT(vmp->vm_freemap & VS_SIZE(vsp->vs_kprev));
		vmp->vm_freemap ^= VS_SIZE(vsp->vs_kprev);
	}
	VMEM_DELETE(vsp, k);
}

/*
 * Add vsp to the allocated-segment hash table and update kstats.
 */
static void
vmem_hash_insert(vmem_t *vmp, vmem_seg_t *vsp)
{
	vmem_seg_t **bucket;

	vsp->vs_type = VMEM_ALLOC;
	bucket = VMEM_HASH(vmp, vsp->vs_start);
	vsp->vs_knext = *bucket;
	*bucket = vsp;

	if (vmem_seg_size == sizeof (vmem_seg_t)) {
		vsp->vs_depth = (uint8_t)getpcstack(vsp->vs_stack,
		    VMEM_STACK_DEPTH);
		vsp->vs_thread = curthread;
		vsp->vs_timestamp = gethrtime();
	} else {
		vsp->vs_depth = 0;
	}

	vmp->vm_kstat.vk_alloc.value.ui64++;
	vmp->vm_kstat.vk_mem_inuse.value.ui64 += VS_SIZE(vsp);
}

/*
 * Remove vsp from the allocated-segment hash table and update kstats.
 */
static vmem_seg_t *
vmem_hash_delete(vmem_t *vmp, uintptr_t addr, size_t size)
{
	vmem_seg_t *vsp, **prev_vspp;

	prev_vspp = VMEM_HASH(vmp, addr);
	while ((vsp = *prev_vspp) != NULL) {
		if (vsp->vs_start == addr) {
			*prev_vspp = vsp->vs_knext;
			break;
		}
		vmp->vm_kstat.vk_lookup.value.ui64++;
		prev_vspp = &vsp->vs_knext;
	}

	if (vsp == NULL)
		panic("vmem_hash_delete(%p, %lx, %lu): bad free",
		    vmp, addr, size);
	if (VS_SIZE(vsp) != size)
		panic("vmem_hash_delete(%p, %lx, %lu): wrong size (expect %lu)",
		    vmp, addr, size, VS_SIZE(vsp));

	vmp->vm_kstat.vk_free.value.ui64++;
	vmp->vm_kstat.vk_mem_inuse.value.ui64 -= size;

	return (vsp);
}

/*
 * Create a segment spanning the range [start, end) and add it to the arena.
 */
static vmem_seg_t *
vmem_seg_create(vmem_t *vmp, vmem_seg_t *vprev, uintptr_t start, uintptr_t end)
{
	vmem_seg_t *newseg = vmem_getseg(vmp);

	newseg->vs_start = start;
	newseg->vs_end = end;
	newseg->vs_type = 0;
	newseg->vs_import = 0;

	VMEM_INSERT(vprev, newseg, a);

	return (newseg);
}

/*
 * Remove segment vsp from the arena.
 */
static void
vmem_seg_destroy(vmem_t *vmp, vmem_seg_t *vsp)
{
	ASSERT(vsp->vs_type != VMEM_ROTOR);
	VMEM_DELETE(vsp, a);

	vmem_putseg(vmp, vsp);
}

/*
 * Add the span [vaddr, vaddr + size) to vmp and update kstats.
 */
static vmem_seg_t *
vmem_span_create(vmem_t *vmp, void *vaddr, size_t size, uint8_t import)
{
	vmem_seg_t *newseg, *span;
	uintptr_t start = (uintptr_t)vaddr;
	uintptr_t end = start + size;

	ASSERT(MUTEX_HELD(&vmp->vm_lock));

	if ((start | end) & (vmp->vm_quantum - 1))
		panic("vmem_span_create(%p, %p, %lu): misaligned",
		    vmp, vaddr, size);

	span = vmem_seg_create(vmp, vmp->vm_seg0.vs_aprev, start, end);
	span->vs_type = VMEM_SPAN;
	span->vs_import = import;
	VMEM_INSERT(vmp->vm_seg0.vs_kprev, span, k);

	newseg = vmem_seg_create(vmp, span, start, end);
	vmem_freelist_insert(vmp, newseg);

	if (import)
		vmp->vm_kstat.vk_mem_import.value.ui64 += size;
	vmp->vm_kstat.vk_mem_total.value.ui64 += size;

	return (newseg);
}

/*
 * Remove span vsp from vmp and update kstats.
 */
static void
vmem_span_destroy(vmem_t *vmp, vmem_seg_t *vsp)
{
	vmem_seg_t *span = vsp->vs_aprev;
	size_t size = VS_SIZE(vsp);

	ASSERT(MUTEX_HELD(&vmp->vm_lock));
	ASSERT(span->vs_type == VMEM_SPAN);

	if (span->vs_import)
		vmp->vm_kstat.vk_mem_import.value.ui64 -= size;
	vmp->vm_kstat.vk_mem_total.value.ui64 -= size;

	VMEM_DELETE(span, k);

	vmem_seg_destroy(vmp, vsp);
	vmem_seg_destroy(vmp, span);
}

/*
 * Allocate the subrange [addr, addr + size) from segment vsp.
 * If there are leftovers on either side, place them on the freelist.
 * Returns a pointer to the segment representing [addr, addr + size).
 */
static vmem_seg_t *
vmem_seg_alloc(vmem_t *vmp, vmem_seg_t *vsp, uintptr_t addr, size_t size)
{
	uintptr_t vs_start = vsp->vs_start;
	uintptr_t vs_end = vsp->vs_end;
	size_t vs_size = vs_end - vs_start;
	size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
	uintptr_t addr_end = addr + realsize;

	ASSERT(P2PHASE(vs_start, vmp->vm_quantum) == 0);
	ASSERT(P2PHASE(addr, vmp->vm_quantum) == 0);
	ASSERT(vsp->vs_type == VMEM_FREE);
	ASSERT(addr >= vs_start && addr_end - 1 <= vs_end - 1);
	ASSERT(addr - 1 <= addr_end - 1);

	/*
	 * If we're allocating from the start of the segment, and the
	 * remainder will be on the same freelist, we can save quite
	 * a bit of work.
	 */
	if (P2SAMEHIGHBIT(vs_size, vs_size - realsize) && addr == vs_start) {
		ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
		vsp->vs_start = addr_end;
		vsp = vmem_seg_create(vmp, vsp->vs_aprev, addr, addr + size);
		vmem_hash_insert(vmp, vsp);
		return (vsp);
	}

	vmem_freelist_delete(vmp, vsp);

	if (vs_end != addr_end)
		vmem_freelist_insert(vmp,
		    vmem_seg_create(vmp, vsp, addr_end, vs_end));

	if (vs_start != addr)
		vmem_freelist_insert(vmp,
		    vmem_seg_create(vmp, vsp->vs_aprev, vs_start, addr));

	vsp->vs_start = addr;
	vsp->vs_end = addr + size;

	vmem_hash_insert(vmp, vsp);
	return (vsp);
}
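
/*
 * To illustrate vmem_seg_alloc() with made-up numbers: carving
 * [0x2000, 0x3000) out of the free segment [0x1000, 0x5000) is a
 * middle allocation -- it leaves [0x1000, 0x2000) and [0x3000, 0x5000)
 * as new free segments, consuming VMEM_SEGS_PER_MIDDLE_ALLOC (2)
 * segment structures; carving [0x1000, 0x2000) from the same segment
 * would be a left allocation, consuming only one.
 */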

/*
 * Returns 1 if we are populating, 0 otherwise.
 * Call it if we want to prevent recursion from HAT.
 */
int
vmem_is_populator()
{
	return (mutex_owner(&vmem_sleep_lock) == curthread ||
	    mutex_owner(&vmem_nosleep_lock) == curthread ||
	    mutex_owner(&vmem_pushpage_lock) == curthread ||
	    mutex_owner(&vmem_panic_lock) == curthread);
}

/*
 * Populate vmp's segfree list with VMEM_MINFREE vmem_seg_t structures.
 */
static int
vmem_populate(vmem_t *vmp, int vmflag)
{
	char *p;
	vmem_seg_t *vsp;
	ssize_t nseg;
	size_t size;
	kmutex_t *lp;
	int i;

	while (vmp->vm_nsegfree < VMEM_MINFREE &&
	    (vsp = vmem_getseg_global()) != NULL)
		vmem_putseg(vmp, vsp);

	if (vmp->vm_nsegfree >= VMEM_MINFREE)
		return (1);

	/*
	 * If we're already populating, tap the reserve.
	 */
	if (vmem_is_populator()) {
		ASSERT(vmp->vm_cflags & VMC_POPULATOR);
		return (1);
	}

	mutex_exit(&vmp->vm_lock);

	if (panic_thread == curthread)
		lp = &vmem_panic_lock;
	else if (vmflag & VM_NOSLEEP)
		lp = &vmem_nosleep_lock;
	else if (vmflag & VM_PUSHPAGE)
		lp = &vmem_pushpage_lock;
	else
		lp = &vmem_sleep_lock;

	mutex_enter(lp);

	nseg = VMEM_MINFREE + vmem_populators * VMEM_POPULATE_RESERVE;
	size = P2ROUNDUP(nseg * vmem_seg_size, vmem_seg_arena->vm_quantum);
	nseg = size / vmem_seg_size;

	/*
	 * The following vmem_alloc() may need to populate vmem_seg_arena
	 * and all the things it imports from.  When doing so, it will tap
	 * each arena's reserve to prevent recursion (see the block comment
	 * above the definition of VMEM_POPULATE_RESERVE).
	 */
	p = vmem_alloc(vmem_seg_arena, size, vmflag & VM_KMFLAGS);
	if (p == NULL) {
		mutex_exit(lp);
		mutex_enter(&vmp->vm_lock);
		vmp->vm_kstat.vk_populate_fail.value.ui64++;
		return (0);
	}

	/*
	 * Restock the arenas that may have been depleted during population.
	 */
	for (i = 0; i < vmem_populators; i++) {
		mutex_enter(&vmem_populator[i]->vm_lock);
		while (vmem_populator[i]->vm_nsegfree < VMEM_POPULATE_RESERVE)
			vmem_putseg(vmem_populator[i],
			    (vmem_seg_t *)(p + --nseg * vmem_seg_size));
		mutex_exit(&vmem_populator[i]->vm_lock);
	}

	mutex_exit(lp);
	mutex_enter(&vmp->vm_lock);

	/*
	 * Now take our own segments.
	 */
	ASSERT(nseg >= VMEM_MINFREE);
	while (vmp->vm_nsegfree < VMEM_MINFREE)
		vmem_putseg(vmp, (vmem_seg_t *)(p + --nseg * vmem_seg_size));

	/*
	 * Give the remainder to charity.
	 */
	while (nseg > 0)
		vmem_putseg_global((vmem_seg_t *)(p + --nseg * vmem_seg_size));

	return (1);
}

/*
 * Advance a walker from its previous position to 'afterme'.
 * Note: may drop and reacquire vmp->vm_lock.
 */
static void
vmem_advance(vmem_t *vmp, vmem_seg_t *walker, vmem_seg_t *afterme)
{
	vmem_seg_t *vprev = walker->vs_aprev;
	vmem_seg_t *vnext = walker->vs_anext;
	vmem_seg_t *vsp = NULL;

	VMEM_DELETE(walker, a);

	if (afterme != NULL)
		VMEM_INSERT(afterme, walker, a);

	/*
	 * The walker segment's presence may have prevented its neighbors
	 * from coalescing.  If so, coalesce them now.
	 */
	if (vprev->vs_type == VMEM_FREE) {
		if (vnext->vs_type == VMEM_FREE) {
			ASSERT(vprev->vs_end == vnext->vs_start);
			vmem_freelist_delete(vmp, vnext);
			vmem_freelist_delete(vmp, vprev);
			vprev->vs_end = vnext->vs_end;
			vmem_freelist_insert(vmp, vprev);
			vmem_seg_destroy(vmp, vnext);
		}
		vsp = vprev;
	} else if (vnext->vs_type == VMEM_FREE) {
		vsp = vnext;
	}

	/*
	 * vsp could represent a complete imported span,
	 * in which case we must return it to the source.
	 */
	if (vsp != NULL && vsp->vs_aprev->vs_import &&
	    vmp->vm_source_free != NULL &&
	    vsp->vs_aprev->vs_type == VMEM_SPAN &&
	    vsp->vs_anext->vs_type == VMEM_SPAN) {
		void *vaddr = (void *)vsp->vs_start;
		size_t size = VS_SIZE(vsp);
		ASSERT(size == VS_SIZE(vsp->vs_aprev));
		vmem_freelist_delete(vmp, vsp);
		vmem_span_destroy(vmp, vsp);
		mutex_exit(&vmp->vm_lock);
		vmp->vm_source_free(vmp->vm_source, vaddr, size);
		mutex_enter(&vmp->vm_lock);
	}
}

/*
 * VM_NEXTFIT allocations deliberately cycle through all virtual addresses
 * in an arena, so that we avoid reusing addresses for as long as possible.
 * This helps to catch used-after-freed bugs.  It's also the perfect policy
 * for allocating things like process IDs, where we want to cycle through
 * all values in order.
 */
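/*
 * A hedged usage sketch (the arena name is illustrative): an identifier
 * arena that hands out IDs in cycling order rather than reusing the most
 * recently freed value:
 *
 *	id = (uint_t)(uintptr_t)vmem_alloc(id_arena, 1,
 *	    VM_SLEEP | VM_NEXTFIT);
 *	...
 *	vmem_free(id_arena, (void *)(uintptr_t)id, 1);
 */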
static void *
vmem_nextfit_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	vmem_seg_t *vsp, *rotor;
	uintptr_t addr;
	size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
	size_t vs_size;

	mutex_enter(&vmp->vm_lock);

	if (vmp->vm_nsegfree < VMEM_MINFREE && !vmem_populate(vmp, vmflag)) {
		mutex_exit(&vmp->vm_lock);
		return (NULL);
	}

	/*
	 * The common case is that the segment right after the rotor is free,
	 * and large enough that extracting 'size' bytes won't change which
	 * freelist it's on.  In this case we can avoid a *lot* of work.
	 * Instead of the normal vmem_seg_alloc(), we just advance the start
	 * address of the victim segment.  Instead of moving the rotor, we
	 * create the new segment structure *behind the rotor*, which has
	 * the same effect.  And finally, we know we don't have to coalesce
	 * the rotor's neighbors because the new segment lies between them.
	 */
	rotor = &vmp->vm_rotor;
	vsp = rotor->vs_anext;
	if (vsp->vs_type == VMEM_FREE && (vs_size = VS_SIZE(vsp)) > realsize &&
	    P2SAMEHIGHBIT(vs_size, vs_size - realsize)) {
		ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
		addr = vsp->vs_start;
		vsp->vs_start = addr + realsize;
		vmem_hash_insert(vmp,
		    vmem_seg_create(vmp, rotor->vs_aprev, addr, addr + size));
		mutex_exit(&vmp->vm_lock);
		return ((void *)addr);
	}

	/*
	 * Starting at the rotor, look for a segment large enough to
	 * satisfy the allocation.
	 */
	for (;;) {
		vmp->vm_kstat.vk_search.value.ui64++;
		if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
			break;
		vsp = vsp->vs_anext;
		if (vsp == rotor) {
			/*
			 * We've come full circle.  One possibility is that
			 * there's actually enough space, but the rotor itself
			 * is preventing the allocation from succeeding because
			 * it's sitting between two free segments.  Therefore,
			 * we advance the rotor and see if that liberates a
			 * suitable segment.
			 */
			vmem_advance(vmp, rotor, rotor->vs_anext);
			vsp = rotor->vs_aprev;
			if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
				break;
			/*
			 * If there's a lower arena we can import from, or it's
			 * a VM_NOSLEEP allocation, let vmem_xalloc() handle it.
			 * Otherwise, wait until another thread frees something.
			 */
			if (vmp->vm_source_alloc != NULL ||
			    (vmflag & VM_NOSLEEP)) {
				mutex_exit(&vmp->vm_lock);
				return (vmem_xalloc(vmp, size, vmp->vm_quantum,
				    0, 0, NULL, NULL, vmflag & VM_KMFLAGS));
			}
			vmp->vm_kstat.vk_wait.value.ui64++;
			cv_wait(&vmp->vm_cv, &vmp->vm_lock);
			vsp = rotor->vs_anext;
		}
	}

	/*
	 * We found a segment.  Extract enough space to satisfy the allocation.
	 */
	addr = vsp->vs_start;
	vsp = vmem_seg_alloc(vmp, vsp, addr, size);
	ASSERT(vsp->vs_type == VMEM_ALLOC &&
	    vsp->vs_start == addr && vsp->vs_end == addr + size);

	/*
	 * Advance the rotor to right after the newly-allocated segment.
	 * That's where the next VM_NEXTFIT allocation will begin searching.
	 */
	vmem_advance(vmp, rotor, vsp);
	mutex_exit(&vmp->vm_lock);
	return ((void *)addr);
}

/*
 * Checks if vmp is guaranteed to have a size-byte buffer somewhere on its
 * freelist.  If size is not a power-of-2, it can return a false-negative.
 *
 * Used to decide if a newly imported span is superfluous after re-acquiring
 * the arena lock.
 */
static int
vmem_canalloc(vmem_t *vmp, size_t size)
{
	int hb;
	int flist = 0;
	ASSERT(MUTEX_HELD(&vmp->vm_lock));

	if ((size & (size - 1)) == 0)
		flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
	else if ((hb = highbit(size)) < VMEM_FREELISTS)
		flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));

	return (flist);
}

/*
 * Allocate size bytes at offset phase from an align boundary such that the
 * resulting segment [addr, addr + size) is a subset of [minaddr, maxaddr)
 * that does not straddle a nocross-aligned boundary.
 */
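/*
 * For example (constraint values are made up): to get 8K that is
 * 64K-aligned, lies below 16MB, and does not cross a 1MB boundary --
 * the sort of request Section 1.6 describes -- a caller might issue:
 *
 *	addr = vmem_xalloc(arena, 8192, 65536, 0, 1024 * 1024,
 *	    NULL, (void *)(16 * 1024 * 1024), VM_SLEEP);
 */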
void *
vmem_xalloc(vmem_t *vmp, size_t size, size_t align_arg, size_t phase,
    size_t nocross, void *minaddr, void *maxaddr, int vmflag)
{
	vmem_seg_t *vsp;
	vmem_seg_t *vbest = NULL;
	uintptr_t addr, taddr, start, end;
	uintptr_t align = (align_arg != 0) ? align_arg : vmp->vm_quantum;
	void *vaddr, *xvaddr = NULL;
	size_t xsize;
	int hb, flist, resv;
	uint32_t mtbf;

	if ((align | phase | nocross) & (vmp->vm_quantum - 1))
		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
		    "parameters not vm_quantum aligned",
		    (void *)vmp, size, align_arg, phase, nocross,
		    minaddr, maxaddr, vmflag);

	if (nocross != 0 &&
	    (align > nocross || P2ROUNDUP(phase + size, align) > nocross))
		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
		    "overconstrained allocation",
		    (void *)vmp, size, align_arg, phase, nocross,
		    minaddr, maxaddr, vmflag);

	if (phase >= align || (align & (align - 1)) != 0 ||
	    (nocross & (nocross - 1)) != 0)
		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
		    "parameters inconsistent or invalid",
		    (void *)vmp, size, align_arg, phase, nocross,
		    minaddr, maxaddr, vmflag);

	if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
	    (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
		return (NULL);

	mutex_enter(&vmp->vm_lock);
	for (;;) {
		if (vmp->vm_nsegfree < VMEM_MINFREE &&
		    !vmem_populate(vmp, vmflag))
			break;
do_alloc:
		/*
		 * highbit() returns the highest bit + 1, which is exactly
		 * what we want: we want to search the first freelist whose
		 * members are *definitely* large enough to satisfy our
		 * allocation.  However, there are certain cases in which we
		 * want to look at the next-smallest freelist (which *might*
		 * be able to satisfy the allocation):
		 *
		 * (1)	The size is exactly a power of 2, in which case
		 *	the smaller freelist is always big enough;
		 *
		 * (2)	All other freelists are empty;
		 *
		 * (3)	We're in the highest possible freelist, which is
		 *	always empty (e.g. the 4GB freelist on 32-bit systems);
		 *
		 * (4)	We're doing a best-fit or first-fit allocation.
		 */
		if ((size & (size - 1)) == 0) {
			flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
		} else {
			hb = highbit(size);
			if ((vmp->vm_freemap >> hb) == 0 ||
			    hb == VMEM_FREELISTS ||
			    (vmflag & (VM_BESTFIT | VM_FIRSTFIT)))
				hb--;
			flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
		}

		for (vbest = NULL, vsp = (flist == 0) ? NULL :
		    vmp->vm_freelist[flist - 1].vs_knext;
		    vsp != NULL; vsp = vsp->vs_knext) {
			vmp->vm_kstat.vk_search.value.ui64++;
			if (vsp->vs_start == 0) {
				/*
				 * We're moving up to a larger freelist,
				 * so if we've already found a candidate,
				 * the fit can't possibly get any better.
				 */
				if (vbest != NULL)
					break;
				/*
				 * Find the next non-empty freelist.
				 */
				flist = lowbit(P2ALIGN(vmp->vm_freemap,
				    VS_SIZE(vsp)));
				if (flist-- == 0)
					break;
				vsp = (vmem_seg_t *)&vmp->vm_freelist[flist];
				ASSERT(vsp->vs_knext->vs_type == VMEM_FREE);
				continue;
			}
			if (vsp->vs_end - 1 < (uintptr_t)minaddr)
				continue;
			if (vsp->vs_start > (uintptr_t)maxaddr - 1)
				continue;
			start = MAX(vsp->vs_start, (uintptr_t)minaddr);
			end = MIN(vsp->vs_end - 1, (uintptr_t)maxaddr - 1) + 1;
			taddr = P2PHASEUP(start, align, phase);
			if (P2CROSS(taddr, taddr + size - 1, nocross))
				taddr +=
				    P2ROUNDUP(P2NPHASE(taddr, nocross), align);
			if ((taddr - start) + size > end - start ||
			    (vbest != NULL && VS_SIZE(vsp) >= VS_SIZE(vbest)))
				continue;
			vbest = vsp;
			addr = taddr;
			if (!(vmflag & VM_BESTFIT) || VS_SIZE(vbest) == size)
				break;
		}
		if (vbest != NULL)
			break;
		ASSERT(xvaddr == NULL);
		if (size == 0)
			panic("vmem_xalloc(): size == 0");
		if (vmp->vm_source_alloc != NULL && nocross == 0 &&
		    minaddr == NULL && maxaddr == NULL) {
			size_t aneeded, asize;
			size_t aquantum = MAX(vmp->vm_quantum,
			    vmp->vm_source->vm_quantum);
			size_t aphase = phase;
			if ((align > aquantum) &&
			    !(vmp->vm_cflags & VMC_XALIGN)) {
				aphase = (P2PHASE(phase, aquantum) != 0) ?
				    align - vmp->vm_quantum : align - aquantum;
				ASSERT(aphase >= phase);
			}
			aneeded = MAX(size + aphase, vmp->vm_min_import);
			asize = P2ROUNDUP(aneeded, aquantum);

			/*
			 * Determine how many segment structures we'll consume.
			 * The calculation must be precise because if we're
			 * here on behalf of vmem_populate(), we are taking
			 * segments from a very limited reserve.
			 */
			if (size == asize && !(vmp->vm_cflags & VMC_XALLOC))
				resv = VMEM_SEGS_PER_SPAN_CREATE +
				    VMEM_SEGS_PER_EXACT_ALLOC;
			else if (phase == 0 &&
			    align <= vmp->vm_source->vm_quantum)
				resv = VMEM_SEGS_PER_SPAN_CREATE +
				    VMEM_SEGS_PER_LEFT_ALLOC;
			else
				resv = VMEM_SEGS_PER_ALLOC_MAX;

			ASSERT(vmp->vm_nsegfree >= resv);
			vmp->vm_nsegfree -= resv;	/* reserve our segs */
			mutex_exit(&vmp->vm_lock);
			if (vmp->vm_cflags & VMC_XALLOC) {
				size_t oasize = asize;
				vaddr = ((vmem_ximport_t *)
				    vmp->vm_source_alloc)(vmp->vm_source,
				    &asize, align, vmflag & VM_KMFLAGS);
				ASSERT(asize >= oasize);
				ASSERT(P2PHASE(asize,
				    vmp->vm_source->vm_quantum) == 0);
				ASSERT(!(vmp->vm_cflags & VMC_XALIGN) ||
				    IS_P2ALIGNED(vaddr, align));
			} else {
				vaddr = vmp->vm_source_alloc(vmp->vm_source,
				    asize, vmflag & VM_KMFLAGS);
			}
			mutex_enter(&vmp->vm_lock);
			vmp->vm_nsegfree += resv;	/* claim reservation */
			aneeded = size + align - vmp->vm_quantum;
			aneeded = P2ROUNDUP(aneeded, vmp->vm_quantum);
			if (vaddr != NULL) {
				/*
				 * Since we dropped the vmem lock while
				 * calling the import function, other
				 * threads could have imported space
				 * and made our import unnecessary.  In
				 * order to save space, we return
				 * excess imports immediately.
				 */
				if (asize > aneeded &&
				    vmp->vm_source_free != NULL &&
				    vmem_canalloc(vmp, aneeded)) {
					ASSERT(resv >=
					    VMEM_SEGS_PER_MIDDLE_ALLOC);
					xvaddr = vaddr;
					xsize = asize;
					goto do_alloc;
				}
				vbest = vmem_span_create(vmp, vaddr, asize, 1);
				addr = P2PHASEUP(vbest->vs_start, align, phase);
				break;
			} else if (vmem_canalloc(vmp, aneeded)) {
				/*
				 * Our import failed, but another thread
				 * added sufficient free memory to the arena
				 * to satisfy our request.  Go back and
				 * grab it.
				 */
				ASSERT(resv >= VMEM_SEGS_PER_MIDDLE_ALLOC);
				goto do_alloc;
			}
		}

		/*
		 * If the requestor chooses to fail the allocation attempt
		 * rather than reap, wait, and retry, get out of the loop.
		 */
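		/*
		 * The failure policy below: VM_ABORT fails at once without
		 * reaping; VM_NOSLEEP fails after a single reap pass;
		 * otherwise (VM_SLEEP) we wait on vm_cv for space to be
		 * freed, then retry.
		 */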
		if (vmflag & VM_ABORT)
			break;
		mutex_exit(&vmp->vm_lock);
		if (vmp->vm_cflags & VMC_IDENTIFIER)
			kmem_reap_idspace();
		else
			kmem_reap();
		mutex_enter(&vmp->vm_lock);
		if (vmflag & VM_NOSLEEP)
			break;
		vmp->vm_kstat.vk_wait.value.ui64++;
		cv_wait(&vmp->vm_cv, &vmp->vm_lock);
	}
	if (vbest != NULL) {
		ASSERT(vbest->vs_type == VMEM_FREE);
		ASSERT(vbest->vs_knext != vbest);
		(void) vmem_seg_alloc(vmp, vbest, addr, size);
		mutex_exit(&vmp->vm_lock);
		if (xvaddr)
			vmp->vm_source_free(vmp->vm_source, xvaddr, xsize);
		ASSERT(P2PHASE(addr, align) == phase);
		ASSERT(!P2CROSS(addr, addr + size - 1, nocross));
		ASSERT(addr >= (uintptr_t)minaddr);
		ASSERT(addr + size - 1 <= (uintptr_t)maxaddr - 1);
		return ((void *)addr);
	}
	vmp->vm_kstat.vk_fail.value.ui64++;
	mutex_exit(&vmp->vm_lock);
	if (vmflag & VM_PANIC)
		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
		    "cannot satisfy mandatory allocation",
		    (void *)vmp, size, align_arg, phase, nocross,
		    minaddr, maxaddr, vmflag);
	ASSERT(xvaddr == NULL);
	return (NULL);
}

/*
 * Free the segment [vaddr, vaddr + size), where vaddr was a constrained
 * allocation.  vmem_xalloc() and vmem_xfree() must always be paired because
 * both routines bypass the quantum caches.
 */
void
vmem_xfree(vmem_t *vmp, void *vaddr, size_t size)
{
	vmem_seg_t *vsp, *vnext, *vprev;

	mutex_enter(&vmp->vm_lock);

	vsp = vmem_hash_delete(vmp, (uintptr_t)vaddr, size);
	vsp->vs_end = P2ROUNDUP(vsp->vs_end, vmp->vm_quantum);

	/*
	 * Attempt to coalesce with the next segment.
	 */
	vnext = vsp->vs_anext;
	if (vnext->vs_type == VMEM_FREE) {
		ASSERT(vsp->vs_end == vnext->vs_start);
		vmem_freelist_delete(vmp, vnext);
		vsp->vs_end = vnext->vs_end;
		vmem_seg_destroy(vmp, vnext);
	}

	/*
	 * Attempt to coalesce with the previous segment.
	 */
	vprev = vsp->vs_aprev;
	if (vprev->vs_type == VMEM_FREE) {
		ASSERT(vprev->vs_end == vsp->vs_start);
		vmem_freelist_delete(vmp, vprev);
		vprev->vs_end = vsp->vs_end;
		vmem_seg_destroy(vmp, vsp);
		vsp = vprev;
	}

	/*
	 * If the entire span is free, return it to the source.
	 */
	if (vsp->vs_aprev->vs_import && vmp->vm_source_free != NULL &&
	    vsp->vs_aprev->vs_type == VMEM_SPAN &&
	    vsp->vs_anext->vs_type == VMEM_SPAN) {
		vaddr = (void *)vsp->vs_start;
		size = VS_SIZE(vsp);
		ASSERT(size == VS_SIZE(vsp->vs_aprev));
		vmem_span_destroy(vmp, vsp);
		mutex_exit(&vmp->vm_lock);
		vmp->vm_source_free(vmp->vm_source, vaddr, size);
	} else {
		vmem_freelist_insert(vmp, vsp);
		mutex_exit(&vmp->vm_lock);
	}
}

/*
 * Allocate size bytes from arena vmp.  Returns the allocated address
 * on success, NULL on failure.  vmflag specifies VM_SLEEP or VM_NOSLEEP,
 * and may also specify best-fit, first-fit, or next-fit allocation policy
 * instead of the default instant-fit policy.  VM_SLEEP allocations are
 * guaranteed to succeed.
 */
void *
vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	vmem_seg_t *vsp;
	uintptr_t addr;
	int hb;
	int flist = 0;
	uint32_t mtbf;

	if (size - 1 < vmp->vm_qcache_max)
		return (kmem_cache_alloc(vmp->vm_qcache[(size - 1) >>
		    vmp->vm_qshift], vmflag & VM_KMFLAGS));

	if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
	    (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
		return (NULL);

	if (vmflag & VM_NEXTFIT)
		return (vmem_nextfit_alloc(vmp, size, vmflag));

	if (vmflag & (VM_BESTFIT | VM_FIRSTFIT))
		return (vmem_xalloc(vmp, size, vmp->vm_quantum, 0, 0,
		    NULL, NULL, vmflag));

	/*
	 * Unconstrained instant-fit allocation from the segment list.
	 */
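	/*
	 * The freelist selection below mirrors the instant-fit logic in
	 * vmem_xalloc(): a power-of-2 size may be satisfied from its own
	 * freelist, while any other size must take the next freelist up
	 * to guarantee a fit.  If no suitable freelist is populated, we
	 * fall through to vmem_xalloc(), which can import a new span
	 * from the source arena.
	 */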
	mutex_enter(&vmp->vm_lock);

	if (vmp->vm_nsegfree >= VMEM_MINFREE || vmem_populate(vmp, vmflag)) {
		if ((size & (size - 1)) == 0)
			flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
		else if ((hb = highbit(size)) < VMEM_FREELISTS)
			flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
	}

	if (flist-- == 0) {
		mutex_exit(&vmp->vm_lock);
		return (vmem_xalloc(vmp, size, vmp->vm_quantum,
		    0, 0, NULL, NULL, vmflag));
	}

	ASSERT(size <= (1UL << flist));
	vsp = vmp->vm_freelist[flist].vs_knext;
	addr = vsp->vs_start;
	(void) vmem_seg_alloc(vmp, vsp, addr, size);
	mutex_exit(&vmp->vm_lock);
	return ((void *)addr);
}

/*
 * Free the segment [vaddr, vaddr + size).
 */
void
vmem_free(vmem_t *vmp, void *vaddr, size_t size)
{
	if (size - 1 < vmp->vm_qcache_max)
		kmem_cache_free(vmp->vm_qcache[(size - 1) >> vmp->vm_qshift],
		    vaddr);
	else
		vmem_xfree(vmp, vaddr, size);
}

/*
 * Determine whether arena vmp contains the segment [vaddr, vaddr + size).
 */
int
vmem_contains(vmem_t *vmp, void *vaddr, size_t size)
{
	uintptr_t start = (uintptr_t)vaddr;
	uintptr_t end = start + size;
	vmem_seg_t *vsp;
	vmem_seg_t *seg0 = &vmp->vm_seg0;

	mutex_enter(&vmp->vm_lock);
	vmp->vm_kstat.vk_contains.value.ui64++;
	for (vsp = seg0->vs_knext; vsp != seg0; vsp = vsp->vs_knext) {
		vmp->vm_kstat.vk_contains_search.value.ui64++;
		ASSERT(vsp->vs_type == VMEM_SPAN);
		if (start >= vsp->vs_start && end - 1 <= vsp->vs_end - 1)
			break;
	}
	mutex_exit(&vmp->vm_lock);
	return (vsp != seg0);
}

/*
 * Add the span [vaddr, vaddr + size) to arena vmp.
 */
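/*
 * For example (hypothetical arena and addresses), a platform could donate
 * a newly discovered range of I/O space to an existing arena with:
 *
 *	(void) vmem_add(io_arena, (void *)0xb0000000, 0x10000000, VM_SLEEP);
 */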
void *
vmem_add(vmem_t *vmp, void *vaddr, size_t size, int vmflag)
{
	if (vaddr == NULL || size == 0)
		panic("vmem_add(%p, %p, %lu): bad arguments", vmp, vaddr, size);

	ASSERT(!vmem_contains(vmp, vaddr, size));

	mutex_enter(&vmp->vm_lock);
	if (vmem_populate(vmp, vmflag))
		(void) vmem_span_create(vmp, vaddr, size, 0);
	else
		vaddr = NULL;
	mutex_exit(&vmp->vm_lock);
	return (vaddr);
}

/*
 * Walk the vmp arena, applying func to each segment matching typemask.
 * If VMEM_REENTRANT is specified, the arena lock is dropped across each
 * call to func(); otherwise, it is held for the duration of vmem_walk()
 * to ensure a consistent snapshot.  Note that VMEM_REENTRANT callbacks
 * are *not* necessarily consistent, so they may only be used when a hint
 * is adequate.
 */
void
vmem_walk(vmem_t *vmp, int typemask,
	void (*func)(void *, void *, size_t), void *arg)
{
	vmem_seg_t *vsp;
	vmem_seg_t *seg0 = &vmp->vm_seg0;
	vmem_seg_t walker;

	if (typemask & VMEM_WALKER)
		return;

	bzero(&walker, sizeof (walker));
	walker.vs_type = VMEM_WALKER;

	mutex_enter(&vmp->vm_lock);
	VMEM_INSERT(seg0, &walker, a);
	for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext) {
		if (vsp->vs_type & typemask) {
			void *start = (void *)vsp->vs_start;
			size_t size = VS_SIZE(vsp);
			if (typemask & VMEM_REENTRANT) {
				vmem_advance(vmp, &walker, vsp);
				mutex_exit(&vmp->vm_lock);
				func(arg, start, size);
				mutex_enter(&vmp->vm_lock);
				vsp = &walker;
			} else {
				func(arg, start, size);
			}
		}
	}
	vmem_advance(vmp, &walker, NULL);
	mutex_exit(&vmp->vm_lock);
}

/*
 * Return the total amount of memory whose type matches typemask.  Thus:
 *
 *	typemask VMEM_ALLOC yields total memory allocated (in use).
 *	typemask VMEM_FREE yields total memory free (available).
 *	typemask (VMEM_ALLOC | VMEM_FREE) yields total arena size.
 */
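/*
 * For example, vmem_size(heap_arena, VMEM_FREE) yields the number of
 * free bytes in the kernel heap, and vmem_size(vmp, VMEM_ALLOC | VMEM_FREE)
 * yields vmp's total size.
 */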
size_t
vmem_size(vmem_t *vmp, int typemask)
{
	uint64_t size = 0;

	if (typemask & VMEM_ALLOC)
		size += vmp->vm_kstat.vk_mem_inuse.value.ui64;
	if (typemask & VMEM_FREE)
		size += vmp->vm_kstat.vk_mem_total.value.ui64 -
		    vmp->vm_kstat.vk_mem_inuse.value.ui64;
	return ((size_t)size);
}

/*
 * Create an arena called name whose initial span is [base, base + size).
 * The arena's natural unit of currency is quantum, so vmem_alloc()
 * guarantees quantum-aligned results.  The arena may import new spans
 * by invoking afunc() on source, and may return those spans by invoking
 * ffunc() on source.  To make small allocations fast and scalable,
 * the arena offers high-performance caching for each integer multiple
 * of quantum up to qcache_max.
 */
static vmem_t *
vmem_create_common(const char *name, void *base, size_t size, size_t quantum,
	void *(*afunc)(vmem_t *, size_t, int),
	void (*ffunc)(vmem_t *, void *, size_t),
	vmem_t *source, size_t qcache_max, int vmflag)
{
	int i;
	size_t nqcache;
	vmem_t *vmp, *cur, **vmpp;
	vmem_seg_t *vsp;
	vmem_freelist_t *vfp;
	uint32_t id = atomic_add_32_nv(&vmem_id, 1);

	if (vmem_vmem_arena != NULL) {
		vmp = vmem_alloc(vmem_vmem_arena, sizeof (vmem_t),
		    vmflag & VM_KMFLAGS);
	} else {
		ASSERT(id <= VMEM_INITIAL);
		vmp = &vmem0[id - 1];
	}

	/* An identifier arena must inherit from another identifier arena */
	ASSERT(source == NULL || ((source->vm_cflags & VMC_IDENTIFIER) ==
	    (vmflag & VMC_IDENTIFIER)));

	if (vmp == NULL)
		return (NULL);
	bzero(vmp, sizeof (vmem_t));

	(void) snprintf(vmp->vm_name, VMEM_NAMELEN, "%s", name);
	mutex_init(&vmp->vm_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vmp->vm_cv, NULL, CV_DEFAULT, NULL);
	vmp->vm_cflags = vmflag;
	vmflag &= VM_KMFLAGS;

	vmp->vm_quantum = quantum;
	vmp->vm_qshift = highbit(quantum) - 1;
	nqcache = MIN(qcache_max >> vmp->vm_qshift, VMEM_NQCACHE_MAX);

	for (i = 0; i <= VMEM_FREELISTS; i++) {
		vfp = &vmp->vm_freelist[i];
		vfp->vs_end = 1UL << i;
		vfp->vs_knext = (vmem_seg_t *)(vfp + 1);
		vfp->vs_kprev = (vmem_seg_t *)(vfp - 1);
	}

	vmp->vm_freelist[0].vs_kprev = NULL;
	vmp->vm_freelist[VMEM_FREELISTS].vs_knext = NULL;
	vmp->vm_freelist[VMEM_FREELISTS].vs_end = 0;
	vmp->vm_hash_table = vmp->vm_hash0;
	vmp->vm_hash_mask = VMEM_HASH_INITIAL - 1;
	vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);

	vsp = &vmp->vm_seg0;
	vsp->vs_anext = vsp;
	vsp->vs_aprev = vsp;
	vsp->vs_knext = vsp;
	vsp->vs_kprev = vsp;
	vsp->vs_type = VMEM_SPAN;

	vsp = &vmp->vm_rotor;
	vsp->vs_type = VMEM_ROTOR;
	VMEM_INSERT(&vmp->vm_seg0, vsp, a);

	bcopy(&vmem_kstat_template, &vmp->vm_kstat, sizeof (vmem_kstat_t));

	vmp->vm_id = id;
	if (source != NULL)
		vmp->vm_kstat.vk_source_id.value.ui32 = source->vm_id;
	vmp->vm_source = source;
	vmp->vm_source_alloc = afunc;
	vmp->vm_source_free = ffunc;

	/*
	 * Some arenas (like vmem_metadata and kmem_metadata) cannot
	 * use quantum caching to lower fragmentation.  Instead, we
	 * increase their imports, giving a similar effect.
	 */
	if (vmp->vm_cflags & VMC_NO_QCACHE) {
		vmp->vm_min_import =
		    VMEM_QCACHE_SLABSIZE(nqcache << vmp->vm_qshift);
		nqcache = 0;
	}

	if (nqcache != 0) {
		ASSERT(!(vmflag & VM_NOSLEEP));
		vmp->vm_qcache_max = nqcache << vmp->vm_qshift;
		for (i = 0; i < nqcache; i++) {
			char buf[VMEM_NAMELEN + 21];
			(void) sprintf(buf, "%s_%lu", vmp->vm_name,
			    (i + 1) * quantum);
			vmp->vm_qcache[i] = kmem_cache_create(buf,
			    (i + 1) * quantum, quantum, NULL, NULL, NULL,
			    NULL, vmp, KMC_QCACHE | KMC_NOTOUCH);
		}
	}

	if ((vmp->vm_ksp = kstat_create("vmem", vmp->vm_id, vmp->vm_name,
	    "vmem", KSTAT_TYPE_NAMED, sizeof (vmem_kstat_t) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) != NULL) {
		vmp->vm_ksp->ks_data = &vmp->vm_kstat;
		kstat_install(vmp->vm_ksp);
	}

	mutex_enter(&vmem_list_lock);
	vmpp = &vmem_list;
	while ((cur = *vmpp) != NULL)
		vmpp = &cur->vm_next;
	*vmpp = vmp;
	mutex_exit(&vmem_list_lock);

	if (vmp->vm_cflags & VMC_POPULATOR) {
		ASSERT(vmem_populators < VMEM_INITIAL);
		vmem_populator[atomic_add_32_nv(&vmem_populators, 1) - 1] = vmp;
		mutex_enter(&vmp->vm_lock);
		(void) vmem_populate(vmp, vmflag | VM_PANIC);
		mutex_exit(&vmp->vm_lock);
	}

	if ((base || size) && vmem_add(vmp, base, size, vmflag) == NULL) {
		vmem_destroy(vmp);
		return (NULL);
	}

	return (vmp);
}

vmem_t *
vmem_xcreate(const char *name, void *base, size_t size, size_t quantum,
	vmem_ximport_t *afunc, vmem_free_t *ffunc, vmem_t *source,
	size_t qcache_max, int vmflag)
{
	ASSERT(!(vmflag & (VMC_POPULATOR | VMC_XALLOC)));
	vmflag &= ~(VMC_POPULATOR | VMC_XALLOC);

	return (vmem_create_common(name, base, size, quantum,
	    (vmem_alloc_t *)afunc, ffunc, source, qcache_max,
	    vmflag | VMC_XALLOC));
}

vmem_t *
vmem_create(const char *name, void *base, size_t size, size_t quantum,
	vmem_alloc_t *afunc, vmem_free_t *ffunc, vmem_t *source,
	size_t qcache_max, int vmflag)
{
	ASSERT(!(vmflag & (VMC_XALLOC | VMC_XALIGN)));
	vmflag &= ~(VMC_XALLOC | VMC_XALIGN);

	return (vmem_create_common(name, base, size, quantum,
	    afunc, ffunc, source, qcache_max, vmflag));
}

/*
 * Destroy arena vmp.
 */
void
vmem_destroy(vmem_t *vmp)
{
	vmem_t *cur, **vmpp;
	vmem_seg_t *seg0 = &vmp->vm_seg0;
	vmem_seg_t *vsp;
	size_t leaked;
	int i;

	mutex_enter(&vmem_list_lock);
	vmpp = &vmem_list;
	while ((cur = *vmpp) != vmp)
		vmpp = &cur->vm_next;
	*vmpp = vmp->vm_next;
	mutex_exit(&vmem_list_lock);

	for (i = 0; i < VMEM_NQCACHE_MAX; i++)
		if (vmp->vm_qcache[i])
			kmem_cache_destroy(vmp->vm_qcache[i]);

	leaked = vmem_size(vmp, VMEM_ALLOC);
	if (leaked != 0)
		cmn_err(CE_WARN, "vmem_destroy('%s'): leaked %lu %s",
		    vmp->vm_name, leaked, (vmp->vm_cflags & VMC_IDENTIFIER) ?
		    "identifiers" : "bytes");

	if (vmp->vm_hash_table != vmp->vm_hash0)
		vmem_free(vmem_hash_arena, vmp->vm_hash_table,
		    (vmp->vm_hash_mask + 1) * sizeof (void *));

	/*
	 * Give back the segment structures for anything that's left in the
	 * arena, e.g. the primary spans and their free segments.
	 */
	VMEM_DELETE(&vmp->vm_rotor, a);
	for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext)
		vmem_putseg_global(vsp);

	while (vmp->vm_nsegfree > 0)
		vmem_putseg_global(vmem_getseg(vmp));

	kstat_delete(vmp->vm_ksp);

	mutex_destroy(&vmp->vm_lock);
	cv_destroy(&vmp->vm_cv);
	vmem_free(vmem_vmem_arena, vmp, sizeof (vmem_t));
}

/*
 * Resize vmp's hash table to keep the average lookup depth near 1.0.
 */
static void
vmem_hash_rescale(vmem_t *vmp)
{
	vmem_seg_t **old_table, **new_table, *vsp;
	size_t old_size, new_size, h, nseg;

	nseg = (size_t)(vmp->vm_kstat.vk_alloc.value.ui64 -
	    vmp->vm_kstat.vk_free.value.ui64);

	new_size = MAX(VMEM_HASH_INITIAL, 1 << (highbit(3 * nseg + 4) - 2));
	old_size = vmp->vm_hash_mask + 1;

	if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
		return;

	new_table = vmem_alloc(vmem_hash_arena, new_size * sizeof (void *),
	    VM_NOSLEEP);
	if (new_table == NULL)
		return;
	bzero(new_table, new_size * sizeof (void *));

	mutex_enter(&vmp->vm_lock);

	old_size = vmp->vm_hash_mask + 1;
	old_table = vmp->vm_hash_table;

	vmp->vm_hash_mask = new_size - 1;
	vmp->vm_hash_table = new_table;
	vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);

	for (h = 0; h < old_size; h++) {
		vsp = old_table[h];
		while (vsp != NULL) {
			uintptr_t addr = vsp->vs_start;
			vmem_seg_t *next_vsp = vsp->vs_knext;
			vmem_seg_t **hash_bucket = VMEM_HASH(vmp, addr);
			vsp->vs_knext = *hash_bucket;
			*hash_bucket = vsp;
			vsp = next_vsp;
		}
	}

	mutex_exit(&vmp->vm_lock);

	if (old_table != vmp->vm_hash0)
		vmem_free(vmem_hash_arena, old_table,
		    old_size * sizeof (void *));
}

/*
 * Perform periodic maintenance on all vmem arenas.
 */
void
vmem_update(void *dummy)
{
	vmem_t *vmp;

	mutex_enter(&vmem_list_lock);
	for (vmp = vmem_list; vmp != NULL; vmp = vmp->vm_next) {
		/*
		 * If threads are waiting for resources, wake them up
		 * periodically so they can issue another kmem_reap()
		 * to reclaim resources cached by the slab allocator.
		 */
		cv_broadcast(&vmp->vm_cv);

		/*
		 * Rescale the hash table to keep the hash chains short.
		 */
		vmem_hash_rescale(vmp);
	}
	mutex_exit(&vmem_list_lock);

	(void) timeout(vmem_update, dummy, vmem_update_interval * hz);
}

/*
 * Prepare vmem for use.
 */
vmem_t *
vmem_init(const char *heap_name,
	void *heap_start, size_t heap_size, size_t heap_quantum,
	void *(*heap_alloc)(vmem_t *, size_t, int),
	void (*heap_free)(vmem_t *, void *, size_t))
{
	uint32_t id;
	int nseg = VMEM_SEG_INITIAL;
	vmem_t *heap;

	while (--nseg >= 0)
		vmem_putseg_global(&vmem_seg0[nseg]);

	heap = vmem_create(heap_name,
	    heap_start, heap_size, heap_quantum,
	    NULL, NULL, NULL, 0,
	    VM_SLEEP | VMC_POPULATOR);

	vmem_metadata_arena = vmem_create("vmem_metadata",
	    NULL, 0, heap_quantum,
	    vmem_alloc, vmem_free, heap, 8 * heap_quantum,
	    VM_SLEEP | VMC_POPULATOR | VMC_NO_QCACHE);

	vmem_seg_arena = vmem_create("vmem_seg",
	    NULL, 0, heap_quantum,
	    heap_alloc, heap_free, vmem_metadata_arena, 0,
	    VM_SLEEP | VMC_POPULATOR);

	vmem_hash_arena = vmem_create("vmem_hash",
	    NULL, 0, 8,
	    heap_alloc, heap_free, vmem_metadata_arena, 0,
	    VM_SLEEP);

	vmem_vmem_arena = vmem_create("vmem_vmem",
	    vmem0, sizeof (vmem0), 1,
	    heap_alloc, heap_free, vmem_metadata_arena, 0,
	    VM_SLEEP);
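
	/*
	 * The arenas above were carved from the static vmem0[] array
	 * before vmem_vmem_arena existed.  Retroactively allocate each
	 * slot from vmem_vmem_arena (constraining the allocation to the
	 * slot's own address range) so the arena's bookkeeping covers
	 * them and later vmem_create()s draw from fresh space.
	 */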
	for (id = 0; id < vmem_id; id++)
		(void) vmem_xalloc(vmem_vmem_arena, sizeof (vmem_t),
		    1, 0, 0, &vmem0[id], &vmem0[id + 1],
		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

	return (heap);
}
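/*
 * Example usage (illustrative sketch only; the arena name and bounds
 * are hypothetical): a driver could manage its minor numbers with an
 * identifier arena:
 *
 *	vmem_t *minor_arena = vmem_create("my_minor", (void *)1, 65534, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
 *	minor_t m = (minor_t)(uintptr_t)vmem_alloc(minor_arena, 1, VM_SLEEP);
 *	...
 *	vmem_free(minor_arena, (void *)(uintptr_t)m, 1);
 *	vmem_destroy(minor_arena);
 */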