/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * based on usr/src/uts/common/os/kmem.c r1.64 from 2001/12/18
 *
 * The slab allocator, as described in the following two papers:
 *
 *	Jeff Bonwick,
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator.
 *	Proceedings of the Summer 1994 Usenix Conference.
 *	Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
 *
 *	Jeff Bonwick and Jonathan Adams,
 *	Magazines and vmem: Extending the Slab Allocator to Many CPUs and
 *	Arbitrary Resources.
 *	Proceedings of the 2001 Usenix Conference.
 *	Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
 *
 * 1. Overview
 * -----------
 * umem is very close to kmem in implementation.  There are five major
 * areas of divergence:
 *
 *	* Initialization
 *
 *	* CPU handling
 *
 *	* umem_update()
 *
 *	* KM_SLEEP vs. UMEM_NOFAIL
 *
 *	* lock ordering
 *
 * 2. Initialization
 * -----------------
 * kmem is initialized early on in boot, and knows that no one will call
 * into it before it is ready.  umem does not have these luxuries.  Instead,
 * initialization is divided into two phases:
 *
 *	* library initialization, and
 *
 *	* first use
 *
 * umem's full initialization happens at the time of the first allocation
 * request (via malloc() and friends, umem_alloc(), or umem_zalloc()),
 * or the first call to umem_cache_create().
 *
 * umem_free() and umem_cache_alloc() do not require special handling,
 * since the only way to get valid arguments for them is to successfully
 * call a function from the first group.
 *
 * 2.1. Library Initialization: umem_startup()
 * -------------------------------------------
 * umem_startup() is libumem.so's .init section.  It calls pthread_atfork()
 * to install the handlers necessary for umem's Fork1-Safety.  Because of
 * race condition issues, all other pre-umem_init() initialization is done
 * statically (i.e. by the dynamic linker).
 *
 * For standalone use, umem_startup() returns everything to its initial
 * state.
 *
 * 2.2. First use: umem_init()
 * ------------------------------
 * The first time any memory allocation function is used, we have to
 * create the backing caches and vmem arenas which are needed for it.
 * umem_init() is the central point for that task.  When it completes,
 * umem_ready is either UMEM_READY (all set) or UMEM_READY_INIT_FAILED (unable
 * to initialize, probably due to lack of memory).
 *
 * There are four different paths from which umem_init() is called:
 *
 *	* from umem_alloc() or umem_zalloc(), with 0 < size <= UMEM_MAXBUF,
 *
 *	* from umem_alloc() or umem_zalloc(), with size > UMEM_MAXBUF,
 *
 *	* from umem_cache_create(), and
 *
 *	* from memalign(), with align > UMEM_ALIGN.
 *
 * The last three just check if umem is initialized, and call umem_init()
 * if it is not.  For performance reasons, the first case is more complicated.
 *
 * 2.2.1. umem_alloc()/umem_zalloc(), with 0 < size <= UMEM_MAXBUF
 * -----------------------------------------------------------------
 * In this case, umem_cache_alloc(&umem_null_cache, ...) is called.
 * There is special case code which causes any allocation on
 * &umem_null_cache to fail by returning (NULL), regardless of the
 * flags argument.
 *
 * So umem_cache_alloc() returns NULL, and umem_alloc()/umem_zalloc() call
 * umem_alloc_retry().  umem_alloc_retry() sees that the allocation
 * was against &umem_null_cache, and calls umem_init().
 *
 * If initialization is successful, umem_alloc_retry() returns 1, which
 * causes umem_alloc()/umem_zalloc() to start over, which causes it to load
 * the (now valid) cache pointer from umem_alloc_table.
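 *
 * In sketch form (a simplification which elides the oversize path and
 * statistics, with the table-index expression shown for illustration),
 * the fast path behaves like:
 *
 *	cp = umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT];
 *	do {
 *		buf = umem_cache_alloc(cp, umflag);
 *	} while (buf == NULL && umem_alloc_retry(cp, umflag));
 *	return (buf);
 *
 * On the very first allocation, cp is &umem_null_cache, so the allocation
 * fails and umem_alloc_retry() calls umem_init(), returning 1 to request
 * a retry against the now-populated table.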
 *
 * 2.2.2. Dealing with race conditions
 * -----------------------------------
 * There are a couple of race conditions resulting from the initialization
 * code that we have to guard against:
 *
 *	* In umem_cache_create(), there is a special UMC_INTERNAL cflag
 *	that is passed for caches created during initialization.  It
 *	is illegal for a user to try to create a UMC_INTERNAL cache.
 *	This allows initialization to proceed, but any other
 *	umem_cache_create()s will block by calling umem_init().
 *
 *	* Since umem_null_cache has a 1-element cache_cpu, its cache_cpu_mask
 *	is always zero.  umem_cache_alloc uses cp->cache_cpu_mask to
 *	mask the cpu number.  This prevents a race between grabbing a
 *	cache pointer out of umem_alloc_table and growing the cpu array.
 *
 *
 * 3. CPU handling
 * ---------------
 * kmem uses the CPU's sequence number to determine which "cpu cache" to
 * use for an allocation.  Currently, there is no way to get the sequence
 * number in userspace.
 *
 * umem keeps track of cpu information in umem_cpus, an array of umem_max_ncpus
 * umem_cpu_t structures.  CPUHINT() is a "hint" function, which we then mask
 * with either umem_cpu_mask or cp->cache_cpu_mask to find the actual "cpu" id.
 * The mechanics of this are all in the CPU(mask) macro.
 *
 * Currently, umem uses thr_self() as its hint.
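 *
 * For example, with umem_max_ncpus == 4 (the cpu count is kept at a power
 * of two so that masking works), umem_cpu_mask is 3, and a thread whose
 * hint value is 14 uses cpu cache umem_cpus[14 & 3], i.e. umem_cpus[2].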
 *
 *
 * 4. The update thread
 * --------------------
 * kmem uses a task queue, kmem_taskq, to do periodic maintenance on
 * every kmem cache.  vmem has a periodic timeout for hash table resizing.
 * The kmem_taskq also provides a separate context for kmem_cache_reap()
 * calls, avoiding issues with the context of kmem_reap() callers.
 *
 * Instead, umem has the concept of "updates", which are asynchronous requests
 * for work attached to single caches.  All caches with pending work are
 * on a doubly linked list rooted at the umem_null_cache.  All update state
 * is protected by the umem_update_lock mutex, and the umem_update_cv is used
 * for notification between threads.
 *
 * 4.1. Cache states with regard to updates
 * -----------------------------------------
 * A given cache is in one of three states:
 *
 * Inactive		cache_uflags is zero, cache_u{next,prev} are NULL
 *
 * Work Requested	cache_uflags is non-zero (but UMU_ACTIVE is not set),
 *			cache_u{next,prev} link the cache onto the global
 *			update list
 *
 * Active		cache_uflags has UMU_ACTIVE set, cache_u{next,prev}
 *			are NULL, and either umem_update_thr or
 *			umem_st_update_thr are actively doing work on the
 *			cache.
 *
 * An update can be added to any cache in any state -- if the cache is
 * Inactive, it transitions to being Work Requested.  If the cache is
 * Active, the worker will notice the new update and act on it before
 * transitioning the cache to the Inactive state.
 *
 * If a cache is in the Active state, UMU_NOTIFY can be set, which asks
 * the worker to broadcast the umem_update_cv when it has finished.
 *
 * 4.2. Update interface
 * ---------------------
 * umem_add_update() adds an update to a particular cache.
 * umem_updateall() adds an update to all caches.
 * umem_remove_updates() returns a cache to the Inactive state.
 *
 * umem_process_updates() processes all caches in the Work Requested state.
 *
 * 4.3. Reaping
 * ------------
 * When umem_reap() is called (at the time of heap growth), it schedules
 * UMU_REAP updates on every cache.  It then checks to see if the update
 * thread exists (umem_update_thr != 0).  If it does, it broadcasts
 * the umem_update_cv to wake the update thread up, and returns.
 *
 * If the update thread does not exist (umem_update_thr == 0), and the
 * program currently has multiple threads, umem_reap() attempts to create
 * a new update thread.
 *
 * If the process is not multithreaded, or the creation fails, umem_reap()
 * calls umem_st_update() to do an inline update.
 *
 * 4.4. The update thread
 * ----------------------
 * The update thread spends most of its time in cond_timedwait() on the
 * umem_update_cv.  It wakes up under two conditions:
 *
 *	* The timedwait times out, in which case it needs to run a global
 *	update, or
 *
 *	* someone cond_broadcast(3THR)s the umem_update_cv, in which case
 *	it needs to check if there are any caches in the Work Requested
 *	state.
 *
 * When it is time for another global update, umem calls umem_cache_update()
 * on every cache, then calls vmem_update(), which tunes the vmem structures.
 * umem_cache_update() can request further work using umem_add_update().
 *
 * After any work from the global update completes, the update timer is
 * reset to umem_reap_interval seconds in the future.  This makes the
 * updates self-throttling.
 *
 * Reaps are similarly self-throttling.  After a UMU_REAP update has
 * been scheduled on all caches, umem_reap() sets a flag and wakes up the
 * update thread.  The update thread notices the flag, and resets the
 * reap state.
 *
 * 4.5. Inline updates
 * -------------------
 * If the update thread is not running, umem_st_update() is used instead.  It
 * immediately does a global update (as above), then calls
 * umem_process_updates() to process both the reaps that umem_reap() added and
 * any work generated by the global update.  Afterwards, it resets the reap
 * state.
 *
 * While umem_st_update() is running, umem_st_update_thr holds the thread
 * id of the thread performing the update.
 *
 * 4.6. Updates and fork1()
 * ------------------------
 * umem has fork1() pre- and post-handlers which lock up (and release) every
 * mutex in every cache.  They also lock up the umem_update_lock.  Since
 * fork1() only copies over a single lwp, other threads (including the update
 * thread) could have been actively using a cache in the parent.  This
 * can lead to inconsistencies in the child process.
 *
 * Because we locked all of the mutexes, the only possible inconsistencies are:
 *
 *	* a umem_cache_alloc() could leak its buffer.
 *
 *	* a caller of umem_depot_alloc() could leak a magazine, and all the
 *	buffers contained in it.
 *
 *	* a cache could be in the Active update state.  In the child, there
 *	would be no thread actually working on it.
 *
 *	* a umem_hash_rescale() could leak the new hash table.
 *
 *	* a umem_magazine_resize() could be in progress.
 *
 *	* a umem_reap() could be in progress.
 *
 * The memory leaks we can't do anything about.  umem_release_child() resets
 * the update state, and moves any caches in the Active state to the Work
 * Requested state.  This might cause some updates to be re-run, but UMU_REAP
 * and UMU_HASH_RESCALE are effectively idempotent, and the worst that can
 * happen from umem_magazine_resize() is resizing the magazine twice in close
 * succession.
 *
 * Much of the cleanup in umem_release_child() is skipped if
 * umem_st_update_thr == thr_self().  This is so that applications which call
 * fork1() from a cache callback do not break.  Needless to say, any such
 * application is tremendously broken.
 *
 *
 * 5. KM_SLEEP vs. UMEM_NOFAIL
 * ----------------------------
 * Allocations against kmem and vmem have two basic modes:  SLEEP and
 * NOSLEEP.  A sleeping allocation will go to sleep (waiting for
 * more memory) instead of failing (returning NULL).
 *
 * SLEEP allocations presume an extremely multithreaded model, with
 * a lot of allocation and deallocation activity.  umem cannot presume
 * that its clients have any particular type of behavior.  Instead,
 * it provides two types of allocations:
 *
 *	* UMEM_DEFAULT, equivalent to KM_NOSLEEP (i.e. return NULL on
 *	failure)
 *
 *	* UMEM_NOFAIL, which, on failure, calls an optional callback
 *	(registered with umem_nofail_callback()).
 *
 * The callback is invoked with no locks held, and can do an arbitrary
 * amount of work.  It then has a choice between:
 *
 *	* Returning UMEM_CALLBACK_RETRY, which will cause the allocation
 *	to be restarted.
 *
 *	* Returning UMEM_CALLBACK_EXIT(status), which will cause exit(2)
 *	to be invoked with status.  If multiple threads attempt to do
 *	this simultaneously, only one will call exit(2).
 *
 *	* Doing some kind of non-local exit (thr_exit(3THR), longjmp(3C),
 *	etc.)
 *
 * The default callback returns UMEM_CALLBACK_EXIT(255).
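 *
 * As an illustrative sketch (release_app_caches() is a hypothetical
 * application function, not part of libumem), an application that can
 * shed memory on demand might register:
 *
 *	static int
 *	app_nofail_cb(void)
 *	{
 *		if (release_app_caches() > 0)
 *			return (UMEM_CALLBACK_RETRY);
 *		return (UMEM_CALLBACK_EXIT(1));
 *	}
 *	...
 *	umem_nofail_callback(app_nofail_cb);
 *	buf = umem_alloc(size, UMEM_NOFAIL);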
 *
 * To have these callbacks without risk of state corruption (in the case of
 * a non-local exit), we have to ensure that the callbacks get invoked
 * close to the original allocation, with no inconsistent state or held
 * locks.  The following steps are taken:
 *
 *	* All invocations of vmem are VM_NOSLEEP.
 *
 *	* All constructor callbacks (which can themselves do allocations)
 *	are passed UMEM_DEFAULT as their required allocation argument.  This
 *	way, the constructor will fail, allowing the highest-level allocation
 *	to invoke the nofail callback.
 *
 *	If a constructor callback _does_ do a UMEM_NOFAIL allocation, and
 *	the nofail callback does a non-local exit, we will leak the
 *	partially-constructed buffer.
 *
 *
 * 6. Lock Ordering
 * ----------------
 * umem has a few more locks than kmem does, mostly in the update path.  The
 * overall lock ordering (earlier locks must be acquired first) is:
 *
 *	umem_init_lock
 *
 *	vmem_list_lock
 *	vmem_nosleep_lock.vmpl_mutex
 *	vmem_t's:
 *		vm_lock
 *	sbrk_lock
 *
 *	umem_cache_lock
 *	umem_update_lock
 *	umem_flags_lock
 *	umem_cache_t's:
 *		cache_cpu[*].cc_lock
 *		cache_depot_lock
 *		cache_lock
 *	umem_log_header_t's:
 *		lh_cpu[*].clh_lock
 *		lh_lock
 */

#include <umem_impl.h>
#include <sys/vmem_impl_user.h>
#include "umem_base.h"
#include "vmem_base.h"

#include <sys/processor.h>
#include <sys/sysmacros.h>

#include <alloca.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <signal.h>
#include <unistd.h>
#include <atomic.h>

#include "misc.h"

#define	UMEM_VMFLAGS(umflag)	(VM_NOSLEEP)

size_t pagesize;

/*
 * The default set of caches to back umem_alloc().
 * These sizes should be reevaluated periodically.
 *
 * We want allocations that are multiples of the coherency granularity
 * (64 bytes) to be satisfied from a cache which is a multiple of 64
 * bytes, so that it will be 64-byte aligned.  For all multiples of 64,
 * the next cache size greater than or equal to it must be a
 * multiple of 64.
 *
 * This table must be in sorted order, from smallest to largest.  The
 * largest slot must be UMEM_MAXBUF, and every slot afterwards must be
 * zero.
 */
static int umem_alloc_sizes[] = {
#ifdef _LP64
	1 * 8,
	1 * 16,
	2 * 16,
	3 * 16,
#else
	1 * 8,
	2 * 8,
	3 * 8,
	4 * 8,		5 * 8,		6 * 8,		7 * 8,
#endif
	4 * 16,		5 * 16,		6 * 16,		7 * 16,
	4 * 32,		5 * 32,		6 * 32,		7 * 32,
	4 * 64,		5 * 64,		6 * 64,		7 * 64,
	4 * 128,	5 * 128,	6 * 128,	7 * 128,
	P2ALIGN(8192 / 7, 64),
	P2ALIGN(8192 / 6, 64),
	P2ALIGN(8192 / 5, 64),
	P2ALIGN(8192 / 4, 64), 2304,
	P2ALIGN(8192 / 3, 64),
	P2ALIGN(8192 / 2, 64), 4544,
	P2ALIGN(8192 / 1, 64), 9216,
	4096 * 3,
	UMEM_MAXBUF,			/* = 8192 * 2 */
	/* 24 slots for user expansion */
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
};
#define	NUM_ALLOC_SIZES (sizeof (umem_alloc_sizes) / sizeof (*umem_alloc_sizes))

static umem_magtype_t umem_magtype[] = {
	{ 1,	8,	3200,	65536 },
	{ 3,	16,	256,	32768 },
	{ 7,	32,	64,	16384 },
	{ 15,	64,	0,	8192 },
	{ 31,	64,	0,	4096 },
	{ 47,	64,	0,	2048 },
	{ 63,	64,	0,	1024 },
	{ 95,	64,	0,	512 },
	{ 143,	64,	0,	0 },
};

/*
 * umem tunables
 */
uint32_t umem_max_ncpus;	/* # of CPU caches. */

uint32_t umem_stack_depth = 15;	/* # stack frames in a bufctl_audit */
uint32_t umem_reap_interval = 10; /* max reaping rate (seconds) */
uint_t umem_depot_contention = 2; /* max failed trylocks per real interval */
uint_t umem_abort = 1;		/* whether to abort on error */
uint_t umem_output = 0;		/* whether to write to standard error */
uint_t umem_logging = 0;	/* umem_log_enter() override */
uint32_t umem_mtbf = 0;		/* mean time between failures [default: off] */
size_t umem_transaction_log_size; /* size of transaction log */
size_t umem_content_log_size;	/* size of content log */
size_t umem_failure_log_size;	/* failure log [4 pages per CPU] */
size_t umem_slab_log_size;	/* slab create log [4 pages per CPU] */
size_t umem_content_maxsave = 256; /* UMF_CONTENTS max bytes to log */
size_t umem_lite_minsize = 0;	/* minimum buffer size for UMF_LITE */
size_t umem_lite_maxalign = 1024; /* maximum buffer alignment for UMF_LITE */
size_t umem_maxverify;		/* maximum bytes to inspect in debug routines */
size_t umem_minfirewall;	/* hardware-enforced redzone threshold */

uint_t umem_flags = 0;

mutex_t			umem_init_lock;		/* locks initialization */
cond_t			umem_init_cv;		/* initialization CV */
thread_t		umem_init_thr;		/* thread initializing */
int			umem_init_env_ready;	/* environ pre-initted */
int			umem_ready = UMEM_READY_STARTUP;

static umem_nofail_callback_t *nofail_callback;
static mutex_t		umem_nofail_exit_lock;
static thread_t		umem_nofail_exit_thr;

static umem_cache_t	*umem_slab_cache;
static umem_cache_t	*umem_bufctl_cache;
static umem_cache_t	*umem_bufctl_audit_cache;

mutex_t			umem_flags_lock;

static vmem_t		*heap_arena;
static vmem_alloc_t	*heap_alloc;
static vmem_free_t	*heap_free;

static vmem_t		*umem_internal_arena;
static vmem_t		*umem_cache_arena;
static vmem_t		*umem_hash_arena;
static vmem_t		*umem_log_arena;
static vmem_t		*umem_oversize_arena;
static vmem_t		*umem_va_arena;
static vmem_t		*umem_default_arena;
static vmem_t		*umem_firewall_va_arena;
static vmem_t		*umem_firewall_arena;

vmem_t			*umem_memalign_arena;

umem_log_header_t *umem_transaction_log;
umem_log_header_t *umem_content_log;
umem_log_header_t *umem_failure_log;
umem_log_header_t *umem_slab_log;

#define	CPUHINT()		(thr_self())
#define	CPUHINT_MAX()		INT_MAX

#define	CPU(mask)		(umem_cpus + (CPUHINT() & (mask)))
static umem_cpu_t umem_startup_cpu = {	/* initial, single, cpu */
	UMEM_CACHE_SIZE(0),
	0
};

static uint32_t umem_cpu_mask = 0;			/* global cpu mask */
static umem_cpu_t *umem_cpus = &umem_startup_cpu;	/* cpu list */

volatile uint32_t umem_reaping;

thread_t		umem_update_thr;
struct timeval		umem_update_next;	/* timeofday of next update */
volatile thread_t	umem_st_update_thr;	/* only used when single-thd */

#define	IN_UPDATE()	(thr_self() == umem_update_thr || \
			    thr_self() == umem_st_update_thr)
#define	IN_REAP()	IN_UPDATE()

mutex_t			umem_update_lock;	/* cache_u{next,prev,flags} */
cond_t			umem_update_cv;

volatile hrtime_t umem_reap_next;	/* min hrtime of next reap */

mutex_t			umem_cache_lock;	/* inter-cache linkage only */

#ifdef UMEM_STANDALONE
umem_cache_t		umem_null_cache;
static const umem_cache_t umem_null_cache_template = {
#else
umem_cache_t		umem_null_cache = {
#endif
	0, 0, 0, 0, 0,
	0, 0,
	0, 0,
	0, 0,
	"invalid_cache",
	0, 0,
	NULL, NULL, NULL, NULL,
	NULL,
	0, 0, 0, 0,
	&umem_null_cache, &umem_null_cache,
	&umem_null_cache, &umem_null_cache,
	0,
	DEFAULTMUTEX,				/* start of slab layer */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	&umem_null_cache.cache_nullslab,
	{
		&umem_null_cache,
		NULL,
		&umem_null_cache.cache_nullslab,
		&umem_null_cache.cache_nullslab,
		NULL,
		-1,
		0
	},
	NULL,
	NULL,
	DEFAULTMUTEX,				/* start of depot layer */
	NULL, {
		NULL, 0, 0, 0, 0
	}, {
		NULL, 0, 0, 0, 0
	}, {
		{
			DEFAULTMUTEX,		/* start of CPU cache */
			0, 0, NULL, NULL, -1, -1, 0
		}
	}
};

#define	ALLOC_TABLE_4 \
	&umem_null_cache, &umem_null_cache, &umem_null_cache, &umem_null_cache

#define	ALLOC_TABLE_64 \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4

#define	ALLOC_TABLE_1024 \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64

static umem_cache_t *umem_alloc_table[UMEM_MAXBUF >> UMEM_ALIGN_SHIFT] = {
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024
};


/* Used to constrain audit-log stack traces */
caddr_t			umem_min_stack;
caddr_t			umem_max_stack;


#define	UMERR_MODIFIED	0	/* buffer modified while on freelist */
#define	UMERR_REDZONE	1	/* redzone violation (write past end of buf) */
#define	UMERR_DUPFREE	2	/* freed a buffer twice */
#define	UMERR_BADADDR	3	/* freed a bad (unallocated) address */
#define	UMERR_BADBUFTAG	4	/* buftag corrupted */
#define	UMERR_BADBUFCTL	5	/* bufctl corrupted */
#define	UMERR_BADCACHE	6	/* freed a buffer to the wrong cache */
#define	UMERR_BADSIZE	7	/* alloc size != free size */
#define	UMERR_BADBASE	8	/* buffer base address wrong */

struct {
	hrtime_t	ump_timestamp;	/* timestamp of error */
	int		ump_error;	/* type of umem error (UMERR_*) */
	void		*ump_buffer;	/* buffer that induced abort */
	void		*ump_realbuf;	/* real start address for buffer */
	umem_cache_t	*ump_cache;	/* buffer's cache according to client */
	umem_cache_t	*ump_realcache;	/* actual cache containing buffer */
	umem_slab_t	*ump_slab;	/* slab according to umem_findslab() */
	umem_bufctl_t	*ump_bufctl;	/* bufctl */
} umem_abort_info;

static void
copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
	uint64_t *buf = buf_arg;

	while (buf < bufend)
		*buf++ = pattern;
}

static void *
verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
	uint64_t *buf;

	for (buf = buf_arg; buf < bufend; buf++)
		if (*buf != pattern)
			return (buf);
	return (NULL);
}

static void *
verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
{
	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
	uint64_t *buf;

	for (buf = buf_arg; buf < bufend; buf++) {
		if (*buf != old) {
			copy_pattern(old, buf_arg,
			    (char *)buf - (char *)buf_arg);
			return (buf);
		}
		*buf = new;
	}

	return (NULL);
}

void
umem_cache_applyall(void (*func)(umem_cache_t *))
{
	umem_cache_t *cp;

	(void) mutex_lock(&umem_cache_lock);
	for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
	    cp = cp->cache_next)
		func(cp);
	(void) mutex_unlock(&umem_cache_lock);
}

static void
umem_add_update_unlocked(umem_cache_t *cp, int flags)
{
	umem_cache_t *cnext, *cprev;

	flags &= ~UMU_ACTIVE;

	if (!flags)
		return;

	if (cp->cache_uflags & UMU_ACTIVE) {
		cp->cache_uflags |= flags;
	} else {
		if (cp->cache_unext != NULL) {
			ASSERT(cp->cache_uflags != 0);
			cp->cache_uflags |= flags;
		} else {
			ASSERT(cp->cache_uflags == 0);
			cp->cache_uflags = flags;
			cp->cache_unext = cnext = &umem_null_cache;
			cp->cache_uprev = cprev = umem_null_cache.cache_uprev;
			cnext->cache_uprev = cp;
			cprev->cache_unext = cp;
		}
	}
}

static void
umem_add_update(umem_cache_t *cp, int flags)
{
	(void) mutex_lock(&umem_update_lock);

	umem_add_update_unlocked(cp, flags);

	if (!IN_UPDATE())
		(void) cond_broadcast(&umem_update_cv);

	(void) mutex_unlock(&umem_update_lock);
}

/*
 * Remove a cache from the update list, waiting for any in-progress work to
 * complete first.
 */
static void
umem_remove_updates(umem_cache_t *cp)
{
	(void) mutex_lock(&umem_update_lock);

	/*
	 * Get it out of the active state
	 */
	while (cp->cache_uflags & UMU_ACTIVE) {
		int cancel_state;

		ASSERT(cp->cache_unext == NULL);

		cp->cache_uflags |= UMU_NOTIFY;

		/*
		 * Make sure the update state is sane, before we wait
		 */
		ASSERT(umem_update_thr != 0 || umem_st_update_thr != 0);
		ASSERT(umem_update_thr != thr_self() &&
		    umem_st_update_thr != thr_self());

		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
		    &cancel_state);
		(void) cond_wait(&umem_update_cv, &umem_update_lock);
		(void) pthread_setcancelstate(cancel_state, NULL);
	}
	/*
	 * Get it out of the Work Requested state
	 */
	if (cp->cache_unext != NULL) {
		cp->cache_uprev->cache_unext = cp->cache_unext;
		cp->cache_unext->cache_uprev = cp->cache_uprev;
		cp->cache_uprev = cp->cache_unext = NULL;
		cp->cache_uflags = 0;
	}
	/*
	 * Make sure it is in the Inactive state
	 */
	ASSERT(cp->cache_unext == NULL && cp->cache_uflags == 0);
	(void) mutex_unlock(&umem_update_lock);
}

static void
umem_updateall(int flags)
{
	umem_cache_t *cp;

	/*
	 * NOTE: To prevent deadlock, umem_cache_lock is always acquired first.
	 *
	 * (umem_add_update is called from things run via umem_cache_applyall)
	 */
	(void) mutex_lock(&umem_cache_lock);
	(void) mutex_lock(&umem_update_lock);

	for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
	    cp = cp->cache_next)
		umem_add_update_unlocked(cp, flags);

	if (!IN_UPDATE())
		(void) cond_broadcast(&umem_update_cv);

	(void) mutex_unlock(&umem_update_lock);
	(void) mutex_unlock(&umem_cache_lock);
}

/*
 * Debugging support.  Given a buffer address, find its slab.
 */
static umem_slab_t *
umem_findslab(umem_cache_t *cp, void *buf)
{
	umem_slab_t *sp;

	(void) mutex_lock(&cp->cache_lock);
	for (sp = cp->cache_nullslab.slab_next;
	    sp != &cp->cache_nullslab; sp = sp->slab_next) {
		if (UMEM_SLAB_MEMBER(sp, buf)) {
			(void) mutex_unlock(&cp->cache_lock);
			return (sp);
		}
	}
	(void) mutex_unlock(&cp->cache_lock);

	return (NULL);
}

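/*
 * Diagnose an allocator error: determine the buffer's true cache, slab, and
 * bufctl, record the findings in umem_abort_info, and report them via
 * umem_printf()/umem_err_recoverable().
 */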
static void
umem_error(int error, umem_cache_t *cparg, void *bufarg)
{
	umem_buftag_t *btp = NULL;
	umem_bufctl_t *bcp = NULL;
	umem_cache_t *cp = cparg;
	umem_slab_t *sp;
	uint64_t *off;
	void *buf = bufarg;

	int old_logging = umem_logging;

	umem_logging = 0;	/* stop logging when a bad thing happens */

	umem_abort_info.ump_timestamp = gethrtime();

	sp = umem_findslab(cp, buf);
	if (sp == NULL) {
		for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
		    cp = cp->cache_prev) {
			if ((sp = umem_findslab(cp, buf)) != NULL)
				break;
		}
	}

	if (sp == NULL) {
		cp = NULL;
		error = UMERR_BADADDR;
	} else {
		if (cp != cparg)
			error = UMERR_BADCACHE;
		else
			buf = (char *)bufarg - ((uintptr_t)bufarg -
			    (uintptr_t)sp->slab_base) % cp->cache_chunksize;
		if (buf != bufarg)
			error = UMERR_BADBASE;
		if (cp->cache_flags & UMF_BUFTAG)
			btp = UMEM_BUFTAG(cp, buf);
		if (cp->cache_flags & UMF_HASH) {
			(void) mutex_lock(&cp->cache_lock);
			for (bcp = *UMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
				if (bcp->bc_addr == buf)
					break;
			(void) mutex_unlock(&cp->cache_lock);
			if (bcp == NULL && btp != NULL)
				bcp = btp->bt_bufctl;
			if (umem_findslab(cp->cache_bufctl_cache, bcp) ==
			    NULL || P2PHASE((uintptr_t)bcp, UMEM_ALIGN) ||
			    bcp->bc_addr != buf) {
				error = UMERR_BADBUFCTL;
				bcp = NULL;
			}
		}
	}

	umem_abort_info.ump_error = error;
	umem_abort_info.ump_buffer = bufarg;
	umem_abort_info.ump_realbuf = buf;
	umem_abort_info.ump_cache = cparg;
	umem_abort_info.ump_realcache = cp;
	umem_abort_info.ump_slab = sp;
	umem_abort_info.ump_bufctl = bcp;

	umem_printf("umem allocator: ");

	switch (error) {

	case UMERR_MODIFIED:
		umem_printf("buffer modified after being freed\n");
		off = verify_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
		if (off == NULL)	/* shouldn't happen */
			off = buf;
		umem_printf("modification occurred at offset 0x%lx "
		    "(0x%llx replaced by 0x%llx)\n",
		    (uintptr_t)off - (uintptr_t)buf,
		    (longlong_t)UMEM_FREE_PATTERN, (longlong_t)*off);
		break;

	case UMERR_REDZONE:
		umem_printf("redzone violation: write past end of buffer\n");
		break;

	case UMERR_BADADDR:
		umem_printf("invalid free: buffer not in cache\n");
		break;

	case UMERR_DUPFREE:
		umem_printf("duplicate free: buffer freed twice\n");
		break;

	case UMERR_BADBUFTAG:
		umem_printf("boundary tag corrupted\n");
		umem_printf("bcp ^ bxstat = %lx, should be %lx\n",
		    (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
		    UMEM_BUFTAG_FREE);
		break;

	case UMERR_BADBUFCTL:
		umem_printf("bufctl corrupted\n");
		break;

	case UMERR_BADCACHE:
		umem_printf("buffer freed to wrong cache\n");
		umem_printf("buffer was allocated from %s,\n", cp->cache_name);
		umem_printf("caller attempting free to %s.\n",
		    cparg->cache_name);
		break;

	case UMERR_BADSIZE:
		umem_printf("bad free: free size (%u) != alloc size (%u)\n",
		    UMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
		    UMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
		break;

	case UMERR_BADBASE:
		umem_printf("bad free: free address (%p) != alloc address "
		    "(%p)\n", bufarg, buf);
		break;
	}

	umem_printf("buffer=%p  bufctl=%p  cache: %s\n",
	    bufarg, (void *)bcp, cparg->cache_name);

	if (bcp != NULL && (cp->cache_flags & UMF_AUDIT) &&
	    error != UMERR_BADBUFCTL) {
		int d;
		timespec_t ts;
		hrtime_t diff;
		umem_bufctl_audit_t *bcap = (umem_bufctl_audit_t *)bcp;

		diff = umem_abort_info.ump_timestamp - bcap->bc_timestamp;
		ts.tv_sec = diff / NANOSEC;
		ts.tv_nsec = diff % NANOSEC;

		umem_printf("previous transaction on buffer %p:\n", buf);
		umem_printf("thread=%p  time=T-%ld.%09ld  slab=%p  cache: %s\n",
		    (void *)(intptr_t)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
		    (void *)sp, cp->cache_name);
		for (d = 0; d < MIN(bcap->bc_depth, umem_stack_depth); d++) {
			(void) print_sym((void *)bcap->bc_stack[d]);
			umem_printf("\n");
		}
	}

	umem_err_recoverable("umem: heap corruption detected");

	umem_logging = old_logging;	/* resume logging */
}

void
umem_nofail_callback(umem_nofail_callback_t *cb)
{
	nofail_callback = cb;
}

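/*
 * Failure path for all allocation attempts.  Returns 1 if the caller should
 * retry (after umem_init(), or after a nofail callback returned
 * UMEM_CALLBACK_RETRY), and 0 if the allocation should simply fail.  For
 * UMEM_NOFAIL allocations whose callback chooses exit, this does not return.
 */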
static int
umem_alloc_retry(umem_cache_t *cp, int umflag)
{
	if (cp == &umem_null_cache) {
		if (umem_init())
			return (1);		/* retry */
		/*
		 * Initialization failed.  Do normal failure processing.
		 */
	}
	if (umflag & UMEM_NOFAIL) {
		int def_result = UMEM_CALLBACK_EXIT(255);
		int result = def_result;
		umem_nofail_callback_t *callback = nofail_callback;

		if (callback != NULL)
			result = callback();

		if (result == UMEM_CALLBACK_RETRY)
			return (1);

		if ((result & ~0xFF) != UMEM_CALLBACK_EXIT(0)) {
			log_message("nofail callback returned %x\n", result);
			result = def_result;
		}

		/*
		 * only one thread will call exit
		 */
		if (umem_nofail_exit_thr == thr_self())
			umem_panic("recursive UMEM_CALLBACK_EXIT()\n");

		(void) mutex_lock(&umem_nofail_exit_lock);
		umem_nofail_exit_thr = thr_self();
		exit(result & 0xFF);
		/*NOTREACHED*/
	}
	return (0);
}

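/*
 * Create a log with nchunks (four per cpu cache) fixed-size chunks.  Each
 * cpu starts out owning one chunk; the remaining chunk indices sit in the
 * lh_free ring, from which umem_log_enter() recycles chunks as they fill.
 */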
static umem_log_header_t *
umem_log_init(size_t logsize)
{
	umem_log_header_t *lhp;
	int nchunks = 4 * umem_max_ncpus;
	size_t lhsize = offsetof(umem_log_header_t, lh_cpu[umem_max_ncpus]);
	int i;

	if (logsize == 0)
		return (NULL);

	/*
	 * Make sure that lhp->lh_cpu[] is nicely aligned
	 * to prevent false sharing of cache lines.
	 */
	lhsize = P2ROUNDUP(lhsize, UMEM_ALIGN);
	lhp = vmem_xalloc(umem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
	    NULL, NULL, VM_NOSLEEP);
	if (lhp == NULL)
		goto fail;

	bzero(lhp, lhsize);

	(void) mutex_init(&lhp->lh_lock, USYNC_THREAD, NULL);
	lhp->lh_nchunks = nchunks;
	lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks, PAGESIZE);
	if (lhp->lh_chunksize == 0)
		lhp->lh_chunksize = PAGESIZE;

	lhp->lh_base = vmem_alloc(umem_log_arena,
	    lhp->lh_chunksize * nchunks, VM_NOSLEEP);
	if (lhp->lh_base == NULL)
		goto fail;

	lhp->lh_free = vmem_alloc(umem_log_arena,
	    nchunks * sizeof (int), VM_NOSLEEP);
	if (lhp->lh_free == NULL)
		goto fail;

	bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);

	for (i = 0; i < umem_max_ncpus; i++) {
		umem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
		(void) mutex_init(&clhp->clh_lock, USYNC_THREAD, NULL);
		clhp->clh_chunk = i;
	}

	for (i = umem_max_ncpus; i < nchunks; i++)
		lhp->lh_free[i] = i;

	lhp->lh_head = umem_max_ncpus;
	lhp->lh_tail = 0;

	return (lhp);

fail:
	if (lhp != NULL) {
		if (lhp->lh_base != NULL)
			vmem_free(umem_log_arena, lhp->lh_base,
			    lhp->lh_chunksize * nchunks);

		vmem_xfree(umem_log_arena, lhp, lhsize);
	}
	return (NULL);
}

static void *
umem_log_enter(umem_log_header_t *lhp, void *data, size_t size)
{
	void *logspace;
	umem_cpu_log_header_t *clhp;

	if (lhp == NULL || umem_logging == 0)
		return (NULL);

	clhp = &lhp->lh_cpu[CPU(umem_cpu_mask)->cpu_number];

	(void) mutex_lock(&clhp->clh_lock);
	clhp->clh_hits++;
	if (size > clhp->clh_avail) {
		(void) mutex_lock(&lhp->lh_lock);
		lhp->lh_hits++;
		lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
		lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
		clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
		lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
		clhp->clh_current = lhp->lh_base +
		    clhp->clh_chunk * lhp->lh_chunksize;
		clhp->clh_avail = lhp->lh_chunksize;
		if (size > lhp->lh_chunksize)
			size = lhp->lh_chunksize;
		(void) mutex_unlock(&lhp->lh_lock);
	}
	logspace = clhp->clh_current;
	clhp->clh_current += size;
	clhp->clh_avail -= size;
	bcopy(data, logspace, size);
	(void) mutex_unlock(&clhp->clh_lock);
	return (logspace);
}
11030Sstevel@tonic-gate
11040Sstevel@tonic-gate #define UMEM_AUDIT(lp, cp, bcp) \
11050Sstevel@tonic-gate { \
11060Sstevel@tonic-gate umem_bufctl_audit_t *_bcp = (umem_bufctl_audit_t *)(bcp); \
11070Sstevel@tonic-gate _bcp->bc_timestamp = gethrtime(); \
11080Sstevel@tonic-gate _bcp->bc_thread = thr_self(); \
11090Sstevel@tonic-gate _bcp->bc_depth = getpcstack(_bcp->bc_stack, umem_stack_depth, \
11100Sstevel@tonic-gate (cp != NULL) && (cp->cache_flags & UMF_CHECKSIGNAL)); \
11110Sstevel@tonic-gate _bcp->bc_lastlog = umem_log_enter((lp), _bcp, \
11120Sstevel@tonic-gate UMEM_BUFCTL_AUDIT_SIZE); \
11130Sstevel@tonic-gate }
11140Sstevel@tonic-gate
11150Sstevel@tonic-gate static void
11160Sstevel@tonic-gate umem_log_event(umem_log_header_t *lp, umem_cache_t *cp,
11170Sstevel@tonic-gate umem_slab_t *sp, void *addr)
11180Sstevel@tonic-gate {
11190Sstevel@tonic-gate umem_bufctl_audit_t *bcp;
11200Sstevel@tonic-gate UMEM_LOCAL_BUFCTL_AUDIT(&bcp);
11210Sstevel@tonic-gate
11220Sstevel@tonic-gate bzero(bcp, UMEM_BUFCTL_AUDIT_SIZE);
11230Sstevel@tonic-gate bcp->bc_addr = addr;
11240Sstevel@tonic-gate bcp->bc_slab = sp;
11250Sstevel@tonic-gate bcp->bc_cache = cp;
11260Sstevel@tonic-gate UMEM_AUDIT(lp, cp, bcp);
11270Sstevel@tonic-gate }
11280Sstevel@tonic-gate
11290Sstevel@tonic-gate /*
11300Sstevel@tonic-gate * Create a new slab for cache cp.
11310Sstevel@tonic-gate */
11320Sstevel@tonic-gate static umem_slab_t *
11330Sstevel@tonic-gate umem_slab_create(umem_cache_t *cp, int umflag)
11340Sstevel@tonic-gate {
11350Sstevel@tonic-gate size_t slabsize = cp->cache_slabsize;
11360Sstevel@tonic-gate size_t chunksize = cp->cache_chunksize;
11370Sstevel@tonic-gate int cache_flags = cp->cache_flags;
11380Sstevel@tonic-gate size_t color, chunks;
11390Sstevel@tonic-gate char *buf, *slab;
11400Sstevel@tonic-gate umem_slab_t *sp;
11410Sstevel@tonic-gate umem_bufctl_t *bcp;
11420Sstevel@tonic-gate vmem_t *vmp = cp->cache_arena;
11430Sstevel@tonic-gate
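/*
 * Slab coloring: stagger each new slab's starting offset by the
 * cache's alignment so that buffers in different slabs begin on
 * different cache lines.
 */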
11440Sstevel@tonic-gate color = cp->cache_color + cp->cache_align;
11450Sstevel@tonic-gate if (color > cp->cache_maxcolor)
11460Sstevel@tonic-gate color = cp->cache_mincolor;
11470Sstevel@tonic-gate cp->cache_color = color;
11480Sstevel@tonic-gate
11490Sstevel@tonic-gate slab = vmem_alloc(vmp, slabsize, UMEM_VMFLAGS(umflag));
11500Sstevel@tonic-gate
11510Sstevel@tonic-gate if (slab == NULL)
11520Sstevel@tonic-gate goto vmem_alloc_failure;
11530Sstevel@tonic-gate
11540Sstevel@tonic-gate ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
11550Sstevel@tonic-gate
11560Sstevel@tonic-gate if (!(cp->cache_cflags & UMC_NOTOUCH) &&
11570Sstevel@tonic-gate (cp->cache_flags & UMF_DEADBEEF))
11580Sstevel@tonic-gate copy_pattern(UMEM_UNINITIALIZED_PATTERN, slab, slabsize);
11590Sstevel@tonic-gate
11600Sstevel@tonic-gate if (cache_flags & UMF_HASH) {
11610Sstevel@tonic-gate if ((sp = _umem_cache_alloc(umem_slab_cache, umflag)) == NULL)
11620Sstevel@tonic-gate goto slab_alloc_failure;
11630Sstevel@tonic-gate chunks = (slabsize - color) / chunksize;
11640Sstevel@tonic-gate } else {
11650Sstevel@tonic-gate sp = UMEM_SLAB(cp, slab);
11660Sstevel@tonic-gate chunks = (slabsize - sizeof (umem_slab_t) - color) / chunksize;
11670Sstevel@tonic-gate }
11680Sstevel@tonic-gate
11690Sstevel@tonic-gate sp->slab_cache = cp;
11700Sstevel@tonic-gate sp->slab_head = NULL;
11710Sstevel@tonic-gate sp->slab_refcnt = 0;
11720Sstevel@tonic-gate sp->slab_base = buf = slab + color;
11730Sstevel@tonic-gate sp->slab_chunks = chunks;
11740Sstevel@tonic-gate
11750Sstevel@tonic-gate ASSERT(chunks > 0);
11760Sstevel@tonic-gate while (chunks-- != 0) {
11770Sstevel@tonic-gate if (cache_flags & UMF_HASH) {
11780Sstevel@tonic-gate bcp = _umem_cache_alloc(cp->cache_bufctl_cache, umflag);
11790Sstevel@tonic-gate if (bcp == NULL)
11800Sstevel@tonic-gate goto bufctl_alloc_failure;
11810Sstevel@tonic-gate if (cache_flags & UMF_AUDIT) {
11820Sstevel@tonic-gate umem_bufctl_audit_t *bcap =
11830Sstevel@tonic-gate (umem_bufctl_audit_t *)bcp;
11840Sstevel@tonic-gate bzero(bcap, UMEM_BUFCTL_AUDIT_SIZE);
11850Sstevel@tonic-gate bcap->bc_cache = cp;
11860Sstevel@tonic-gate }
11870Sstevel@tonic-gate bcp->bc_addr = buf;
11880Sstevel@tonic-gate bcp->bc_slab = sp;
11890Sstevel@tonic-gate } else {
11900Sstevel@tonic-gate bcp = UMEM_BUFCTL(cp, buf);
11910Sstevel@tonic-gate }
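/*
 * bt_bxstat encodes both the bufctl pointer and the buffer's state
 * (free vs. allocated); XORing it with the expected state pattern
 * recovers the pointer only when the state matches, which is how
 * duplicate frees are caught cheaply.
 */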
11920Sstevel@tonic-gate if (cache_flags & UMF_BUFTAG) {
11930Sstevel@tonic-gate umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
11940Sstevel@tonic-gate btp->bt_redzone = UMEM_REDZONE_PATTERN;
11950Sstevel@tonic-gate btp->bt_bufctl = bcp;
11960Sstevel@tonic-gate btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
11970Sstevel@tonic-gate if (cache_flags & UMF_DEADBEEF) {
11980Sstevel@tonic-gate copy_pattern(UMEM_FREE_PATTERN, buf,
11990Sstevel@tonic-gate cp->cache_verify);
12000Sstevel@tonic-gate }
12010Sstevel@tonic-gate }
12020Sstevel@tonic-gate bcp->bc_next = sp->slab_head;
12030Sstevel@tonic-gate sp->slab_head = bcp;
12040Sstevel@tonic-gate buf += chunksize;
12050Sstevel@tonic-gate }
12060Sstevel@tonic-gate
12070Sstevel@tonic-gate umem_log_event(umem_slab_log, cp, sp, slab);
12080Sstevel@tonic-gate
12090Sstevel@tonic-gate return (sp);
12100Sstevel@tonic-gate
12110Sstevel@tonic-gate bufctl_alloc_failure:
12120Sstevel@tonic-gate
12130Sstevel@tonic-gate while ((bcp = sp->slab_head) != NULL) {
12140Sstevel@tonic-gate sp->slab_head = bcp->bc_next;
12150Sstevel@tonic-gate _umem_cache_free(cp->cache_bufctl_cache, bcp);
12160Sstevel@tonic-gate }
12170Sstevel@tonic-gate _umem_cache_free(umem_slab_cache, sp);
12180Sstevel@tonic-gate
12190Sstevel@tonic-gate slab_alloc_failure:
12200Sstevel@tonic-gate
12210Sstevel@tonic-gate vmem_free(vmp, slab, slabsize);
12220Sstevel@tonic-gate
12230Sstevel@tonic-gate vmem_alloc_failure:
12240Sstevel@tonic-gate
12250Sstevel@tonic-gate umem_log_event(umem_failure_log, cp, NULL, NULL);
12260Sstevel@tonic-gate atomic_add_64(&cp->cache_alloc_fail, 1);
12270Sstevel@tonic-gate
12280Sstevel@tonic-gate return (NULL);
12290Sstevel@tonic-gate }
12300Sstevel@tonic-gate
12310Sstevel@tonic-gate /*
12320Sstevel@tonic-gate * Destroy a slab.
12330Sstevel@tonic-gate */
12340Sstevel@tonic-gate static void
12350Sstevel@tonic-gate umem_slab_destroy(umem_cache_t *cp, umem_slab_t *sp)
12360Sstevel@tonic-gate {
12370Sstevel@tonic-gate vmem_t *vmp = cp->cache_arena;
12380Sstevel@tonic-gate void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
12390Sstevel@tonic-gate
12400Sstevel@tonic-gate if (cp->cache_flags & UMF_HASH) {
12410Sstevel@tonic-gate umem_bufctl_t *bcp;
12420Sstevel@tonic-gate while ((bcp = sp->slab_head) != NULL) {
12430Sstevel@tonic-gate sp->slab_head = bcp->bc_next;
12440Sstevel@tonic-gate _umem_cache_free(cp->cache_bufctl_cache, bcp);
12450Sstevel@tonic-gate }
12460Sstevel@tonic-gate _umem_cache_free(umem_slab_cache, sp);
12470Sstevel@tonic-gate }
12480Sstevel@tonic-gate vmem_free(vmp, slab, cp->cache_slabsize);
12490Sstevel@tonic-gate }
12500Sstevel@tonic-gate
12510Sstevel@tonic-gate /*
12520Sstevel@tonic-gate * Allocate a raw (unconstructed) buffer from cp's slab layer.
12530Sstevel@tonic-gate */
12540Sstevel@tonic-gate static void *
12550Sstevel@tonic-gate umem_slab_alloc(umem_cache_t *cp, int umflag)
12560Sstevel@tonic-gate {
12570Sstevel@tonic-gate umem_bufctl_t *bcp, **hash_bucket;
12580Sstevel@tonic-gate umem_slab_t *sp;
12590Sstevel@tonic-gate void *buf;
12600Sstevel@tonic-gate
12610Sstevel@tonic-gate (void) mutex_lock(&cp->cache_lock);
12620Sstevel@tonic-gate cp->cache_slab_alloc++;
12630Sstevel@tonic-gate sp = cp->cache_freelist;
12640Sstevel@tonic-gate ASSERT(sp->slab_cache == cp);
12650Sstevel@tonic-gate if (sp->slab_head == NULL) {
12660Sstevel@tonic-gate /*
12670Sstevel@tonic-gate * The freelist is empty. Create a new slab.
12680Sstevel@tonic-gate */
12690Sstevel@tonic-gate (void) mutex_unlock(&cp->cache_lock);
12700Sstevel@tonic-gate if (cp == &umem_null_cache)
12710Sstevel@tonic-gate return (NULL);
12720Sstevel@tonic-gate if ((sp = umem_slab_create(cp, umflag)) == NULL)
12730Sstevel@tonic-gate return (NULL);
12740Sstevel@tonic-gate (void) mutex_lock(&cp->cache_lock);
12750Sstevel@tonic-gate cp->cache_slab_create++;
12760Sstevel@tonic-gate if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
12770Sstevel@tonic-gate cp->cache_bufmax = cp->cache_buftotal;
12780Sstevel@tonic-gate sp->slab_next = cp->cache_freelist;
12790Sstevel@tonic-gate sp->slab_prev = cp->cache_freelist->slab_prev;
12800Sstevel@tonic-gate sp->slab_next->slab_prev = sp;
12810Sstevel@tonic-gate sp->slab_prev->slab_next = sp;
12820Sstevel@tonic-gate cp->cache_freelist = sp;
12830Sstevel@tonic-gate }
12840Sstevel@tonic-gate
12850Sstevel@tonic-gate sp->slab_refcnt++;
12860Sstevel@tonic-gate ASSERT(sp->slab_refcnt <= sp->slab_chunks);
12870Sstevel@tonic-gate
12880Sstevel@tonic-gate /*
12890Sstevel@tonic-gate * If we're taking the last buffer in the slab,
12900Sstevel@tonic-gate * remove the slab from the cache's freelist.
12910Sstevel@tonic-gate */
12920Sstevel@tonic-gate bcp = sp->slab_head;
12930Sstevel@tonic-gate if ((sp->slab_head = bcp->bc_next) == NULL) {
12940Sstevel@tonic-gate cp->cache_freelist = sp->slab_next;
12950Sstevel@tonic-gate ASSERT(sp->slab_refcnt == sp->slab_chunks);
12960Sstevel@tonic-gate }
12970Sstevel@tonic-gate
12980Sstevel@tonic-gate if (cp->cache_flags & UMF_HASH) {
12990Sstevel@tonic-gate /*
13000Sstevel@tonic-gate * Add buffer to allocated-address hash table.
13010Sstevel@tonic-gate */
13020Sstevel@tonic-gate buf = bcp->bc_addr;
13030Sstevel@tonic-gate hash_bucket = UMEM_HASH(cp, buf);
13040Sstevel@tonic-gate bcp->bc_next = *hash_bucket;
13050Sstevel@tonic-gate *hash_bucket = bcp;
13060Sstevel@tonic-gate if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
13070Sstevel@tonic-gate UMEM_AUDIT(umem_transaction_log, cp, bcp);
13080Sstevel@tonic-gate }
13090Sstevel@tonic-gate } else {
13100Sstevel@tonic-gate buf = UMEM_BUF(cp, bcp);
13110Sstevel@tonic-gate }
13120Sstevel@tonic-gate
13130Sstevel@tonic-gate ASSERT(UMEM_SLAB_MEMBER(sp, buf));
13140Sstevel@tonic-gate
13150Sstevel@tonic-gate (void) mutex_unlock(&cp->cache_lock);
13160Sstevel@tonic-gate
13170Sstevel@tonic-gate return (buf);
13180Sstevel@tonic-gate }
13190Sstevel@tonic-gate
13200Sstevel@tonic-gate /*
13210Sstevel@tonic-gate * Free a raw (unconstructed) buffer to cp's slab layer.
13220Sstevel@tonic-gate */
13230Sstevel@tonic-gate static void
13240Sstevel@tonic-gate umem_slab_free(umem_cache_t *cp, void *buf)
13250Sstevel@tonic-gate {
13260Sstevel@tonic-gate umem_slab_t *sp;
13270Sstevel@tonic-gate umem_bufctl_t *bcp, **prev_bcpp;
13280Sstevel@tonic-gate
13290Sstevel@tonic-gate ASSERT(buf != NULL);
13300Sstevel@tonic-gate
13310Sstevel@tonic-gate (void) mutex_lock(&cp->cache_lock);
13320Sstevel@tonic-gate cp->cache_slab_free++;
13330Sstevel@tonic-gate
13340Sstevel@tonic-gate if (cp->cache_flags & UMF_HASH) {
13350Sstevel@tonic-gate /*
13360Sstevel@tonic-gate * Look up buffer in allocated-address hash table.
13370Sstevel@tonic-gate */
13380Sstevel@tonic-gate prev_bcpp = UMEM_HASH(cp, buf);
13390Sstevel@tonic-gate while ((bcp = *prev_bcpp) != NULL) {
13400Sstevel@tonic-gate if (bcp->bc_addr == buf) {
13410Sstevel@tonic-gate *prev_bcpp = bcp->bc_next;
13420Sstevel@tonic-gate sp = bcp->bc_slab;
13430Sstevel@tonic-gate break;
13440Sstevel@tonic-gate }
13450Sstevel@tonic-gate cp->cache_lookup_depth++;
13460Sstevel@tonic-gate prev_bcpp = &bcp->bc_next;
13470Sstevel@tonic-gate }
13480Sstevel@tonic-gate } else {
13490Sstevel@tonic-gate bcp = UMEM_BUFCTL(cp, buf);
13500Sstevel@tonic-gate sp = UMEM_SLAB(cp, buf);
13510Sstevel@tonic-gate }
13520Sstevel@tonic-gate
13530Sstevel@tonic-gate if (bcp == NULL || sp->slab_cache != cp || !UMEM_SLAB_MEMBER(sp, buf)) {
13540Sstevel@tonic-gate (void) mutex_unlock(&cp->cache_lock);
13550Sstevel@tonic-gate umem_error(UMERR_BADADDR, cp, buf);
13560Sstevel@tonic-gate return;
13570Sstevel@tonic-gate }
13580Sstevel@tonic-gate
13590Sstevel@tonic-gate if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
13600Sstevel@tonic-gate if (cp->cache_flags & UMF_CONTENTS)
13610Sstevel@tonic-gate ((umem_bufctl_audit_t *)bcp)->bc_contents =
13620Sstevel@tonic-gate umem_log_enter(umem_content_log, buf,
13630Sstevel@tonic-gate cp->cache_contents);
13640Sstevel@tonic-gate UMEM_AUDIT(umem_transaction_log, cp, bcp);
13650Sstevel@tonic-gate }
13660Sstevel@tonic-gate
13670Sstevel@tonic-gate /*
13680Sstevel@tonic-gate * If this slab isn't currently on the freelist, put it there.
13690Sstevel@tonic-gate */
13700Sstevel@tonic-gate if (sp->slab_head == NULL) {
13710Sstevel@tonic-gate ASSERT(sp->slab_refcnt == sp->slab_chunks);
13720Sstevel@tonic-gate ASSERT(cp->cache_freelist != sp);
13730Sstevel@tonic-gate sp->slab_next->slab_prev = sp->slab_prev;
13740Sstevel@tonic-gate sp->slab_prev->slab_next = sp->slab_next;
13750Sstevel@tonic-gate sp->slab_next = cp->cache_freelist;
13760Sstevel@tonic-gate sp->slab_prev = cp->cache_freelist->slab_prev;
13770Sstevel@tonic-gate sp->slab_next->slab_prev = sp;
13780Sstevel@tonic-gate sp->slab_prev->slab_next = sp;
13790Sstevel@tonic-gate cp->cache_freelist = sp;
13800Sstevel@tonic-gate }
13810Sstevel@tonic-gate
13820Sstevel@tonic-gate bcp->bc_next = sp->slab_head;
13830Sstevel@tonic-gate sp->slab_head = bcp;
13840Sstevel@tonic-gate
13850Sstevel@tonic-gate ASSERT(sp->slab_refcnt >= 1);
13860Sstevel@tonic-gate if (--sp->slab_refcnt == 0) {
13870Sstevel@tonic-gate /*
13880Sstevel@tonic-gate * There are no outstanding allocations from this slab,
13890Sstevel@tonic-gate * so we can reclaim the memory.
13900Sstevel@tonic-gate */
13910Sstevel@tonic-gate sp->slab_next->slab_prev = sp->slab_prev;
13920Sstevel@tonic-gate sp->slab_prev->slab_next = sp->slab_next;
13930Sstevel@tonic-gate if (sp == cp->cache_freelist)
13940Sstevel@tonic-gate cp->cache_freelist = sp->slab_next;
13950Sstevel@tonic-gate cp->cache_slab_destroy++;
13960Sstevel@tonic-gate cp->cache_buftotal -= sp->slab_chunks;
13970Sstevel@tonic-gate (void) mutex_unlock(&cp->cache_lock);
13980Sstevel@tonic-gate umem_slab_destroy(cp, sp);
13990Sstevel@tonic-gate return;
14000Sstevel@tonic-gate }
14010Sstevel@tonic-gate (void) mutex_unlock(&cp->cache_lock);
14020Sstevel@tonic-gate }
14030Sstevel@tonic-gate
14040Sstevel@tonic-gate static int
14050Sstevel@tonic-gate umem_cache_alloc_debug(umem_cache_t *cp, void *buf, int umflag)
14060Sstevel@tonic-gate {
14070Sstevel@tonic-gate umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
14080Sstevel@tonic-gate umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl;
14090Sstevel@tonic-gate uint32_t mtbf;
14100Sstevel@tonic-gate int flags_nfatal;
14110Sstevel@tonic-gate
14120Sstevel@tonic-gate if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) {
14130Sstevel@tonic-gate umem_error(UMERR_BADBUFTAG, cp, buf);
14140Sstevel@tonic-gate return (-1);
14150Sstevel@tonic-gate }
14160Sstevel@tonic-gate
14170Sstevel@tonic-gate btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_ALLOC;
14180Sstevel@tonic-gate
14190Sstevel@tonic-gate if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
14200Sstevel@tonic-gate umem_error(UMERR_BADBUFCTL, cp, buf);
14210Sstevel@tonic-gate return (-1);
14220Sstevel@tonic-gate }
14230Sstevel@tonic-gate
14240Sstevel@tonic-gate btp->bt_redzone = UMEM_REDZONE_PATTERN;
14250Sstevel@tonic-gate
14260Sstevel@tonic-gate if (cp->cache_flags & UMF_DEADBEEF) {
14270Sstevel@tonic-gate if (verify_and_copy_pattern(UMEM_FREE_PATTERN,
14280Sstevel@tonic-gate UMEM_UNINITIALIZED_PATTERN, buf, cp->cache_verify)) {
14290Sstevel@tonic-gate umem_error(UMERR_MODIFIED, cp, buf);
14300Sstevel@tonic-gate return (-1);
14310Sstevel@tonic-gate }
14320Sstevel@tonic-gate }
14330Sstevel@tonic-gate
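/*
 * umem_mtbf and cache_mtbf implement fault injection: roughly one in
 * every 'mtbf' allocations is forced to fail, but never for callers
 * whose flags require that the allocation succeed.
 */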
14340Sstevel@tonic-gate if ((mtbf = umem_mtbf | cp->cache_mtbf) != 0 &&
14350Sstevel@tonic-gate gethrtime() % mtbf == 0 &&
14360Sstevel@tonic-gate (umflag & (UMEM_FATAL_FLAGS)) == 0) {
14370Sstevel@tonic-gate umem_log_event(umem_failure_log, cp, NULL, NULL);
14380Sstevel@tonic-gate } else {
14390Sstevel@tonic-gate mtbf = 0;
14400Sstevel@tonic-gate }
14410Sstevel@tonic-gate
14420Sstevel@tonic-gate /*
14430Sstevel@tonic-gate * We do not pass fatal flags on to the constructor. This prevents
14440Sstevel@tonic-gate * leaking buffers in the event of a subordinate constructor failing.
14450Sstevel@tonic-gate */
14460Sstevel@tonic-gate flags_nfatal = UMEM_DEFAULT;
14470Sstevel@tonic-gate if (mtbf || (cp->cache_constructor != NULL &&
14480Sstevel@tonic-gate cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0)) {
14490Sstevel@tonic-gate atomic_add_64(&cp->cache_alloc_fail, 1);
14500Sstevel@tonic-gate btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
14510Sstevel@tonic-gate copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
14520Sstevel@tonic-gate umem_slab_free(cp, buf);
14530Sstevel@tonic-gate return (-1);
14540Sstevel@tonic-gate }
14550Sstevel@tonic-gate
14560Sstevel@tonic-gate if (cp->cache_flags & UMF_AUDIT) {
14570Sstevel@tonic-gate UMEM_AUDIT(umem_transaction_log, cp, bcp);
14580Sstevel@tonic-gate }
14590Sstevel@tonic-gate
14600Sstevel@tonic-gate return (0);
14610Sstevel@tonic-gate }
14620Sstevel@tonic-gate
14630Sstevel@tonic-gate static int
14640Sstevel@tonic-gate umem_cache_free_debug(umem_cache_t *cp, void *buf)
14650Sstevel@tonic-gate {
14660Sstevel@tonic-gate umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
14670Sstevel@tonic-gate umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl;
14680Sstevel@tonic-gate umem_slab_t *sp;
14690Sstevel@tonic-gate
14700Sstevel@tonic-gate if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_ALLOC)) {
14710Sstevel@tonic-gate if (btp->bt_bxstat == ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) {
14720Sstevel@tonic-gate umem_error(UMERR_DUPFREE, cp, buf);
14730Sstevel@tonic-gate return (-1);
14740Sstevel@tonic-gate }
14750Sstevel@tonic-gate sp = umem_findslab(cp, buf);
14760Sstevel@tonic-gate if (sp == NULL || sp->slab_cache != cp)
14770Sstevel@tonic-gate umem_error(UMERR_BADADDR, cp, buf);
14780Sstevel@tonic-gate else
14790Sstevel@tonic-gate umem_error(UMERR_REDZONE, cp, buf);
14800Sstevel@tonic-gate return (-1);
14810Sstevel@tonic-gate }
14820Sstevel@tonic-gate
14830Sstevel@tonic-gate btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
14840Sstevel@tonic-gate
14850Sstevel@tonic-gate if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
14860Sstevel@tonic-gate umem_error(UMERR_BADBUFCTL, cp, buf);
14870Sstevel@tonic-gate return (-1);
14880Sstevel@tonic-gate }
14890Sstevel@tonic-gate
14900Sstevel@tonic-gate if (btp->bt_redzone != UMEM_REDZONE_PATTERN) {
14910Sstevel@tonic-gate umem_error(UMERR_REDZONE, cp, buf);
14920Sstevel@tonic-gate return (-1);
14930Sstevel@tonic-gate }
14940Sstevel@tonic-gate
14950Sstevel@tonic-gate if (cp->cache_flags & UMF_AUDIT) {
14960Sstevel@tonic-gate if (cp->cache_flags & UMF_CONTENTS)
14970Sstevel@tonic-gate bcp->bc_contents = umem_log_enter(umem_content_log,
14980Sstevel@tonic-gate buf, cp->cache_contents);
14990Sstevel@tonic-gate UMEM_AUDIT(umem_transaction_log, cp, bcp);
15000Sstevel@tonic-gate }
15010Sstevel@tonic-gate
15020Sstevel@tonic-gate if (cp->cache_destructor != NULL)
15030Sstevel@tonic-gate cp->cache_destructor(buf, cp->cache_private);
15040Sstevel@tonic-gate
15050Sstevel@tonic-gate if (cp->cache_flags & UMF_DEADBEEF)
15060Sstevel@tonic-gate copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
15070Sstevel@tonic-gate
15080Sstevel@tonic-gate return (0);
15090Sstevel@tonic-gate }
15100Sstevel@tonic-gate
15110Sstevel@tonic-gate /*
15120Sstevel@tonic-gate * Free each object in magazine mp to cp's slab layer, and free mp itself.
15130Sstevel@tonic-gate */
15140Sstevel@tonic-gate static void
15150Sstevel@tonic-gate umem_magazine_destroy(umem_cache_t *cp, umem_magazine_t *mp, int nrounds)
15160Sstevel@tonic-gate {
15170Sstevel@tonic-gate int round;
15180Sstevel@tonic-gate
15190Sstevel@tonic-gate ASSERT(cp->cache_next == NULL || IN_UPDATE());
15200Sstevel@tonic-gate
15210Sstevel@tonic-gate for (round = 0; round < nrounds; round++) {
15220Sstevel@tonic-gate void *buf = mp->mag_round[round];
15230Sstevel@tonic-gate
15240Sstevel@tonic-gate if ((cp->cache_flags & UMF_DEADBEEF) &&
15250Sstevel@tonic-gate verify_pattern(UMEM_FREE_PATTERN, buf,
15260Sstevel@tonic-gate cp->cache_verify) != NULL) {
15270Sstevel@tonic-gate umem_error(UMERR_MODIFIED, cp, buf);
15280Sstevel@tonic-gate continue;
15290Sstevel@tonic-gate }
15300Sstevel@tonic-gate
15310Sstevel@tonic-gate if (!(cp->cache_flags & UMF_BUFTAG) &&
15320Sstevel@tonic-gate cp->cache_destructor != NULL)
15330Sstevel@tonic-gate cp->cache_destructor(buf, cp->cache_private);
15340Sstevel@tonic-gate
15350Sstevel@tonic-gate umem_slab_free(cp, buf);
15360Sstevel@tonic-gate }
15370Sstevel@tonic-gate ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
15380Sstevel@tonic-gate _umem_cache_free(cp->cache_magtype->mt_cache, mp);
15390Sstevel@tonic-gate }
15400Sstevel@tonic-gate
15410Sstevel@tonic-gate /*
15420Sstevel@tonic-gate * Allocate a magazine from the depot.
15430Sstevel@tonic-gate */
15440Sstevel@tonic-gate static umem_magazine_t *
15450Sstevel@tonic-gate umem_depot_alloc(umem_cache_t *cp, umem_maglist_t *mlp)
15460Sstevel@tonic-gate {
15470Sstevel@tonic-gate umem_magazine_t *mp;
15480Sstevel@tonic-gate
15490Sstevel@tonic-gate /*
15500Sstevel@tonic-gate * If we can't get the depot lock without contention,
15510Sstevel@tonic-gate * update our contention count. We use the depot
15520Sstevel@tonic-gate * contention rate to determine whether we need to
15530Sstevel@tonic-gate * increase the magazine size for better scalability.
15540Sstevel@tonic-gate */
15550Sstevel@tonic-gate if (mutex_trylock(&cp->cache_depot_lock) != 0) {
15560Sstevel@tonic-gate (void) mutex_lock(&cp->cache_depot_lock);
15570Sstevel@tonic-gate cp->cache_depot_contention++;
15580Sstevel@tonic-gate }
15590Sstevel@tonic-gate
15600Sstevel@tonic-gate if ((mp = mlp->ml_list) != NULL) {
15610Sstevel@tonic-gate ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
15620Sstevel@tonic-gate mlp->ml_list = mp->mag_next;
15630Sstevel@tonic-gate if (--mlp->ml_total < mlp->ml_min)
15640Sstevel@tonic-gate mlp->ml_min = mlp->ml_total;
15650Sstevel@tonic-gate mlp->ml_alloc++;
15660Sstevel@tonic-gate }
15670Sstevel@tonic-gate
15680Sstevel@tonic-gate (void) mutex_unlock(&cp->cache_depot_lock);
15690Sstevel@tonic-gate
15700Sstevel@tonic-gate return (mp);
15710Sstevel@tonic-gate }
15720Sstevel@tonic-gate
15730Sstevel@tonic-gate /*
15740Sstevel@tonic-gate * Free a magazine to the depot.
15750Sstevel@tonic-gate */
15760Sstevel@tonic-gate static void
15770Sstevel@tonic-gate umem_depot_free(umem_cache_t *cp, umem_maglist_t *mlp, umem_magazine_t *mp)
15780Sstevel@tonic-gate {
15790Sstevel@tonic-gate (void) mutex_lock(&cp->cache_depot_lock);
15800Sstevel@tonic-gate ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
15810Sstevel@tonic-gate mp->mag_next = mlp->ml_list;
15820Sstevel@tonic-gate mlp->ml_list = mp;
15830Sstevel@tonic-gate mlp->ml_total++;
15840Sstevel@tonic-gate (void) mutex_unlock(&cp->cache_depot_lock);
15850Sstevel@tonic-gate }
15860Sstevel@tonic-gate
15870Sstevel@tonic-gate /*
15880Sstevel@tonic-gate * Update the working set statistics for cp's depot.
15890Sstevel@tonic-gate */
15900Sstevel@tonic-gate static void
15910Sstevel@tonic-gate umem_depot_ws_update(umem_cache_t *cp)
15920Sstevel@tonic-gate {
15930Sstevel@tonic-gate (void) mutex_lock(&cp->cache_depot_lock);
15940Sstevel@tonic-gate cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
15950Sstevel@tonic-gate cp->cache_full.ml_min = cp->cache_full.ml_total;
15960Sstevel@tonic-gate cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
15970Sstevel@tonic-gate cp->cache_empty.ml_min = cp->cache_empty.ml_total;
15980Sstevel@tonic-gate (void) mutex_unlock(&cp->cache_depot_lock);
15990Sstevel@tonic-gate }
16000Sstevel@tonic-gate
16010Sstevel@tonic-gate /*
16020Sstevel@tonic-gate * Reap all magazines that have fallen out of the depot's working set.
16030Sstevel@tonic-gate */
16040Sstevel@tonic-gate static void
16050Sstevel@tonic-gate umem_depot_ws_reap(umem_cache_t *cp)
16060Sstevel@tonic-gate {
16070Sstevel@tonic-gate long reap;
16080Sstevel@tonic-gate umem_magazine_t *mp;
16090Sstevel@tonic-gate
16100Sstevel@tonic-gate ASSERT(cp->cache_next == NULL || IN_REAP());
16110Sstevel@tonic-gate
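/*
 * ml_min is the depot's low-water mark since the last working-set
 * update; magazines below that mark went unused for a full update
 * interval and can be reclaimed safely.
 */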
16120Sstevel@tonic-gate reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
16130Sstevel@tonic-gate while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_full)) != NULL)
16140Sstevel@tonic-gate umem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
16150Sstevel@tonic-gate
16160Sstevel@tonic-gate reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
16170Sstevel@tonic-gate while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_empty)) != NULL)
16180Sstevel@tonic-gate umem_magazine_destroy(cp, mp, 0);
16190Sstevel@tonic-gate }
16200Sstevel@tonic-gate
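/*
 * Load magazine mp (holding 'rounds' rounds) onto this CPU, demoting
 * the currently loaded magazine to the previously-loaded slot.
 */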
16210Sstevel@tonic-gate static void
16220Sstevel@tonic-gate umem_cpu_reload(umem_cpu_cache_t *ccp, umem_magazine_t *mp, int rounds)
16230Sstevel@tonic-gate {
16240Sstevel@tonic-gate ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
16250Sstevel@tonic-gate (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
16260Sstevel@tonic-gate ASSERT(ccp->cc_magsize > 0);
16270Sstevel@tonic-gate
16280Sstevel@tonic-gate ccp->cc_ploaded = ccp->cc_loaded;
16290Sstevel@tonic-gate ccp->cc_prounds = ccp->cc_rounds;
16300Sstevel@tonic-gate ccp->cc_loaded = mp;
16310Sstevel@tonic-gate ccp->cc_rounds = rounds;
16320Sstevel@tonic-gate }
16330Sstevel@tonic-gate
16340Sstevel@tonic-gate /*
16350Sstevel@tonic-gate * Allocate a constructed object from cache cp.
16360Sstevel@tonic-gate */
16370Sstevel@tonic-gate #pragma weak umem_cache_alloc = _umem_cache_alloc
16380Sstevel@tonic-gate void *
16390Sstevel@tonic-gate _umem_cache_alloc(umem_cache_t *cp, int umflag)
16400Sstevel@tonic-gate {
16410Sstevel@tonic-gate umem_cpu_cache_t *ccp;
16420Sstevel@tonic-gate umem_magazine_t *fmp;
16430Sstevel@tonic-gate void *buf;
16440Sstevel@tonic-gate int flags_nfatal;
16450Sstevel@tonic-gate
16460Sstevel@tonic-gate retry:
16470Sstevel@tonic-gate ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
16480Sstevel@tonic-gate (void) mutex_lock(&ccp->cc_lock);
16490Sstevel@tonic-gate for (;;) {
16500Sstevel@tonic-gate /*
16510Sstevel@tonic-gate * If there's an object available in the current CPU's
16520Sstevel@tonic-gate * loaded magazine, just take it and return.
16530Sstevel@tonic-gate */
16540Sstevel@tonic-gate if (ccp->cc_rounds > 0) {
16550Sstevel@tonic-gate buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
16560Sstevel@tonic-gate ccp->cc_alloc++;
16570Sstevel@tonic-gate (void) mutex_unlock(&ccp->cc_lock);
16580Sstevel@tonic-gate if ((ccp->cc_flags & UMF_BUFTAG) &&
16590Sstevel@tonic-gate umem_cache_alloc_debug(cp, buf, umflag) == -1) {
16600Sstevel@tonic-gate if (umem_alloc_retry(cp, umflag)) {
16610Sstevel@tonic-gate goto retry;
16620Sstevel@tonic-gate }
16630Sstevel@tonic-gate
16640Sstevel@tonic-gate return (NULL);
16650Sstevel@tonic-gate }
16660Sstevel@tonic-gate return (buf);
16670Sstevel@tonic-gate }
16680Sstevel@tonic-gate
16690Sstevel@tonic-gate /*
16700Sstevel@tonic-gate * The loaded magazine is empty. If the previously loaded
16710Sstevel@tonic-gate * magazine was full, exchange them and try again.
16720Sstevel@tonic-gate */
16730Sstevel@tonic-gate if (ccp->cc_prounds > 0) {
16740Sstevel@tonic-gate umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
16750Sstevel@tonic-gate continue;
16760Sstevel@tonic-gate }
16770Sstevel@tonic-gate
16780Sstevel@tonic-gate /*
16790Sstevel@tonic-gate * If the magazine layer is disabled, break out now.
16800Sstevel@tonic-gate */
16810Sstevel@tonic-gate if (ccp->cc_magsize == 0)
16820Sstevel@tonic-gate break;
16830Sstevel@tonic-gate
16840Sstevel@tonic-gate /*
16850Sstevel@tonic-gate * Try to get a full magazine from the depot.
16860Sstevel@tonic-gate */
16870Sstevel@tonic-gate fmp = umem_depot_alloc(cp, &cp->cache_full);
16880Sstevel@tonic-gate if (fmp != NULL) {
16890Sstevel@tonic-gate if (ccp->cc_ploaded != NULL)
16900Sstevel@tonic-gate umem_depot_free(cp, &cp->cache_empty,
16910Sstevel@tonic-gate ccp->cc_ploaded);
16920Sstevel@tonic-gate umem_cpu_reload(ccp, fmp, ccp->cc_magsize);
16930Sstevel@tonic-gate continue;
16940Sstevel@tonic-gate }
16950Sstevel@tonic-gate
16960Sstevel@tonic-gate /*
16970Sstevel@tonic-gate * There are no full magazines in the depot,
16980Sstevel@tonic-gate * so fall through to the slab layer.
16990Sstevel@tonic-gate */
17000Sstevel@tonic-gate break;
17010Sstevel@tonic-gate }
17020Sstevel@tonic-gate (void) mutex_unlock(&ccp->cc_lock);
17030Sstevel@tonic-gate
17040Sstevel@tonic-gate /*
17050Sstevel@tonic-gate * We couldn't allocate a constructed object from the magazine layer,
17060Sstevel@tonic-gate * so get a raw buffer from the slab layer and apply its constructor.
17070Sstevel@tonic-gate */
17080Sstevel@tonic-gate buf = umem_slab_alloc(cp, umflag);
17090Sstevel@tonic-gate
17100Sstevel@tonic-gate if (buf == NULL) {
17110Sstevel@tonic-gate if (cp == &umem_null_cache)
17120Sstevel@tonic-gate return (NULL);
17130Sstevel@tonic-gate if (umem_alloc_retry(cp, umflag)) {
17140Sstevel@tonic-gate goto retry;
17150Sstevel@tonic-gate }
17160Sstevel@tonic-gate
17170Sstevel@tonic-gate return (NULL);
17180Sstevel@tonic-gate }
17190Sstevel@tonic-gate
17200Sstevel@tonic-gate if (cp->cache_flags & UMF_BUFTAG) {
17210Sstevel@tonic-gate /*
17220Sstevel@tonic-gate * Let umem_cache_alloc_debug() apply the constructor for us.
17230Sstevel@tonic-gate */
17240Sstevel@tonic-gate if (umem_cache_alloc_debug(cp, buf, umflag) == -1) {
17250Sstevel@tonic-gate if (umem_alloc_retry(cp, umflag)) {
17260Sstevel@tonic-gate goto retry;
17270Sstevel@tonic-gate }
17280Sstevel@tonic-gate return (NULL);
17290Sstevel@tonic-gate }
17300Sstevel@tonic-gate return (buf);
17310Sstevel@tonic-gate }
17320Sstevel@tonic-gate
17330Sstevel@tonic-gate /*
17340Sstevel@tonic-gate * We do not pass fatal flags on to the constructor. This prevents
17350Sstevel@tonic-gate * leaking buffers in the event of a subordinate constructor failing.
17360Sstevel@tonic-gate */
17370Sstevel@tonic-gate flags_nfatal = UMEM_DEFAULT;
17380Sstevel@tonic-gate if (cp->cache_constructor != NULL &&
17390Sstevel@tonic-gate cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0) {
17400Sstevel@tonic-gate atomic_add_64(&cp->cache_alloc_fail, 1);
17410Sstevel@tonic-gate umem_slab_free(cp, buf);
17420Sstevel@tonic-gate
17430Sstevel@tonic-gate if (umem_alloc_retry(cp, umflag)) {
17440Sstevel@tonic-gate goto retry;
17450Sstevel@tonic-gate }
17460Sstevel@tonic-gate return (NULL);
17470Sstevel@tonic-gate }
17480Sstevel@tonic-gate
17490Sstevel@tonic-gate return (buf);
17500Sstevel@tonic-gate }
17510Sstevel@tonic-gate
17520Sstevel@tonic-gate /*
17530Sstevel@tonic-gate * Free a constructed object to cache cp.
17540Sstevel@tonic-gate */
17550Sstevel@tonic-gate #pragma weak umem_cache_free = _umem_cache_free
17560Sstevel@tonic-gate void
17570Sstevel@tonic-gate _umem_cache_free(umem_cache_t *cp, void *buf)
17580Sstevel@tonic-gate {
17590Sstevel@tonic-gate umem_cpu_cache_t *ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
17600Sstevel@tonic-gate umem_magazine_t *emp;
17610Sstevel@tonic-gate umem_magtype_t *mtp;
17620Sstevel@tonic-gate
17630Sstevel@tonic-gate if (ccp->cc_flags & UMF_BUFTAG)
17640Sstevel@tonic-gate if (umem_cache_free_debug(cp, buf) == -1)
17650Sstevel@tonic-gate return;
17660Sstevel@tonic-gate
17670Sstevel@tonic-gate (void) mutex_lock(&ccp->cc_lock);
17680Sstevel@tonic-gate for (;;) {
17690Sstevel@tonic-gate /*
17700Sstevel@tonic-gate * If there's a slot available in the current CPU's
17710Sstevel@tonic-gate * loaded magazine, just put the object there and return.
17720Sstevel@tonic-gate */
17730Sstevel@tonic-gate if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
17740Sstevel@tonic-gate ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
17750Sstevel@tonic-gate ccp->cc_free++;
17760Sstevel@tonic-gate (void) mutex_unlock(&ccp->cc_lock);
17770Sstevel@tonic-gate return;
17780Sstevel@tonic-gate }
17790Sstevel@tonic-gate
17800Sstevel@tonic-gate /*
17810Sstevel@tonic-gate * The loaded magazine is full. If the previously loaded
17820Sstevel@tonic-gate * magazine was empty, exchange them and try again.
17830Sstevel@tonic-gate */
17840Sstevel@tonic-gate if (ccp->cc_prounds == 0) {
17850Sstevel@tonic-gate umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
17860Sstevel@tonic-gate continue;
17870Sstevel@tonic-gate }
17880Sstevel@tonic-gate
17890Sstevel@tonic-gate /*
17900Sstevel@tonic-gate * If the magazine layer is disabled, break out now.
17910Sstevel@tonic-gate */
17920Sstevel@tonic-gate if (ccp->cc_magsize == 0)
17930Sstevel@tonic-gate break;
17940Sstevel@tonic-gate
17950Sstevel@tonic-gate /*
17960Sstevel@tonic-gate * Try to get an empty magazine from the depot.
17970Sstevel@tonic-gate */
17980Sstevel@tonic-gate emp = umem_depot_alloc(cp, &cp->cache_empty);
17990Sstevel@tonic-gate if (emp != NULL) {
18000Sstevel@tonic-gate if (ccp->cc_ploaded != NULL)
18010Sstevel@tonic-gate umem_depot_free(cp, &cp->cache_full,
18020Sstevel@tonic-gate ccp->cc_ploaded);
18030Sstevel@tonic-gate umem_cpu_reload(ccp, emp, 0);
18040Sstevel@tonic-gate continue;
18050Sstevel@tonic-gate }
18060Sstevel@tonic-gate
18070Sstevel@tonic-gate /*
18080Sstevel@tonic-gate * There are no empty magazines in the depot,
18090Sstevel@tonic-gate * so try to allocate a new one. We must drop all locks
18100Sstevel@tonic-gate * across umem_cache_alloc() because lower layers may
18110Sstevel@tonic-gate * attempt to allocate from this cache.
18120Sstevel@tonic-gate */
18130Sstevel@tonic-gate mtp = cp->cache_magtype;
18140Sstevel@tonic-gate (void) mutex_unlock(&ccp->cc_lock);
18150Sstevel@tonic-gate emp = _umem_cache_alloc(mtp->mt_cache, UMEM_DEFAULT);
18160Sstevel@tonic-gate (void) mutex_lock(&ccp->cc_lock);
18170Sstevel@tonic-gate
18180Sstevel@tonic-gate if (emp != NULL) {
18190Sstevel@tonic-gate /*
18200Sstevel@tonic-gate * We successfully allocated an empty magazine.
18210Sstevel@tonic-gate * However, we had to drop ccp->cc_lock to do it,
18220Sstevel@tonic-gate * so the cache's magazine size may have changed.
18230Sstevel@tonic-gate * If so, free the magazine and try again.
18240Sstevel@tonic-gate */
18250Sstevel@tonic-gate if (ccp->cc_magsize != mtp->mt_magsize) {
18260Sstevel@tonic-gate (void) mutex_unlock(&ccp->cc_lock);
18270Sstevel@tonic-gate _umem_cache_free(mtp->mt_cache, emp);
18280Sstevel@tonic-gate (void) mutex_lock(&ccp->cc_lock);
18290Sstevel@tonic-gate continue;
18300Sstevel@tonic-gate }
18310Sstevel@tonic-gate
18320Sstevel@tonic-gate /*
18330Sstevel@tonic-gate * We got a magazine of the right size. Add it to
18340Sstevel@tonic-gate * the depot and try the whole dance again.
18350Sstevel@tonic-gate */
18360Sstevel@tonic-gate umem_depot_free(cp, &cp->cache_empty, emp);
18370Sstevel@tonic-gate continue;
18380Sstevel@tonic-gate }
18390Sstevel@tonic-gate
18400Sstevel@tonic-gate /*
18410Sstevel@tonic-gate * We couldn't allocate an empty magazine,
18420Sstevel@tonic-gate * so fall through to the slab layer.
18430Sstevel@tonic-gate */
18440Sstevel@tonic-gate break;
18450Sstevel@tonic-gate }
18460Sstevel@tonic-gate (void) mutex_unlock(&ccp->cc_lock);
18470Sstevel@tonic-gate
18480Sstevel@tonic-gate /*
18490Sstevel@tonic-gate * We couldn't free our constructed object to the magazine layer,
18500Sstevel@tonic-gate * so apply its destructor and free it to the slab layer.
18510Sstevel@tonic-gate * Note that if UMF_BUFTAG is in effect, umem_cache_free_debug()
18520Sstevel@tonic-gate * will have already applied the destructor.
18530Sstevel@tonic-gate */
18540Sstevel@tonic-gate if (!(cp->cache_flags & UMF_BUFTAG) && cp->cache_destructor != NULL)
18550Sstevel@tonic-gate cp->cache_destructor(buf, cp->cache_private);
18560Sstevel@tonic-gate
18570Sstevel@tonic-gate umem_slab_free(cp, buf);
18580Sstevel@tonic-gate }
18590Sstevel@tonic-gate
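/*
 * Illustrative use of the cache interfaces above (a sketch only; my_obj_t,
 * my_ctor, and my_dtor are placeholders and error handling is elided --
 * see umem_cache_create() for the authoritative argument list):
 *
 *	umem_cache_t *cp = umem_cache_create("my_objs", sizeof (my_obj_t),
 *	    0, my_ctor, my_dtor, NULL, NULL, NULL, 0);
 *	my_obj_t *op = umem_cache_alloc(cp, UMEM_DEFAULT);
 *	if (op != NULL) {
 *		... use op ...
 *		umem_cache_free(cp, op);
 *	}
 */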
18600Sstevel@tonic-gate #pragma weak umem_zalloc = _umem_zalloc
18610Sstevel@tonic-gate void *
18620Sstevel@tonic-gate _umem_zalloc(size_t size, int umflag)
18630Sstevel@tonic-gate {
18640Sstevel@tonic-gate size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
18650Sstevel@tonic-gate void *buf;
18660Sstevel@tonic-gate
18670Sstevel@tonic-gate retry:
18680Sstevel@tonic-gate if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
18690Sstevel@tonic-gate umem_cache_t *cp = umem_alloc_table[index];
18700Sstevel@tonic-gate buf = _umem_cache_alloc(cp, umflag);
18710Sstevel@tonic-gate if (buf != NULL) {
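/*
 * For debug (BUFTAG) caches, record the requested size in the
 * buftag and plant a redzone byte just past it so that
 * _umem_free() can verify both on release.
 */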
18720Sstevel@tonic-gate if (cp->cache_flags & UMF_BUFTAG) {
18730Sstevel@tonic-gate umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
18740Sstevel@tonic-gate ((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE;
18750Sstevel@tonic-gate ((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size);
18760Sstevel@tonic-gate }
18770Sstevel@tonic-gate bzero(buf, size);
18780Sstevel@tonic-gate } else if (umem_alloc_retry(cp, umflag))
18790Sstevel@tonic-gate goto retry;
18800Sstevel@tonic-gate } else {
18810Sstevel@tonic-gate buf = _umem_alloc(size, umflag); /* handles failure */
18820Sstevel@tonic-gate if (buf != NULL)
18830Sstevel@tonic-gate bzero(buf, size);
18840Sstevel@tonic-gate }
18850Sstevel@tonic-gate return (buf);
18860Sstevel@tonic-gate }
18870Sstevel@tonic-gate
18880Sstevel@tonic-gate #pragma weak umem_alloc = _umem_alloc
18890Sstevel@tonic-gate void *
18900Sstevel@tonic-gate _umem_alloc(size_t size, int umflag)
18910Sstevel@tonic-gate {
18920Sstevel@tonic-gate size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
18930Sstevel@tonic-gate void *buf;
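/*
 * Requests up to UMEM_MAXBUF are served by the fixed-size caches in
 * umem_alloc_table; the index selects the cache for the smallest
 * multiple of UMEM_ALIGN that covers the request.  Anything larger
 * falls through to the oversize arena below.
 */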
18940Sstevel@tonic-gate umem_alloc_retry:
18950Sstevel@tonic-gate if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
18960Sstevel@tonic-gate umem_cache_t *cp = umem_alloc_table[index];
18970Sstevel@tonic-gate buf = _umem_cache_alloc(cp, umflag);
18980Sstevel@tonic-gate if ((cp->cache_flags & UMF_BUFTAG) && buf != NULL) {
18990Sstevel@tonic-gate umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
19000Sstevel@tonic-gate ((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE;
19010Sstevel@tonic-gate ((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size);
19020Sstevel@tonic-gate }
19030Sstevel@tonic-gate if (buf == NULL && umem_alloc_retry(cp, umflag))
19040Sstevel@tonic-gate goto umem_alloc_retry;
19050Sstevel@tonic-gate return (buf);
19060Sstevel@tonic-gate }
19070Sstevel@tonic-gate if (size == 0)
19080Sstevel@tonic-gate return (NULL);
19090Sstevel@tonic-gate if (umem_oversize_arena == NULL) {
19100Sstevel@tonic-gate if (umem_init())
19110Sstevel@tonic-gate ASSERT(umem_oversize_arena != NULL);
19120Sstevel@tonic-gate else
19130Sstevel@tonic-gate return (NULL);
19140Sstevel@tonic-gate }
19150Sstevel@tonic-gate buf = vmem_alloc(umem_oversize_arena, size, UMEM_VMFLAGS(umflag));
19160Sstevel@tonic-gate if (buf == NULL) {
19170Sstevel@tonic-gate umem_log_event(umem_failure_log, NULL, NULL, (void *)size);
19180Sstevel@tonic-gate if (umem_alloc_retry(NULL, umflag))
19190Sstevel@tonic-gate goto umem_alloc_retry;
19200Sstevel@tonic-gate }
19210Sstevel@tonic-gate return (buf);
19220Sstevel@tonic-gate }
19230Sstevel@tonic-gate
19240Sstevel@tonic-gate #pragma weak umem_alloc_align = _umem_alloc_align
19250Sstevel@tonic-gate void *
19260Sstevel@tonic-gate _umem_alloc_align(size_t size, size_t align, int umflag)
19270Sstevel@tonic-gate {
19280Sstevel@tonic-gate void *buf;
19290Sstevel@tonic-gate
19300Sstevel@tonic-gate if (size == 0)
19310Sstevel@tonic-gate return (NULL);
19320Sstevel@tonic-gate if ((align & (align - 1)) != 0)
19330Sstevel@tonic-gate return (NULL);
19340Sstevel@tonic-gate if (align < UMEM_ALIGN)
19350Sstevel@tonic-gate align = UMEM_ALIGN;
19360Sstevel@tonic-gate
19370Sstevel@tonic-gate umem_alloc_align_retry:
19380Sstevel@tonic-gate if (umem_memalign_arena == NULL) {
19390Sstevel@tonic-gate if (umem_init())
19400Sstevel@tonic-gate ASSERT(umem_memalign_arena != NULL);
19410Sstevel@tonic-gate else
19420Sstevel@tonic-gate return (NULL);
19430Sstevel@tonic-gate }
19440Sstevel@tonic-gate buf = vmem_xalloc(umem_memalign_arena, size, align, 0, 0, NULL, NULL,
19450Sstevel@tonic-gate UMEM_VMFLAGS(umflag));
19460Sstevel@tonic-gate if (buf == NULL) {
19470Sstevel@tonic-gate umem_log_event(umem_failure_log, NULL, NULL, (void *)size);
19480Sstevel@tonic-gate if (umem_alloc_retry(NULL, umflag))
19490Sstevel@tonic-gate goto umem_alloc_align_retry;
19500Sstevel@tonic-gate }
19510Sstevel@tonic-gate return (buf);
19520Sstevel@tonic-gate }
19530Sstevel@tonic-gate
19540Sstevel@tonic-gate #pragma weak umem_free = _umem_free
19550Sstevel@tonic-gate void
19560Sstevel@tonic-gate _umem_free(void *buf, size_t size)
19570Sstevel@tonic-gate {
19580Sstevel@tonic-gate size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
19590Sstevel@tonic-gate
19600Sstevel@tonic-gate if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
19610Sstevel@tonic-gate umem_cache_t *cp = umem_alloc_table[index];
19620Sstevel@tonic-gate if (cp->cache_flags & UMF_BUFTAG) {
19630Sstevel@tonic-gate umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
19640Sstevel@tonic-gate uint32_t *ip = (uint32_t *)btp;
19650Sstevel@tonic-gate if (ip[1] != UMEM_SIZE_ENCODE(size)) {
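/*
 * A buffer already filled with the free pattern was almost
 * certainly freed twice rather than freed with the wrong size.
 */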
19660Sstevel@tonic-gate if (*(uint64_t *)buf == UMEM_FREE_PATTERN) {
19670Sstevel@tonic-gate umem_error(UMERR_DUPFREE, cp, buf);
19680Sstevel@tonic-gate return;
19690Sstevel@tonic-gate }
19700Sstevel@tonic-gate if (UMEM_SIZE_VALID(ip[1])) {
19710Sstevel@tonic-gate ip[0] = UMEM_SIZE_ENCODE(size);
19720Sstevel@tonic-gate umem_error(UMERR_BADSIZE, cp, buf);
19730Sstevel@tonic-gate } else {
19740Sstevel@tonic-gate umem_error(UMERR_REDZONE, cp, buf);
19750Sstevel@tonic-gate }
19760Sstevel@tonic-gate return;
19770Sstevel@tonic-gate }
19780Sstevel@tonic-gate if (((uint8_t *)buf)[size] != UMEM_REDZONE_BYTE) {
19790Sstevel@tonic-gate umem_error(UMERR_REDZONE, cp, buf);
19800Sstevel@tonic-gate return;
19810Sstevel@tonic-gate }
19820Sstevel@tonic-gate btp->bt_redzone = UMEM_REDZONE_PATTERN;
19830Sstevel@tonic-gate }
19840Sstevel@tonic-gate _umem_cache_free(cp, buf);
19850Sstevel@tonic-gate } else {
19860Sstevel@tonic-gate if (buf == NULL && size == 0)
19870Sstevel@tonic-gate return;
19880Sstevel@tonic-gate vmem_free(umem_oversize_arena, buf, size);
19890Sstevel@tonic-gate }
19900Sstevel@tonic-gate }
19910Sstevel@tonic-gate
19920Sstevel@tonic-gate #pragma weak umem_free_align = _umem_free_align
19930Sstevel@tonic-gate void
19940Sstevel@tonic-gate _umem_free_align(void *buf, size_t size)
19950Sstevel@tonic-gate {
19960Sstevel@tonic-gate if (buf == NULL && size == 0)
19970Sstevel@tonic-gate return;
19980Sstevel@tonic-gate vmem_xfree(umem_memalign_arena, buf, size);
19990Sstevel@tonic-gate }
20000Sstevel@tonic-gate
20010Sstevel@tonic-gate static void *
20020Sstevel@tonic-gate umem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
20030Sstevel@tonic-gate {
20040Sstevel@tonic-gate size_t realsize = size + vmp->vm_quantum;
20050Sstevel@tonic-gate
20060Sstevel@tonic-gate /*
20070Sstevel@tonic-gate * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
20080Sstevel@tonic-gate * vm_quantum will cause integer wraparound. Check for this, and
20090Sstevel@tonic-gate * blow off the firewall page in this case. Note that such a
20100Sstevel@tonic-gate * giant allocation (the entire address space) can never be
20110Sstevel@tonic-gate * satisfied, so it will either fail immediately (VM_NOSLEEP)
20120Sstevel@tonic-gate * or sleep forever (VM_SLEEP). Thus, there is no need for a
20130Sstevel@tonic-gate * corresponding check in umem_firewall_va_free().
20140Sstevel@tonic-gate */
20150Sstevel@tonic-gate if (realsize < size)
20160Sstevel@tonic-gate realsize = size;
20170Sstevel@tonic-gate
20180Sstevel@tonic-gate return (vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT));
20190Sstevel@tonic-gate }
20200Sstevel@tonic-gate
20210Sstevel@tonic-gate static void
20220Sstevel@tonic-gate umem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
20230Sstevel@tonic-gate {
20240Sstevel@tonic-gate vmem_free(vmp, addr, size + vmp->vm_quantum);
20250Sstevel@tonic-gate }
20260Sstevel@tonic-gate
20270Sstevel@tonic-gate /*
20280Sstevel@tonic-gate * Reclaim all unused memory from a cache.
20290Sstevel@tonic-gate */
20300Sstevel@tonic-gate static void
20310Sstevel@tonic-gate umem_cache_reap(umem_cache_t *cp)
20320Sstevel@tonic-gate {
20330Sstevel@tonic-gate /*
20340Sstevel@tonic-gate * Ask the cache's owner to free some memory if possible.
20350Sstevel@tonic-gate * The idea is to handle things like the inode cache, which
20360Sstevel@tonic-gate * typically sits on a bunch of memory that it doesn't truly
20370Sstevel@tonic-gate * *need*. Reclaim policy is entirely up to the owner; this
20380Sstevel@tonic-gate * callback is just an advisory plea for help.
20390Sstevel@tonic-gate */
20400Sstevel@tonic-gate if (cp->cache_reclaim != NULL)
20410Sstevel@tonic-gate cp->cache_reclaim(cp->cache_private);
20420Sstevel@tonic-gate
20430Sstevel@tonic-gate umem_depot_ws_reap(cp);
20440Sstevel@tonic-gate }
20450Sstevel@tonic-gate
20460Sstevel@tonic-gate /*
20470Sstevel@tonic-gate * Purge all magazines from a cache and set its magazine limit to zero.
20480Sstevel@tonic-gate * All calls are serialized by being done by the update thread, except for
20490Sstevel@tonic-gate * the final call from umem_cache_destroy().
20500Sstevel@tonic-gate */
20510Sstevel@tonic-gate static void
20520Sstevel@tonic-gate umem_cache_magazine_purge(umem_cache_t *cp)
20530Sstevel@tonic-gate {
20540Sstevel@tonic-gate umem_cpu_cache_t *ccp;
20550Sstevel@tonic-gate umem_magazine_t *mp, *pmp;
20560Sstevel@tonic-gate int rounds, prounds, cpu_seqid;
20570Sstevel@tonic-gate
20580Sstevel@tonic-gate ASSERT(cp->cache_next == NULL || IN_UPDATE());
20590Sstevel@tonic-gate
20600Sstevel@tonic-gate for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
20610Sstevel@tonic-gate ccp = &cp->cache_cpu[cpu_seqid];
20620Sstevel@tonic-gate
20630Sstevel@tonic-gate (void) mutex_lock(&ccp->cc_lock);
20640Sstevel@tonic-gate mp = ccp->cc_loaded;
20650Sstevel@tonic-gate pmp = ccp->cc_ploaded;
20660Sstevel@tonic-gate rounds = ccp->cc_rounds;
20670Sstevel@tonic-gate prounds = ccp->cc_prounds;
20680Sstevel@tonic-gate ccp->cc_loaded = NULL;
20690Sstevel@tonic-gate ccp->cc_ploaded = NULL;
20700Sstevel@tonic-gate ccp->cc_rounds = -1;
20710Sstevel@tonic-gate ccp->cc_prounds = -1;
20720Sstevel@tonic-gate ccp->cc_magsize = 0;
20730Sstevel@tonic-gate (void) mutex_unlock(&ccp->cc_lock);
20740Sstevel@tonic-gate
20750Sstevel@tonic-gate if (mp)
20760Sstevel@tonic-gate umem_magazine_destroy(cp, mp, rounds);
20770Sstevel@tonic-gate if (pmp)
20780Sstevel@tonic-gate umem_magazine_destroy(cp, pmp, prounds);
20790Sstevel@tonic-gate }
20800Sstevel@tonic-gate
20810Sstevel@tonic-gate /*
20820Sstevel@tonic-gate * Updating the working set statistics twice in a row has the
20830Sstevel@tonic-gate * effect of setting the working set size to zero, so everything
20840Sstevel@tonic-gate * is eligible for reaping.
20850Sstevel@tonic-gate */
20860Sstevel@tonic-gate umem_depot_ws_update(cp);
20870Sstevel@tonic-gate umem_depot_ws_update(cp);
20880Sstevel@tonic-gate
20890Sstevel@tonic-gate umem_depot_ws_reap(cp);
20900Sstevel@tonic-gate }
20910Sstevel@tonic-gate
20920Sstevel@tonic-gate /*
20930Sstevel@tonic-gate * Enable per-cpu magazines on a cache.
20940Sstevel@tonic-gate */
20950Sstevel@tonic-gate static void
20960Sstevel@tonic-gate umem_cache_magazine_enable(umem_cache_t *cp)
20970Sstevel@tonic-gate {
20980Sstevel@tonic-gate int cpu_seqid;
20990Sstevel@tonic-gate
21000Sstevel@tonic-gate if (cp->cache_flags & UMF_NOMAGAZINE)
21010Sstevel@tonic-gate return;
21020Sstevel@tonic-gate
21030Sstevel@tonic-gate for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
21040Sstevel@tonic-gate umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
21050Sstevel@tonic-gate (void) mutex_lock(&ccp->cc_lock);
21060Sstevel@tonic-gate ccp->cc_magsize = cp->cache_magtype->mt_magsize;
21070Sstevel@tonic-gate (void) mutex_unlock(&ccp->cc_lock);
21080Sstevel@tonic-gate }
21100Sstevel@tonic-gate }
21110Sstevel@tonic-gate
21120Sstevel@tonic-gate /*
21130Sstevel@tonic-gate * Recompute a cache's magazine size. The trade-off is that larger magazines
21140Sstevel@tonic-gate * provide a higher transfer rate with the depot, while smaller magazines
21150Sstevel@tonic-gate * reduce memory consumption. Magazine resizing is an expensive operation;
21160Sstevel@tonic-gate * it should not be done frequently.
21170Sstevel@tonic-gate *
21180Sstevel@tonic-gate * Changes to the magazine size are serialized by only having one thread
21190Sstevel@tonic-gate * doing updates (the update thread).
21200Sstevel@tonic-gate *
21210Sstevel@tonic-gate * Note: at present this only grows the magazine size. It might be useful
21220Sstevel@tonic-gate * to allow shrinkage too.
21230Sstevel@tonic-gate */
21240Sstevel@tonic-gate static void
21250Sstevel@tonic-gate umem_cache_magazine_resize(umem_cache_t *cp)
21260Sstevel@tonic-gate {
21270Sstevel@tonic-gate umem_magtype_t *mtp = cp->cache_magtype;
21280Sstevel@tonic-gate
21290Sstevel@tonic-gate ASSERT(IN_UPDATE());
21300Sstevel@tonic-gate
21310Sstevel@tonic-gate if (cp->cache_chunksize < mtp->mt_maxbuf) {
21320Sstevel@tonic-gate umem_cache_magazine_purge(cp);
21330Sstevel@tonic-gate (void) mutex_lock(&cp->cache_depot_lock);
21340Sstevel@tonic-gate cp->cache_magtype = ++mtp;
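/*
 * Bias the previous contention count far above the current one so
 * that the delta computed in umem_cache_update() stays negative and
 * the magazine size is not grown again immediately.
 */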
21350Sstevel@tonic-gate cp->cache_depot_contention_prev =
21360Sstevel@tonic-gate cp->cache_depot_contention + INT_MAX;
21370Sstevel@tonic-gate (void) mutex_unlock(&cp->cache_depot_lock);
21380Sstevel@tonic-gate umem_cache_magazine_enable(cp);
21390Sstevel@tonic-gate }
21400Sstevel@tonic-gate }
21410Sstevel@tonic-gate
21420Sstevel@tonic-gate /*
21430Sstevel@tonic-gate * Rescale a cache's hash table, so that the table size roughly matches the
21440Sstevel@tonic-gate * number of buffers in the cache. We want the average lookup time to be extremely small.
21450Sstevel@tonic-gate */
21460Sstevel@tonic-gate static void
21470Sstevel@tonic-gate umem_hash_rescale(umem_cache_t *cp)
21480Sstevel@tonic-gate {
21490Sstevel@tonic-gate umem_bufctl_t **old_table, **new_table, *bcp;
21500Sstevel@tonic-gate size_t old_size, new_size, h;
21510Sstevel@tonic-gate
21520Sstevel@tonic-gate ASSERT(IN_UPDATE());
21530Sstevel@tonic-gate
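/*
 * Choose a power of two comparable to the current buffer count n:
 * 1 << (highbit(3n + 4) - 2) lands between roughly 0.75n and 1.5n,
 * so the average hash chain stays around one entry long.
 */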
21540Sstevel@tonic-gate new_size = MAX(UMEM_HASH_INITIAL,
21550Sstevel@tonic-gate 1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
21560Sstevel@tonic-gate old_size = cp->cache_hash_mask + 1;
21570Sstevel@tonic-gate
21580Sstevel@tonic-gate if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
21590Sstevel@tonic-gate return;
21600Sstevel@tonic-gate
21610Sstevel@tonic-gate new_table = vmem_alloc(umem_hash_arena, new_size * sizeof (void *),
21620Sstevel@tonic-gate VM_NOSLEEP);
21630Sstevel@tonic-gate if (new_table == NULL)
21640Sstevel@tonic-gate return;
21650Sstevel@tonic-gate bzero(new_table, new_size * sizeof (void *));
21660Sstevel@tonic-gate
21670Sstevel@tonic-gate (void) mutex_lock(&cp->cache_lock);
21680Sstevel@tonic-gate
21690Sstevel@tonic-gate old_size = cp->cache_hash_mask + 1;
21700Sstevel@tonic-gate old_table = cp->cache_hash_table;
21710Sstevel@tonic-gate
21720Sstevel@tonic-gate cp->cache_hash_mask = new_size - 1;
21730Sstevel@tonic-gate cp->cache_hash_table = new_table;
21740Sstevel@tonic-gate cp->cache_rescale++;
21750Sstevel@tonic-gate
21760Sstevel@tonic-gate for (h = 0; h < old_size; h++) {
21770Sstevel@tonic-gate bcp = old_table[h];
21780Sstevel@tonic-gate while (bcp != NULL) {
21790Sstevel@tonic-gate void *addr = bcp->bc_addr;
21800Sstevel@tonic-gate umem_bufctl_t *next_bcp = bcp->bc_next;
21810Sstevel@tonic-gate umem_bufctl_t **hash_bucket = UMEM_HASH(cp, addr);
21820Sstevel@tonic-gate bcp->bc_next = *hash_bucket;
21830Sstevel@tonic-gate *hash_bucket = bcp;
21840Sstevel@tonic-gate bcp = next_bcp;
21850Sstevel@tonic-gate }
21860Sstevel@tonic-gate }
21870Sstevel@tonic-gate
21880Sstevel@tonic-gate (void) mutex_unlock(&cp->cache_lock);
21890Sstevel@tonic-gate
21900Sstevel@tonic-gate vmem_free(umem_hash_arena, old_table, old_size * sizeof (void *));
21910Sstevel@tonic-gate }
21920Sstevel@tonic-gate
21930Sstevel@tonic-gate /*
21940Sstevel@tonic-gate * Perform periodic maintenance on a cache: hash rescaling,
21950Sstevel@tonic-gate * depot working-set update, and magazine resizing.
21960Sstevel@tonic-gate */
21970Sstevel@tonic-gate void
21980Sstevel@tonic-gate umem_cache_update(umem_cache_t *cp)
21990Sstevel@tonic-gate {
22000Sstevel@tonic-gate int update_flags = 0;
22010Sstevel@tonic-gate
22020Sstevel@tonic-gate ASSERT(MUTEX_HELD(&umem_cache_lock));
22030Sstevel@tonic-gate
22040Sstevel@tonic-gate /*
22050Sstevel@tonic-gate * If the cache has become much larger or smaller than its hash table,
22060Sstevel@tonic-gate * fire off a request to rescale the hash table.
22070Sstevel@tonic-gate */
22080Sstevel@tonic-gate (void) mutex_lock(&cp->cache_lock);
22090Sstevel@tonic-gate
22100Sstevel@tonic-gate if ((cp->cache_flags & UMF_HASH) &&
22110Sstevel@tonic-gate (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
22120Sstevel@tonic-gate (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
22130Sstevel@tonic-gate cp->cache_hash_mask > UMEM_HASH_INITIAL)))
22140Sstevel@tonic-gate update_flags |= UMU_HASH_RESCALE;
22150Sstevel@tonic-gate
22160Sstevel@tonic-gate (void) mutex_unlock(&cp->cache_lock);
22170Sstevel@tonic-gate
22180Sstevel@tonic-gate /*
22190Sstevel@tonic-gate * Update the depot working set statistics.
22200Sstevel@tonic-gate */
22210Sstevel@tonic-gate umem_depot_ws_update(cp);
22220Sstevel@tonic-gate
22230Sstevel@tonic-gate /*
22240Sstevel@tonic-gate * If there's a lot of contention in the depot,
22250Sstevel@tonic-gate * increase the magazine size.
22260Sstevel@tonic-gate */
22270Sstevel@tonic-gate (void) mutex_lock(&cp->cache_depot_lock);
22280Sstevel@tonic-gate
22290Sstevel@tonic-gate if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
22300Sstevel@tonic-gate (int)(cp->cache_depot_contention -
22310Sstevel@tonic-gate cp->cache_depot_contention_prev) > umem_depot_contention)
22320Sstevel@tonic-gate update_flags |= UMU_MAGAZINE_RESIZE;
22330Sstevel@tonic-gate
22340Sstevel@tonic-gate cp->cache_depot_contention_prev = cp->cache_depot_contention;
22350Sstevel@tonic-gate
22360Sstevel@tonic-gate (void) mutex_unlock(&cp->cache_depot_lock);
22370Sstevel@tonic-gate
22380Sstevel@tonic-gate if (update_flags)
22390Sstevel@tonic-gate umem_add_update(cp, update_flags);
22400Sstevel@tonic-gate }
22410Sstevel@tonic-gate
22420Sstevel@tonic-gate /*
22430Sstevel@tonic-gate * Runs all pending updates.
22440Sstevel@tonic-gate *
22450Sstevel@tonic-gate * The update lock must be held on entrance, and will be held on exit.
22460Sstevel@tonic-gate */
22470Sstevel@tonic-gate void
22480Sstevel@tonic-gate umem_process_updates(void)
22490Sstevel@tonic-gate {
22500Sstevel@tonic-gate ASSERT(MUTEX_HELD(&umem_update_lock));
22510Sstevel@tonic-gate
22520Sstevel@tonic-gate while (umem_null_cache.cache_unext != &umem_null_cache) {
22530Sstevel@tonic-gate int notify = 0;
22540Sstevel@tonic-gate umem_cache_t *cp = umem_null_cache.cache_unext;
22550Sstevel@tonic-gate
22560Sstevel@tonic-gate cp->cache_uprev->cache_unext = cp->cache_unext;
22570Sstevel@tonic-gate cp->cache_unext->cache_uprev = cp->cache_uprev;
22580Sstevel@tonic-gate cp->cache_uprev = cp->cache_unext = NULL;
22590Sstevel@tonic-gate
22600Sstevel@tonic-gate ASSERT(!(cp->cache_uflags & UMU_ACTIVE));
22610Sstevel@tonic-gate
22620Sstevel@tonic-gate while (cp->cache_uflags) {
22630Sstevel@tonic-gate int uflags = (cp->cache_uflags |= UMU_ACTIVE);
22640Sstevel@tonic-gate (void) mutex_unlock(&umem_update_lock);
22650Sstevel@tonic-gate
22660Sstevel@tonic-gate /*
22670Sstevel@tonic-gate * The order here is important. Each step can speed up
22680Sstevel@tonic-gate * later steps.
22690Sstevel@tonic-gate */
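			/*
			 * (Illustratively: a hash rescale right-sizes the
			 * bucket array, making the per-buffer hash lookups
			 * that a subsequent reap performs cheaper, and a
			 * magazine resize reorganizes the depot before the
			 * reap decides how many full magazines to release.)
			 */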

			if (uflags & UMU_HASH_RESCALE)
				umem_hash_rescale(cp);

			if (uflags & UMU_MAGAZINE_RESIZE)
				umem_cache_magazine_resize(cp);

			if (uflags & UMU_REAP)
				umem_cache_reap(cp);

			(void) mutex_lock(&umem_update_lock);

			/*
			 * check if anyone has requested notification
			 */
			if (cp->cache_uflags & UMU_NOTIFY) {
				uflags |= UMU_NOTIFY;
				notify = 1;
			}
			cp->cache_uflags &= ~uflags;
		}
		if (notify)
			(void) cond_broadcast(&umem_update_cv);
	}
}

#ifndef UMEM_STANDALONE
static void
umem_st_update(void)
{
	ASSERT(MUTEX_HELD(&umem_update_lock));
	ASSERT(umem_update_thr == 0 && umem_st_update_thr == 0);

	umem_st_update_thr = thr_self();

	(void) mutex_unlock(&umem_update_lock);

	vmem_update(NULL);
	umem_cache_applyall(umem_cache_update);

	(void) mutex_lock(&umem_update_lock);

	umem_process_updates();	/* does all of the requested work */

	umem_reap_next = gethrtime() +
	    (hrtime_t)umem_reap_interval * NANOSEC;

	umem_reaping = UMEM_REAP_DONE;

	umem_st_update_thr = 0;
}
#endif

/*
 * Reclaim all unused memory from all caches. Called from vmem when memory
 * gets tight. Must be called with no locks held.
 *
 * This just requests a reap on all caches, and notifies the update thread.
 */
void
umem_reap(void)
{
#ifndef UMEM_STANDALONE
	extern int __nthreads(void);
#endif

	if (umem_ready != UMEM_READY || umem_reaping != UMEM_REAP_DONE ||
	    gethrtime() < umem_reap_next)
		return;

	(void) mutex_lock(&umem_update_lock);

	if (umem_reaping != UMEM_REAP_DONE || gethrtime() < umem_reap_next) {
		(void) mutex_unlock(&umem_update_lock);
		return;
	}
	umem_reaping = UMEM_REAP_ADDING;	/* lock out other reaps */

	(void) mutex_unlock(&umem_update_lock);

	umem_updateall(UMU_REAP);

	(void) mutex_lock(&umem_update_lock);

	umem_reaping = UMEM_REAP_ACTIVE;

	/* Standalone is single-threaded */
#ifndef UMEM_STANDALONE
	if (umem_update_thr == 0) {
		/*
		 * The update thread does not exist. If the process is
		 * multi-threaded, create it. If not, or the creation fails,
		 * do the update processing inline.
		 */
		ASSERT(umem_st_update_thr == 0);

		if (__nthreads() <= 1 || umem_create_update_thread() == 0)
			umem_st_update();
	}

	(void) cond_broadcast(&umem_update_cv);	/* wake up the update thread */
#endif

	(void) mutex_unlock(&umem_update_lock);
}
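
/*
 * Sketch of the reap state machine used above (the transition back to
 * UMEM_REAP_DONE is made by whichever thread finishes the queued reap
 * work; umem_st_update() shows the single-threaded case):
 *
 *	UMEM_REAP_DONE    --umem_reap()-->                UMEM_REAP_ADDING
 *	UMEM_REAP_ADDING  --umem_updateall(UMU_REAP)-->   UMEM_REAP_ACTIVE
 *	UMEM_REAP_ACTIVE  --reap work completed-->        UMEM_REAP_DONE
 *
 * The unlocked check of umem_reaping and umem_reap_next at entry is
 * deliberately repeated under umem_update_lock, so the common "nothing
 * to do" case stays lock-free while racing callers are still serialized
 * correctly.
 */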

umem_cache_t *
umem_cache_create(
	char *name,		/* descriptive name for this cache */
	size_t bufsize,		/* size of the objects it manages */
	size_t align,		/* required object alignment */
	umem_constructor_t *constructor, /* object constructor */
	umem_destructor_t *destructor,	/* object destructor */
	umem_reclaim_t *reclaim, /* memory reclaim callback */
	void *private,		/* pass-thru arg for constr/destr/reclaim */
	vmem_t *vmp,		/* vmem source for slab allocation */
	int cflags)		/* cache creation flags */
{
	int cpu_seqid;
	size_t chunksize;
	umem_cache_t *cp, *cnext, *cprev;
	umem_magtype_t *mtp;
	size_t csize;
	size_t phase;

	/*
	 * The init thread is allowed to create internal and quantum caches.
	 *
	 * Other threads must wait until initialization is complete.
	 */
	if (umem_init_thr == thr_self())
		ASSERT((cflags & (UMC_INTERNAL | UMC_QCACHE)) != 0);
	else {
		ASSERT(!(cflags & UMC_INTERNAL));
		if (umem_ready != UMEM_READY && umem_init() == 0) {
			errno = EAGAIN;
			return (NULL);
		}
	}

	csize = UMEM_CACHE_SIZE(umem_max_ncpus);
	phase = P2NPHASE(csize, UMEM_CPU_CACHE_SIZE);

	if (vmp == NULL)
		vmp = umem_default_arena;

	ASSERT(P2PHASE(phase, UMEM_ALIGN) == 0);

	/*
	 * Check that the arguments are reasonable
	 */
	if ((align & (align - 1)) != 0 || align > vmp->vm_quantum ||
	    ((cflags & UMC_NOHASH) && (cflags & UMC_NOTOUCH)) ||
	    name == NULL || bufsize == 0) {
		errno = EINVAL;
		return (NULL);
	}

	/*
	 * If align == 0, we set it to the minimum required alignment.
	 *
	 * If align < UMEM_ALIGN, we round it up to UMEM_ALIGN, unless
	 * UMC_NOTOUCH was passed.
	 */
	if (align == 0) {
		if (P2ROUNDUP(bufsize, UMEM_ALIGN) >= UMEM_SECOND_ALIGN)
			align = UMEM_SECOND_ALIGN;
		else
			align = UMEM_ALIGN;
	} else if (align < UMEM_ALIGN && (cflags & UMC_NOTOUCH) == 0)
		align = UMEM_ALIGN;
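
	/*
	 * Worked example (assuming the usual values UMEM_ALIGN == 8 and
	 * UMEM_SECOND_ALIGN == 64): a caller passing bufsize == 24,
	 * align == 0 gets align == 8, since P2ROUNDUP(24, 8) == 24 < 64;
	 * a caller passing bufsize == 100, align == 0 gets align == 64,
	 * since P2ROUNDUP(100, 8) == 104 >= 64.
	 */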

	/*
	 * Get a umem_cache structure. We arrange that cp->cache_cpu[]
	 * is aligned on a UMEM_CPU_CACHE_SIZE boundary to prevent
	 * false sharing of per-CPU data.
	 */
	cp = vmem_xalloc(umem_cache_arena, csize, UMEM_CPU_CACHE_SIZE, phase,
	    0, NULL, NULL, VM_NOSLEEP);

	if (cp == NULL) {
		errno = EAGAIN;
		return (NULL);
	}

	bzero(cp, csize);

	(void) mutex_lock(&umem_flags_lock);
	if (umem_flags & UMF_RANDOMIZE)
		umem_flags = (((umem_flags | ~UMF_RANDOM) + 1) & UMF_RANDOM) |
		    UMF_RANDOMIZE;
	cp->cache_flags = umem_flags | (cflags & UMF_DEBUG);
	(void) mutex_unlock(&umem_flags_lock);

	/*
	 * Make sure all the various flags are reasonable.
	 */
	if (cp->cache_flags & UMF_LITE) {
		if (bufsize >= umem_lite_minsize &&
		    align <= umem_lite_maxalign &&
		    P2PHASE(bufsize, umem_lite_maxalign) != 0) {
			cp->cache_flags |= UMF_BUFTAG;
			cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);
		} else {
			cp->cache_flags &= ~UMF_DEBUG;
		}
	}

	if ((cflags & UMC_QCACHE) && (cp->cache_flags & UMF_AUDIT))
		cp->cache_flags |= UMF_NOMAGAZINE;

	if (cflags & UMC_NODEBUG)
		cp->cache_flags &= ~UMF_DEBUG;

	if (cflags & UMC_NOTOUCH)
		cp->cache_flags &= ~UMF_TOUCH;

	if (cflags & UMC_NOHASH)
		cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);

	if (cflags & UMC_NOMAGAZINE)
		cp->cache_flags |= UMF_NOMAGAZINE;

	if ((cp->cache_flags & UMF_AUDIT) && !(cflags & UMC_NOTOUCH))
		cp->cache_flags |= UMF_REDZONE;

	if ((cp->cache_flags & UMF_BUFTAG) && bufsize >= umem_minfirewall &&
	    !(cp->cache_flags & UMF_LITE) && !(cflags & UMC_NOHASH))
		cp->cache_flags |= UMF_FIREWALL;

	if (vmp != umem_default_arena || umem_firewall_arena == NULL)
		cp->cache_flags &= ~UMF_FIREWALL;

	if (cp->cache_flags & UMF_FIREWALL) {
		cp->cache_flags &= ~UMF_BUFTAG;
		cp->cache_flags |= UMF_NOMAGAZINE;
		ASSERT(vmp == umem_default_arena);
		vmp = umem_firewall_arena;
	}

	/*
	 * Set cache properties.
	 */
	(void) strncpy(cp->cache_name, name, sizeof (cp->cache_name) - 1);
	cp->cache_bufsize = bufsize;
	cp->cache_align = align;
	cp->cache_constructor = constructor;
	cp->cache_destructor = destructor;
	cp->cache_reclaim = reclaim;
	cp->cache_private = private;
	cp->cache_arena = vmp;
	cp->cache_cflags = cflags;
	cp->cache_cpu_mask = umem_cpu_mask;

	/*
	 * Determine the chunk size.
	 */
	chunksize = bufsize;

	if (align >= UMEM_ALIGN) {
		chunksize = P2ROUNDUP(chunksize, UMEM_ALIGN);
		cp->cache_bufctl = chunksize - UMEM_ALIGN;
	}

	if (cp->cache_flags & UMF_BUFTAG) {
		cp->cache_bufctl = chunksize;
		cp->cache_buftag = chunksize;
		chunksize += sizeof (umem_buftag_t);
	}

	if (cp->cache_flags & UMF_DEADBEEF) {
		cp->cache_verify = MIN(cp->cache_buftag, umem_maxverify);
		if (cp->cache_flags & UMF_LITE)
			cp->cache_verify = MIN(cp->cache_verify, UMEM_ALIGN);
	}

	cp->cache_contents = MIN(cp->cache_bufctl, umem_content_maxsave);

	cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);

	if (chunksize < bufsize) {
		errno = ENOMEM;
		goto fail;
	}
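
	/*
	 * Illustrative chunk layout (assuming UMEM_ALIGN == 8, a 100-byte
	 * buffer, align == 8, and UMF_BUFTAG set): chunksize is first
	 * rounded to 104, so cache_bufctl lands at offset 96; the buftag
	 * then moves cache_bufctl/cache_buftag to offset 104 and grows the
	 * chunk by sizeof (umem_buftag_t). The final P2ROUNDUP(, align)
	 * plus the chunksize < bufsize test above catch arithmetic
	 * overflow for pathologically large bufsize values.
	 */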

	/*
	 * Now that we know the chunk size, determine the optimal slab size.
	 */
	if (vmp == umem_firewall_arena) {
		cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
		cp->cache_mincolor = cp->cache_slabsize - chunksize;
		cp->cache_maxcolor = cp->cache_mincolor;
		cp->cache_flags |= UMF_HASH;
		ASSERT(!(cp->cache_flags & UMF_BUFTAG));
	} else if ((cflags & UMC_NOHASH) || (!(cflags & UMC_NOTOUCH) &&
	    !(cp->cache_flags & UMF_AUDIT) &&
	    chunksize < vmp->vm_quantum / UMEM_VOID_FRACTION)) {
		cp->cache_slabsize = vmp->vm_quantum;
		cp->cache_mincolor = 0;
		cp->cache_maxcolor =
		    (cp->cache_slabsize - sizeof (umem_slab_t)) % chunksize;

		if (chunksize + sizeof (umem_slab_t) > cp->cache_slabsize) {
			errno = EINVAL;
			goto fail;
		}
		ASSERT(!(cp->cache_flags & UMF_AUDIT));
	} else {
		size_t chunks, bestfit, waste, slabsize;
		size_t minwaste = LONG_MAX;

		for (chunks = 1; chunks <= UMEM_VOID_FRACTION; chunks++) {
			slabsize = P2ROUNDUP(chunksize * chunks,
			    vmp->vm_quantum);
			/*
			 * check for overflow
			 */
			if ((slabsize / chunks) < chunksize) {
				errno = ENOMEM;
				goto fail;
			}
			chunks = slabsize / chunksize;
			waste = (slabsize % chunksize) / chunks;
			if (waste < minwaste) {
				minwaste = waste;
				bestfit = slabsize;
			}
		}
		if (cflags & UMC_QCACHE)
			bestfit = MAX(1 << highbit(3 * vmp->vm_qcache_max), 64);
		cp->cache_slabsize = bestfit;
		cp->cache_mincolor = 0;
		cp->cache_maxcolor = bestfit % chunksize;
		cp->cache_flags |= UMF_HASH;
	}
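
	/*
	 * Worked example of the best-fit loop (assuming a 4K quantum and
	 * chunksize == 3000): chunks == 1 gives slabsize 4096 and waste
	 * (4096 % 3000) / 1 == 1096; chunks == 2 gives slabsize 8192 and
	 * waste 2192 / 2 == 1096; at chunks == 3, slabsize is 12288, the
	 * loop re-derives chunks = 12288 / 3000 == 4, and waste drops to
	 * 288 / 4 == 72, so 12288 becomes the best fit. Rewriting the loop
	 * variable from the actual slab capacity is what lets a single
	 * pass consider only achievable (slabsize, chunks) pairs.
	 */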

	if (cp->cache_flags & UMF_HASH) {
		ASSERT(!(cflags & UMC_NOHASH));
		cp->cache_bufctl_cache = (cp->cache_flags & UMF_AUDIT) ?
		    umem_bufctl_audit_cache : umem_bufctl_cache;
	}

	if (cp->cache_maxcolor >= vmp->vm_quantum)
		cp->cache_maxcolor = vmp->vm_quantum - 1;

	cp->cache_color = cp->cache_mincolor;

	/*
	 * Initialize the rest of the slab layer.
	 */
	(void) mutex_init(&cp->cache_lock, USYNC_THREAD, NULL);

	cp->cache_freelist = &cp->cache_nullslab;
	cp->cache_nullslab.slab_cache = cp;
	cp->cache_nullslab.slab_refcnt = -1;
	cp->cache_nullslab.slab_next = &cp->cache_nullslab;
	cp->cache_nullslab.slab_prev = &cp->cache_nullslab;

	if (cp->cache_flags & UMF_HASH) {
		cp->cache_hash_table = vmem_alloc(umem_hash_arena,
		    UMEM_HASH_INITIAL * sizeof (void *), VM_NOSLEEP);
		if (cp->cache_hash_table == NULL) {
			errno = EAGAIN;
			goto fail_lock;
		}
		bzero(cp->cache_hash_table,
		    UMEM_HASH_INITIAL * sizeof (void *));
		cp->cache_hash_mask = UMEM_HASH_INITIAL - 1;
		cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
	}
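
	/*
	 * The hash shift is chosen so that bucket selection discards the
	 * low-order address bits that vary least between chunks.
	 * Illustratively, for chunksize == 104, highbit(104) - 1 == 6, so
	 * a lookup of the (assumed) form
	 *
	 *	bucket = hash_table[((uintptr_t)buf >> 6) & hash_mask];
	 *
	 * spreads buffers that are roughly a chunk apart across different
	 * buckets of the initial table.
	 */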

	/*
	 * Initialize the depot.
	 */
	(void) mutex_init(&cp->cache_depot_lock, USYNC_THREAD, NULL);

	for (mtp = umem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
		continue;

	cp->cache_magtype = mtp;

	/*
	 * Initialize the CPU layer.
	 */
	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
		umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
		(void) mutex_init(&ccp->cc_lock, USYNC_THREAD, NULL);
		ccp->cc_flags = cp->cache_flags;
		ccp->cc_rounds = -1;
		ccp->cc_prounds = -1;
	}

	/*
	 * Add the cache to the global list. This makes it visible
	 * to umem_update(), so the cache must be ready for business.
	 */
	(void) mutex_lock(&umem_cache_lock);
	cp->cache_next = cnext = &umem_null_cache;
	cp->cache_prev = cprev = umem_null_cache.cache_prev;
	cnext->cache_prev = cp;
	cprev->cache_next = cp;
	(void) mutex_unlock(&umem_cache_lock);

	if (umem_ready == UMEM_READY)
		umem_cache_magazine_enable(cp);

	return (cp);

fail_lock:
	(void) mutex_destroy(&cp->cache_lock);
fail:
	vmem_xfree(umem_cache_arena, cp, csize);
	return (NULL);
}
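
/*
 * Typical consumer usage (an illustrative sketch; my_obj_t, my_ctor, and
 * my_dtor are hypothetical):
 *
 *	umem_cache_t *my_cache = umem_cache_create("my_obj_cache",
 *	    sizeof (my_obj_t), 0, my_ctor, my_dtor, NULL, NULL, NULL, 0);
 *
 * Passing align == 0 requests the minimum alignment, and a NULL vmp
 * selects umem_default_arena. On failure, NULL is returned with errno
 * set to EINVAL (bad arguments) or EAGAIN (no resources). Objects are
 * subsequently obtained with umem_cache_alloc(my_cache, UMEM_DEFAULT)
 * and returned with umem_cache_free(my_cache, obj).
 */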

void
umem_cache_destroy(umem_cache_t *cp)
{
	int cpu_seqid;

	/*
	 * Remove the cache from the global cache list so that no new updates
	 * will be scheduled on its behalf, wait for any pending tasks to
	 * complete, purge the cache, and then destroy it.
	 */
	(void) mutex_lock(&umem_cache_lock);
	cp->cache_prev->cache_next = cp->cache_next;
	cp->cache_next->cache_prev = cp->cache_prev;
	cp->cache_prev = cp->cache_next = NULL;
	(void) mutex_unlock(&umem_cache_lock);

	umem_remove_updates(cp);

	umem_cache_magazine_purge(cp);

	(void) mutex_lock(&cp->cache_lock);
	if (cp->cache_buftotal != 0)
		log_message("umem_cache_destroy: '%s' (%p) not empty\n",
		    cp->cache_name, (void *)cp);
	cp->cache_reclaim = NULL;
	/*
	 * The cache is now dead. There should be no further activity.
	 * We enforce this by setting land mines in the constructor and
	 * destructor routines that induce a segmentation fault if invoked.
	 */
	cp->cache_constructor = (umem_constructor_t *)1;
	cp->cache_destructor = (umem_destructor_t *)2;
	(void) mutex_unlock(&cp->cache_lock);

	if (cp->cache_hash_table != NULL)
		vmem_free(umem_hash_arena, cp->cache_hash_table,
		    (cp->cache_hash_mask + 1) * sizeof (void *));

	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++)
		(void) mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);

	(void) mutex_destroy(&cp->cache_depot_lock);
	(void) mutex_destroy(&cp->cache_lock);

	vmem_free(umem_cache_arena, cp, UMEM_CACHE_SIZE(umem_max_ncpus));
}

void
umem_alloc_sizes_clear(void)
{
	int i;

	umem_alloc_sizes[0] = UMEM_MAXBUF;
	for (i = 1; i < NUM_ALLOC_SIZES; i++)
		umem_alloc_sizes[i] = 0;
}

void
umem_alloc_sizes_add(size_t size_arg)
{
	int i, j;
	size_t size = size_arg;

	if (size == 0) {
		log_message("size_add: cannot add zero-sized cache\n");
		return;
	}

	if (size > UMEM_MAXBUF) {
		log_message("size_add: %ld > %d, cannot add\n", size,
		    UMEM_MAXBUF);
		return;
	}

	if (umem_alloc_sizes[NUM_ALLOC_SIZES - 1] != 0) {
		log_message("size_add: no space in alloc_table for %ld\n",
		    size);
		return;
	}

	if (P2PHASE(size, UMEM_ALIGN) != 0) {
		size = P2ROUNDUP(size, UMEM_ALIGN);
		log_message("size_add: rounding %ld up to %ld\n", size_arg,
		    size);
	}

	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
		int cur = umem_alloc_sizes[i];
		if (cur == size) {
			log_message("size_add: %ld already in table\n",
			    size);
			return;
		}
		if (cur > size)
			break;
	}

	for (j = NUM_ALLOC_SIZES - 1; j > i; j--)
		umem_alloc_sizes[j] = umem_alloc_sizes[j-1];
	umem_alloc_sizes[i] = size;
}

void
umem_alloc_sizes_remove(size_t size)
{
	int i;

	if (size == UMEM_MAXBUF) {
		log_message("size_remove: cannot remove %ld\n", size);
		return;
	}

	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
		int cur = umem_alloc_sizes[i];
		if (cur == size)
			break;
		else if (cur > size || cur == 0) {
			log_message("size_remove: %ld not found in table\n",
			    size);
			return;
		}
	}

	for (; i + 1 < NUM_ALLOC_SIZES; i++)
		umem_alloc_sizes[i] = umem_alloc_sizes[i+1];
	umem_alloc_sizes[i] = 0;
}
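
/*
 * Illustrative use of the three tuning hooks above, typically effective
 * only before umem's default caches are created during initialization:
 *
 *	umem_alloc_sizes_clear();	table becomes { UMEM_MAXBUF }
 *	umem_alloc_sizes_add(64);	table becomes { 64, UMEM_MAXBUF }
 *	umem_alloc_sizes_add(100);	rounded up to 104 and inserted
 *	umem_alloc_sizes_remove(64);	table becomes { 104, UMEM_MAXBUF }
 *
 * The table is kept sorted and 0-terminated, which is exactly what the
 * insertion and removal loops above rely on.
 */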

static int
umem_cache_init(void)
{
	int i;
	size_t size, max_size;
	umem_cache_t *cp;
	umem_magtype_t *mtp;
	char name[UMEM_CACHE_NAMELEN + 1];
	umem_cache_t *umem_alloc_caches[NUM_ALLOC_SIZES];

	for (i = 0; i < sizeof (umem_magtype) / sizeof (*mtp); i++) {
		mtp = &umem_magtype[i];
		(void) snprintf(name, sizeof (name), "umem_magazine_%d",
		    mtp->mt_magsize);
		mtp->mt_cache = umem_cache_create(name,
		    (mtp->mt_magsize + 1) * sizeof (void *),
		    mtp->mt_align, NULL, NULL, NULL, NULL,
		    umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);
		if (mtp->mt_cache == NULL)
			return (0);
	}

	umem_slab_cache = umem_cache_create("umem_slab_cache",
	    sizeof (umem_slab_t), 0, NULL, NULL, NULL, NULL,
	    umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);

	if (umem_slab_cache == NULL)
		return (0);

	umem_bufctl_cache = umem_cache_create("umem_bufctl_cache",
	    sizeof (umem_bufctl_t), 0, NULL, NULL, NULL, NULL,
	    umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);

	if (umem_bufctl_cache == NULL)
		return (0);

	/*
	 * The size of the umem_bufctl_audit structure depends upon
	 * umem_stack_depth. See umem_impl.h for details on the size
	 * restrictions.
	 */

	size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth);
	max_size = UMEM_BUFCTL_AUDIT_MAX_SIZE;

	if (size > max_size) {			/* too large -- truncate */
		int max_frames = UMEM_MAX_STACK_DEPTH;

		ASSERT(UMEM_BUFCTL_AUDIT_SIZE_DEPTH(max_frames) <= max_size);

		umem_stack_depth = max_frames;
		size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth);
	}

	umem_bufctl_audit_cache = umem_cache_create("umem_bufctl_audit_cache",
	    size, 0, NULL, NULL, NULL, NULL, umem_internal_arena,
	    UMC_NOHASH | UMC_INTERNAL);

	if (umem_bufctl_audit_cache == NULL)
		return (0);

	if (vmem_backend & VMEM_BACKEND_MMAP)
		umem_va_arena = vmem_create("umem_va",
		    NULL, 0, pagesize,
		    vmem_alloc, vmem_free, heap_arena,
		    8 * pagesize, VM_NOSLEEP);
	else
		umem_va_arena = heap_arena;

	if (umem_va_arena == NULL)
		return (0);

	umem_default_arena = vmem_create("umem_default",
	    NULL, 0, pagesize,
	    heap_alloc, heap_free, umem_va_arena,
	    0, VM_NOSLEEP);

	if (umem_default_arena == NULL)
		return (0);

	/*
	 * make sure the umem_alloc table initializer is correct
	 */
	i = sizeof (umem_alloc_table) / sizeof (*umem_alloc_table);
	ASSERT(umem_alloc_table[i - 1] == &umem_null_cache);

	/*
	 * Create the default caches to back umem_alloc()
	 */
	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
		size_t cache_size = umem_alloc_sizes[i];
		size_t align = 0;

		if (cache_size == 0)
			break;			/* 0 terminates the list */

		/*
		 * If they allocate a multiple of the coherency granularity,
		 * they get a coherency-granularity-aligned address.
		 */
		if (IS_P2ALIGNED(cache_size, 64))
			align = 64;
		if (IS_P2ALIGNED(cache_size, pagesize))
			align = pagesize;
		(void) snprintf(name, sizeof (name), "umem_alloc_%lu",
		    (long)cache_size);

		cp = umem_cache_create(name, cache_size, align,
		    NULL, NULL, NULL, NULL, NULL, UMC_INTERNAL);
		if (cp == NULL)
			return (0);

		umem_alloc_caches[i] = cp;
	}

	/*
	 * Initialization cannot fail at this point. Make the caches
	 * visible to umem_alloc() and friends.
	 */
	size = UMEM_ALIGN;
	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
		size_t cache_size = umem_alloc_sizes[i];

		if (cache_size == 0)
			break;			/* 0 terminates the list */

		cp = umem_alloc_caches[i];

		while (size <= cache_size) {
			umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT] = cp;
			size += UMEM_ALIGN;
		}
	}
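
	/*
	 * Illustratively (assuming UMEM_ALIGN == 8, UMEM_ALIGN_SHIFT == 3,
	 * and caches at sizes 8, 16, 32, ...): every 8-byte band of
	 * request sizes gets one table slot, so a umem_alloc(13) request
	 * indexes slot (13 - 1) >> 3 == 1 and is served by the 16-byte
	 * cache. The ASSERT below checks that the size list covered every
	 * slot up to UMEM_MAXBUF.
	 */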
	ASSERT(size - UMEM_ALIGN == UMEM_MAXBUF);
	return (1);
}

/*
 * umem_startup() is called early on, and must be called explicitly if we're
 * the standalone version.
 */
#ifdef UMEM_STANDALONE
void
#else
#pragma init(umem_startup)
static void
#endif
umem_startup(caddr_t start, size_t len, size_t pagesize, caddr_t minstack,
    caddr_t maxstack)
{
#ifdef UMEM_STANDALONE
	int idx;
	/* Standalone doesn't fork */
#else
	umem_forkhandler_init();	/* register the fork handler */
#endif

#ifdef __lint
	/* make lint happy */
	minstack = maxstack;
#endif

#ifdef UMEM_STANDALONE
	umem_ready = UMEM_READY_STARTUP;
	umem_init_env_ready = 0;

	umem_min_stack = minstack;
	umem_max_stack = maxstack;

	nofail_callback = NULL;
	umem_slab_cache = NULL;
	umem_bufctl_cache = NULL;
	umem_bufctl_audit_cache = NULL;
	heap_arena = NULL;
	heap_alloc = NULL;
	heap_free = NULL;
	umem_internal_arena = NULL;
	umem_cache_arena = NULL;
	umem_hash_arena = NULL;
	umem_log_arena = NULL;
	umem_oversize_arena = NULL;
	umem_va_arena = NULL;
	umem_default_arena = NULL;
	umem_firewall_va_arena = NULL;
	umem_firewall_arena = NULL;
	umem_memalign_arena = NULL;
	umem_transaction_log = NULL;
	umem_content_log = NULL;
	umem_failure_log = NULL;
	umem_slab_log = NULL;
	umem_cpu_mask = 0;

	umem_cpus = &umem_startup_cpu;
	umem_startup_cpu.cpu_cache_offset = UMEM_CACHE_SIZE(0);
	umem_startup_cpu.cpu_number = 0;

	bcopy(&umem_null_cache_template, &umem_null_cache,
	    sizeof (umem_cache_t));

	for (idx = 0; idx < (UMEM_MAXBUF >> UMEM_ALIGN_SHIFT); idx++)
		umem_alloc_table[idx] = &umem_null_cache;
#endif

	/*
	 * Perform initialization specific to the way we've been compiled
	 * (library or standalone)
	 */
	umem_type_init(start, len, pagesize);

	vmem_startup();
}

int
umem_init(void)
{
	size_t maxverify, minfirewall;
	size_t size;
	int idx;
	umem_cpu_t *new_cpus;

	vmem_t *memalign_arena, *oversize_arena;

	if (thr_self() != umem_init_thr) {
		/*
		 * The usual case -- non-recursive invocation of umem_init().
		 */
		(void) mutex_lock(&umem_init_lock);
		if (umem_ready != UMEM_READY_STARTUP) {
			/*
			 * someone else beat us to initializing umem. Wait
			 * for them to complete, then return.
			 */
			while (umem_ready == UMEM_READY_INITING) {
				int cancel_state;

				(void) pthread_setcancelstate(
				    PTHREAD_CANCEL_DISABLE, &cancel_state);
				(void) cond_wait(&umem_init_cv,
				    &umem_init_lock);
				(void) pthread_setcancelstate(
				    cancel_state, NULL);
			}
			ASSERT(umem_ready == UMEM_READY ||
			    umem_ready == UMEM_READY_INIT_FAILED);
			(void) mutex_unlock(&umem_init_lock);
			return (umem_ready == UMEM_READY);
		}

		ASSERT(umem_ready == UMEM_READY_STARTUP);
		ASSERT(umem_init_env_ready == 0);

		umem_ready = UMEM_READY_INITING;
		umem_init_thr = thr_self();

		(void) mutex_unlock(&umem_init_lock);
		umem_setup_envvars(0);		/* can recurse -- see below */
		if (umem_init_env_ready) {
			/*
			 * initialization was completed already
			 */
			ASSERT(umem_ready == UMEM_READY ||
			    umem_ready == UMEM_READY_INIT_FAILED);
			ASSERT(umem_init_thr == 0);
			return (umem_ready == UMEM_READY);
		}
	} else if (!umem_init_env_ready) {
		/*
		 * The umem_setup_envvars() call (above) makes calls into
		 * the dynamic linker and directly into user-supplied code.
		 * Since we cannot know what that code will do, we could be
		 * recursively invoked (by, say, a malloc() call in the code
		 * itself, or in a (C++) _init section it causes to be fired).
		 *
		 * This code is where we end up if such recursion occurs. We
		 * first clean up any partial results in the envvar code, then
		 * proceed to finish initialization processing in the recursive
		 * call. The original call will notice this, and return
		 * immediately.
		 */
		umem_setup_envvars(1);		/* clean up any partial state */
	} else {
		umem_panic(
		    "recursive allocation while initializing umem\n");
	}
	umem_init_env_ready = 1;

	/*
	 * From this point until we finish, recursion into umem_init() will
	 * cause a umem_panic().
	 */
	maxverify = minfirewall = ULONG_MAX;

	/* LINTED constant condition */
	if (sizeof (umem_cpu_cache_t) != UMEM_CPU_CACHE_SIZE) {
		umem_panic("sizeof (umem_cpu_cache_t) = %d, should be %d\n",
		    sizeof (umem_cpu_cache_t), UMEM_CPU_CACHE_SIZE);
	}

	umem_max_ncpus = umem_get_max_ncpus();

	/*
	 * load tunables from environment
	 */
	umem_process_envvars();

	if (issetugid())
		umem_mtbf = 0;

	/*
	 * set up vmem
	 */
	if (!(umem_flags & UMF_AUDIT))
		vmem_no_debug();

	heap_arena = vmem_heap_arena(&heap_alloc, &heap_free);

	pagesize = heap_arena->vm_quantum;

	umem_internal_arena = vmem_create("umem_internal", NULL, 0, pagesize,
	    heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP);

	umem_default_arena = umem_internal_arena;

	if (umem_internal_arena == NULL)
		goto fail;

	umem_cache_arena = vmem_create("umem_cache", NULL, 0, UMEM_ALIGN,
	    vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP);

	umem_hash_arena = vmem_create("umem_hash", NULL, 0, UMEM_ALIGN,
	    vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP);

	umem_log_arena = vmem_create("umem_log", NULL, 0, UMEM_ALIGN,
	    heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP);

	umem_firewall_va_arena = vmem_create("umem_firewall_va",
	    NULL, 0, pagesize,
	    umem_firewall_va_alloc, umem_firewall_va_free, heap_arena,
	    0, VM_NOSLEEP);

	if (umem_cache_arena == NULL || umem_hash_arena == NULL ||
	    umem_log_arena == NULL || umem_firewall_va_arena == NULL)
		goto fail;

	umem_firewall_arena = vmem_create("umem_firewall", NULL, 0, pagesize,
	    heap_alloc, heap_free, umem_firewall_va_arena, 0,
	    VM_NOSLEEP);

	if (umem_firewall_arena == NULL)
		goto fail;

	oversize_arena = vmem_create("umem_oversize", NULL, 0, pagesize,
	    heap_alloc, heap_free, minfirewall < ULONG_MAX ?
	    umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP);

	memalign_arena = vmem_create("umem_memalign", NULL, 0, UMEM_ALIGN,
	    heap_alloc, heap_free, minfirewall < ULONG_MAX ?
	    umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP);

	if (oversize_arena == NULL || memalign_arena == NULL)
		goto fail;

	if (umem_max_ncpus > CPUHINT_MAX())
		umem_max_ncpus = CPUHINT_MAX();

	while ((umem_max_ncpus & (umem_max_ncpus - 1)) != 0)
		umem_max_ncpus++;

	if (umem_max_ncpus == 0)
		umem_max_ncpus = 1;

	size = umem_max_ncpus * sizeof (umem_cpu_t);
	new_cpus = vmem_alloc(umem_internal_arena, size, VM_NOSLEEP);
	if (new_cpus == NULL)
		goto fail;

	bzero(new_cpus, size);
	for (idx = 0; idx < umem_max_ncpus; idx++) {
		new_cpus[idx].cpu_number = idx;
		new_cpus[idx].cpu_cache_offset = UMEM_CACHE_SIZE(idx);
	}
	umem_cpus = new_cpus;
	umem_cpu_mask = (umem_max_ncpus - 1);
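
	/*
	 * Worked example of the CPU setup above: with 6 CPUs reported,
	 * the increment loop rounds umem_max_ncpus up to 8 (the next
	 * power of two), so umem_cpu_mask becomes 7 and a CPU hint can
	 * be mapped to a cpu slot with a single (hint & umem_cpu_mask)
	 * instead of a division.
	 */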

	if (umem_maxverify == 0)
		umem_maxverify = maxverify;

	if (umem_minfirewall == 0)
		umem_minfirewall = minfirewall;

	/*
	 * Set up updating and reaping
	 */
	umem_reap_next = gethrtime() + NANOSEC;

#ifndef UMEM_STANDALONE
	(void) gettimeofday(&umem_update_next, NULL);
#endif

	/*
	 * Set up logging -- failure here is okay, since it will just disable
	 * the logs
	 */
	if (umem_logging) {
		umem_transaction_log = umem_log_init(umem_transaction_log_size);
		umem_content_log = umem_log_init(umem_content_log_size);
		umem_failure_log = umem_log_init(umem_failure_log_size);
		umem_slab_log = umem_log_init(umem_slab_log_size);
	}

	/*
	 * Set up caches -- if successful, initialization cannot fail, since
	 * allocations from other threads can now succeed.
	 */
	if (umem_cache_init() == 0) {
		log_message("unable to create initial caches\n");
		goto fail;
	}
	umem_oversize_arena = oversize_arena;
	umem_memalign_arena = memalign_arena;

	umem_cache_applyall(umem_cache_magazine_enable);

	/*
	 * initialization done, ready to go
	 */
	(void) mutex_lock(&umem_init_lock);
	umem_ready = UMEM_READY;
	umem_init_thr = 0;
	(void) cond_broadcast(&umem_init_cv);
	(void) mutex_unlock(&umem_init_lock);
	return (1);

fail:
	log_message("umem initialization failed\n");

	(void) mutex_lock(&umem_init_lock);
	umem_ready = UMEM_READY_INIT_FAILED;
	umem_init_thr = 0;
	(void) cond_broadcast(&umem_init_cv);
	(void) mutex_unlock(&umem_init_lock);
	return (0);
}