10Sstevel@tonic-gate /* 20Sstevel@tonic-gate * CDDL HEADER START 30Sstevel@tonic-gate * 40Sstevel@tonic-gate * The contents of this file are subject to the terms of the 56306Stomee * Common Development and Distribution License (the "License"). 66306Stomee * You may not use this file except in compliance with the License. 70Sstevel@tonic-gate * 80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 100Sstevel@tonic-gate * See the License for the specific language governing permissions 110Sstevel@tonic-gate * and limitations under the License. 120Sstevel@tonic-gate * 130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 180Sstevel@tonic-gate * 190Sstevel@tonic-gate * CDDL HEADER END 200Sstevel@tonic-gate */ 216306Stomee 220Sstevel@tonic-gate /* 23*12093SDavid.Valin@Sun.COM * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved. 240Sstevel@tonic-gate */ 250Sstevel@tonic-gate 260Sstevel@tonic-gate #ifndef _SYS_KMEM_IMPL_H 270Sstevel@tonic-gate #define _SYS_KMEM_IMPL_H 280Sstevel@tonic-gate 290Sstevel@tonic-gate #include <sys/kmem.h> 300Sstevel@tonic-gate #include <sys/vmem.h> 310Sstevel@tonic-gate #include <sys/thread.h> 320Sstevel@tonic-gate #include <sys/t_lock.h> 330Sstevel@tonic-gate #include <sys/time.h> 340Sstevel@tonic-gate #include <sys/kstat.h> 350Sstevel@tonic-gate #include <sys/cpuvar.h> 360Sstevel@tonic-gate #include <sys/systm.h> 370Sstevel@tonic-gate #include <vm/page.h> 386712Stomee #include <sys/avl.h> 396712Stomee #include <sys/list.h> 400Sstevel@tonic-gate 410Sstevel@tonic-gate #ifdef __cplusplus 420Sstevel@tonic-gate extern "C" { 430Sstevel@tonic-gate #endif 440Sstevel@tonic-gate 450Sstevel@tonic-gate /* 460Sstevel@tonic-gate * kernel memory allocator: implementation-private data structures 476712Stomee * 486712Stomee * Lock order: 496712Stomee * 1. cache_lock 506712Stomee * 2. cc_lock in order by CPU ID 516712Stomee * 3. cache_depot_lock 526712Stomee * 536712Stomee * Do not call kmem_cache_alloc() or taskq_dispatch() while holding any of the 546712Stomee * above locks. 550Sstevel@tonic-gate */ 560Sstevel@tonic-gate 570Sstevel@tonic-gate #define KMF_AUDIT 0x00000001 /* transaction auditing */ 580Sstevel@tonic-gate #define KMF_DEADBEEF 0x00000002 /* deadbeef checking */ 590Sstevel@tonic-gate #define KMF_REDZONE 0x00000004 /* redzone checking */ 600Sstevel@tonic-gate #define KMF_CONTENTS 0x00000008 /* freed-buffer content logging */ 610Sstevel@tonic-gate #define KMF_STICKY 0x00000010 /* if set, override /etc/system */ 620Sstevel@tonic-gate #define KMF_NOMAGAZINE 0x00000020 /* disable per-cpu magazines */ 630Sstevel@tonic-gate #define KMF_FIREWALL 0x00000040 /* put all bufs before unmapped pages */ 640Sstevel@tonic-gate #define KMF_LITE 0x00000100 /* lightweight debugging */ 650Sstevel@tonic-gate 660Sstevel@tonic-gate #define KMF_HASH 0x00000200 /* cache has hash table */ 670Sstevel@tonic-gate #define KMF_RANDOMIZE 0x00000400 /* randomize other kmem_flags */ 680Sstevel@tonic-gate 6911178SDave.Plauger@Sun.COM #define KMF_DUMPDIVERT 0x00001000 /* use alternate memory at dump time */ 7011178SDave.Plauger@Sun.COM #define KMF_DUMPUNSAFE 0x00002000 /* flag caches used at dump time */ 71*12093SDavid.Valin@Sun.COM #define KMF_PREFILL 0x00004000 /* Prefill the slab when created. */ 7211178SDave.Plauger@Sun.COM 730Sstevel@tonic-gate #define KMF_BUFTAG (KMF_DEADBEEF | KMF_REDZONE) 740Sstevel@tonic-gate #define KMF_TOUCH (KMF_BUFTAG | KMF_LITE | KMF_CONTENTS) 750Sstevel@tonic-gate #define KMF_RANDOM (KMF_TOUCH | KMF_AUDIT | KMF_NOMAGAZINE) 760Sstevel@tonic-gate #define KMF_DEBUG (KMF_RANDOM | KMF_FIREWALL) 770Sstevel@tonic-gate 780Sstevel@tonic-gate #define KMEM_STACK_DEPTH 15 790Sstevel@tonic-gate 800Sstevel@tonic-gate #define KMEM_FREE_PATTERN 0xdeadbeefdeadbeefULL 810Sstevel@tonic-gate #define KMEM_UNINITIALIZED_PATTERN 0xbaddcafebaddcafeULL 820Sstevel@tonic-gate #define KMEM_REDZONE_PATTERN 0xfeedfacefeedfaceULL 830Sstevel@tonic-gate #define KMEM_REDZONE_BYTE 0xbb 840Sstevel@tonic-gate 850Sstevel@tonic-gate /* 860Sstevel@tonic-gate * Redzone size encodings for kmem_alloc() / kmem_free(). We encode the 870Sstevel@tonic-gate * allocation size, rather than storing it directly, so that kmem_free() 880Sstevel@tonic-gate * can distinguish frees of the wrong size from redzone violations. 890Sstevel@tonic-gate * 900Sstevel@tonic-gate * A size of zero is never valid. 910Sstevel@tonic-gate */ 920Sstevel@tonic-gate #define KMEM_SIZE_ENCODE(x) (251 * (x) + 1) 930Sstevel@tonic-gate #define KMEM_SIZE_DECODE(x) ((x) / 251) 940Sstevel@tonic-gate #define KMEM_SIZE_VALID(x) ((x) % 251 == 1 && (x) != 1) 950Sstevel@tonic-gate 966712Stomee 976712Stomee #define KMEM_ALIGN 8 /* min guaranteed alignment */ 986712Stomee #define KMEM_ALIGN_SHIFT 3 /* log2(KMEM_ALIGN) */ 996712Stomee #define KMEM_VOID_FRACTION 8 /* never waste more than 1/8 of slab */ 1006712Stomee 1016712Stomee #define KMEM_SLAB_IS_PARTIAL(sp) \ 1026712Stomee ((sp)->slab_refcnt > 0 && (sp)->slab_refcnt < (sp)->slab_chunks) 1036712Stomee #define KMEM_SLAB_IS_ALL_USED(sp) \ 1046712Stomee ((sp)->slab_refcnt == (sp)->slab_chunks) 1056712Stomee 1060Sstevel@tonic-gate /* 1070Sstevel@tonic-gate * The bufctl (buffer control) structure keeps some minimal information 1080Sstevel@tonic-gate * about each buffer: its address, its slab, and its current linkage, 1090Sstevel@tonic-gate * which is either on the slab's freelist (if the buffer is free), or 1100Sstevel@tonic-gate * on the cache's buf-to-bufctl hash table (if the buffer is allocated). 1110Sstevel@tonic-gate * In the case of non-hashed, or "raw", caches (the common case), only 1120Sstevel@tonic-gate * the freelist linkage is necessary: the buffer address is at a fixed 1130Sstevel@tonic-gate * offset from the bufctl address, and the slab is at the end of the page. 1140Sstevel@tonic-gate * 1150Sstevel@tonic-gate * NOTE: bc_next must be the first field; raw buffers have linkage only. 1160Sstevel@tonic-gate */ 1170Sstevel@tonic-gate typedef struct kmem_bufctl { 1180Sstevel@tonic-gate struct kmem_bufctl *bc_next; /* next bufctl struct */ 1190Sstevel@tonic-gate void *bc_addr; /* address of buffer */ 1200Sstevel@tonic-gate struct kmem_slab *bc_slab; /* controlling slab */ 1210Sstevel@tonic-gate } kmem_bufctl_t; 1220Sstevel@tonic-gate 1230Sstevel@tonic-gate /* 1240Sstevel@tonic-gate * The KMF_AUDIT version of the bufctl structure. The beginning of this 1250Sstevel@tonic-gate * structure must be identical to the normal bufctl structure so that 1260Sstevel@tonic-gate * pointers are interchangeable. 1270Sstevel@tonic-gate */ 1280Sstevel@tonic-gate typedef struct kmem_bufctl_audit { 1290Sstevel@tonic-gate struct kmem_bufctl *bc_next; /* next bufctl struct */ 1300Sstevel@tonic-gate void *bc_addr; /* address of buffer */ 1310Sstevel@tonic-gate struct kmem_slab *bc_slab; /* controlling slab */ 1320Sstevel@tonic-gate kmem_cache_t *bc_cache; /* controlling cache */ 1330Sstevel@tonic-gate hrtime_t bc_timestamp; /* transaction time */ 1340Sstevel@tonic-gate kthread_t *bc_thread; /* thread doing transaction */ 1350Sstevel@tonic-gate struct kmem_bufctl *bc_lastlog; /* last log entry */ 1360Sstevel@tonic-gate void *bc_contents; /* contents at last free */ 1370Sstevel@tonic-gate int bc_depth; /* stack depth */ 1380Sstevel@tonic-gate pc_t bc_stack[KMEM_STACK_DEPTH]; /* pc stack */ 1390Sstevel@tonic-gate } kmem_bufctl_audit_t; 1400Sstevel@tonic-gate 1410Sstevel@tonic-gate /* 1420Sstevel@tonic-gate * A kmem_buftag structure is appended to each buffer whenever any of the 1430Sstevel@tonic-gate * KMF_BUFTAG flags (KMF_DEADBEEF, KMF_REDZONE, KMF_VERIFY) are set. 1440Sstevel@tonic-gate */ 1450Sstevel@tonic-gate typedef struct kmem_buftag { 1460Sstevel@tonic-gate uint64_t bt_redzone; /* 64-bit redzone pattern */ 1470Sstevel@tonic-gate kmem_bufctl_t *bt_bufctl; /* bufctl */ 1480Sstevel@tonic-gate intptr_t bt_bxstat; /* bufctl ^ (alloc/free) */ 1490Sstevel@tonic-gate } kmem_buftag_t; 1500Sstevel@tonic-gate 1510Sstevel@tonic-gate /* 1520Sstevel@tonic-gate * A variant of the kmem_buftag structure used for KMF_LITE caches. 1530Sstevel@tonic-gate * Previous callers are stored in reverse chronological order. (i.e. most 1540Sstevel@tonic-gate * recent first) 1550Sstevel@tonic-gate */ 1560Sstevel@tonic-gate typedef struct kmem_buftag_lite { 1570Sstevel@tonic-gate kmem_buftag_t bt_buftag; /* a normal buftag */ 1580Sstevel@tonic-gate pc_t bt_history[1]; /* zero or more callers */ 1590Sstevel@tonic-gate } kmem_buftag_lite_t; 1600Sstevel@tonic-gate 1610Sstevel@tonic-gate #define KMEM_BUFTAG_LITE_SIZE(f) \ 1620Sstevel@tonic-gate (offsetof(kmem_buftag_lite_t, bt_history[f])) 1630Sstevel@tonic-gate 1640Sstevel@tonic-gate #define KMEM_BUFTAG(cp, buf) \ 1650Sstevel@tonic-gate ((kmem_buftag_t *)((char *)(buf) + (cp)->cache_buftag)) 1660Sstevel@tonic-gate 1670Sstevel@tonic-gate #define KMEM_BUFCTL(cp, buf) \ 1680Sstevel@tonic-gate ((kmem_bufctl_t *)((char *)(buf) + (cp)->cache_bufctl)) 1690Sstevel@tonic-gate 1700Sstevel@tonic-gate #define KMEM_BUF(cp, bcp) \ 1710Sstevel@tonic-gate ((void *)((char *)(bcp) - (cp)->cache_bufctl)) 1720Sstevel@tonic-gate 1730Sstevel@tonic-gate #define KMEM_SLAB(cp, buf) \ 1740Sstevel@tonic-gate ((kmem_slab_t *)P2END((uintptr_t)(buf), (cp)->cache_slabsize) - 1) 1750Sstevel@tonic-gate 1768887SMichael.Corcoran@Sun.COM /* 17711178SDave.Plauger@Sun.COM * Test for using alternate memory at dump time. 17811178SDave.Plauger@Sun.COM */ 17911178SDave.Plauger@Sun.COM #define KMEM_DUMP(cp) ((cp)->cache_flags & KMF_DUMPDIVERT) 18011178SDave.Plauger@Sun.COM #define KMEM_DUMPCC(ccp) ((ccp)->cc_flags & KMF_DUMPDIVERT) 18111178SDave.Plauger@Sun.COM 18211178SDave.Plauger@Sun.COM /* 1838887SMichael.Corcoran@Sun.COM * The "CPU" macro loads a cpu_t that refers to the cpu that the current 1848887SMichael.Corcoran@Sun.COM * thread is running on at the time the macro is executed. A context switch 1858887SMichael.Corcoran@Sun.COM * may occur immediately after loading this data structure, leaving this 1868887SMichael.Corcoran@Sun.COM * thread pointing at the cpu_t for the previous cpu. This is not a problem; 1878887SMichael.Corcoran@Sun.COM * we'd just end up checking the previous cpu's per-cpu cache, and then check 1888887SMichael.Corcoran@Sun.COM * the other layers of the kmem cache if need be. 1898887SMichael.Corcoran@Sun.COM * 1908887SMichael.Corcoran@Sun.COM * It's not even a problem if the old cpu gets DR'ed out during the context 1918887SMichael.Corcoran@Sun.COM * switch. The cpu-remove DR operation bzero()s the cpu_t, but doesn't free 1928887SMichael.Corcoran@Sun.COM * it. So the cpu_t's cpu_cache_offset would read as 0, causing us to use 1938887SMichael.Corcoran@Sun.COM * cpu 0's per-cpu cache. 1948887SMichael.Corcoran@Sun.COM * 1958887SMichael.Corcoran@Sun.COM * So, there is no need to disable kernel preemption while using the CPU macro 1968887SMichael.Corcoran@Sun.COM * below since if we have been context switched, there will not be any 1978887SMichael.Corcoran@Sun.COM * correctness problem, just a momentary use of a different per-cpu cache. 1988887SMichael.Corcoran@Sun.COM */ 1998887SMichael.Corcoran@Sun.COM 2008887SMichael.Corcoran@Sun.COM #define KMEM_CPU_CACHE(cp) \ 20111178SDave.Plauger@Sun.COM ((kmem_cpu_cache_t *)((char *)(&cp->cache_cpu) + CPU->cpu_cache_offset)) 2020Sstevel@tonic-gate 2030Sstevel@tonic-gate #define KMEM_MAGAZINE_VALID(cp, mp) \ 2040Sstevel@tonic-gate (((kmem_slab_t *)P2END((uintptr_t)(mp), PAGESIZE) - 1)->slab_cache == \ 2050Sstevel@tonic-gate (cp)->cache_magtype->mt_cache) 2060Sstevel@tonic-gate 2076712Stomee #define KMEM_SLAB_OFFSET(sp, buf) \ 2086712Stomee ((size_t)((uintptr_t)(buf) - (uintptr_t)((sp)->slab_base))) 2096712Stomee 2100Sstevel@tonic-gate #define KMEM_SLAB_MEMBER(sp, buf) \ 2116712Stomee (KMEM_SLAB_OFFSET(sp, buf) < (sp)->slab_cache->cache_slabsize) 2120Sstevel@tonic-gate 2130Sstevel@tonic-gate #define KMEM_BUFTAG_ALLOC 0xa110c8edUL 2140Sstevel@tonic-gate #define KMEM_BUFTAG_FREE 0xf4eef4eeUL 2150Sstevel@tonic-gate 2166712Stomee /* slab_later_count thresholds */ 2176712Stomee #define KMEM_DISBELIEF 3 2186712Stomee 2196712Stomee /* slab_flags */ 2206712Stomee #define KMEM_SLAB_NOMOVE 0x1 2216712Stomee #define KMEM_SLAB_MOVE_PENDING 0x2 2226712Stomee 2230Sstevel@tonic-gate typedef struct kmem_slab { 2240Sstevel@tonic-gate struct kmem_cache *slab_cache; /* controlling cache */ 2250Sstevel@tonic-gate void *slab_base; /* base of allocated memory */ 2266712Stomee avl_node_t slab_link; /* slab linkage */ 2270Sstevel@tonic-gate struct kmem_bufctl *slab_head; /* first free buffer */ 2280Sstevel@tonic-gate long slab_refcnt; /* outstanding allocations */ 2290Sstevel@tonic-gate long slab_chunks; /* chunks (bufs) in this slab */ 2306712Stomee uint32_t slab_stuck_offset; /* unmoved buffer offset */ 2316712Stomee uint16_t slab_later_count; /* cf KMEM_CBRC_LATER */ 2326712Stomee uint16_t slab_flags; /* bits to mark the slab */ 2330Sstevel@tonic-gate } kmem_slab_t; 2340Sstevel@tonic-gate 2350Sstevel@tonic-gate #define KMEM_HASH_INITIAL 64 2360Sstevel@tonic-gate 2370Sstevel@tonic-gate #define KMEM_HASH(cp, buf) \ 2380Sstevel@tonic-gate ((cp)->cache_hash_table + \ 2390Sstevel@tonic-gate (((uintptr_t)(buf) >> (cp)->cache_hash_shift) & (cp)->cache_hash_mask)) 2400Sstevel@tonic-gate 2410Sstevel@tonic-gate typedef struct kmem_magazine { 2420Sstevel@tonic-gate void *mag_next; 2430Sstevel@tonic-gate void *mag_round[1]; /* one or more rounds */ 2440Sstevel@tonic-gate } kmem_magazine_t; 2450Sstevel@tonic-gate 2460Sstevel@tonic-gate /* 2470Sstevel@tonic-gate * The magazine types for fast per-cpu allocation 2480Sstevel@tonic-gate */ 2490Sstevel@tonic-gate typedef struct kmem_magtype { 25011178SDave.Plauger@Sun.COM short mt_magsize; /* magazine size (number of rounds) */ 2510Sstevel@tonic-gate int mt_align; /* magazine alignment */ 2520Sstevel@tonic-gate size_t mt_minbuf; /* all smaller buffers qualify */ 2530Sstevel@tonic-gate size_t mt_maxbuf; /* no larger buffers qualify */ 2540Sstevel@tonic-gate kmem_cache_t *mt_cache; /* magazine cache */ 2550Sstevel@tonic-gate } kmem_magtype_t; 2560Sstevel@tonic-gate 2570Sstevel@tonic-gate #define KMEM_CPU_CACHE_SIZE 64 /* must be power of 2 */ 2580Sstevel@tonic-gate #define KMEM_CPU_PAD (KMEM_CPU_CACHE_SIZE - sizeof (kmutex_t) - \ 25911178SDave.Plauger@Sun.COM 2 * sizeof (uint64_t) - 2 * sizeof (void *) - sizeof (int) - \ 26011178SDave.Plauger@Sun.COM 5 * sizeof (short)) 2610Sstevel@tonic-gate #define KMEM_CACHE_SIZE(ncpus) \ 2620Sstevel@tonic-gate ((size_t)(&((kmem_cache_t *)0)->cache_cpu[ncpus])) 2630Sstevel@tonic-gate 2648887SMichael.Corcoran@Sun.COM /* Offset from kmem_cache->cache_cpu for per cpu caches */ 2658887SMichael.Corcoran@Sun.COM #define KMEM_CPU_CACHE_OFFSET(cpuid) \ 2668887SMichael.Corcoran@Sun.COM ((size_t)(&((kmem_cache_t *)0)->cache_cpu[cpuid]) - \ 2678887SMichael.Corcoran@Sun.COM (size_t)(&((kmem_cache_t *)0)->cache_cpu)) 2688887SMichael.Corcoran@Sun.COM 2690Sstevel@tonic-gate typedef struct kmem_cpu_cache { 2700Sstevel@tonic-gate kmutex_t cc_lock; /* protects this cpu's local cache */ 2710Sstevel@tonic-gate uint64_t cc_alloc; /* allocations from this cpu */ 2720Sstevel@tonic-gate uint64_t cc_free; /* frees to this cpu */ 2730Sstevel@tonic-gate kmem_magazine_t *cc_loaded; /* the currently loaded magazine */ 2740Sstevel@tonic-gate kmem_magazine_t *cc_ploaded; /* the previously loaded magazine */ 2750Sstevel@tonic-gate int cc_flags; /* CPU-local copy of cache_flags */ 27611178SDave.Plauger@Sun.COM short cc_rounds; /* number of objects in loaded mag */ 27711178SDave.Plauger@Sun.COM short cc_prounds; /* number of objects in previous mag */ 27811178SDave.Plauger@Sun.COM short cc_magsize; /* number of rounds in a full mag */ 27911178SDave.Plauger@Sun.COM short cc_dump_rounds; /* dump time copy of cc_rounds */ 28011178SDave.Plauger@Sun.COM short cc_dump_prounds; /* dump time copy of cc_prounds */ 2810Sstevel@tonic-gate char cc_pad[KMEM_CPU_PAD]; /* for nice alignment */ 2820Sstevel@tonic-gate } kmem_cpu_cache_t; 2830Sstevel@tonic-gate 2840Sstevel@tonic-gate /* 2850Sstevel@tonic-gate * The magazine lists used in the depot. 2860Sstevel@tonic-gate */ 2870Sstevel@tonic-gate typedef struct kmem_maglist { 2880Sstevel@tonic-gate kmem_magazine_t *ml_list; /* magazine list */ 2890Sstevel@tonic-gate long ml_total; /* number of magazines */ 2900Sstevel@tonic-gate long ml_min; /* min since last update */ 2910Sstevel@tonic-gate long ml_reaplimit; /* max reapable magazines */ 2920Sstevel@tonic-gate uint64_t ml_alloc; /* allocations from this list */ 2930Sstevel@tonic-gate } kmem_maglist_t; 2940Sstevel@tonic-gate 2956712Stomee typedef struct kmem_defrag { 2966712Stomee /* 2976712Stomee * Statistics 2986712Stomee */ 2996712Stomee uint64_t kmd_callbacks; /* move callbacks */ 3006712Stomee uint64_t kmd_yes; /* KMEM_CBRC_YES responses */ 3016712Stomee uint64_t kmd_no; /* NO responses */ 3026712Stomee uint64_t kmd_later; /* LATER responses */ 3036712Stomee uint64_t kmd_dont_need; /* DONT_NEED responses */ 3046712Stomee uint64_t kmd_dont_know; /* DONT_KNOW responses */ 3056712Stomee uint64_t kmd_hunt_found; /* DONT_KNOW: # found in mag */ 30610217STom.Erickson@Sun.COM uint64_t kmd_slabs_freed; /* slabs freed by moves */ 30710217STom.Erickson@Sun.COM uint64_t kmd_defrags; /* kmem_cache_defrag() */ 30810217STom.Erickson@Sun.COM uint64_t kmd_scans; /* kmem_cache_scan() */ 3096712Stomee 3106712Stomee /* 3116712Stomee * Consolidator fields 3126712Stomee */ 3136712Stomee avl_tree_t kmd_moves_pending; /* buffer moves pending */ 3146712Stomee list_t kmd_deadlist; /* deferred slab frees */ 3156712Stomee size_t kmd_deadcount; /* # of slabs in kmd_deadlist */ 3166712Stomee uint8_t kmd_reclaim_numer; /* slab usage threshold */ 3176712Stomee uint8_t kmd_pad1; /* compiler padding */ 31810217STom.Erickson@Sun.COM uint16_t kmd_consolidate; /* triggers consolidator */ 31910217STom.Erickson@Sun.COM uint32_t kmd_pad2; /* compiler padding */ 3206712Stomee size_t kmd_slabs_sought; /* reclaimable slabs sought */ 3216712Stomee size_t kmd_slabs_found; /* reclaimable slabs found */ 32210217STom.Erickson@Sun.COM size_t kmd_tries; /* nth scan interval counter */ 3236712Stomee /* 3246712Stomee * Fields used to ASSERT that the client does not kmem_cache_free() 3256712Stomee * objects passed to the move callback. 3266712Stomee */ 3276712Stomee void *kmd_from_buf; /* object to move */ 3286712Stomee void *kmd_to_buf; /* move destination */ 3296712Stomee kthread_t *kmd_thread; /* thread calling move */ 3306712Stomee } kmem_defrag_t; 3316712Stomee 3320Sstevel@tonic-gate #define KMEM_CACHE_NAMELEN 31 3330Sstevel@tonic-gate 3340Sstevel@tonic-gate struct kmem_cache { 3350Sstevel@tonic-gate /* 3360Sstevel@tonic-gate * Statistics 3370Sstevel@tonic-gate */ 3380Sstevel@tonic-gate uint64_t cache_slab_create; /* slab creates */ 3390Sstevel@tonic-gate uint64_t cache_slab_destroy; /* slab destroys */ 3400Sstevel@tonic-gate uint64_t cache_slab_alloc; /* slab layer allocations */ 3410Sstevel@tonic-gate uint64_t cache_slab_free; /* slab layer frees */ 3420Sstevel@tonic-gate uint64_t cache_alloc_fail; /* total failed allocations */ 3430Sstevel@tonic-gate uint64_t cache_buftotal; /* total buffers */ 3440Sstevel@tonic-gate uint64_t cache_bufmax; /* max buffers ever */ 3456306Stomee uint64_t cache_bufslab; /* buffers free in slab layer */ 34610217STom.Erickson@Sun.COM uint64_t cache_reap; /* cache reaps */ 34710217STom.Erickson@Sun.COM uint64_t cache_rescale; /* hash table rescales */ 3480Sstevel@tonic-gate uint64_t cache_lookup_depth; /* hash lookup depth */ 3490Sstevel@tonic-gate uint64_t cache_depot_contention; /* mutex contention count */ 3500Sstevel@tonic-gate uint64_t cache_depot_contention_prev; /* previous snapshot */ 3510Sstevel@tonic-gate 3520Sstevel@tonic-gate /* 3530Sstevel@tonic-gate * Cache properties 3540Sstevel@tonic-gate */ 3550Sstevel@tonic-gate char cache_name[KMEM_CACHE_NAMELEN + 1]; 3560Sstevel@tonic-gate size_t cache_bufsize; /* object size */ 3570Sstevel@tonic-gate size_t cache_align; /* object alignment */ 3580Sstevel@tonic-gate int (*cache_constructor)(void *, void *, int); 3590Sstevel@tonic-gate void (*cache_destructor)(void *, void *); 3600Sstevel@tonic-gate void (*cache_reclaim)(void *); 3616712Stomee kmem_cbrc_t (*cache_move)(void *, void *, size_t, void *); 3620Sstevel@tonic-gate void *cache_private; /* opaque arg to callbacks */ 3630Sstevel@tonic-gate vmem_t *cache_arena; /* vmem source for slabs */ 3640Sstevel@tonic-gate int cache_cflags; /* cache creation flags */ 3650Sstevel@tonic-gate int cache_flags; /* various cache state info */ 3660Sstevel@tonic-gate uint32_t cache_mtbf; /* induced alloc failure rate */ 3676712Stomee uint32_t cache_pad1; /* compiler padding */ 3680Sstevel@tonic-gate kstat_t *cache_kstat; /* exported statistics */ 3696712Stomee list_node_t cache_link; /* cache linkage */ 3700Sstevel@tonic-gate 3710Sstevel@tonic-gate /* 3720Sstevel@tonic-gate * Slab layer 3730Sstevel@tonic-gate */ 3740Sstevel@tonic-gate kmutex_t cache_lock; /* protects slab layer */ 3750Sstevel@tonic-gate size_t cache_chunksize; /* buf + alignment [+ debug] */ 3760Sstevel@tonic-gate size_t cache_slabsize; /* size of a slab */ 3776712Stomee size_t cache_maxchunks; /* max buffers per slab */ 3780Sstevel@tonic-gate size_t cache_bufctl; /* buf-to-bufctl distance */ 3790Sstevel@tonic-gate size_t cache_buftag; /* buf-to-buftag distance */ 3800Sstevel@tonic-gate size_t cache_verify; /* bytes to verify */ 3810Sstevel@tonic-gate size_t cache_contents; /* bytes of saved content */ 3820Sstevel@tonic-gate size_t cache_color; /* next slab color */ 3830Sstevel@tonic-gate size_t cache_mincolor; /* maximum slab color */ 3840Sstevel@tonic-gate size_t cache_maxcolor; /* maximum slab color */ 3850Sstevel@tonic-gate size_t cache_hash_shift; /* get to interesting bits */ 3860Sstevel@tonic-gate size_t cache_hash_mask; /* hash table mask */ 3876712Stomee list_t cache_complete_slabs; /* completely allocated slabs */ 3886712Stomee size_t cache_complete_slab_count; 3896712Stomee avl_tree_t cache_partial_slabs; /* partial slab freelist */ 3906712Stomee size_t cache_partial_binshift; /* for AVL sort bins */ 3910Sstevel@tonic-gate kmem_cache_t *cache_bufctl_cache; /* source of bufctls */ 3920Sstevel@tonic-gate kmem_bufctl_t **cache_hash_table; /* hash table base */ 3936712Stomee kmem_defrag_t *cache_defrag; /* slab consolidator fields */ 3940Sstevel@tonic-gate 3950Sstevel@tonic-gate /* 3960Sstevel@tonic-gate * Depot layer 3970Sstevel@tonic-gate */ 3980Sstevel@tonic-gate kmutex_t cache_depot_lock; /* protects depot */ 3990Sstevel@tonic-gate kmem_magtype_t *cache_magtype; /* magazine type */ 4000Sstevel@tonic-gate kmem_maglist_t cache_full; /* full magazines */ 4010Sstevel@tonic-gate kmem_maglist_t cache_empty; /* empty magazines */ 40211178SDave.Plauger@Sun.COM void *cache_dumpfreelist; /* heap during crash dump */ 40311178SDave.Plauger@Sun.COM void *cache_dumplog; /* log entry during dump */ 4040Sstevel@tonic-gate 4050Sstevel@tonic-gate /* 4060Sstevel@tonic-gate * Per-CPU layer 4070Sstevel@tonic-gate */ 4080Sstevel@tonic-gate kmem_cpu_cache_t cache_cpu[1]; /* max_ncpus actual elements */ 4090Sstevel@tonic-gate }; 4100Sstevel@tonic-gate 4110Sstevel@tonic-gate typedef struct kmem_cpu_log_header { 4120Sstevel@tonic-gate kmutex_t clh_lock; 4130Sstevel@tonic-gate char *clh_current; 4140Sstevel@tonic-gate size_t clh_avail; 4150Sstevel@tonic-gate int clh_chunk; 4160Sstevel@tonic-gate int clh_hits; 4170Sstevel@tonic-gate char clh_pad[64 - sizeof (kmutex_t) - sizeof (char *) - 4180Sstevel@tonic-gate sizeof (size_t) - 2 * sizeof (int)]; 4190Sstevel@tonic-gate } kmem_cpu_log_header_t; 4200Sstevel@tonic-gate 4210Sstevel@tonic-gate typedef struct kmem_log_header { 4220Sstevel@tonic-gate kmutex_t lh_lock; 4230Sstevel@tonic-gate char *lh_base; 4240Sstevel@tonic-gate int *lh_free; 4250Sstevel@tonic-gate size_t lh_chunksize; 4260Sstevel@tonic-gate int lh_nchunks; 4270Sstevel@tonic-gate int lh_head; 4280Sstevel@tonic-gate int lh_tail; 4290Sstevel@tonic-gate int lh_hits; 4300Sstevel@tonic-gate kmem_cpu_log_header_t lh_cpu[1]; /* ncpus actually allocated */ 4310Sstevel@tonic-gate } kmem_log_header_t; 4320Sstevel@tonic-gate 4336712Stomee /* kmem_move kmm_flags */ 4346712Stomee #define KMM_DESPERATE 0x1 4356712Stomee #define KMM_NOTIFY 0x2 43610217STom.Erickson@Sun.COM #define KMM_DEBUG 0x4 4376712Stomee 4386712Stomee typedef struct kmem_move { 4396712Stomee kmem_slab_t *kmm_from_slab; 4406712Stomee void *kmm_from_buf; 4416712Stomee void *kmm_to_buf; 4426712Stomee avl_node_t kmm_entry; 4436712Stomee int kmm_flags; 4446712Stomee } kmem_move_t; 4456712Stomee 4466712Stomee /* 4476712Stomee * In order to consolidate partial slabs, it must be possible for the cache to 4486712Stomee * have partial slabs. 4496712Stomee */ 4506712Stomee #define KMEM_IS_MOVABLE(cp) \ 4516712Stomee (((cp)->cache_chunksize * 2) <= (cp)->cache_slabsize) 4520Sstevel@tonic-gate 4530Sstevel@tonic-gate #ifdef __cplusplus 4540Sstevel@tonic-gate } 4550Sstevel@tonic-gate #endif 4560Sstevel@tonic-gate 4570Sstevel@tonic-gate #endif /* _SYS_KMEM_IMPL_H */ 458