/*
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _SPL_KMEM_CACHE_H
#define	_SPL_KMEM_CACHE_H

#include <sys/taskq.h>

/*
 * Slab allocation interfaces.  The SPL slab differs from the standard
 * Linux SLAB or SLUB primarily in that each cache may be backed by slabs
 * allocated from the physical or virtual memory address space.  The virtual
 * slabs allow for good behavior when allocating large objects of identical
 * size.  This slab implementation also supports both constructors and
 * destructors which the Linux slab does not.
 */
typedef enum kmc_bit {
	KMC_BIT_NODEBUG		= 1,	/* Default behavior */
	KMC_BIT_KVMEM		= 7,	/* Use kvmalloc linux allocator */
	KMC_BIT_SLAB		= 8,	/* Use Linux slab cache */
	KMC_BIT_DEADLOCKED	= 14,	/* Deadlock detected */
	KMC_BIT_GROWING		= 15,	/* Growing in progress */
	KMC_BIT_REAPING		= 16,	/* Reaping in progress */
	KMC_BIT_DESTROY		= 17,	/* Destroy in progress */
	KMC_BIT_TOTAL		= 18,	/* Proc handler helper bit */
	KMC_BIT_ALLOC		= 19,	/* Proc handler helper bit */
	KMC_BIT_MAX		= 20,	/* Proc handler helper bit */
	KMC_BIT_RECLAIMABLE	= 21,	/* Can be freed by shrinker */
} kmc_bit_t;

/* kmem move callback return values */
typedef enum kmem_cbrc {
	KMEM_CBRC_YES		= 0,	/* Object moved */
	KMEM_CBRC_NO		= 1,	/* Object not moved */
	KMEM_CBRC_LATER		= 2,	/* Object not moved, try again later */
	KMEM_CBRC_DONT_NEED	= 3,	/* Neither object is needed */
	KMEM_CBRC_DONT_KNOW	= 4,	/* Object unknown */
} kmem_cbrc_t;
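
/*
 * Illustrative sketch (not part of this header's API surface): a client
 * cache may register a move callback with spl_kmem_cache_set_move(),
 * declared below, and report the outcome with these kmem_cbrc_t values.
 * The object type my_obj_t and its locking helpers are hypothetical.
 *
 *	static kmem_cbrc_t
 *	my_obj_move(void *old, void *new, size_t size, void *private)
 *	{
 *		my_obj_t *src = old, *dst = new;
 *
 *		if (!my_obj_trylock(src))
 *			return (KMEM_CBRC_LATER);	// busy, retry later
 *
 *		memcpy(dst, src, size);			// relocate contents
 *		my_obj_unlock(src);
 *		return (KMEM_CBRC_YES);			// object moved
 *	}
 *
 *	spl_kmem_cache_set_move(skc, my_obj_move);
 */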

#define	KMC_NODEBUG		(1 << KMC_BIT_NODEBUG)
#define	KMC_KVMEM		(1 << KMC_BIT_KVMEM)
#define	KMC_SLAB		(1 << KMC_BIT_SLAB)
#define	KMC_DEADLOCKED		(1 << KMC_BIT_DEADLOCKED)
#define	KMC_GROWING		(1 << KMC_BIT_GROWING)
#define	KMC_REAPING		(1 << KMC_BIT_REAPING)
#define	KMC_DESTROY		(1 << KMC_BIT_DESTROY)
#define	KMC_TOTAL		(1 << KMC_BIT_TOTAL)
#define	KMC_ALLOC		(1 << KMC_BIT_ALLOC)
#define	KMC_MAX			(1 << KMC_BIT_MAX)
#define	KMC_RECLAIMABLE		(1 << KMC_BIT_RECLAIMABLE)
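
/*
 * The KMC_BIT_* values above are bit positions within skc_flags, typically
 * manipulated with the kernel's test_bit()/set_bit()/clear_bit() helpers,
 * while the KMC_* masks are passed in the flags argument of
 * spl_kmem_cache_create().  A hedged sketch; the cache name and object
 * type are placeholders:
 *
 *	skc = spl_kmem_cache_create("my_cache", sizeof (my_obj_t), 0,
 *	    NULL, NULL, NULL, NULL, NULL, KMC_KVMEM | KMC_RECLAIMABLE);
 *
 *	if (test_bit(KMC_BIT_GROWING, &skc->skc_flags))
 *		;	// a slab allocation is already in flight
 */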

extern struct list_head spl_kmem_cache_list;
extern struct rw_semaphore spl_kmem_cache_sem;

#define	SKM_MAGIC			0x2e2e2e2e
#define	SKO_MAGIC			0x20202020
#define	SKS_MAGIC			0x22222222
#define	SKC_MAGIC			0x2c2c2c2c

#define	SPL_KMEM_CACHE_OBJ_PER_SLAB	8	/* Target objects per slab */
#define	SPL_KMEM_CACHE_ALIGN		8	/* Default object alignment */
#ifdef _LP64
#define	SPL_KMEM_CACHE_MAX_SIZE		32	/* Max slab size in MB */
#else
#define	SPL_KMEM_CACHE_MAX_SIZE		4	/* Max slab size in MB */
#endif

#define	SPL_MAX_ORDER			(MAX_ORDER - 3)
#define	SPL_MAX_ORDER_NR_PAGES		(1 << (SPL_MAX_ORDER - 1))

#ifdef CONFIG_SLUB
#define	SPL_MAX_KMEM_CACHE_ORDER	PAGE_ALLOC_COSTLY_ORDER
#define	SPL_MAX_KMEM_ORDER_NR_PAGES	(1 << (SPL_MAX_KMEM_CACHE_ORDER - 1))
#else
#define	SPL_MAX_KMEM_ORDER_NR_PAGES	(KMALLOC_MAX_SIZE >> PAGE_SHIFT)
#endif

typedef int (*spl_kmem_ctor_t)(void *, void *, int);
typedef void (*spl_kmem_dtor_t)(void *, void *);
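
/*
 * A minimal constructor/destructor pair matching the prototypes above,
 * assuming the conventional Solaris kmem semantics: the first argument is
 * the object buffer, the second is the private pointer supplied at cache
 * creation, and the constructor's int argument carries the KM_* flags.
 * my_obj_t and its mo_lock field are hypothetical.
 *
 *	static int
 *	my_obj_ctor(void *buf, void *priv, int kmflags)
 *	{
 *		my_obj_t *obj = buf;
 *
 *		memset(obj, 0, sizeof (my_obj_t));
 *		mutex_init(&obj->mo_lock, NULL, MUTEX_DEFAULT, NULL);
 *		return (0);		// 0 indicates success
 *	}
 *
 *	static void
 *	my_obj_dtor(void *buf, void *priv)
 *	{
 *		my_obj_t *obj = buf;
 *
 *		mutex_destroy(&obj->mo_lock);
 *	}
 */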

typedef struct spl_kmem_magazine {
	uint32_t		skm_magic;	/* Sanity magic */
	uint32_t		skm_avail;	/* Available objects */
	uint32_t		skm_size;	/* Magazine size */
	uint32_t		skm_refill;	/* Batch refill size */
	struct spl_kmem_cache	*skm_cache;	/* Owned by cache */
	unsigned int		skm_cpu;	/* Owned by cpu */
	void			*skm_objs[];	/* Object pointers */
} spl_kmem_magazine_t;

typedef struct spl_kmem_obj {
	uint32_t		sko_magic;	/* Sanity magic */
	void			*sko_addr;	/* Buffer address */
	struct spl_kmem_slab	*sko_slab;	/* Owned by slab */
	struct list_head	sko_list;	/* Free object list linkage */
} spl_kmem_obj_t;

typedef struct spl_kmem_slab {
	uint32_t		sks_magic;	/* Sanity magic */
	uint32_t		sks_objs;	/* Objects per slab */
	struct spl_kmem_cache	*sks_cache;	/* Owned by cache */
	struct list_head	sks_list;	/* Slab list linkage */
	struct list_head	sks_free_list;	/* Free object list */
	unsigned long		sks_age;	/* Last modify jiffie */
	uint32_t		sks_ref;	/* Ref count used objects */
} spl_kmem_slab_t;

typedef struct spl_kmem_alloc {
	struct spl_kmem_cache	*ska_cache;	/* Owned by cache */
	int			ska_flags;	/* Allocation flags */
	taskq_ent_t		ska_tqe;	/* Task queue entry */
} spl_kmem_alloc_t;

typedef struct spl_kmem_emergency {
	struct rb_node		ske_node;	/* Emergency tree linkage */
	unsigned long		ske_obj;	/* Buffer address */
} spl_kmem_emergency_t;

typedef struct spl_kmem_cache {
	uint32_t		skc_magic;	/* Sanity magic */
	uint32_t		skc_name_size;	/* Name length */
	char			*skc_name;	/* Name string */
	spl_kmem_magazine_t	**skc_mag;	/* Per-CPU warm cache */
	uint32_t		skc_mag_size;	/* Magazine size */
	uint32_t		skc_mag_refill;	/* Magazine refill count */
	spl_kmem_ctor_t		skc_ctor;	/* Constructor */
	spl_kmem_dtor_t		skc_dtor;	/* Destructor */
	void			*skc_private;	/* Private data */
	void			*skc_vmp;	/* Unused */
	struct kmem_cache	*skc_linux_cache; /* Linux slab cache if used */
	unsigned long		skc_flags;	/* Flags */
	uint32_t		skc_obj_size;	/* Object size */
	uint32_t		skc_obj_align;	/* Object alignment */
	uint32_t		skc_slab_objs;	/* Objects per slab */
	uint32_t		skc_slab_size;	/* Slab size */
	atomic_t		skc_ref;	/* Ref count callers */
	taskqid_t		skc_taskqid;	/* Slab reclaim task */
	struct list_head	skc_list;	/* List of caches linkage */
	struct list_head	skc_complete_list; /* Completely alloc'ed */
	struct list_head	skc_partial_list;  /* Partially alloc'ed */
	struct rb_root		skc_emergency_tree; /* Min sized objects */
	spinlock_t		skc_lock;	/* Cache lock */
	wait_queue_head_t	skc_waitq;	/* Allocation waiters */
	uint64_t		skc_slab_fail;	/* Slab alloc failures */
	uint64_t		skc_slab_create;  /* Slab creates */
	uint64_t		skc_slab_destroy; /* Slab destroys */
	uint64_t		skc_slab_total;	/* Slab total current */
	uint64_t		skc_slab_alloc;	/* Slab alloc current */
	uint64_t		skc_slab_max;	/* Slab max historic */
	uint64_t		skc_obj_total;	/* Obj total current */
	uint64_t		skc_obj_alloc;	/* Obj alloc current */
	struct percpu_counter	skc_linux_alloc;   /* Linux-backed Obj alloc */
	uint64_t		skc_obj_max;	/* Obj max historic */
	uint64_t		skc_obj_deadlock;  /* Obj emergency deadlocks */
	uint64_t		skc_obj_emergency; /* Obj emergency current */
	uint64_t		skc_obj_emergency_max; /* Obj emergency max */
} spl_kmem_cache_t;
#define	kmem_cache_t		spl_kmem_cache_t

extern spl_kmem_cache_t *spl_kmem_cache_create(const char *name, size_t size,
    size_t align, spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor,
    void *reclaim, void *priv, void *vmp, int flags);
extern void spl_kmem_cache_set_move(spl_kmem_cache_t *,
    kmem_cbrc_t (*)(void *, void *, size_t, void *));
extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_set_allocflags(spl_kmem_cache_t *skc, gfp_t flags);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);
extern void spl_kmem_reap(void);
extern uint64_t spl_kmem_cache_inuse(kmem_cache_t *cache);
extern uint64_t spl_kmem_cache_entry_size(kmem_cache_t *cache);

#ifndef	SPL_KMEM_CACHE_IMPLEMENTING
/*
 * Macros for the kmem_cache_* API expected by ZFS and SPL clients. We don't
 * define them inside spl-kmem-cache.c, as that uses the kernel's incompatible
 * kmem_cache_* facilities to implement ours.
 */

/* Avoid conflicts with kernel names that might be implemented as macros. */
#undef	kmem_cache_alloc
#undef	kmem_cache_create

#define	kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl) \
    spl_kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl)
#define	kmem_cache_set_move(skc, move)	spl_kmem_cache_set_move(skc, move)
#define	kmem_cache_destroy(skc)		spl_kmem_cache_destroy(skc)
#define	kmem_cache_alloc(skc, flags)	spl_kmem_cache_alloc(skc, flags)
#define	kmem_cache_free(skc, obj)	spl_kmem_cache_free(skc, obj)
#define	kmem_cache_reap_now(skc)	spl_kmem_cache_reap_now(skc)
#define	kmem_reap()			spl_kmem_reap()
#endif
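
/*
 * Typical client usage through the macros above (an illustrative sketch;
 * my_cache, my_obj_t, and the ctor/dtor names are placeholders):
 *
 *	kmem_cache_t *my_cache;
 *
 *	my_cache = kmem_cache_create("my_cache", sizeof (my_obj_t), 0,
 *	    my_obj_ctor, my_obj_dtor, NULL, NULL, NULL, KMC_KVMEM);
 *
 *	my_obj_t *obj = kmem_cache_alloc(my_cache, KM_SLEEP);
 *	// ... use obj ...
 *	kmem_cache_free(my_cache, obj);
 *	kmem_cache_destroy(my_cache);
 */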

/*
 * The following functions are only available for internal use.
 */
extern int spl_kmem_cache_init(void);
extern void spl_kmem_cache_fini(void);

#endif	/* _SPL_KMEM_CACHE_H */