xref: /netbsd-src/sys/external/bsd/drm2/linux/linux_idr.c (revision 92c79c768bac5e61208ad7e7ebf583192f863840)
1 /*	$NetBSD: linux_idr.c,v 1.13 2021/12/19 01:00:17 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2013 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: linux_idr.c,v 1.13 2021/12/19 01:00:17 riastradh Exp $");
34 
35 #include <sys/param.h>
36 #include <sys/atomic.h>
37 #include <sys/rbtree.h>
38 #include <sys/sdt.h>
39 
40 #include <linux/err.h>
41 #include <linux/idr.h>
42 #include <linux/slab.h>
43 
44 #ifdef _KERNEL_OPT
45 #include "opt_ddb.h"
46 #endif
47 
48 #ifdef DDB
49 #include <ddb/ddb.h>
50 #endif
51 
/*
 * One idr entry: maps the integer id in_index to the caller-supplied
 * pointer in_data, linked into the idr's red-black tree via in_rb_node.
 */
struct idr_node {
	rb_node_t		in_rb_node;	/* linkage in idr_tree */
	int			in_index;	/* key: the allocated id */
	void			*in_data;	/* caller-supplied datum */
};
57 
/*
 * Per-lwp cache holding at most one preallocated idr_node, filled by
 * idr_preload and consumed by idr_alloc.  ic_where records the return
 * address of the idr_preload call, for leak diagnostics.
 */
struct idr_cache {
	struct idr_node		*ic_node;	/* preallocated node, or NULL */
	void			*ic_where;	/* caller of idr_preload */
};
62 
/* Static dtrace probes for observing idr lifecycle and allocation.  */
SDT_PROBE_DEFINE0(sdt, linux, idr, leak);
SDT_PROBE_DEFINE1(sdt, linux, idr, init, "struct idr *"/*idr*/);
SDT_PROBE_DEFINE1(sdt, linux, idr, destroy, "struct idr *"/*idr*/);
SDT_PROBE_DEFINE4(sdt, linux, idr, replace,
    "struct idr *"/*idr*/, "int"/*id*/, "void *"/*odata*/, "void *"/*ndata*/);
SDT_PROBE_DEFINE3(sdt, linux, idr, remove,
    "struct idr *"/*idr*/, "int"/*id*/, "void *"/*data*/);
SDT_PROBE_DEFINE0(sdt, linux, idr, preload);
SDT_PROBE_DEFINE0(sdt, linux, idr, preload__end);
SDT_PROBE_DEFINE3(sdt, linux, idr, alloc,
    "struct idr *"/*idr*/, "int"/*id*/, "void *"/*data*/);

/* lwp-specificdata key for the per-lwp struct idr_cache.  */
static specificdata_key_t idr_cache_key __read_mostly;
76 
/*
 * idr_cache_warning(cache)
 *
 *	Print a warning that the idr_preload recorded in cache was
 *	never matched by an idr_preload_end.  With DDB, symbolize the
 *	saved return address; otherwise print it numerically.
 */
static void
idr_cache_warning(struct idr_cache *cache)
{
#ifdef DDB
	const char *name;
	db_expr_t offset;
#endif

	KASSERT(cache->ic_node != NULL);

#ifdef DDB
	db_find_sym_and_offset((db_addr_t)(uintptr_t)cache->ic_where,
	    &name, &offset);
	if (name) {
		printf("WARNING: idr preload at %s+%#"DDB_EXPR_FMT"x"
		    " leaked in lwp %s @ %p\n",
		    name, offset, curlwp->l_name, curlwp);
	} else
#endif
	{
		printf("WARNING: idr preload at %p leaked in lwp %s @ %p\n",
		    cache->ic_where, curlwp->l_name, curlwp);
	}
}
101 
102 static void
103 idr_cache_dtor(void *cookie)
104 {
105 	struct idr_cache *cache = cookie;
106 
107 	if (cache->ic_node) {
108 		SDT_PROBE0(sdt, linux, idr, leak);
109 		idr_cache_warning(cache);
110 		kmem_free(cache->ic_node, sizeof(*cache->ic_node));
111 	}
112 	kmem_free(cache, sizeof(*cache));
113 }
114 
115 int
116 linux_idr_module_init(void)
117 {
118 	int error;
119 
120 	error = lwp_specific_key_create(&idr_cache_key, &idr_cache_dtor);
121 	if (error)
122 		return error;
123 
124 	return 0;
125 }
126 
/*
 * linux_idr_module_fini()
 *
 *	Delete the lwp-specificdata key created by
 *	linux_idr_module_init.
 */
void
linux_idr_module_fini(void)
{

	lwp_specific_key_delete(idr_cache_key);
}
133 
static signed int idr_tree_compare_nodes(void *, const void *, const void *);
static signed int idr_tree_compare_key(void *, const void *, const void *);

/* rb_tree ops: order idr_nodes by their integer index (in_index).  */
static const rb_tree_ops_t idr_rb_ops = {
	.rbto_compare_nodes = &idr_tree_compare_nodes,
	.rbto_compare_key = &idr_tree_compare_key,
	.rbto_node_offset = offsetof(struct idr_node, in_rb_node),
	.rbto_context = NULL,
};
143 
144 static signed int
145 idr_tree_compare_nodes(void *ctx __unused, const void *na, const void *nb)
146 {
147 	const int a = ((const struct idr_node *)na)->in_index;
148 	const int b = ((const struct idr_node *)nb)->in_index;
149 
150 	if (a < b)
151 		return -1;
152 	else if (b < a)
153 		 return +1;
154 	else
155 		return 0;
156 }
157 
158 static signed int
159 idr_tree_compare_key(void *ctx __unused, const void *n, const void *key)
160 {
161 	const int a = ((const struct idr_node *)n)->in_index;
162 	const int b = *(const int *)key;
163 
164 	if (a < b)
165 		return -1;
166 	else if (b < a)
167 		return +1;
168 	else
169 		return 0;
170 }
171 
/*
 * idr_init(idr)
 *
 *	Initialize idr with base id 0.  Convenience wrapper around
 *	idr_init_base.
 */
void
idr_init(struct idr *idr)
{

	idr_init_base(idr, 0);
}
178 
179 void
180 idr_init_base(struct idr *idr, int base)
181 {
182 
183 	mutex_init(&idr->idr_lock, MUTEX_DEFAULT, IPL_VM);
184 	rb_tree_init(&idr->idr_tree, &idr_rb_ops);
185 	idr->idr_base = base;
186 
187 	SDT_PROBE1(sdt, linux, idr, init,  idr);
188 }
189 
/*
 * idr_destroy(idr)
 *
 *	Tear down idr's lock.  The caller is responsible for having
 *	removed (and freed) all entries first; the tree itself has no
 *	destructor to call.
 */
void
idr_destroy(struct idr *idr)
{

	SDT_PROBE1(sdt, linux, idr, destroy,  idr);
#if 0				/* XXX No rb_tree_destroy?  */
	rb_tree_destroy(&idr->idr_tree);
#endif
	mutex_destroy(&idr->idr_lock);
}
200 
201 bool
202 idr_is_empty(struct idr *idr)
203 {
204 
205 	return (RB_TREE_MIN(&idr->idr_tree) == NULL);
206 }
207 
208 void *
209 idr_find(struct idr *idr, int id)
210 {
211 	const struct idr_node *node;
212 	void *data;
213 
214 	mutex_spin_enter(&idr->idr_lock);
215 	node = rb_tree_find_node(&idr->idr_tree, &id);
216 	data = (node == NULL? NULL : node->in_data);
217 	mutex_spin_exit(&idr->idr_lock);
218 
219 	return data;
220 }
221 
222 void *
223 idr_get_next(struct idr *idr, int *idp)
224 {
225 	const struct idr_node *node;
226 	void *data;
227 
228 	mutex_spin_enter(&idr->idr_lock);
229 	node = rb_tree_find_node_geq(&idr->idr_tree, idp);
230 	if (node == NULL) {
231 		data = NULL;
232 	} else {
233 		data = node->in_data;
234 		*idp = node->in_index;
235 	}
236 	mutex_spin_exit(&idr->idr_lock);
237 
238 	return data;
239 }
240 
241 void *
242 idr_replace(struct idr *idr, void *replacement, int id)
243 {
244 	struct idr_node *node;
245 	void *result;
246 
247 	mutex_spin_enter(&idr->idr_lock);
248 	node = rb_tree_find_node(&idr->idr_tree, &id);
249 	if (node == NULL) {
250 		result = ERR_PTR(-ENOENT);
251 	} else {
252 		result = node->in_data;
253 		node->in_data = replacement;
254 		SDT_PROBE4(sdt, linux, idr, replace,
255 		    idr, id, result, replacement);
256 	}
257 	mutex_spin_exit(&idr->idr_lock);
258 
259 	return result;
260 }
261 
262 void
263 idr_remove(struct idr *idr, int id)
264 {
265 	struct idr_node *node;
266 
267 	mutex_spin_enter(&idr->idr_lock);
268 	node = rb_tree_find_node(&idr->idr_tree, &id);
269 	KASSERTMSG((node != NULL), "idr %p has no entry for id %d", idr, id);
270 	SDT_PROBE3(sdt, linux, idr, remove,  idr, id, node->in_data);
271 	rb_tree_remove_node(&idr->idr_tree, node);
272 	mutex_spin_exit(&idr->idr_lock);
273 
274 	kmem_free(node, sizeof(*node));
275 }
276 
/*
 * idr_preload(gfp)
 *
 *	Preallocate one idr_node into the current lwp's cache so that a
 *	subsequent idr_alloc need not allocate while holding the idr
 *	spin lock.  May sleep iff gfp includes __GFP_WAIT; without it,
 *	the preload may silently fail (idr_alloc then returns -ENOMEM).
 *	Pair with idr_preload_end, or the node is reported as leaked.
 */
void
idr_preload(gfp_t gfp)
{
	struct idr_cache *cache;
	struct idr_node *node;
	km_flag_t kmflag = ISSET(gfp, __GFP_WAIT) ? KM_SLEEP : KM_NOSLEEP;

	SDT_PROBE0(sdt, linux, idr, preload);

	/* If caller asked to wait, we had better be sleepable.  */
	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();

	/*
	 * Get the current lwp's private idr cache.
	 */
	cache = lwp_getspecific(idr_cache_key);
	if (cache == NULL) {
		/* lwp_setspecific must be sleepable.  */
		if (!ISSET(gfp, __GFP_WAIT))
			return;
		cache = kmem_zalloc(sizeof(*cache), kmflag);
		if (cache == NULL)
			return;
		lwp_setspecific(idr_cache_key, cache);
	}

	/*
	 * If there already is a node, a prior call to idr_preload must
	 * not have been matched by idr_preload_end.  Print a warning,
	 * claim the node, and record our return address for where this
	 * node came from so the next leak is attributed to us.
	 */
	if (cache->ic_node) {
		idr_cache_warning(cache);
		goto out;
	}

	/*
	 * No cached node.  Allocate a new one, store it in the cache,
	 * and record our return address for where this node came from
	 * so the next leak is attributed to us.
	 */
	node = kmem_alloc(sizeof(*node), kmflag);
	KASSERT(node != NULL || !ISSET(gfp, __GFP_WAIT));
	if (node == NULL)
		return;

	cache->ic_node = node;
out:	cache->ic_where = __builtin_return_address(0);
}
328 
/*
 * idr_alloc(idr, data, start, end, gfp)
 *
 *	Allocate the smallest unused id in [start, end - 1] (end <= 0
 *	means no upper bound) and associate it with data.  Returns the
 *	id on success; -EINVAL if start is negative; -ENOSPC if the
 *	range is empty or exhausted; -ENOMEM if no node was preloaded
 *	with idr_preload for this lwp.  The node always comes from the
 *	per-lwp preload cache, so nothing is allocated under the lock.
 *
 *	NOTE(review): idr->idr_base, set by idr_init_base, is not
 *	consulted here -- confirm callers pass start >= base.
 */
int
idr_alloc(struct idr *idr, void *data, int start, int end, gfp_t gfp)
{
	int maximum = (end <= 0? INT_MAX : (end - 1));
	struct idr_cache *cache;
	struct idr_node *node, *search, *collision __diagused;
	int id = start;

	/* Sanity-check inputs.  */
	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();
	if (__predict_false(start < 0))
		return -EINVAL;
	if (__predict_false(maximum < start))
		return -ENOSPC;

	/*
	 * Grab a node allocated by idr_preload, if we have a cache and
	 * it is populated.
	 */
	cache = lwp_getspecific(idr_cache_key);
	if (cache == NULL || cache->ic_node == NULL)
		return -ENOMEM;
	node = cache->ic_node;
	cache->ic_node = NULL;

	/* Find an id.  */
	mutex_spin_enter(&idr->idr_lock);
	/* Walk the contiguous run of used ids starting at `start'.  */
	search = rb_tree_find_node_geq(&idr->idr_tree, &start);
	while ((search != NULL) && (search->in_index == id)) {
		if (maximum <= id) {
			id = -ENOSPC;
			goto out;
		}
		search = rb_tree_iterate(&idr->idr_tree, search, RB_DIR_RIGHT);
		id++;
	}
	node->in_index = id;
	node->in_data = data;
	collision = rb_tree_insert_node(&idr->idr_tree, node);
	KASSERT(collision == node);
out:	mutex_spin_exit(&idr->idr_lock);

	/* Discard the node on failure.  */
	if (id < 0) {
		/* Return the node to the cache for idr_preload_end.  */
		cache->ic_node = node;
	} else {
		SDT_PROBE3(sdt, linux, idr, alloc,  idr, id, data);
	}
	return id;
}
380 
381 void
382 idr_preload_end(void)
383 {
384 	struct idr_cache *cache;
385 
386 	SDT_PROBE0(sdt, linux, idr, preload__end);
387 
388 	/* Get the cache, or bail if it's not there.  */
389 	cache = lwp_getspecific(idr_cache_key);
390 	if (cache == NULL)
391 		return;
392 
393 	/*
394 	 * If there is a node, either because we didn't idr_alloc or
395 	 * because idr_alloc failed, chuck it.
396 	 *
397 	 * XXX If we are not sleepable, then while the caller may have
398 	 * used idr_preload(GFP_ATOMIC), kmem_free may still sleep.
399 	 * What to do?
400 	 */
401 	if (cache->ic_node) {
402 		struct idr_node *node;
403 
404 		node = cache->ic_node;
405 		cache->ic_node = NULL;
406 		cache->ic_where = NULL;
407 
408 		kmem_free(node, sizeof(*node));
409 	}
410 }
411 
412 int
413 idr_for_each(struct idr *idr, int (*proc)(int, void *, void *), void *arg)
414 {
415 	struct idr_node *node;
416 	int error = 0;
417 
418 	/* XXX Caller must exclude modifications.  */
419 	membar_consumer();
420 	RB_TREE_FOREACH(node, &idr->idr_tree) {
421 		error = (*proc)(node->in_index, node->in_data, arg);
422 		if (error)
423 			break;
424 	}
425 
426 	return error;
427 }
428