/*	$NetBSD: linux_idr.c,v 1.7 2018/08/27 06:55:01 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_idr.c,v 1.7 2018/08/27 06:55:01 riastradh Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/rbtree.h>

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/slab.h>
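
/*
 * Each allocated id is represented by an idr_node, keyed in an
 * rbtree(9) red-black tree by its integer index.  The in_list entry
 * threads nodes through the global preload cache defined below.
 */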
struct idr_node {
	rb_node_t		in_rb_node;
	int			in_index;
	void			*in_data;
	SIMPLEQ_ENTRY(idr_node)	in_list;
};
SIMPLEQ_HEAD(idr_head, idr_node);
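
/*
 * Global cache of nodes set aside by idr_preload for a subsequent
 * idr_alloc, and of nodes discarded by a failing idr_alloc until
 * idr_preload_end can free them.  Both lists are protected by the
 * cache's spin lock.
 */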
static struct {
	kmutex_t	lock;
	struct idr_head	preloaded_nodes;
	struct idr_head	discarded_nodes;
} idr_cache __cacheline_aligned;
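
/*
 * linux_idr_module_init and linux_idr_module_fini set up and tear down
 * the global preload cache at module load and unload time.
 */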
int
linux_idr_module_init(void)
{

	mutex_init(&idr_cache.lock, MUTEX_DEFAULT, IPL_VM);
	SIMPLEQ_INIT(&idr_cache.preloaded_nodes);
	SIMPLEQ_INIT(&idr_cache.discarded_nodes);
	return 0;
}

void
linux_idr_module_fini(void)
{

	KASSERT(SIMPLEQ_EMPTY(&idr_cache.discarded_nodes));
	KASSERT(SIMPLEQ_EMPTY(&idr_cache.preloaded_nodes));
	mutex_destroy(&idr_cache.lock);
}
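
/*
 * rbtree(9) comparators: the tree is ordered by in_index, so lookups
 * by id and in-order iteration for idr_get_next and idr_for_each both
 * work directly on the tree.
 */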
static signed int idr_tree_compare_nodes(void *, const void *, const void *);
static signed int idr_tree_compare_key(void *, const void *, const void *);

static const rb_tree_ops_t idr_rb_ops = {
	.rbto_compare_nodes = &idr_tree_compare_nodes,
	.rbto_compare_key = &idr_tree_compare_key,
	.rbto_node_offset = offsetof(struct idr_node, in_rb_node),
	.rbto_context = NULL,
};

static signed int
idr_tree_compare_nodes(void *ctx __unused, const void *na, const void *nb)
{
	const int a = ((const struct idr_node *)na)->in_index;
	const int b = ((const struct idr_node *)nb)->in_index;

	if (a < b)
		return -1;
	else if (b < a)
		return +1;
	else
		return 0;
}

static signed int
idr_tree_compare_key(void *ctx __unused, const void *n, const void *key)
{
	const int a = ((const struct idr_node *)n)->in_index;
	const int b = *(const int *)key;

	if (a < b)
		return -1;
	else if (b < a)
		return +1;
	else
		return 0;
}
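
/*
 * idr_init prepares an empty idr.  idr_destroy only destroys the lock:
 * rbtree(9) trees need no explicit destruction, but any remaining
 * entries must already have been removed or their nodes will leak.
 */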
void
idr_init(struct idr *idr)
{

	mutex_init(&idr->idr_lock, MUTEX_DEFAULT, IPL_VM);
	rb_tree_init(&idr->idr_tree, &idr_rb_ops);
}

void
idr_destroy(struct idr *idr)
{

#if 0				/* XXX No rb_tree_destroy?  */
	rb_tree_destroy(&idr->idr_tree);
#endif
	mutex_destroy(&idr->idr_lock);
}

bool
idr_is_empty(struct idr *idr)
{

	return (RB_TREE_MIN(&idr->idr_tree) == NULL);
}
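
/*
 * idr_find returns the datum stored under id, or NULL if the id is not
 * allocated.  The lookup is serialized by the idr's spin lock.
 */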
void *
idr_find(struct idr *idr, int id)
{
	const struct idr_node *node;
	void *data;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	data = (node == NULL? NULL : node->in_data);
	mutex_spin_exit(&idr->idr_lock);

	return data;
}
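
/*
 * idr_get_next returns the datum of the first entry with index >= *idp
 * and writes that entry's index back to *idp, or returns NULL if there
 * is no such entry.
 */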
void *
idr_get_next(struct idr *idr, int *idp)
{
	const struct idr_node *node;
	void *data;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node_geq(&idr->idr_tree, idp);
	if (node == NULL) {
		data = NULL;
	} else {
		data = node->in_data;
		*idp = node->in_index;
	}
	mutex_spin_exit(&idr->idr_lock);

	return data;
}
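
/*
 * idr_replace swaps in a new datum for an existing id and returns the
 * old one, or ERR_PTR(-ENOENT) if the id is not allocated; callers
 * should test the result with IS_ERR before using it.
 */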
void *
idr_replace(struct idr *idr, void *replacement, int id)
{
	struct idr_node *node;
	void *result;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	if (node == NULL) {
		result = ERR_PTR(-ENOENT);
	} else {
		result = node->in_data;
		node->in_data = replacement;
	}
	mutex_spin_exit(&idr->idr_lock);

	return result;
}
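
/*
 * idr_remove unlinks the node for id and frees it.  Removing an id
 * that is not allocated is a caller bug, caught by the KASSERT.
 */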
void
idr_remove(struct idr *idr, int id)
{
	struct idr_node *node;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	KASSERTMSG((node != NULL), "idr %p has no entry for id %d", idr, id);
	rb_tree_remove_node(&idr->idr_tree, node);
	mutex_spin_exit(&idr->idr_lock);
	kfree(node);
}
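
/*
 * idr_preload sets aside one idr_node so that a subsequent idr_alloc
 * can succeed without sleeping for memory, typically because it runs
 * under a lock.  A rough sketch of the intended calling pattern -- the
 * caller names and GFP flags below are illustrative assumptions, not
 * something this file defines:
 *
 *	idr_preload(GFP_KERNEL);	(may sleep to allocate a node)
 *	id = idr_alloc(&sc->sc_idr, obj, 1, 0, GFP_NOWAIT);
 *	idr_preload_end();
 *	if (id < 0)
 *		...handle -ENOMEM or -ENOSPC...
 *
 * If the allocation fails without __GFP_WAIT, idr_preload simply
 * returns, and the subsequent idr_alloc may then fail with -ENOMEM.
 */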
void
idr_preload(gfp_t gfp)
{
	struct idr_node *node;

	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();

	node = kmalloc(sizeof(*node), gfp);
	KASSERT(node != NULL || !ISSET(gfp, __GFP_WAIT));
	if (node == NULL)
		return;

	mutex_spin_enter(&idr_cache.lock);
	SIMPLEQ_INSERT_TAIL(&idr_cache.preloaded_nodes, node, in_list);
	mutex_spin_exit(&idr_cache.lock);
}
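
/*
 * idr_alloc stores data under the smallest unused id that is >= start
 * and below end (end <= 0 means no upper bound), returning the chosen
 * id or a negative errno.  It consumes a node set aside by idr_preload;
 * the search walks the tree rightward from the first node with index
 * >= start until it finds a gap.
 */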
int
idr_alloc(struct idr *idr, void *data, int start, int end, gfp_t gfp)
{
	int maximum = (end <= 0? INT_MAX : (end - 1));
	struct idr_node *node, *search, *collision __diagused;
	int id = start;

	/* Sanity-check inputs.  */
	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();
	if (__predict_false(start < 0))
		return -EINVAL;
	if (__predict_false(maximum < start))
		return -ENOSPC;

	/* Grab a node allocated by idr_preload.  */
	mutex_spin_enter(&idr_cache.lock);
	if (SIMPLEQ_EMPTY(&idr_cache.preloaded_nodes)) {
		mutex_spin_exit(&idr_cache.lock);
		return -ENOMEM;
	}
	node = SIMPLEQ_FIRST(&idr_cache.preloaded_nodes);
	SIMPLEQ_REMOVE_HEAD(&idr_cache.preloaded_nodes, in_list);
	mutex_spin_exit(&idr_cache.lock);

	/* Find an id.  */
	mutex_spin_enter(&idr->idr_lock);
	search = rb_tree_find_node_geq(&idr->idr_tree, &start);
	while ((search != NULL) && (search->in_index == id)) {
		if (maximum <= id) {
			id = -ENOSPC;
			goto out;
		}
		search = rb_tree_iterate(&idr->idr_tree, search, RB_DIR_RIGHT);
		id++;
	}
	node->in_index = id;
	node->in_data = data;
	collision = rb_tree_insert_node(&idr->idr_tree, node);
	KASSERT(collision == node);
out:	mutex_spin_exit(&idr->idr_lock);

	if (id < 0) {
		/* Discard the node on failure.  */
		mutex_spin_enter(&idr_cache.lock);
		SIMPLEQ_INSERT_HEAD(&idr_cache.discarded_nodes, node, in_list);
		mutex_spin_exit(&idr_cache.lock);
	}
	return id;
}
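
/*
 * idr_preload_end frees the nodes that failing idr_alloc calls put on
 * the discard list.  They are first moved to a local queue so that the
 * kfree calls happen outside the spin lock.
 */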
void
idr_preload_end(void)
{
	struct idr_head temp = SIMPLEQ_HEAD_INITIALIZER(temp);
	struct idr_node *node, *next;

	mutex_spin_enter(&idr_cache.lock);
	SIMPLEQ_FOREACH_SAFE(node, &idr_cache.discarded_nodes, in_list, next) {
		SIMPLEQ_REMOVE_HEAD(&idr_cache.discarded_nodes, in_list);
		SIMPLEQ_INSERT_HEAD(&temp, node, in_list);
	}
	mutex_spin_exit(&idr_cache.lock);

	SIMPLEQ_FOREACH_SAFE(node, &temp, in_list, next) {
		SIMPLEQ_REMOVE_HEAD(&temp, in_list);
		kfree(node);
	}
}
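
/*
 * idr_for_each calls proc(id, data, arg) on each entry in increasing
 * id order, stopping at the first nonzero return value, which it
 * passes back to the caller.  The caller must prevent concurrent
 * modification of the idr for the duration.
 */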
int
idr_for_each(struct idr *idr, int (*proc)(int, void *, void *), void *arg)
{
	struct idr_node *node;
	int error = 0;

	/* XXX Caller must exclude modifications.  */
	membar_consumer();
	RB_TREE_FOREACH(node, &idr->idr_tree) {
		error = (*proc)(node->in_index, node->in_data, arg);
		if (error)
			break;
	}

	return error;
}