xref: /netbsd-src/sys/external/bsd/drm2/include/linux/interval_tree_generic.h (revision 6c62068b15bf8065691ccdd09658b8d8ec024c6f)
1 /*	$NetBSD: interval_tree_generic.h,v 1.5 2021/12/19 12:33:11 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2018 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #ifndef	_LINUX_INTERVAL_TREE_GENERIC_H_
33 #define	_LINUX_INTERVAL_TREE_GENERIC_H_
34 
35 /* XXX See interval_tree.h for warnings. */
36 
37 #include <sys/rbtree.h>
38 
/*
 * INTERVAL_TREE_DEFINE(T, F, KT, KLAST, NSTART, NLAST, QUAL, PREFIX)
 *
 *	Define interval-tree operations over nodes of type T, linked
 *	through the struct rbnode member named F.  KT is the scalar key
 *	type; NSTART(n) and NLAST(n) give the closed interval
 *	[NSTART(n), NLAST(n)] covered by node n.  KLAST is accepted for
 *	Linux API compatibility (it names the augmented subtree-last
 *	accessor there) but is unused by this plain-rbtree
 *	implementation.  QUAL qualifies the generated functions (e.g.
 *	static) and PREFIX prefixes their names:
 *
 *		PREFIX_init(root)			(NetBSD extension)
 *		PREFIX_insert(node, root)
 *		PREFIX_remove(node, root)
 *		PREFIX_iter_first(root, start, last)
 *		PREFIX_iter_next(root, node, start, last)
 *
 *	Nodes are ordered by (start, last); see interval_tree.h for
 *	warnings about how this differs from Linux's augmented tree.
 */
#define	INTERVAL_TREE_DEFINE(T, F, KT, KLAST, NSTART, NLAST, QUAL, PREFIX)    \
									      \
/* Three-way order of two nodes by (start, last), for rb_tree. */	      \
static inline int							      \
PREFIX##__compare_nodes(void *__cookie, const void *__va, const void *__vb)   \
{									      \
	const T *__na = __va;						      \
	const T *__nb = __vb;						      \
	const KT __astart = NSTART(__na), __alast = NLAST(__na);	      \
	const KT __bstart = NSTART(__nb), __blast = NLAST(__nb);	      \
									      \
	if (__astart < __bstart)					      \
		return -1;						      \
	if (__astart > __bstart)					      \
		return +1;						      \
	if (__alast < __blast)						      \
		return -1;						      \
	if (__alast > __blast)						      \
		return +1;						      \
	return 0;							      \
}									      \
									      \
/* Compare a node to a point key: equal iff the node's interval	*/	      \
/* contains the key, so _geq lookup lands on an overlapping node. */	      \
static inline int							      \
PREFIX##__compare_key(void *__cookie, const void *__vn, const void *__vk)     \
{									      \
	const T *__n = __vn;						      \
	const KT *__k = __vk;						      \
	const KT __nstart = NSTART(__n), __nlast = NLAST(__n);		      \
									      \
	if (__nlast < *__k)						      \
		return -1;						      \
	if (*__k < __nstart)						      \
		return +1;						      \
	return 0;							      \
}									      \
									      \
static const rb_tree_ops_t PREFIX##__rbtree_ops = {			      \
	.rbto_compare_nodes = PREFIX##__compare_nodes,			      \
	.rbto_compare_key = PREFIX##__compare_key,			      \
	.rbto_node_offset = offsetof(T, F),				      \
};									      \
									      \
/* Not in Linux API, needed for us.  */					      \
QUAL void								      \
PREFIX##_init(struct rb_root_cached *__root)				      \
{									      \
	rb_tree_init(&__root->rb_root.rbr_tree, &PREFIX##__rbtree_ops);	      \
}									      \
									      \
QUAL void								      \
PREFIX##_insert(T *__node, struct rb_root_cached *__root)		      \
{									      \
	T *__collision __diagused;					      \
									      \
	__collision = rb_tree_insert_node(&__root->rb_root.rbr_tree, __node); \
	KASSERT(__collision == __node);					      \
}									      \
									      \
QUAL void								      \
PREFIX##_remove(T *__node, struct rb_root_cached *__root)		      \
{									      \
	rb_tree_remove_node(&__root->rb_root.rbr_tree, __node);		      \
}									      \
									      \
/* Return the first node intersecting [__start, __last], or NULL. */	      \
QUAL T *								      \
PREFIX##_iter_first(struct rb_root_cached *__root, KT __start, KT __last)     \
{									      \
	T *__node;							      \
									      \
	__node = rb_tree_find_node_geq(&__root->rb_root.rbr_tree, &__start);  \
	if (__node == NULL)						      \
		return NULL;						      \
	/* Found node either contains __start or lies entirely after it. */   \
	KASSERT(__start <= NLAST(__node));				      \
	if (__last < NSTART(__node))					      \
		return NULL;						      \
									      \
	return __node;							      \
}									      \
									      \
/* Return the next node after __node intersecting [__start, __last]. */	      \
QUAL T *								      \
PREFIX##_iter_next(struct rb_root_cached *__root, T *__node,		      \
    KT __start, KT __last)						      \
{									      \
	T *__next;							      \
									      \
	KASSERT(__node != NULL);					      \
	__next = rb_tree_iterate(&__root->rb_root.rbr_tree, __node,	      \
	    RB_DIR_RIGHT);						      \
	if (__next == NULL)						      \
		return NULL;						      \
	if (__last < NSTART(__next))					      \
		return NULL;						      \
	KASSERT(NLAST(__next) >= __start);				      \
									      \
	return __next;							      \
}
134 
135 #endif	/* _LINUX_INTERVAL_TREE_GENERIC_H_ */
136