/*-
 * Copyright (c) 1991, 1993
 * Copyright (C) 2015 Intel Corporation.
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 * $FreeBSD$
 */

#ifndef SPDK_QUEUE_EXTRAS_H
#define SPDK_QUEUE_EXTRAS_H

#ifdef __cplusplus
extern "C" {
#endif

/*
 * This file defines four types of data structures: singly-linked lists,
 * singly-linked tail queues, lists and tail queues.
 *
 * A singly-linked list is headed by a single forward pointer. The elements
 * are singly linked for minimum space and pointer manipulation overhead at
 * the expense of O(n) removal for arbitrary elements. New elements can be
 * added to the list after an existing element or at the head of the list.
 * Elements being removed from the head of the list should use the explicit
 * macro for this purpose for optimum efficiency. A singly-linked list may
 * only be traversed in the forward direction.  Singly-linked lists are ideal
 * for applications with large datasets and few or no removals or for
 * implementing a LIFO queue.
 *
 * A singly-linked tail queue is headed by a pair of pointers, one to the
 * head of the list and the other to the tail of the list. The elements are
 * singly linked for minimum space and pointer manipulation overhead at the
 * expense of O(n) removal for arbitrary elements. New elements can be added
 * to the list after an existing element, at the head of the list, or at the
 * end of the list. Elements being removed from the head of the tail queue
 * should use the explicit macro for this purpose for optimum efficiency.
 * A singly-linked tail queue may only be traversed in the forward direction.
 * Singly-linked tail queues are ideal for applications with large datasets
 * and few or no removals or for implementing a FIFO queue.
 *
 * A list is headed by a single forward pointer (or an array of forward
 * pointers for a hash table header). The elements are doubly linked
 * so that an arbitrary element can be removed without a need to
 * traverse the list. New elements can be added to the list before
 * or after an existing element or at the head of the list. A list
 * may be traversed in either direction.
 *
 * A tail queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list. The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list. New elements can be added to the list before or
 * after an existing element, at the head of the list, or at the end of
 * the list. A tail queue may be traversed in either direction.
 *
 * For details on the use of these macros, see the queue(3) manual page.
 *
 *
 *				SLIST	LIST	STAILQ	TAILQ
 * _HEAD			+	+	+	+
 * _HEAD_INITIALIZER		+	+	+	+
 * _ENTRY			+	+	+	+
 * _INIT			+	+	+	+
 * _EMPTY			+	+	+	+
 * _FIRST			+	+	+	+
 * _NEXT			+	+	+	+
 * _PREV			-	+	-	+
 * _LAST			-	-	+	+
 * _FOREACH			+	+	+	+
 * _FOREACH_FROM		+	+	+	+
 * _FOREACH_SAFE		+	+	+	+
 * _FOREACH_FROM_SAFE		+	+	+	+
 * _FOREACH_REVERSE		-	-	-	+
 * _FOREACH_REVERSE_FROM	-	-	-	+
 * _FOREACH_REVERSE_SAFE	-	-	-	+
 * _FOREACH_REVERSE_FROM_SAFE	-	-	-	+
 * _INSERT_HEAD			+	+	+	+
 * _INSERT_BEFORE		-	+	-	+
 * _INSERT_AFTER		+	+	+	+
 * _INSERT_TAIL			-	-	+	+
 * _CONCAT			-	-	+	+
 * _REMOVE_AFTER		+	-	+	-
 * _REMOVE_HEAD			+	-	+	-
 * _REMOVE			+	+	+	+
 * _SWAP			+	+	+	+
 *
 */

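/*
 * Illustrative usage sketch (documentation only; not compiled as part of
 * this header).  It assumes the base tail queue macros (TAILQ_HEAD,
 * TAILQ_ENTRY, TAILQ_HEAD_INITIALIZER, TAILQ_INSERT_TAIL, TAILQ_REMOVE)
 * are provided by <sys/queue.h> or "spdk/queue.h", and that <stdlib.h> is
 * included for calloc()/free().  TAILQ_FOREACH_SAFE(), defined further
 * down in this file, allows the current element to be freed while
 * iterating.  All struct and function names below are placeholders:
 *
 *	struct entry {
 *		int value;
 *		TAILQ_ENTRY(entry) link;
 *	};
 *	TAILQ_HEAD(entry_head, entry);
 *
 *	void
 *	drain_example(void)
 *	{
 *		struct entry_head head = TAILQ_HEAD_INITIALIZER(head);
 *		struct entry *e, *tmp;
 *
 *		e = calloc(1, sizeof(*e));
 *		if (e == NULL)
 *			return;
 *		e->value = 42;
 *		TAILQ_INSERT_TAIL(&head, e, link);
 *
 *		TAILQ_FOREACH_SAFE(e, &head, link, tmp) {
 *			TAILQ_REMOVE(&head, e, link);
 *			free(e);
 *		}
 *	}
 */
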
/* gcc 13 and newer report spurious -Wdangling-pointer warnings on some of the macros below (see issue #3030) */
#if defined(__GNUC__) && (__GNUC__ >= 13)
#define SPDK_QE_SUPPRESS_WARNINGS() do {				\
	_Pragma("GCC diagnostic push")					\
	_Pragma("GCC diagnostic ignored \"-Wdangling-pointer\"")	\
} while (0)
#define SPDK_QE_UNSUPPRESS_WARNINGS() _Pragma("GCC diagnostic pop")
#else
#define SPDK_QE_SUPPRESS_WARNINGS()
#define SPDK_QE_UNSUPPRESS_WARNINGS()
#endif
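
/*
 * The pair brackets a region of statements: on gcc >= 13 it expands to
 * "GCC diagnostic push", "ignored -Wdangling-pointer" and (via the second
 * macro) "GCC diagnostic pop"; on other compilers it expands to nothing.
 * TAILQ_SWAP() below already applies it internally.  A minimal sketch of
 * the pattern, where queue_manipulation() is a hypothetical placeholder
 * for code that trips the spurious warning:
 *
 *	SPDK_QE_SUPPRESS_WARNINGS();
 *	queue_manipulation();
 *	SPDK_QE_UNSUPPRESS_WARNINGS();
 */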

/*
 * Singly-linked Tail queue declarations.
 */
#define	STAILQ_HEAD(name, type)						\
struct name {								\
	struct type *stqh_first;/* first element */			\
	struct type **stqh_last;/* addr of last next element */		\
}

#define	STAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).stqh_first }

/*
 * Singly-linked Tail queue functions.
 */
#define	STAILQ_EMPTY(head)	((head)->stqh_first == NULL)

#define	STAILQ_FIRST(head)	((head)->stqh_first)

#define	STAILQ_FOREACH_FROM(var, head, field)				\
	for ((var) = ((var) ? (var) : STAILQ_FIRST((head)));		\
	   (var);							\
	   (var) = STAILQ_NEXT((var), field))

#define	STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = STAILQ_FIRST((head));				\
	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define	STAILQ_FOREACH_FROM_SAFE(var, head, field, tvar)		\
	for ((var) = ((var) ? (var) : STAILQ_FIRST((head)));		\
	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define	STAILQ_LAST(head, type, field)					\
	(STAILQ_EMPTY((head)) ? NULL :					\
	    SPDK_CONTAINEROF((head)->stqh_last, struct type, field.stqe_next))

#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)

#define STAILQ_REMOVE_AFTER(head, elm, field) do {			\
	if ((STAILQ_NEXT(elm, field) =					\
	     STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL)	\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
} while (0)

#define STAILQ_SWAP(head1, head2, type) do {				\
	struct type *swap_first = STAILQ_FIRST(head1);			\
	struct type **swap_last = (head1)->stqh_last;			\
	STAILQ_FIRST(head1) = STAILQ_FIRST(head2);			\
	(head1)->stqh_last = (head2)->stqh_last;			\
	STAILQ_FIRST(head2) = swap_first;				\
	(head2)->stqh_last = swap_last;					\
	if (STAILQ_EMPTY(head1))					\
		(head1)->stqh_last = &STAILQ_FIRST(head1);		\
	if (STAILQ_EMPTY(head2))					\
		(head2)->stqh_last = &STAILQ_FIRST(head2);		\
} while (0)
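
/*
 * Illustrative STAILQ sketch (documentation only).  The base macros used
 * here (STAILQ_ENTRY, STAILQ_INSERT_TAIL, STAILQ_REMOVE_HEAD) come from
 * <sys/queue.h> or "spdk/queue.h", and <stdlib.h> provides calloc()/free().
 * STAILQ_LAST() above relies on SPDK_CONTAINEROF(), which this header
 * assumes is defined elsewhere (e.g. "spdk/util.h").  The struct and
 * variable names are placeholders:
 *
 *	struct msg {
 *		int id;
 *		STAILQ_ENTRY(msg) link;
 *	};
 *	STAILQ_HEAD(msg_head, msg);
 *
 *	void
 *	stailq_example(void)
 *	{
 *		struct msg_head a = STAILQ_HEAD_INITIALIZER(a);
 *		struct msg_head b = STAILQ_HEAD_INITIALIZER(b);
 *		struct msg *m, *tmp;
 *		int i;
 *
 *		for (i = 0; i < 3; i++) {
 *			m = calloc(1, sizeof(*m));
 *			if (m == NULL)
 *				break;
 *			m->id = i;
 *			STAILQ_INSERT_TAIL(&a, m, link);
 *		}
 *
 *	STAILQ_LAST() returns the last element (or NULL when empty), and
 *	STAILQ_SWAP() exchanges the contents of the two heads, so "b" now
 *	owns the elements while "a" becomes empty:
 *
 *		if (STAILQ_LAST(&a, msg, link) != NULL)
 *			STAILQ_SWAP(&a, &b, msg);
 *
 *		STAILQ_FOREACH_SAFE(m, &b, link, tmp) {
 *			STAILQ_REMOVE_HEAD(&b, link);
 *			free(m);
 *		}
 *	}
 */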

/*
 * List declarations.
 */
#define	LIST_HEAD(name, type)						\
struct name {								\
	struct type *lh_first;	/* first element */			\
}

#define	LIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define	LIST_ENTRY(type)						\
struct {								\
	struct type *le_next;	/* next element */			\
	struct type **le_prev;	/* address of previous next element */	\
}

/*
 * List functions.
 */

#if (defined(_KERNEL) && defined(INVARIANTS))
#define	QMD_LIST_CHECK_HEAD(head, field) do {				\
	if (LIST_FIRST((head)) != NULL &&				\
	    LIST_FIRST((head))->field.le_prev !=			\
	     &LIST_FIRST((head)))					\
		panic("Bad list head %p first->prev != head", (head));	\
} while (0)

#define	QMD_LIST_CHECK_NEXT(elm, field) do {				\
	if (LIST_NEXT((elm), field) != NULL &&				\
	    LIST_NEXT((elm), field)->field.le_prev !=			\
	     &((elm)->field.le_next))					\
		panic("Bad link elm %p next->prev != elm", (elm));	\
} while (0)

#define	QMD_LIST_CHECK_PREV(elm, field) do {				\
	if (*(elm)->field.le_prev != (elm))				\
		panic("Bad link elm %p prev->next != elm", (elm));	\
} while (0)
#else
#define	QMD_LIST_CHECK_HEAD(head, field)
#define	QMD_LIST_CHECK_NEXT(elm, field)
#define	QMD_LIST_CHECK_PREV(elm, field)
#endif /* (_KERNEL && INVARIANTS) */

#define	LIST_EMPTY(head)	((head)->lh_first == NULL)

#define	LIST_FIRST(head)	((head)->lh_first)

#define	LIST_FOREACH_FROM(var, head, field)				\
	for ((var) = ((var) ? (var) : LIST_FIRST((head)));		\
	    (var);							\
	    (var) = LIST_NEXT((var), field))

#define	LIST_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = LIST_FIRST((head));				\
	    (var) && ((tvar) = LIST_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define	LIST_FOREACH_FROM_SAFE(var, head, field, tvar)			\
	for ((var) = ((var) ? (var) : LIST_FIRST((head)));		\
	    (var) && ((tvar) = LIST_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define	LIST_NEXT(elm, field)	((elm)->field.le_next)

#define	LIST_PREV(elm, head, type, field)				\
	((elm)->field.le_prev == &LIST_FIRST((head)) ? NULL :		\
	    SPDK_CONTAINEROF((elm)->field.le_prev, struct type, field.le_next))

#define LIST_SWAP(head1, head2, type, field) do {			\
	struct type *swap_tmp = LIST_FIRST((head1));			\
	LIST_FIRST((head1)) = LIST_FIRST((head2));			\
	LIST_FIRST((head2)) = swap_tmp;					\
	if ((swap_tmp = LIST_FIRST((head1))) != NULL)			\
		swap_tmp->field.le_prev = &LIST_FIRST((head1));		\
	if ((swap_tmp = LIST_FIRST((head2))) != NULL)			\
		swap_tmp->field.le_prev = &LIST_FIRST((head2));		\
} while (0)
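
/*
 * Illustrative LIST sketch (documentation only).  LIST_INSERT_HEAD() and
 * LIST_REMOVE() come from <sys/queue.h> or "spdk/queue.h", and <stdlib.h>
 * provides calloc()/free(); LIST_PREV() above relies on SPDK_CONTAINEROF().
 * Names below are placeholders.  Because list elements are doubly linked,
 * an arbitrary matching element can be removed in O(1) inside the safe
 * iterator; any elements left on the list would be freed the same way:
 *
 *	struct node {
 *		int key;
 *		LIST_ENTRY(node) link;
 *	};
 *	LIST_HEAD(node_head, node);
 *
 *	void
 *	list_example(void)
 *	{
 *		struct node_head head = LIST_HEAD_INITIALIZER(head);
 *		struct node *n, *tmp;
 *		int i;
 *
 *		for (i = 0; i < 3; i++) {
 *			n = calloc(1, sizeof(*n));
 *			if (n == NULL)
 *				break;
 *			n->key = i;
 *			LIST_INSERT_HEAD(&head, n, link);
 *		}
 *
 *		LIST_FOREACH_SAFE(n, &head, link, tmp) {
 *			if (n->key == 1) {
 *				LIST_REMOVE(n, link);
 *				free(n);
 *			}
 *		}
 *	}
 */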

/*
 * Singly-linked List functions.
 */
#define	SLIST_SWAP(head1, head2, type) do {			\
	struct type *swap_tmp = SLIST_FIRST((head1));			\
	SLIST_FIRST((head1)) = SLIST_FIRST((head2));			\
	SLIST_FIRST((head2)) = swap_tmp;				\
} while (0)

#define	SLIST_FOREACH_SAFE(var, head, field, tvar)		\
	for ((var) = SLIST_FIRST((head));				\
	    (var) && ((tvar) = SLIST_NEXT((var), field), 1);		\
	    (var) = (tvar))

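/*
 * Illustrative SLIST sketch (documentation only).  SLIST_HEAD, SLIST_ENTRY,
 * SLIST_HEAD_INITIALIZER, SLIST_FIRST, SLIST_NEXT, SLIST_INSERT_HEAD and
 * SLIST_REMOVE_HEAD come from <sys/queue.h> or "spdk/queue.h"; <stdlib.h>
 * provides calloc()/free().  Names below are placeholders.
 * SLIST_FOREACH_SAFE() lets the loop pop and free the current element:
 *
 *	struct item {
 *		int v;
 *		SLIST_ENTRY(item) link;
 *	};
 *	SLIST_HEAD(item_head, item);
 *
 *	void
 *	slist_example(void)
 *	{
 *		struct item_head a = SLIST_HEAD_INITIALIZER(a);
 *		struct item_head b = SLIST_HEAD_INITIALIZER(b);
 *		struct item *it, *tmp;
 *
 *		it = calloc(1, sizeof(*it));
 *		if (it == NULL)
 *			return;
 *		it->v = 7;
 *		SLIST_INSERT_HEAD(&a, it, link);
 *
 *		SLIST_SWAP(&a, &b, item);
 *
 *		SLIST_FOREACH_SAFE(it, &b, link, tmp) {
 *			SLIST_REMOVE_HEAD(&b, link);
 *			free(it);
 *		}
 *	}
 */
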
/*
 * Tail queue functions.
 */
#if (defined(_KERNEL) && defined(INVARIANTS))
#define	QMD_TAILQ_CHECK_HEAD(head, field) do {				\
	if (!TAILQ_EMPTY(head) &&					\
	    TAILQ_FIRST((head))->field.tqe_prev !=			\
	     &TAILQ_FIRST((head)))					\
		panic("Bad tailq head %p first->prev != head", (head));	\
} while (0)

#define	QMD_TAILQ_CHECK_TAIL(head, field) do {				\
	if (*(head)->tqh_last != NULL)					\
		panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head));	\
} while (0)

#define	QMD_TAILQ_CHECK_NEXT(elm, field) do {				\
	if (TAILQ_NEXT((elm), field) != NULL &&				\
	    TAILQ_NEXT((elm), field)->field.tqe_prev !=			\
	     &((elm)->field.tqe_next))					\
		panic("Bad link elm %p next->prev != elm", (elm));	\
} while (0)

#define	QMD_TAILQ_CHECK_PREV(elm, field) do {				\
	if (*(elm)->field.tqe_prev != (elm))				\
		panic("Bad link elm %p prev->next != elm", (elm));	\
} while (0)
#else
#define	QMD_TAILQ_CHECK_HEAD(head, field)
#define	QMD_TAILQ_CHECK_TAIL(head, headname)
#define	QMD_TAILQ_CHECK_NEXT(elm, field)
#define	QMD_TAILQ_CHECK_PREV(elm, field)
#endif /* (_KERNEL && INVARIANTS) */

#define	TAILQ_EMPTY(head)	((head)->tqh_first == NULL)

#define	TAILQ_FIRST(head)	((head)->tqh_first)

#define	TAILQ_FOREACH_FROM(var, head, field)				\
	for ((var) = ((var) ? (var) : TAILQ_FIRST((head)));		\
	    (var);							\
	    (var) = TAILQ_NEXT((var), field))

#define	TAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = TAILQ_FIRST((head));				\
	    (var) && ((tvar) = TAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define	TAILQ_FOREACH_FROM_SAFE(var, head, field, tvar)			\
	for ((var) = ((var) ? (var) : TAILQ_FIRST((head)));		\
	    (var) && ((tvar) = TAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define	TAILQ_FOREACH_REVERSE_FROM(var, head, headname, field)		\
	for ((var) = ((var) ? (var) : TAILQ_LAST((head), headname));	\
	    (var);							\
	    (var) = TAILQ_PREV((var), headname, field))

#define	TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar)	\
	for ((var) = TAILQ_LAST((head), headname);			\
	    (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1);	\
	    (var) = (tvar))

#define	TAILQ_FOREACH_REVERSE_FROM_SAFE(var, head, headname, field, tvar) \
	for ((var) = ((var) ? (var) : TAILQ_LAST((head), headname));	\
	    (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1);	\
	    (var) = (tvar))

#define	TAILQ_LAST(head, headname)					\
	(*(((struct headname *)((head)->tqh_last))->tqh_last))

#define	TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)

#define	TAILQ_PREV(elm, headname, field)				\
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))

#define TAILQ_SWAP(head1, head2, type, field) do {			\
	SPDK_QE_SUPPRESS_WARNINGS();					\
	struct type *swap_first = (head1)->tqh_first;			\
	struct type **swap_last = (head1)->tqh_last;			\
	(head1)->tqh_first = (head2)->tqh_first;			\
	(head1)->tqh_last = (head2)->tqh_last;				\
	(head2)->tqh_first = swap_first;				\
	(head2)->tqh_last = swap_last;					\
	if ((swap_first = (head1)->tqh_first) != NULL)			\
		swap_first->field.tqe_prev = &(head1)->tqh_first;	\
	else								\
		(head1)->tqh_last = &(head1)->tqh_first;		\
	if ((swap_first = (head2)->tqh_first) != NULL)			\
		swap_first->field.tqe_prev = &(head2)->tqh_first;	\
	else								\
		(head2)->tqh_last = &(head2)->tqh_first;		\
	SPDK_QE_UNSUPPRESS_WARNINGS();					\
} while (0)
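
/*
 * Illustrative reverse-traversal sketch (documentation only).  TAILQ_HEAD,
 * TAILQ_ENTRY and TAILQ_REMOVE come from <sys/queue.h> or "spdk/queue.h",
 * and <stdlib.h> provides free().  The headname argument must be the struct
 * name given to TAILQ_HEAD(), here "job_head"; all names are placeholders:
 *
 *	struct job {
 *		int prio;
 *		TAILQ_ENTRY(job) link;
 *	};
 *	TAILQ_HEAD(job_head, job);
 *
 *	void
 *	reverse_drain(struct job_head *head)
 *	{
 *		struct job *j, *tmp;
 *
 *		TAILQ_FOREACH_REVERSE_SAFE(j, head, job_head, link, tmp) {
 *			TAILQ_REMOVE(head, j, link);
 *			free(j);
 *		}
 *	}
 */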

#ifdef __cplusplus
}
#endif

#endif /* SPDK_QUEUE_EXTRAS_H */