/*	$NetBSD: spsc_queue.h,v 1.2 2021/12/18 23:45:46 riastradh Exp $	*/

/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_
#define DRM_SCHEDULER_SPSC_QUEUE_H_

#include <linux/atomic.h>
#include <linux/preempt.h>

/** SPSC (single-producer, single-consumer) lockless queue */

struct spsc_node {

	/* Link to the next node; NULL if this is the last node. */
	struct spsc_node *next;
};

struct spsc_queue {

	struct spsc_node *head;

	/*
	 * Atomic struct spsc_node **: points at the last node's next
	 * field, or at head when the queue is empty.
	 */
	atomic_long_t tail;

	atomic_t job_count;
};

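/*
 * Example usage (an illustrative sketch, not part of the original header):
 * a queue element is embedded in a caller-defined job structure, and the
 * queue is initialized once before any push or pop.  struct my_job is a
 * hypothetical name.
 *
 *	struct my_job {
 *		struct spsc_node queue_node;
 *		...
 *	};
 *
 *	struct spsc_queue queue;
 *
 *	spsc_queue_init(&queue);
 */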
static inline void spsc_queue_init(struct spsc_queue *queue)
{
	queue->head = NULL;
	atomic_long_set(&queue->tail, (long)&queue->head);
	atomic_set(&queue->job_count, 0);
}

static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
{
	return queue->head;
}

static inline int spsc_queue_count(struct spsc_queue *queue)
{
	return atomic_read(&queue->job_count);
}

static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
{
	struct spsc_node **tail;

	node->next = NULL;

	preempt_disable();

	tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
	WRITE_ONCE(*tail, node);
	atomic_inc(&queue->job_count);

	/*
	 * In case this is the first element, make sure the new node is
	 * visible to the consumer thread before we ping the kernel thread
	 * that there is new work to do.
	 */
	smp_wmb();

	preempt_enable();

	return tail == &queue->head;
}
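
/*
 * Example producer side (an illustrative sketch, not part of the original
 * header): the return value of spsc_queue_push() tells the producer the
 * queue was previously empty, which is the moment to wake the consumer.
 * my_submit(), struct my_job, and wake_up_consumer() are hypothetical names.
 *
 *	static void my_submit(struct spsc_queue *queue, struct my_job *job)
 *	{
 *		if (spsc_queue_push(queue, &job->queue_node))
 *			wake_up_consumer();
 *	}
 */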

static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
{
	struct spsc_node *next, *node;

	/* Make sure we read head from memory rather than a stale cached value. */
	smp_rmb();

	node = READ_ONCE(queue->head);

	if (!node)
		return NULL;

	next = READ_ONCE(node->next);
	WRITE_ONCE(queue->head, next);

	if (unlikely(!next)) {
		/* slow path for the last element in the queue */

		if (atomic_long_cmpxchg(&queue->tail,
				(long)&node->next, (long)&queue->head) != (long)&node->next) {
			/* Updating tail failed; wait for the new next to appear. */
			do {
				smp_rmb();
			} while (unlikely(!(queue->head = READ_ONCE(node->next))));
		}
	}

	atomic_dec(&queue->job_count);
	return node;
}
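
/*
 * Example consumer side (an illustrative sketch, not part of the original
 * header): pop nodes until the queue is empty and recover the enclosing
 * job with container_of() from <linux/kernel.h>.  struct my_job and
 * process_job() are hypothetical names.
 *
 *	struct spsc_node *node;
 *
 *	while ((node = spsc_queue_pop(queue)) != NULL) {
 *		struct my_job *job =
 *		    container_of(node, struct my_job, queue_node);
 *		process_job(job);
 *	}
 */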


#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */