/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _IQ_CHUNK_H_
#define _IQ_CHUNK_H_

#include <stdint.h>
#include <stdbool.h>
#include <rte_eventdev.h>

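/* A fixed-size block of event storage. Chunks are linked together to form
 * both the per-IQ event list and the device-wide free list.
 */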
struct __rte_cache_aligned sw_queue_chunk {
	struct rte_event events[SW_EVS_PER_Q_CHUNK];
	struct sw_queue_chunk *next;
};

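/* Return true if the IQ currently holds no events. */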
17 static __rte_always_inline bool
iq_empty(struct sw_iq * iq)18 iq_empty(struct sw_iq *iq)
19 {
20 	return (iq->count == 0);
21 }
22 
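/* Return the number of events currently stored in the IQ. */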
static __rte_always_inline uint16_t
iq_count(const struct sw_iq *iq)
{
	return iq->count;
}

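/* Pop a chunk from the device-wide free list. The caller must ensure the
 * free list is not empty (see the sizing note in iq_enqueue()).
 */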
static __rte_always_inline struct sw_queue_chunk *
iq_alloc_chunk(struct sw_evdev *sw)
{
	struct sw_queue_chunk *chunk = sw->chunk_list_head;
	sw->chunk_list_head = chunk->next;
	chunk->next = NULL;
	return chunk;
}

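/* Push a single chunk back onto the device-wide free list. */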
static __rte_always_inline void
iq_free_chunk(struct sw_evdev *sw, struct sw_queue_chunk *chunk)
{
	chunk->next = sw->chunk_list_head;
	sw->chunk_list_head = chunk;
}

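/* Return an entire chain of chunks to the device-wide free list. */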
static __rte_always_inline void
iq_free_chunk_list(struct sw_evdev *sw, struct sw_queue_chunk *head)
{
	while (head) {
		struct sw_queue_chunk *next;
		next = head->next;
		iq_free_chunk(sw, head);
		head = next;
	}
}

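/* Initialize an empty IQ with a single chunk serving as both head and tail. */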
static __rte_always_inline void
iq_init(struct sw_evdev *sw, struct sw_iq *iq)
{
	iq->head = iq_alloc_chunk(sw);
	iq->tail = iq->head;
	iq->head_idx = 0;
	iq->tail_idx = 0;
	iq->count = 0;
}

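/* Append one event at the tail of the IQ, linking in a fresh chunk when the
 * current tail chunk becomes full.
 */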
static __rte_always_inline void
iq_enqueue(struct sw_evdev *sw, struct sw_iq *iq, const struct rte_event *ev)
{
	iq->tail->events[iq->tail_idx++] = *ev;
	iq->count++;

	if (unlikely(iq->tail_idx == SW_EVS_PER_Q_CHUNK)) {
		/* The number of chunks is defined in relation to the total
		 * number of inflight events and number of IQs such that
		 * allocation will always succeed.
		 */
		struct sw_queue_chunk *chunk = iq_alloc_chunk(sw);
		iq->tail->next = chunk;
		iq->tail = chunk;
		iq->tail_idx = 0;
	}
}

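/* Discard the event at the head of the IQ, releasing the head chunk once all
 * of its events have been consumed.
 */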
static __rte_always_inline void
iq_pop(struct sw_evdev *sw, struct sw_iq *iq)
{
	iq->head_idx++;
	iq->count--;

	if (unlikely(iq->head_idx == SW_EVS_PER_Q_CHUNK)) {
		struct sw_queue_chunk *next = iq->head->next;
		iq_free_chunk(sw, iq->head);
		iq->head = next;
		iq->head_idx = 0;
	}
}

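/* Return a pointer to the event at the head of the IQ without removing it. */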
static __rte_always_inline const struct rte_event *
iq_peek(struct sw_iq *iq)
{
	return &iq->head->events[iq->head_idx];
}

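/* Copy up to count events from the head of the IQ into ev[], releasing head
 * chunks as they are drained.
 */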
/* Note: the caller must ensure that count <= iq_count() */
static __rte_always_inline uint16_t
iq_dequeue_burst(struct sw_evdev *sw,
		 struct sw_iq *iq,
		 struct rte_event *ev,
		 uint16_t count)
{
	struct sw_queue_chunk *current;
	uint16_t total, index;

	count = RTE_MIN(count, iq_count(iq));

	current = iq->head;
	index = iq->head_idx;
	total = 0;

	/* Loop over the chunks */
	while (1) {
		struct sw_queue_chunk *next;
		for (; index < SW_EVS_PER_Q_CHUNK;) {
			ev[total++] = current->events[index++];

			if (unlikely(total == count))
				goto done;
		}

		/* Move to the next chunk */
		next = current->next;
		iq_free_chunk(sw, current);
		current = next;
		index = 0;
	}

done:
	if (unlikely(index == SW_EVS_PER_Q_CHUNK)) {
		struct sw_queue_chunk *next = current->next;
		iq_free_chunk(sw, current);
		iq->head = next;
		iq->head_idx = 0;
	} else {
		iq->head = current;
		iq->head_idx = index;
	}

	iq->count -= total;

	return total;
}

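/* Return previously dequeued events to the head of the IQ so they become the
 * next events to be dequeued.
 */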
static __rte_always_inline void
iq_put_back(struct sw_evdev *sw,
	    struct sw_iq *iq,
	    struct rte_event *ev,
	    unsigned int count)
{
	/* Put back events that fit in the current head chunk. If necessary,
	 * put back events in a new head chunk. The caller must ensure that
	 * count <= SW_EVS_PER_Q_CHUNK, to ensure that at most one new head is
	 * needed.
	 */
	uint16_t avail_space = iq->head_idx;

	if (avail_space >= count) {
		const uint16_t idx = avail_space - count;
		uint16_t i;

		for (i = 0; i < count; i++)
			iq->head->events[idx + i] = ev[i];

		iq->head_idx = idx;
	} else if (avail_space < count) {
		const uint16_t remaining = count - avail_space;
		struct sw_queue_chunk *new_head;
		uint16_t i;

		for (i = 0; i < avail_space; i++)
			iq->head->events[i] = ev[remaining + i];

		new_head = iq_alloc_chunk(sw);
		new_head->next = iq->head;
		iq->head = new_head;
		iq->head_idx = SW_EVS_PER_Q_CHUNK - remaining;

		for (i = 0; i < remaining; i++)
			iq->head->events[iq->head_idx + i] = ev[i];
	}

	iq->count += count;
}

#endif /* _IQ_CHUNK_H_ */