/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _IQ_CHUNK_H_
#define _IQ_CHUNK_H_

#include <stdint.h>
#include <stdbool.h>
#include <rte_eventdev.h>

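/* An IQ (internal queue) stores events in a singly linked list of fixed-size
 * chunks, each holding SW_EVS_PER_Q_CHUNK events. Chunks are taken from and
 * returned to the per-device free list (sw->chunk_list_head).
 */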
struct __rte_cache_aligned sw_queue_chunk {
	struct rte_event events[SW_EVS_PER_Q_CHUNK];
	struct sw_queue_chunk *next;
};

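/* Return true if the IQ holds no events. */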
static __rte_always_inline bool
iq_empty(struct sw_iq *iq)
{
	return (iq->count == 0);
}

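/* Return the number of events currently stored in the IQ. */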
static __rte_always_inline uint16_t
iq_count(const struct sw_iq *iq)
{
	return iq->count;
}

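/* Pop a chunk from the device's free-chunk list. The list is sized so that
 * it cannot be empty when this is called (see the comment in iq_enqueue()).
 */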
static __rte_always_inline struct sw_queue_chunk *
iq_alloc_chunk(struct sw_evdev *sw)
{
	struct sw_queue_chunk *chunk = sw->chunk_list_head;
	sw->chunk_list_head = chunk->next;
	chunk->next = NULL;
	return chunk;
}

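/* Push a single chunk back onto the device's free-chunk list. */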
static __rte_always_inline void
iq_free_chunk(struct sw_evdev *sw, struct sw_queue_chunk *chunk)
{
	chunk->next = sw->chunk_list_head;
	sw->chunk_list_head = chunk;
}

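/* Return an entire chain of chunks to the device's free-chunk list. */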
static __rte_always_inline void
iq_free_chunk_list(struct sw_evdev *sw, struct sw_queue_chunk *head)
{
	while (head) {
		struct sw_queue_chunk *next;
		next = head->next;
		iq_free_chunk(sw, head);
		head = next;
	}
}

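/* Initialize an empty IQ, with a single chunk serving as both head and tail. */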
static __rte_always_inline void
iq_init(struct sw_evdev *sw, struct sw_iq *iq)
{
	iq->head = iq_alloc_chunk(sw);
	iq->tail = iq->head;
	iq->head_idx = 0;
	iq->tail_idx = 0;
	iq->count = 0;
}

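/* Append one event at the tail of the IQ, growing the chunk list when the
 * tail chunk fills up.
 */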
static __rte_always_inline void
iq_enqueue(struct sw_evdev *sw, struct sw_iq *iq, const struct rte_event *ev)
{
	iq->tail->events[iq->tail_idx++] = *ev;
	iq->count++;

	if (unlikely(iq->tail_idx == SW_EVS_PER_Q_CHUNK)) {
		/* The number of chunks is defined in relation to the total
		 * number of inflight events and number of IQs such that
		 * allocation will always succeed.
		 */
		struct sw_queue_chunk *chunk = iq_alloc_chunk(sw);
		iq->tail->next = chunk;
		iq->tail = chunk;
		iq->tail_idx = 0;
	}
}

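/* Remove the event at the head of the IQ, freeing the head chunk once it has
 * been fully consumed.
 */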
static __rte_always_inline void
iq_pop(struct sw_evdev *sw, struct sw_iq *iq)
{
	iq->head_idx++;
	iq->count--;

	if (unlikely(iq->head_idx == SW_EVS_PER_Q_CHUNK)) {
		struct sw_queue_chunk *next = iq->head->next;
		iq_free_chunk(sw, iq->head);
		iq->head = next;
		iq->head_idx = 0;
	}
}

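/* Return a pointer to the event at the head of the IQ without removing it. */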
static __rte_always_inline const struct rte_event *
iq_peek(struct sw_iq *iq)
{
	return &iq->head->events[iq->head_idx];
}

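/* Copy up to 'count' events from the head of the IQ into ev[], freeing each
 * chunk as it is drained, and return the number of events copied.
 */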
/* Note: the caller must ensure that count <= iq_count() */
static __rte_always_inline uint16_t
iq_dequeue_burst(struct sw_evdev *sw,
		 struct sw_iq *iq,
		 struct rte_event *ev,
		 uint16_t count)
{
	struct sw_queue_chunk *current;
	uint16_t total, index;

	count = RTE_MIN(count, iq_count(iq));

	current = iq->head;
	index = iq->head_idx;
	total = 0;

	/* Loop over the chunks */
	while (1) {
		struct sw_queue_chunk *next;
		for (; index < SW_EVS_PER_Q_CHUNK;) {
			ev[total++] = current->events[index++];

			if (unlikely(total == count))
				goto done;
		}

		/* Move to the next chunk */
		next = current->next;
		iq_free_chunk(sw, current);
		current = next;
		index = 0;
	}

done:
	if (unlikely(index == SW_EVS_PER_Q_CHUNK)) {
		struct sw_queue_chunk *next = current->next;
		iq_free_chunk(sw, current);
		iq->head = next;
		iq->head_idx = 0;
	} else {
		iq->head = current;
		iq->head_idx = index;
	}

	iq->count -= total;

	return total;
}

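/* Return 'count' events to the front of the IQ so that ev[0] becomes the new
 * head event. See the in-function comment for the chunk handling and the
 * limit on 'count'.
 */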
static __rte_always_inline void
iq_put_back(struct sw_evdev *sw,
	    struct sw_iq *iq,
	    struct rte_event *ev,
	    unsigned int count)
{
	/* Put back events that fit in the current head chunk. If necessary,
	 * put back events in a new head chunk. The caller must ensure that
	 * count <= SW_EVS_PER_Q_CHUNK, to ensure that at most one new head is
	 * needed.
	 */
	uint16_t avail_space = iq->head_idx;

	if (avail_space >= count) {
		const uint16_t idx = avail_space - count;
		uint16_t i;

		for (i = 0; i < count; i++)
			iq->head->events[idx + i] = ev[i];

		iq->head_idx = idx;
	} else if (avail_space < count) {
		const uint16_t remaining = count - avail_space;
		struct sw_queue_chunk *new_head;
		uint16_t i;

		for (i = 0; i < avail_space; i++)
			iq->head->events[i] = ev[remaining + i];

		new_head = iq_alloc_chunk(sw);
		new_head->next = iq->head;
		iq->head = new_head;
		iq->head_idx = SW_EVS_PER_Q_CHUNK - remaining;

		for (i = 0; i < remaining; i++)
			iq->head->events[iq->head_idx + i] = ev[i];
	}

	iq->count += count;
}

#endif /* _IQ_CHUNK_H_ */