xref: /dpdk/lib/port/rte_port_eventdev.c (revision 99a2dd955fba6e4cc23b77d590a033650ced9c45)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <string.h>
#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>

#include "rte_port_eventdev.h"

/*
 * Port EVENTDEV Reader
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_EVENTDEV_READER_STATS_PKTS_IN_ADD(port, val) \
	do {port->stats.n_pkts_in += val;} while (0)
#define RTE_PORT_EVENTDEV_READER_STATS_PKTS_DROP_ADD(port, val) \
	do {port->stats.n_pkts_drop += val;} while (0)

#else

#define RTE_PORT_EVENTDEV_READER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_EVENTDEV_READER_STATS_PKTS_DROP_ADD(port, val)

#endif

struct rte_port_eventdev_reader {
	struct rte_port_in_stats stats;

	uint8_t  eventdev_id;
	uint16_t port_id;

	struct rte_event ev[RTE_PORT_IN_BURST_SIZE_MAX];
};

static void *
rte_port_eventdev_reader_create(void *params, int socket_id)
{
	struct rte_port_eventdev_reader_params *conf = params;
	struct rte_port_eventdev_reader *port;

	/* Check input parameters */
	if (conf == NULL) {
		RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->eventdev_id = conf->eventdev_id;
	port->port_id = conf->port_id;

	return port;
}

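/*
 * Input port RX: dequeue up to n_pkts events from the event device and
 * return the mbufs carried by those events. May return fewer packets than
 * requested, including zero, when no events are available.
 */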
static int
rte_port_eventdev_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_port_eventdev_reader *p = port;
	uint16_t rx_evts_cnt, i;

	rx_evts_cnt = rte_event_dequeue_burst(p->eventdev_id, p->port_id,
			p->ev, n_pkts, 0);

	for (i = 0; i < rx_evts_cnt; i++)
		pkts[i] = p->ev[i].mbuf;

	RTE_PORT_EVENTDEV_READER_STATS_PKTS_IN_ADD(p, rx_evts_cnt);

	return rx_evts_cnt;
}

static int
rte_port_eventdev_reader_free(void *port)
{
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
		return -EINVAL;
	}

	rte_free(port);

	return 0;
}

static int rte_port_eventdev_reader_stats_read(void *port,
	struct rte_port_in_stats *stats, int clear)
{
	struct rte_port_eventdev_reader *p = port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Port EVENTDEV Writer
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_EVENTDEV_WRITER_STATS_PKTS_IN_ADD(port, val) \
	do {port->stats.n_pkts_in += val;} while (0)
#define RTE_PORT_EVENTDEV_WRITER_STATS_PKTS_DROP_ADD(port, val) \
	do {port->stats.n_pkts_drop += val;} while (0)

#else

#define RTE_PORT_EVENTDEV_WRITER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_EVENTDEV_WRITER_STATS_PKTS_DROP_ADD(port, val)

#endif

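/*
 * Output port context. The event buffer is sized to twice
 * RTE_PORT_IN_BURST_SIZE_MAX so the tx_bulk() slow path can append a full
 * packet mask on top of an almost full buffer before it flushes. bsz_mask
 * caches (1 << (enq_burst_sz - 1)) and is used by tx_bulk() to detect a
 * full contiguous burst in pkts_mask.
 */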
struct rte_port_eventdev_writer {
	struct rte_port_out_stats stats;

	struct rte_event ev[2 * RTE_PORT_IN_BURST_SIZE_MAX];

	uint32_t enq_burst_sz;
	uint32_t enq_buf_count;
	uint64_t bsz_mask;

	uint8_t eventdev_id;
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t sched_type;
	uint8_t evt_op;
};

static void *
rte_port_eventdev_writer_create(void *params, int socket_id)
{
	struct rte_port_eventdev_writer_params *conf = params;
	struct rte_port_eventdev_writer *port;
	unsigned int i;

	/* Check input parameters */
	if ((conf == NULL) ||
		(conf->enq_burst_sz == 0) ||
		(conf->enq_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
		(!rte_is_power_of_2(conf->enq_burst_sz))) {
		RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->enq_burst_sz = conf->enq_burst_sz;
	port->enq_buf_count = 0;
	port->bsz_mask = 1LLU << (conf->enq_burst_sz - 1);

	port->eventdev_id = conf->eventdev_id;
	port->port_id = conf->port_id;
	port->queue_id = conf->queue_id;
	port->sched_type = conf->sched_type;
	port->evt_op = conf->evt_op;
	memset(&port->ev, 0, sizeof(port->ev));

	for (i = 0; i < RTE_DIM(port->ev); i++) {
		port->ev[i].queue_id = port->queue_id;
		port->ev[i].sched_type = port->sched_type;
		port->ev[i].op = port->evt_op;
	}

	return port;
}

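/*
 * Enqueue the buffered events to the event device in one burst. Events the
 * device does not accept are dropped and their mbufs freed; the drop is
 * accounted in the port statistics when RTE_PORT_STATS_COLLECT is enabled.
 */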
static inline void
send_burst(struct rte_port_eventdev_writer *p)
{
	uint32_t nb_enq;

	nb_enq = rte_event_enqueue_burst(p->eventdev_id, p->port_id,
			p->ev, p->enq_buf_count);

	RTE_PORT_EVENTDEV_WRITER_STATS_PKTS_DROP_ADD(p, p->enq_buf_count -
			nb_enq);

	for (; nb_enq < p->enq_buf_count; nb_enq++)
		rte_pktmbuf_free(p->ev[nb_enq].mbuf);

	p->enq_buf_count = 0;
}

static int
rte_port_eventdev_writer_tx(void *port, struct rte_mbuf *pkt)
{
	struct rte_port_eventdev_writer *p = port;

	p->ev[p->enq_buf_count++].mbuf = pkt;
	RTE_PORT_EVENTDEV_WRITER_STATS_PKTS_IN_ADD(p, 1);
	if (p->enq_buf_count >= p->enq_burst_sz)
		send_burst(p);

	return 0;
}

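/*
 * Bulk TX. The fast path is taken when pkts_mask is a full contiguous burst:
 * (pkts_mask & (pkts_mask + 1)) == 0 means the mask is of the form 2^k - 1,
 * and ((pkts_mask & bsz_mask) ^ bsz_mask) == 0 means bit (enq_burst_sz - 1)
 * is set, i.e. at least a full configured burst is present. In that case the
 * packets are enqueued directly from a temporary event array; otherwise they
 * are accumulated in the port buffer and flushed once it reaches the
 * configured burst size.
 */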
static int
rte_port_eventdev_writer_tx_bulk(void *port,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask)
{
	struct rte_port_eventdev_writer *p = port;
	uint64_t bsz_mask = p->bsz_mask;
	uint32_t enq_buf_count = p->enq_buf_count;
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
					((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t i, n_enq_ok;

		if (enq_buf_count)
			send_burst(p);

		RTE_PORT_EVENTDEV_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);

		struct rte_event events[2 * RTE_PORT_IN_BURST_SIZE_MAX] = {};
		for (i = 0; i < n_pkts; i++) {
			events[i].mbuf = pkts[i];
			events[i].queue_id = p->queue_id;
			events[i].sched_type = p->sched_type;
			events[i].op = p->evt_op;
		}

		n_enq_ok = rte_event_enqueue_burst(p->eventdev_id, p->port_id,
				events, n_pkts);

		RTE_PORT_EVENTDEV_WRITER_STATS_PKTS_DROP_ADD(p,
				n_pkts - n_enq_ok);
		for (; n_enq_ok < n_pkts; n_enq_ok++)
			rte_pktmbuf_free(pkts[n_enq_ok]);

	} else {
		for (; pkts_mask;) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;

			p->ev[enq_buf_count++].mbuf = pkts[pkt_index];

			RTE_PORT_EVENTDEV_WRITER_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->enq_buf_count = enq_buf_count;
		if (enq_buf_count >= p->enq_burst_sz)
			send_burst(p);
	}

	return 0;
}

static int
rte_port_eventdev_writer_flush(void *port)
{
	struct rte_port_eventdev_writer *p = port;

	if (p->enq_buf_count > 0)
		send_burst(p);

	return 0;
}

static int
rte_port_eventdev_writer_free(void *port)
{
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
		return -EINVAL;
	}

	rte_port_eventdev_writer_flush(port);
	rte_free(port);

	return 0;
}

static int rte_port_eventdev_writer_stats_read(void *port,
	struct rte_port_out_stats *stats, int clear)
{
	struct rte_port_eventdev_writer *p = port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Port EVENTDEV Writer Nodrop
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_EVENTDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val) \
	do {port->stats.n_pkts_in += val;} while (0)
#define RTE_PORT_EVENTDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val) \
	do {port->stats.n_pkts_drop += val;} while (0)

#else

#define RTE_PORT_EVENTDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_EVENTDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val)

#endif

struct rte_port_eventdev_writer_nodrop {
	struct rte_port_out_stats stats;

	struct rte_event ev[2 * RTE_PORT_IN_BURST_SIZE_MAX];

	uint32_t enq_burst_sz;
	uint32_t enq_buf_count;
	uint64_t bsz_mask;
	uint64_t n_retries;
	uint8_t eventdev_id;
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t sched_type;
	uint8_t evt_op;
};


static void *
rte_port_eventdev_writer_nodrop_create(void *params, int socket_id)
{
	struct rte_port_eventdev_writer_nodrop_params *conf = params;
	struct rte_port_eventdev_writer_nodrop *port;
	unsigned int i;

	/* Check input parameters */
	if ((conf == NULL) ||
		(conf->enq_burst_sz == 0) ||
		(conf->enq_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
		(!rte_is_power_of_2(conf->enq_burst_sz))) {
		RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->enq_burst_sz = conf->enq_burst_sz;
	port->enq_buf_count = 0;
	port->bsz_mask = 1LLU << (conf->enq_burst_sz - 1);

	port->eventdev_id = conf->eventdev_id;
	port->port_id = conf->port_id;
	port->queue_id = conf->queue_id;
	port->sched_type = conf->sched_type;
	port->evt_op = conf->evt_op;
	memset(&port->ev, 0, sizeof(port->ev));

	for (i = 0; i < RTE_DIM(port->ev); i++) {
		port->ev[i].queue_id = port->queue_id;
		port->ev[i].sched_type = port->sched_type;
		port->ev[i].op = port->evt_op;
	}
	/*
	 * When n_retries is 0, retry indefinitely until every event is sent,
	 * no matter how many attempts it takes. To limit the number of
	 * branches in the fast path, UINT64_MAX is used instead of a special
	 * case.
	 */
	port->n_retries = (conf->n_retries == 0) ? UINT64_MAX : conf->n_retries;

	return port;
}

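/*
 * Enqueue the buffered events, retrying partial enqueues up to n_retries
 * times. Only events still not accepted after the final retry are dropped
 * and their mbufs freed.
 */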
static inline void
send_burst_nodrop(struct rte_port_eventdev_writer_nodrop *p)
{
	uint32_t nb_enq, i;

	nb_enq = rte_event_enqueue_burst(p->eventdev_id, p->port_id,
			p->ev, p->enq_buf_count);

	/* All the events were sent on the first try */
	if (nb_enq >= p->enq_buf_count) {
		p->enq_buf_count = 0;
		return;
	}

	for (i = 0; i < p->n_retries; i++) {
		nb_enq += rte_event_enqueue_burst(p->eventdev_id, p->port_id,
							p->ev + nb_enq,
							p->enq_buf_count - nb_enq);

		/* All the events were sent after one or more retries */
		if (nb_enq >= p->enq_buf_count) {
			p->enq_buf_count = 0;
			return;
		}
	}
	/* The events were not all sent within the maximum allowed attempts */
	RTE_PORT_EVENTDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(p,
			p->enq_buf_count - nb_enq);
	for (; nb_enq < p->enq_buf_count; nb_enq++)
		rte_pktmbuf_free(p->ev[nb_enq].mbuf);

	p->enq_buf_count = 0;
}

static int
rte_port_eventdev_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
{
	struct rte_port_eventdev_writer_nodrop *p = port;

	p->ev[p->enq_buf_count++].mbuf = pkt;

	RTE_PORT_EVENTDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
	if (p->enq_buf_count >= p->enq_burst_sz)
		send_burst_nodrop(p);

	return 0;
}

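/*
 * Bulk TX, no-drop flavor. Same fast-path/slow-path split as
 * rte_port_eventdev_writer_tx_bulk(), except that events left over from a
 * partial fast-path enqueue are moved into the port buffer and retried via
 * send_burst_nodrop() instead of being dropped.
 */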
static int
rte_port_eventdev_writer_nodrop_tx_bulk(void *port,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask)
{
	struct rte_port_eventdev_writer_nodrop *p = port;

	uint64_t bsz_mask = p->bsz_mask;
	uint32_t enq_buf_count = p->enq_buf_count;
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
					((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t i, n_enq_ok;

		if (enq_buf_count)
			send_burst_nodrop(p);

		RTE_PORT_EVENTDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);

		struct rte_event events[RTE_PORT_IN_BURST_SIZE_MAX] = {};

		for (i = 0; i < n_pkts; i++) {
			events[i].mbuf = pkts[i];
			events[i].queue_id = p->queue_id;
			events[i].sched_type = p->sched_type;
			events[i].op = p->evt_op;
		}

		n_enq_ok = rte_event_enqueue_burst(p->eventdev_id, p->port_id,
				events, n_pkts);

		if (n_enq_ok >= n_pkts)
			return 0;

		/*
		 * Not all events could be enqueued in a single burst: move
		 * the remaining events to the buffer and retry via
		 * send_burst_nodrop().
		 */
		for (; n_enq_ok < n_pkts; n_enq_ok++) {
			struct rte_mbuf *pkt = pkts[n_enq_ok];
			p->ev[p->enq_buf_count++].mbuf = pkt;
		}
		send_burst_nodrop(p);
	} else {
		for (; pkts_mask;) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;

			p->ev[enq_buf_count++].mbuf = pkts[pkt_index];

			RTE_PORT_EVENTDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->enq_buf_count = enq_buf_count;
		if (enq_buf_count >= p->enq_burst_sz)
			send_burst_nodrop(p);
	}

	return 0;
}

static int
rte_port_eventdev_writer_nodrop_flush(void *port)
{
	struct rte_port_eventdev_writer_nodrop *p = port;

	if (p->enq_buf_count > 0)
		send_burst_nodrop(p);

	return 0;
}

static int
rte_port_eventdev_writer_nodrop_free(void *port)
{
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
		return -EINVAL;
	}

	rte_port_eventdev_writer_nodrop_flush(port);
	rte_free(port);

	return 0;
}

static int rte_port_eventdev_writer_nodrop_stats_read(void *port,
	struct rte_port_out_stats *stats, int clear)
{
	struct rte_port_eventdev_writer_nodrop *p = port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Summary of port operations
 */
struct rte_port_in_ops rte_port_eventdev_reader_ops = {
	.f_create = rte_port_eventdev_reader_create,
	.f_free = rte_port_eventdev_reader_free,
	.f_rx = rte_port_eventdev_reader_rx,
	.f_stats = rte_port_eventdev_reader_stats_read,
};

struct rte_port_out_ops rte_port_eventdev_writer_ops = {
	.f_create = rte_port_eventdev_writer_create,
	.f_free = rte_port_eventdev_writer_free,
	.f_tx = rte_port_eventdev_writer_tx,
	.f_tx_bulk = rte_port_eventdev_writer_tx_bulk,
	.f_flush = rte_port_eventdev_writer_flush,
	.f_stats = rte_port_eventdev_writer_stats_read,
};

struct rte_port_out_ops rte_port_eventdev_writer_nodrop_ops = {
	.f_create = rte_port_eventdev_writer_nodrop_create,
	.f_free = rte_port_eventdev_writer_nodrop_free,
	.f_tx = rte_port_eventdev_writer_nodrop_tx,
	.f_tx_bulk = rte_port_eventdev_writer_nodrop_tx_bulk,
	.f_flush = rte_port_eventdev_writer_nodrop_flush,
	.f_stats = rte_port_eventdev_writer_nodrop_stats_read,
};
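
/*
 * Usage sketch (illustrative only, not part of the library): creating a
 * writer port directly through its ops table. The eventdev_id / port_id /
 * queue_id values below are placeholders and are assumed to refer to an
 * event device that has already been configured and started; mbuf stands
 * for an rte_mbuf obtained elsewhere.
 *
 *	struct rte_port_eventdev_writer_params wp = {
 *		.eventdev_id = 0,
 *		.port_id = 0,
 *		.queue_id = 0,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.evt_op = RTE_EVENT_OP_NEW,
 *		.enq_burst_sz = 32, (power of 2, <= RTE_PORT_IN_BURST_SIZE_MAX)
 *	};
 *	void *out = rte_port_eventdev_writer_ops.f_create(&wp, rte_socket_id());
 *
 *	if (out != NULL) {
 *		rte_port_eventdev_writer_ops.f_tx(out, mbuf);
 *		rte_port_eventdev_writer_ops.f_flush(out);
 *		rte_port_eventdev_writer_ops.f_free(out);
 *	}
 */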