/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <string.h>
#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "rte_port_ethdev.h"

/*
 * Port ETHDEV Reader
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_ETHDEV_READER_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_ETHDEV_READER_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_ETHDEV_READER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_ETHDEV_READER_STATS_PKTS_DROP_ADD(port, val)

#endif

struct rte_port_ethdev_reader {
	struct rte_port_in_stats stats;

	uint16_t queue_id;
	uint16_t port_id;
};

static void *
rte_port_ethdev_reader_create(void *params, int socket_id)
{
	struct rte_port_ethdev_reader_params *conf = params;
	struct rte_port_ethdev_reader *port;

	/* Check input parameters */
	if (conf == NULL) {
		RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->port_id = conf->port_id;
	port->queue_id = conf->queue_id;

	return port;
}

static int
rte_port_ethdev_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_port_ethdev_reader *p = port;
	uint16_t rx_pkt_cnt;

	rx_pkt_cnt = rte_eth_rx_burst(p->port_id, p->queue_id, pkts, n_pkts);
	RTE_PORT_ETHDEV_READER_STATS_PKTS_IN_ADD(p, rx_pkt_cnt);
	return rx_pkt_cnt;
}

static int
rte_port_ethdev_reader_free(void *port)
{
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
		return -EINVAL;
	}

	rte_free(port);

	return 0;
}

static int rte_port_ethdev_reader_stats_read(void *port,
		struct rte_port_in_stats *stats, int clear)
{
	struct rte_port_ethdev_reader *p = port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Port ETHDEV Writer
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(port, val)

#endif

struct rte_port_ethdev_writer {
	struct rte_port_out_stats stats;

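	/*
	 * The buffer must absorb a full input burst (up to
	 * RTE_PORT_IN_BURST_SIZE_MAX packets) on top of up to
	 * tx_burst_sz - 1 already buffered packets, hence the 2x sizing.
	 */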
	struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
	uint32_t tx_burst_sz;
	uint16_t tx_buf_count;
	uint64_t bsz_mask;
	uint16_t queue_id;
	uint16_t port_id;
};

static void *
rte_port_ethdev_writer_create(void *params, int socket_id)
{
	struct rte_port_ethdev_writer_params *conf = params;
	struct rte_port_ethdev_writer *port;

	/* Check input parameters */
	if ((conf == NULL) ||
		(conf->tx_burst_sz == 0) ||
		(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
		(!rte_is_power_of_2(conf->tx_burst_sz))) {
		RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->port_id = conf->port_id;
	port->queue_id = conf->queue_id;
	port->tx_burst_sz = conf->tx_burst_sz;
	port->tx_buf_count = 0;
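	/*
	 * bsz_mask has a single bit set at position (tx_burst_sz - 1); the
	 * tx_bulk() fast path uses it to check that an incoming packet mask
	 * covers at least one full burst.
	 */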
	port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);

	return port;
}

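/*
 * Transmit all buffered packets in one burst; any packets the device does
 * not accept are accounted as drops and freed.
 */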
static inline void
send_burst(struct rte_port_ethdev_writer *p)
{
	uint32_t nb_tx;

	nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id,
			p->tx_buf, p->tx_buf_count);

	RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
	for ( ; nb_tx < p->tx_buf_count; nb_tx++)
		rte_pktmbuf_free(p->tx_buf[nb_tx]);

	p->tx_buf_count = 0;
}

static int
rte_port_ethdev_writer_tx(void *port, struct rte_mbuf *pkt)
{
	struct rte_port_ethdev_writer *p = port;

	p->tx_buf[p->tx_buf_count++] = pkt;
	RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, 1);
	if (p->tx_buf_count >= p->tx_burst_sz)
		send_burst(p);

	return 0;
}

static int
rte_port_ethdev_writer_tx_bulk(void *port,
		struct rte_mbuf **pkts,
		uint64_t pkts_mask)
{
	struct rte_port_ethdev_writer *p = port;
	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

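	/*
	 * expr == 0 if and only if pkts_mask is a contiguous run of set bits
	 * starting at bit 0 (checked by pkts_mask & (pkts_mask + 1)) that is
	 * at least tx_burst_sz bits long (checked via bsz_mask), i.e. the
	 * input is at least one full burst that can be sent directly.
	 */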
	if (expr == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t n_pkts_ok;

		if (tx_buf_count)
			send_burst(p);

		RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
		n_pkts_ok = rte_eth_tx_burst(p->port_id, p->queue_id, pkts,
			n_pkts);

		RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
		for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
			struct rte_mbuf *pkt = pkts[n_pkts_ok];

			rte_pktmbuf_free(pkt);
		}
	} else {
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->tx_buf_count = tx_buf_count;
		if (tx_buf_count >= p->tx_burst_sz)
			send_burst(p);
	}

	return 0;
}

static int
rte_port_ethdev_writer_flush(void *port)
{
	struct rte_port_ethdev_writer *p = port;

	if (p->tx_buf_count > 0)
		send_burst(p);

	return 0;
}

static int
rte_port_ethdev_writer_free(void *port)
{
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
		return -EINVAL;
	}

	rte_port_ethdev_writer_flush(port);
	rte_free(port);

	return 0;
}

static int rte_port_ethdev_writer_stats_read(void *port,
		struct rte_port_out_stats *stats, int clear)
{
	struct rte_port_ethdev_writer *p = port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Port ETHDEV Writer Nodrop
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val)

#endif

struct rte_port_ethdev_writer_nodrop {
	struct rte_port_out_stats stats;

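	/*
	 * Sized as in struct rte_port_ethdev_writer: tx_bulk() may append a
	 * full input burst to an already partially filled buffer.
	 */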
	struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
	uint32_t tx_burst_sz;
	uint16_t tx_buf_count;
	uint64_t bsz_mask;
	uint64_t n_retries;
	uint16_t queue_id;
	uint16_t port_id;
};

static void *
rte_port_ethdev_writer_nodrop_create(void *params, int socket_id)
{
	struct rte_port_ethdev_writer_nodrop_params *conf = params;
	struct rte_port_ethdev_writer_nodrop *port;

	/* Check input parameters */
	if ((conf == NULL) ||
		(conf->tx_burst_sz == 0) ||
		(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
		(!rte_is_power_of_2(conf->tx_burst_sz))) {
		RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->port_id = conf->port_id;
	port->queue_id = conf->queue_id;
	port->tx_burst_sz = conf->tx_burst_sz;
	port->tx_buf_count = 0;
	port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);

	/*
	 * When n_retries is 0, the port should wait for every packet to be
	 * sent, no matter how many retries it takes. To limit the number of
	 * branches in the fast path, we use UINT64_MAX instead of an extra
	 * branch.
	 */
	port->n_retries = (conf->n_retries == 0) ? UINT64_MAX : conf->n_retries;

	return port;
}

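/*
 * Transmit the buffered packets, retrying the unsent tail up to n_retries
 * times; packets still unsent after the last retry are accounted as drops
 * and freed.
 */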
static inline void
send_burst_nodrop(struct rte_port_ethdev_writer_nodrop *p)
{
	uint32_t nb_tx = 0, i;

	nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id, p->tx_buf,
			p->tx_buf_count);

	/* We sent all the packets on the first try */
	if (nb_tx >= p->tx_buf_count) {
		p->tx_buf_count = 0;
		return;
	}

	for (i = 0; i < p->n_retries; i++) {
		nb_tx += rte_eth_tx_burst(p->port_id, p->queue_id,
				p->tx_buf + nb_tx, p->tx_buf_count - nb_tx);

		/* We sent all the packets after one or more retries */
		if (nb_tx >= p->tx_buf_count) {
			p->tx_buf_count = 0;
			return;
		}
	}

	/* We did not manage to send all packets within the allowed attempts */
	RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
	for ( ; nb_tx < p->tx_buf_count; nb_tx++)
		rte_pktmbuf_free(p->tx_buf[nb_tx]);

	p->tx_buf_count = 0;
}

static int
rte_port_ethdev_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
{
	struct rte_port_ethdev_writer_nodrop *p = port;

	p->tx_buf[p->tx_buf_count++] = pkt;
	RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
	if (p->tx_buf_count >= p->tx_burst_sz)
		send_burst_nodrop(p);

	return 0;
}

static int
rte_port_ethdev_writer_nodrop_tx_bulk(void *port,
		struct rte_mbuf **pkts,
		uint64_t pkts_mask)
{
	struct rte_port_ethdev_writer_nodrop *p = port;
	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

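	/*
	 * As in rte_port_ethdev_writer_tx_bulk(): expr == 0 means pkts_mask
	 * is a contiguous run of at least tx_burst_sz packets.
	 */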
	if (expr == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t n_pkts_ok;

		if (tx_buf_count)
			send_burst_nodrop(p);

		RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
		n_pkts_ok = rte_eth_tx_burst(p->port_id, p->queue_id, pkts,
			n_pkts);

		if (n_pkts_ok >= n_pkts)
			return 0;

		/*
		 * If we did not manage to send all packets in a single burst,
		 * move the remaining packets to the buffer and let
		 * send_burst_nodrop() retry them.
		 */
		for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
			struct rte_mbuf *pkt = pkts[n_pkts_ok];

			p->tx_buf[p->tx_buf_count++] = pkt;
		}
		send_burst_nodrop(p);
	} else {
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->tx_buf_count = tx_buf_count;
		if (tx_buf_count >= p->tx_burst_sz)
			send_burst_nodrop(p);
	}

	return 0;
}

static int
rte_port_ethdev_writer_nodrop_flush(void *port)
{
	struct rte_port_ethdev_writer_nodrop *p = port;

	if (p->tx_buf_count > 0)
		send_burst_nodrop(p);

	return 0;
}

static int
rte_port_ethdev_writer_nodrop_free(void *port)
{
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
		return -EINVAL;
	}

	rte_port_ethdev_writer_nodrop_flush(port);
	rte_free(port);

	return 0;
}

static int rte_port_ethdev_writer_nodrop_stats_read(void *port,
		struct rte_port_out_stats *stats, int clear)
{
	struct rte_port_ethdev_writer_nodrop *p = port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Summary of port operations
 */
struct rte_port_in_ops rte_port_ethdev_reader_ops = {
	.f_create = rte_port_ethdev_reader_create,
	.f_free = rte_port_ethdev_reader_free,
	.f_rx = rte_port_ethdev_reader_rx,
	.f_stats = rte_port_ethdev_reader_stats_read,
};

struct rte_port_out_ops rte_port_ethdev_writer_ops = {
	.f_create = rte_port_ethdev_writer_create,
	.f_free = rte_port_ethdev_writer_free,
	.f_tx = rte_port_ethdev_writer_tx,
	.f_tx_bulk = rte_port_ethdev_writer_tx_bulk,
	.f_flush = rte_port_ethdev_writer_flush,
	.f_stats = rte_port_ethdev_writer_stats_read,
};

struct rte_port_out_ops rte_port_ethdev_writer_nodrop_ops = {
	.f_create = rte_port_ethdev_writer_nodrop_create,
	.f_free = rte_port_ethdev_writer_nodrop_free,
	.f_tx = rte_port_ethdev_writer_nodrop_tx,
	.f_tx_bulk = rte_port_ethdev_writer_nodrop_tx_bulk,
	.f_flush = rte_port_ethdev_writer_nodrop_flush,
	.f_stats = rte_port_ethdev_writer_nodrop_stats_read,
};
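
/*
 * Usage sketch (illustrative only, with assumed setup): these ops tables
 * are consumed by the packet framework, which drives them through the
 * generic rte_port API. Assuming an already initialized pipeline handle
 * and an ethdev port 0 with RX queue 0 configured, wiring the reader into
 * a pipeline might look like:
 *
 *	struct rte_port_ethdev_reader_params rp = {
 *		.port_id = 0,
 *		.queue_id = 0,
 *	};
 *	struct rte_pipeline_port_in_params pp = {
 *		.ops = &rte_port_ethdev_reader_ops,
 *		.arg_create = &rp,
 *		.burst_size = 32,
 *	};
 *	uint32_t port_in_id;
 *
 *	if (rte_pipeline_port_in_create(pipeline, &pp, &port_in_id) != 0)
 *		rte_panic("Unable to create input port\n");
 */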