1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 */
4 #include <string.h>
5 #include <stdint.h>
6
7 #include <rte_mbuf.h>
8 #include <rte_ethdev.h>
9 #include <rte_malloc.h>
10
11 #include "rte_port_ethdev.h"
12
13 #include "port_log.h"
14
15 /*
16 * Port ETHDEV Reader
17 */
18 #ifdef RTE_PORT_STATS_COLLECT
19
20 #define RTE_PORT_ETHDEV_READER_STATS_PKTS_IN_ADD(port, val) \
21 port->stats.n_pkts_in += val
22 #define RTE_PORT_ETHDEV_READER_STATS_PKTS_DROP_ADD(port, val) \
23 port->stats.n_pkts_drop += val
24
25 #else
26
27 #define RTE_PORT_ETHDEV_READER_STATS_PKTS_IN_ADD(port, val)
28 #define RTE_PORT_ETHDEV_READER_STATS_PKTS_DROP_ADD(port, val)
29
30 #endif
31
/*
 * Input port backed by one ethdev RX queue.
 * Identified by the (port_id, queue_id) pair; stats accumulate only when
 * RTE_PORT_STATS_COLLECT is defined (see macros above).
 */
struct rte_port_ethdev_reader {
	struct rte_port_in_stats stats;	/* Input packet / drop counters */

	uint16_t queue_id;	/* RX queue index within the ethdev */
	uint16_t port_id;	/* Ethdev port identifier */
};
38
39 static void *
rte_port_ethdev_reader_create(void * params,int socket_id)40 rte_port_ethdev_reader_create(void *params, int socket_id)
41 {
42 struct rte_port_ethdev_reader_params *conf =
43 params;
44 struct rte_port_ethdev_reader *port;
45
46 /* Check input parameters */
47 if (conf == NULL) {
48 PORT_LOG(ERR, "%s: params is NULL", __func__);
49 return NULL;
50 }
51
52 /* Memory allocation */
53 port = rte_zmalloc_socket("PORT", sizeof(*port),
54 RTE_CACHE_LINE_SIZE, socket_id);
55 if (port == NULL) {
56 PORT_LOG(ERR, "%s: Failed to allocate port", __func__);
57 return NULL;
58 }
59
60 /* Initialization */
61 port->port_id = conf->port_id;
62 port->queue_id = conf->queue_id;
63
64 return port;
65 }
66
67 static int
rte_port_ethdev_reader_rx(void * port,struct rte_mbuf ** pkts,uint32_t n_pkts)68 rte_port_ethdev_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
69 {
70 struct rte_port_ethdev_reader *p =
71 port;
72 uint16_t rx_pkt_cnt;
73
74 rx_pkt_cnt = rte_eth_rx_burst(p->port_id, p->queue_id, pkts, n_pkts);
75 RTE_PORT_ETHDEV_READER_STATS_PKTS_IN_ADD(p, rx_pkt_cnt);
76 return rx_pkt_cnt;
77 }
78
79 static int
rte_port_ethdev_reader_free(void * port)80 rte_port_ethdev_reader_free(void *port)
81 {
82 if (port == NULL) {
83 PORT_LOG(ERR, "%s: port is NULL", __func__);
84 return -EINVAL;
85 }
86
87 rte_free(port);
88
89 return 0;
90 }
91
rte_port_ethdev_reader_stats_read(void * port,struct rte_port_in_stats * stats,int clear)92 static int rte_port_ethdev_reader_stats_read(void *port,
93 struct rte_port_in_stats *stats, int clear)
94 {
95 struct rte_port_ethdev_reader *p =
96 port;
97
98 if (stats != NULL)
99 memcpy(stats, &p->stats, sizeof(p->stats));
100
101 if (clear)
102 memset(&p->stats, 0, sizeof(p->stats));
103
104 return 0;
105 }
106
107 /*
108 * Port ETHDEV Writer
109 */
110 #ifdef RTE_PORT_STATS_COLLECT
111
112 #define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(port, val) \
113 port->stats.n_pkts_in += val
114 #define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(port, val) \
115 port->stats.n_pkts_drop += val
116
117 #else
118
119 #define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(port, val)
120 #define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(port, val)
121
122 #endif
123
/*
 * Output port backed by one ethdev TX queue.
 * Packets are buffered in tx_buf and handed to the NIC in bursts of
 * tx_burst_sz; packets refused by the NIC are dropped.
 */
struct rte_port_ethdev_writer {
	struct rte_port_out_stats stats;	/* Output packet / drop counters */

	/* 2x burst size so a sparse tx_bulk() cannot overflow the buffer */
	struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
	uint32_t tx_burst_sz;	/* Burst size (power of 2) */
	uint16_t tx_buf_count;	/* Number of packets currently buffered */
	uint64_t bsz_mask;	/* 1 << (tx_burst_sz - 1), full-burst test mask */
	uint16_t queue_id;	/* TX queue index within the ethdev */
	uint16_t port_id;	/* Ethdev port identifier */
};
134
135 static void *
rte_port_ethdev_writer_create(void * params,int socket_id)136 rte_port_ethdev_writer_create(void *params, int socket_id)
137 {
138 struct rte_port_ethdev_writer_params *conf =
139 params;
140 struct rte_port_ethdev_writer *port;
141
142 /* Check input parameters */
143 if ((conf == NULL) ||
144 (conf->tx_burst_sz == 0) ||
145 (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
146 (!rte_is_power_of_2(conf->tx_burst_sz))) {
147 PORT_LOG(ERR, "%s: Invalid input parameters", __func__);
148 return NULL;
149 }
150
151 /* Memory allocation */
152 port = rte_zmalloc_socket("PORT", sizeof(*port),
153 RTE_CACHE_LINE_SIZE, socket_id);
154 if (port == NULL) {
155 PORT_LOG(ERR, "%s: Failed to allocate port", __func__);
156 return NULL;
157 }
158
159 /* Initialization */
160 port->port_id = conf->port_id;
161 port->queue_id = conf->queue_id;
162 port->tx_burst_sz = conf->tx_burst_sz;
163 port->tx_buf_count = 0;
164 port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
165
166 return port;
167 }
168
169 static inline void
send_burst(struct rte_port_ethdev_writer * p)170 send_burst(struct rte_port_ethdev_writer *p)
171 {
172 uint32_t nb_tx;
173
174 nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id,
175 p->tx_buf, p->tx_buf_count);
176
177 RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
178 for ( ; nb_tx < p->tx_buf_count; nb_tx++)
179 rte_pktmbuf_free(p->tx_buf[nb_tx]);
180
181 p->tx_buf_count = 0;
182 }
183
184 static int
rte_port_ethdev_writer_tx(void * port,struct rte_mbuf * pkt)185 rte_port_ethdev_writer_tx(void *port, struct rte_mbuf *pkt)
186 {
187 struct rte_port_ethdev_writer *p =
188 port;
189
190 p->tx_buf[p->tx_buf_count++] = pkt;
191 RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, 1);
192 if (p->tx_buf_count >= p->tx_burst_sz)
193 send_burst(p);
194
195 return 0;
196 }
197
/*
 * Transmit the packets selected by pkts_mask (bit i set => pkts[i]).
 * Fast path: when the mask is a contiguous run of ones starting at bit 0
 * and at least tx_burst_sz long, the packets are sent directly in one
 * burst; packets the NIC refuses are freed and counted as drops.
 * Slow path: selected packets are copied into the internal buffer and
 * flushed once a full burst has accumulated.
 * Always returns 0.
 */
static int
rte_port_ethdev_writer_tx_bulk(void *port,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask)
{
	struct rte_port_ethdev_writer *p =
		port;
	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;
	/*
	 * expr == 0 iff both conditions hold:
	 *  - pkts_mask & (pkts_mask + 1) == 0: mask is contiguous from bit 0
	 *  - (pkts_mask & bsz_mask) ^ bsz_mask == 0: bit (tx_burst_sz - 1)
	 *    is set, i.e. the run covers at least one full burst
	 */
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		uint64_t n_pkts = rte_popcount64(pkts_mask);
		uint32_t n_pkts_ok;

		/* Drain previously buffered packets first to preserve order */
		if (tx_buf_count)
			send_burst(p);

		RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
		n_pkts_ok = rte_eth_tx_burst(p->port_id, p->queue_id, pkts,
			n_pkts);

		/* Free the tail of packets the NIC did not accept */
		RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
		for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
			struct rte_mbuf *pkt = pkts[n_pkts_ok];

			rte_pktmbuf_free(pkt);
		}
	} else {
		/* Sparse/short mask: buffer each selected packet */
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = rte_ctz64(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->tx_buf_count = tx_buf_count;
		if (tx_buf_count >= p->tx_burst_sz)
			send_burst(p);
	}

	return 0;
}
245
246 static int
rte_port_ethdev_writer_flush(void * port)247 rte_port_ethdev_writer_flush(void *port)
248 {
249 struct rte_port_ethdev_writer *p =
250 port;
251
252 if (p->tx_buf_count > 0)
253 send_burst(p);
254
255 return 0;
256 }
257
258 static int
rte_port_ethdev_writer_free(void * port)259 rte_port_ethdev_writer_free(void *port)
260 {
261 if (port == NULL) {
262 PORT_LOG(ERR, "%s: Port is NULL", __func__);
263 return -EINVAL;
264 }
265
266 rte_port_ethdev_writer_flush(port);
267 rte_free(port);
268
269 return 0;
270 }
271
rte_port_ethdev_writer_stats_read(void * port,struct rte_port_out_stats * stats,int clear)272 static int rte_port_ethdev_writer_stats_read(void *port,
273 struct rte_port_out_stats *stats, int clear)
274 {
275 struct rte_port_ethdev_writer *p =
276 port;
277
278 if (stats != NULL)
279 memcpy(stats, &p->stats, sizeof(p->stats));
280
281 if (clear)
282 memset(&p->stats, 0, sizeof(p->stats));
283
284 return 0;
285 }
286
287 /*
288 * Port ETHDEV Writer Nodrop
289 */
290 #ifdef RTE_PORT_STATS_COLLECT
291
292 #define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val) \
293 port->stats.n_pkts_in += val
294 #define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val) \
295 port->stats.n_pkts_drop += val
296
297 #else
298
299 #define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val)
300 #define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val)
301
302 #endif
303
/*
 * Output port backed by one ethdev TX queue with retry-on-full behavior.
 * Like rte_port_ethdev_writer, but a partially accepted burst is retried
 * up to n_retries times before the remainder is dropped.
 */
struct rte_port_ethdev_writer_nodrop {
	struct rte_port_out_stats stats;	/* Output packet / drop counters */

	/* 2x burst size so a sparse tx_bulk() cannot overflow the buffer */
	struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
	uint32_t tx_burst_sz;	/* Burst size (power of 2) */
	uint16_t tx_buf_count;	/* Number of packets currently buffered */
	uint64_t bsz_mask;	/* 1 << (tx_burst_sz - 1), full-burst test mask */
	uint64_t n_retries;	/* Retry budget; UINT64_MAX = retry forever */
	uint16_t queue_id;	/* TX queue index within the ethdev */
	uint16_t port_id;	/* Ethdev port identifier */
};
315
316 static void *
rte_port_ethdev_writer_nodrop_create(void * params,int socket_id)317 rte_port_ethdev_writer_nodrop_create(void *params, int socket_id)
318 {
319 struct rte_port_ethdev_writer_nodrop_params *conf =
320 params;
321 struct rte_port_ethdev_writer_nodrop *port;
322
323 /* Check input parameters */
324 if ((conf == NULL) ||
325 (conf->tx_burst_sz == 0) ||
326 (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
327 (!rte_is_power_of_2(conf->tx_burst_sz))) {
328 PORT_LOG(ERR, "%s: Invalid input parameters", __func__);
329 return NULL;
330 }
331
332 /* Memory allocation */
333 port = rte_zmalloc_socket("PORT", sizeof(*port),
334 RTE_CACHE_LINE_SIZE, socket_id);
335 if (port == NULL) {
336 PORT_LOG(ERR, "%s: Failed to allocate port", __func__);
337 return NULL;
338 }
339
340 /* Initialization */
341 port->port_id = conf->port_id;
342 port->queue_id = conf->queue_id;
343 port->tx_burst_sz = conf->tx_burst_sz;
344 port->tx_buf_count = 0;
345 port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
346
347 /*
348 * When n_retries is 0 it means that we should wait for every packet to
349 * send no matter how many retries should it take. To limit number of
350 * branches in fast path, we use UINT64_MAX instead of branching.
351 */
352 port->n_retries = (conf->n_retries == 0) ? UINT64_MAX : conf->n_retries;
353
354 return port;
355 }
356
/*
 * Flush the internal TX buffer, retrying partially accepted bursts.
 * After the first attempt, up to p->n_retries additional attempts are
 * made, each resuming from the first unsent packet so ordering is kept.
 * Packets still unsent after the retry budget is exhausted are freed and
 * counted as drops. The buffer is always left empty on return.
 */
static inline void
send_burst_nodrop(struct rte_port_ethdev_writer_nodrop *p)
{
	/*
	 * NOTE(review): i is uint32_t while n_retries is uint64_t; when
	 * n_retries is UINT64_MAX ("retry forever") i wraps and the loop
	 * never exits on its own — presumably intentional, confirm.
	 */
	uint32_t nb_tx = 0, i;

	nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id, p->tx_buf,
			p->tx_buf_count);

	/* We sent all the packets in a first try */
	if (nb_tx >= p->tx_buf_count) {
		p->tx_buf_count = 0;
		return;
	}

	for (i = 0; i < p->n_retries; i++) {
		/* Resume from the first unsent packet */
		nb_tx += rte_eth_tx_burst(p->port_id, p->queue_id,
			p->tx_buf + nb_tx, p->tx_buf_count - nb_tx);

		/* We sent all the packets in more than one try */
		if (nb_tx >= p->tx_buf_count) {
			p->tx_buf_count = 0;
			return;
		}
	}

	/* We didn't send the packets in maximum allowed attempts */
	RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
	for ( ; nb_tx < p->tx_buf_count; nb_tx++)
		rte_pktmbuf_free(p->tx_buf[nb_tx]);

	p->tx_buf_count = 0;
}
389
390 static int
rte_port_ethdev_writer_nodrop_tx(void * port,struct rte_mbuf * pkt)391 rte_port_ethdev_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
392 {
393 struct rte_port_ethdev_writer_nodrop *p =
394 port;
395
396 p->tx_buf[p->tx_buf_count++] = pkt;
397 RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
398 if (p->tx_buf_count >= p->tx_burst_sz)
399 send_burst_nodrop(p);
400
401 return 0;
402 }
403
/*
 * Transmit the packets selected by pkts_mask (bit i set => pkts[i]),
 * retrying on a full TX ring instead of dropping immediately.
 * Fast path: a contiguous full-burst mask is sent directly; any packets
 * the NIC refuses are moved to the internal buffer and flushed through
 * send_burst_nodrop(), which applies the retry budget.
 * Slow path: selected packets are accumulated in the buffer and flushed
 * once a full burst is available.
 * Always returns 0.
 */
static int
rte_port_ethdev_writer_nodrop_tx_bulk(void *port,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask)
{
	struct rte_port_ethdev_writer_nodrop *p =
		port;

	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;
	/*
	 * expr == 0 iff pkts_mask is a contiguous run of ones from bit 0
	 * covering at least tx_burst_sz packets (see bsz_mask).
	 */
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		uint64_t n_pkts = rte_popcount64(pkts_mask);
		uint32_t n_pkts_ok;

		/* Drain previously buffered packets first to preserve order */
		if (tx_buf_count)
			send_burst_nodrop(p);

		RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
		n_pkts_ok = rte_eth_tx_burst(p->port_id, p->queue_id, pkts,
			n_pkts);

		if (n_pkts_ok >= n_pkts)
			return 0;

		/*
		 * If we did not manage to send all packets in single burst,
		 * move remaining packets to the buffer and call send burst.
		 */
		for (; n_pkts_ok < n_pkts; n_pkts_ok++) {
			struct rte_mbuf *pkt = pkts[n_pkts_ok];
			p->tx_buf[p->tx_buf_count++] = pkt;
		}
		send_burst_nodrop(p);
	} else {
		/* Sparse/short mask: buffer each selected packet */
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = rte_ctz64(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->tx_buf_count = tx_buf_count;
		if (tx_buf_count >= p->tx_burst_sz)
			send_burst_nodrop(p);
	}

	return 0;
}
458
459 static int
rte_port_ethdev_writer_nodrop_flush(void * port)460 rte_port_ethdev_writer_nodrop_flush(void *port)
461 {
462 struct rte_port_ethdev_writer_nodrop *p =
463 port;
464
465 if (p->tx_buf_count > 0)
466 send_burst_nodrop(p);
467
468 return 0;
469 }
470
471 static int
rte_port_ethdev_writer_nodrop_free(void * port)472 rte_port_ethdev_writer_nodrop_free(void *port)
473 {
474 if (port == NULL) {
475 PORT_LOG(ERR, "%s: Port is NULL", __func__);
476 return -EINVAL;
477 }
478
479 rte_port_ethdev_writer_nodrop_flush(port);
480 rte_free(port);
481
482 return 0;
483 }
484
rte_port_ethdev_writer_nodrop_stats_read(void * port,struct rte_port_out_stats * stats,int clear)485 static int rte_port_ethdev_writer_nodrop_stats_read(void *port,
486 struct rte_port_out_stats *stats, int clear)
487 {
488 struct rte_port_ethdev_writer_nodrop *p =
489 port;
490
491 if (stats != NULL)
492 memcpy(stats, &p->stats, sizeof(p->stats));
493
494 if (clear)
495 memset(&p->stats, 0, sizeof(p->stats));
496
497 return 0;
498 }
499
500 /*
501 * Summary of port operations
502 */
/* Exported input-port operations table for the ethdev reader */
struct rte_port_in_ops rte_port_ethdev_reader_ops = {
	.f_create = rte_port_ethdev_reader_create,
	.f_free = rte_port_ethdev_reader_free,
	.f_rx = rte_port_ethdev_reader_rx,
	.f_stats = rte_port_ethdev_reader_stats_read,
};
509
/* Exported output-port operations table for the (dropping) ethdev writer */
struct rte_port_out_ops rte_port_ethdev_writer_ops = {
	.f_create = rte_port_ethdev_writer_create,
	.f_free = rte_port_ethdev_writer_free,
	.f_tx = rte_port_ethdev_writer_tx,
	.f_tx_bulk = rte_port_ethdev_writer_tx_bulk,
	.f_flush = rte_port_ethdev_writer_flush,
	.f_stats = rte_port_ethdev_writer_stats_read,
};
518
/* Exported output-port operations table for the retrying (nodrop) writer */
struct rte_port_out_ops rte_port_ethdev_writer_nodrop_ops = {
	.f_create = rte_port_ethdev_writer_nodrop_create,
	.f_free = rte_port_ethdev_writer_nodrop_free,
	.f_tx = rte_port_ethdev_writer_nodrop_tx,
	.f_tx_bulk = rte_port_ethdev_writer_nodrop_tx_bulk,
	.f_flush = rte_port_ethdev_writer_nodrop_flush,
	.f_stats = rte_port_ethdev_writer_nodrop_stats_read,
};
527