/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation
 */
#include <string.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>

#include "rte_port_fd.h"

/*
 * Port FD Reader
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_FD_READER_STATS_PKTS_IN_ADD(port, val) \
	do { port->stats.n_pkts_in += val; } while (0)
#define RTE_PORT_FD_READER_STATS_PKTS_DROP_ADD(port, val) \
	do { port->stats.n_pkts_drop += val; } while (0)

#else

#define RTE_PORT_FD_READER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_FD_READER_STATS_PKTS_DROP_ADD(port, val)

#endif
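
/*
 * Note: the stats macros above (and their writer counterparts below) expand
 * to no-ops when RTE_PORT_STATS_COLLECT is not defined, so statistics
 * collection can be compiled out entirely.
 */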

struct rte_port_fd_reader {
	struct rte_port_in_stats stats;
	int fd;
	uint32_t mtu;
	struct rte_mempool *mempool;
};

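/*
 * Create an FD reader port: validate the file descriptor, MTU and mempool,
 * then allocate the port structure on the requested NUMA socket.
 */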
static void *
rte_port_fd_reader_create(void *params, int socket_id)
{
	struct rte_port_fd_reader_params *conf = params;
	struct rte_port_fd_reader *port;

	/* Check input parameters */
	if (conf == NULL) {
		RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
		return NULL;
	}
	if (conf->fd < 0) {
		RTE_LOG(ERR, PORT, "%s: Invalid file descriptor\n", __func__);
		return NULL;
	}
	if (conf->mtu == 0) {
		RTE_LOG(ERR, PORT, "%s: Invalid MTU\n", __func__);
		return NULL;
	}
	if (conf->mempool == NULL) {
		RTE_LOG(ERR, PORT, "%s: Invalid mempool\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->fd = conf->fd;
	port->mtu = conf->mtu;
	port->mempool = conf->mempool;

	return port;
}

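/*
 * Receive burst: bulk-allocate n_pkts mbufs, then read() up to MTU bytes
 * from the file descriptor into each one. On EOF or error the loop stops
 * and the unused mbufs are returned to the mempool.
 */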
static int
rte_port_fd_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_port_fd_reader *p = port;
	uint32_t i, j;

	if (rte_pktmbuf_alloc_bulk(p->mempool, pkts, n_pkts) != 0)
		return 0;

	for (i = 0; i < n_pkts; i++) {
		struct rte_mbuf *pkt = pkts[i];
		void *pkt_data = rte_pktmbuf_mtod(pkt, void *);
		ssize_t n_bytes;

		n_bytes = read(p->fd, pkt_data, (size_t) p->mtu);
		if (n_bytes <= 0)
			break;

		pkt->data_len = n_bytes;
		pkt->pkt_len = n_bytes;
	}

	for (j = i; j < n_pkts; j++)
		rte_pktmbuf_free(pkts[j]);

	RTE_PORT_FD_READER_STATS_PKTS_IN_ADD(p, i);

	return i;
}

static int
rte_port_fd_reader_free(void *port)
{
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
		return -EINVAL;
	}

	rte_free(port);

	return 0;
}

static int rte_port_fd_reader_stats_read(void *port,
		struct rte_port_in_stats *stats, int clear)
{
	struct rte_port_fd_reader *p = port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Port FD Writer
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_FD_WRITER_STATS_PKTS_IN_ADD(port, val) \
	do { port->stats.n_pkts_in += val; } while (0)
#define RTE_PORT_FD_WRITER_STATS_PKTS_DROP_ADD(port, val) \
	do { port->stats.n_pkts_drop += val; } while (0)

#else

#define RTE_PORT_FD_WRITER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_FD_WRITER_STATS_PKTS_DROP_ADD(port, val)

#endif

struct rte_port_fd_writer {
	struct rte_port_out_stats stats;

	struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
	uint32_t tx_burst_sz;
	uint16_t tx_buf_count;
	int fd;
};

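/*
 * Create an FD writer port: validate the file descriptor and the TX burst
 * size (a non-zero power of two, at most RTE_PORT_IN_BURST_SIZE_MAX), then
 * allocate the port on the requested NUMA socket.
 */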
static void *
rte_port_fd_writer_create(void *params, int socket_id)
{
	struct rte_port_fd_writer_params *conf = params;
	struct rte_port_fd_writer *port;

	/* Check input parameters */
	if ((conf == NULL) ||
		(conf->fd < 0) ||
		(conf->tx_burst_sz == 0) ||
		(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
		(!rte_is_power_of_2(conf->tx_burst_sz))) {
		RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->fd = conf->fd;
	port->tx_burst_sz = conf->tx_burst_sz;
	port->tx_buf_count = 0;

	return port;
}

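/*
 * Flush the TX buffer: write() each buffered packet to the file descriptor.
 * The first failed write aborts the loop and the packets left unwritten are
 * counted as dropped; all buffered mbufs are freed afterwards.
 */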
static inline void
send_burst(struct rte_port_fd_writer *p)
{
	uint32_t i;

	for (i = 0; i < p->tx_buf_count; i++) {
		struct rte_mbuf *pkt = p->tx_buf[i];
		void *pkt_data = rte_pktmbuf_mtod(pkt, void *);
		size_t n_bytes = rte_pktmbuf_data_len(pkt);
		ssize_t ret;

		ret = write(p->fd, pkt_data, n_bytes);
		if (ret < 0)
			break;
	}

	RTE_PORT_FD_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - i);

	for (i = 0; i < p->tx_buf_count; i++)
		rte_pktmbuf_free(p->tx_buf[i]);

	p->tx_buf_count = 0;
}

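/*
 * Buffer a single packet and flush the buffer once it reaches the configured
 * burst size.
 */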
static int
rte_port_fd_writer_tx(void *port, struct rte_mbuf *pkt)
{
	struct rte_port_fd_writer *p = port;

	p->tx_buf[p->tx_buf_count++] = pkt;
	RTE_PORT_FD_WRITER_STATS_PKTS_IN_ADD(p, 1);
	if (p->tx_buf_count >= p->tx_burst_sz)
		send_burst(p);

	return 0;
}

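/*
 * Buffer a burst of packets described by pkts_mask. A contiguous mask
 * (bits 0..n-1 set) takes the fast path that copies the first n pointers;
 * otherwise each set bit is processed individually.
 */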
static int
rte_port_fd_writer_tx_bulk(void *port,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask)
{
	struct rte_port_fd_writer *p = port;
	uint32_t tx_buf_count = p->tx_buf_count;

	if ((pkts_mask & (pkts_mask + 1)) == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t i;

		for (i = 0; i < n_pkts; i++)
			p->tx_buf[tx_buf_count++] = pkts[i];
		RTE_PORT_FD_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
	} else
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_FD_WRITER_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

	p->tx_buf_count = tx_buf_count;
	if (tx_buf_count >= p->tx_burst_sz)
		send_burst(p);

	return 0;
}

static int
rte_port_fd_writer_flush(void *port)
{
	struct rte_port_fd_writer *p = port;

	if (p->tx_buf_count > 0)
		send_burst(p);

	return 0;
}

static int
rte_port_fd_writer_free(void *port)
{
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
		return -EINVAL;
	}

	rte_port_fd_writer_flush(port);
	rte_free(port);

	return 0;
}

static int rte_port_fd_writer_stats_read(void *port,
		struct rte_port_out_stats *stats, int clear)
{
	struct rte_port_fd_writer *p = port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Port FD Writer Nodrop
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val) \
	do { port->stats.n_pkts_in += val; } while (0)
#define RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val) \
	do { port->stats.n_pkts_drop += val; } while (0)

#else

#define RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val)

#endif

struct rte_port_fd_writer_nodrop {
	struct rte_port_out_stats stats;

	struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
	uint32_t tx_burst_sz;
	uint16_t tx_buf_count;
	uint64_t n_retries;
	int fd;
};

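/*
 * Create a no-drop FD writer port: same parameter checks as the plain
 * writer, plus a configurable write retry budget (n_retries, where 0 means
 * retry indefinitely).
 */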
static void *
rte_port_fd_writer_nodrop_create(void *params, int socket_id)
{
	struct rte_port_fd_writer_nodrop_params *conf = params;
	struct rte_port_fd_writer_nodrop *port;

	/* Check input parameters */
	if ((conf == NULL) ||
		(conf->fd < 0) ||
		(conf->tx_burst_sz == 0) ||
		(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
		(!rte_is_power_of_2(conf->tx_burst_sz))) {
		RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->fd = conf->fd;
	port->tx_burst_sz = conf->tx_burst_sz;
	port->tx_buf_count = 0;

	/*
	 * An n_retries of 0 means wait for every packet to be sent, no matter
	 * how many retries it takes. To limit the number of branches in the
	 * fast path, we store UINT64_MAX instead of branching on 0.
	 */
	port->n_retries = (conf->n_retries == 0) ? UINT64_MAX : conf->n_retries;

	return port;
}

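/*
 * Flush the TX buffer with retries: each packet is retried while write()
 * returns 0, with the retry budget p->n_retries shared across the whole
 * burst. Packets still unsent when the budget runs out are counted as
 * dropped; all buffered mbufs are freed afterwards.
 */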
static inline void
send_burst_nodrop(struct rte_port_fd_writer_nodrop *p)
{
	uint64_t n_retries;
	uint32_t i;

	n_retries = 0;
	for (i = 0; (i < p->tx_buf_count) && (n_retries < p->n_retries); i++) {
		struct rte_mbuf *pkt = p->tx_buf[i];
		void *pkt_data = rte_pktmbuf_mtod(pkt, void *);
		size_t n_bytes = rte_pktmbuf_data_len(pkt);

		for ( ; n_retries < p->n_retries; n_retries++) {
			ssize_t ret;

			ret = write(p->fd, pkt_data, n_bytes);
			if (ret)
				break;
		}
	}

	RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - i);

	for (i = 0; i < p->tx_buf_count; i++)
		rte_pktmbuf_free(p->tx_buf[i]);

	p->tx_buf_count = 0;
}

static int
rte_port_fd_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
{
	struct rte_port_fd_writer_nodrop *p = port;

	p->tx_buf[p->tx_buf_count++] = pkt;
	RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
	if (p->tx_buf_count >= p->tx_burst_sz)
		send_burst_nodrop(p);

	return 0;
}

static int
rte_port_fd_writer_nodrop_tx_bulk(void *port,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask)
{
	struct rte_port_fd_writer_nodrop *p = port;
	uint32_t tx_buf_count = p->tx_buf_count;

	if ((pkts_mask & (pkts_mask + 1)) == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t i;

		for (i = 0; i < n_pkts; i++)
			p->tx_buf[tx_buf_count++] = pkts[i];
		RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
	} else
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

	p->tx_buf_count = tx_buf_count;
	if (tx_buf_count >= p->tx_burst_sz)
		send_burst_nodrop(p);

	return 0;
}

static int
rte_port_fd_writer_nodrop_flush(void *port)
{
	struct rte_port_fd_writer_nodrop *p = port;

	if (p->tx_buf_count > 0)
		send_burst_nodrop(p);

	return 0;
}

static int
rte_port_fd_writer_nodrop_free(void *port)
{
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
		return -EINVAL;
	}

	rte_port_fd_writer_nodrop_flush(port);
	rte_free(port);

	return 0;
}

static int rte_port_fd_writer_nodrop_stats_read(void *port,
		struct rte_port_out_stats *stats, int clear)
{
	struct rte_port_fd_writer_nodrop *p = port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Summary of port operations
 */
struct rte_port_in_ops rte_port_fd_reader_ops = {
	.f_create = rte_port_fd_reader_create,
	.f_free = rte_port_fd_reader_free,
	.f_rx = rte_port_fd_reader_rx,
	.f_stats = rte_port_fd_reader_stats_read,
};

struct rte_port_out_ops rte_port_fd_writer_ops = {
	.f_create = rte_port_fd_writer_create,
	.f_free = rte_port_fd_writer_free,
	.f_tx = rte_port_fd_writer_tx,
	.f_tx_bulk = rte_port_fd_writer_tx_bulk,
	.f_flush = rte_port_fd_writer_flush,
	.f_stats = rte_port_fd_writer_stats_read,
};

struct rte_port_out_ops rte_port_fd_writer_nodrop_ops = {
	.f_create = rte_port_fd_writer_nodrop_create,
	.f_free = rte_port_fd_writer_nodrop_free,
	.f_tx = rte_port_fd_writer_nodrop_tx,
	.f_tx_bulk = rte_port_fd_writer_nodrop_tx_bulk,
	.f_flush = rte_port_fd_writer_nodrop_flush,
	.f_stats = rte_port_fd_writer_nodrop_stats_read,
};
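
/*
 * Illustrative usage sketch (not part of this file): a reader port can be
 * driven directly through its ops table. app_fd and app_pktmbuf_pool are
 * placeholders for a file descriptor and a pktmbuf mempool created by the
 * application.
 *
 *	struct rte_port_fd_reader_params params = {
 *		.fd = app_fd,
 *		.mtu = 1500,
 *		.mempool = app_pktmbuf_pool,
 *	};
 *	void *port = rte_port_fd_reader_ops.f_create(&params, SOCKET_ID_ANY);
 *	struct rte_mbuf *burst[RTE_PORT_IN_BURST_SIZE_MAX];
 *	int n_rx = rte_port_fd_reader_ops.f_rx(port, burst, 32);
 *
 * In practice these ops tables are normally plugged into the librte_pipeline
 * port-in/port-out configuration rather than called directly.
 */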