xref: /dpdk/lib/pdump/rte_pdump.c (revision 665b49c51639a10c553433bc2bcd85c7331c631e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2018 Intel Corporation
3  */
4 
5 #include <stdlib.h>
6 
7 #include <rte_mbuf.h>
8 #include <rte_ethdev.h>
9 #include <rte_lcore.h>
10 #include <rte_log.h>
11 #include <rte_memzone.h>
12 #include <rte_errno.h>
13 #include <rte_string_fns.h>
14 #include <rte_pcapng.h>
15 
16 #include "rte_pdump.h"
17 
18 RTE_LOG_REGISTER_DEFAULT(pdump_logtype, NOTICE);
19 
20 /* Macro for printing using RTE_LOG */
21 #define PDUMP_LOG(level, fmt, args...)				\
22 	rte_log(RTE_LOG_ ## level, pdump_logtype, "%s(): " fmt,	\
23 		__func__, ## args)
24 
25 /* Used for the multi-process communication */
26 #define PDUMP_MP	"mp_pdump"
27 
/* Operation requested by the capture client in a pdump_request. */
enum pdump_operation {
	DISABLE = 1,	/* remove previously installed Rx/Tx callbacks */
	ENABLE = 2	/* install Rx/Tx capture callbacks */
};

/* Internal version number in request */
enum pdump_version {
	V1 = 1,		    /* no filtering or snap */
	V2 = 2,		    /* pcapng capture: BPF filter and snaplen honored */
};
38 
/*
 * Request sent from a capture (non-primary) process to the primary
 * over the rte_mp channel; handled by pdump_server().
 */
struct pdump_request {
	uint16_t ver;		/* pdump_version: V1 or V2 */
	uint16_t op;		/* pdump_operation: ENABLE or DISABLE */
	uint32_t flags;		/* RTE_PDUMP_FLAG_RX / RTE_PDUMP_FLAG_TX bits */
	char device[RTE_DEV_NAME_MAX_LEN];	/* ethdev name to capture on */
	uint16_t queue;		/* queue id, or RTE_PDUMP_ALL_QUEUES */
	struct rte_ring *ring;	/* ring the packet copies are enqueued to */
	struct rte_mempool *mp;	/* pool used to allocate the copies */

	/*
	 * NOTE(review): raw pointer dereferenced by the server process;
	 * assumes the memory behind it is visible there — confirm.
	 */
	const struct rte_bpf_prm *prm;
	uint32_t snaplen;	/* max bytes captured per packet */
};
51 
/* Reply sent back by pdump_server(); err_value is 0 on success. */
struct pdump_response {
	uint16_t ver;		/* echoes the request's ver */
	uint16_t res_op;	/* echoes the request's op */
	int32_t err_value;	/* result of set_pdump_rxtx_cbs() */
};
57 
/*
 * Per-port, per-queue capture state for Rx and Tx directions.
 * An entry is active when cb != NULL.
 */
static struct pdump_rxtx_cbs {
	struct rte_ring *ring;		/* destination ring for copies */
	struct rte_mempool *mp;		/* pool for the packet copies */
	const struct rte_eth_rxtx_callback *cb;	/* ethdev callback handle */
	const struct rte_bpf *filter;	/* optional BPF filter, may be NULL */
	enum pdump_version ver;		/* V1 = plain copy, V2 = pcapng copy */
	uint32_t snaplen;		/* max bytes copied per packet */
} rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
67 
68 
69 /*
70  * The packet capture statistics keep track of packets
71  * accepted, filtered and dropped. These are per-queue
72  * and in memory between primary and secondary processes.
73  */
/* Name of the shared memzone holding the statistics. */
static const char MZ_RTE_PDUMP_STATS[] = "rte_pdump_stats";
static struct {
	struct rte_pdump_stats rx[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
	struct rte_pdump_stats tx[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
	const struct rte_memzone *mz;	/* backing memzone, freed in uninit */
} *pdump_stats;
80 
/* Create a clone of mbuf to be placed into ring. */
static void
pdump_copy(uint16_t port_id, uint16_t queue,
	   enum rte_pcapng_direction direction,
	   struct rte_mbuf **pkts, uint16_t nb_pkts,
	   const struct pdump_rxtx_cbs *cbs,
	   struct rte_pdump_stats *stats)
{
	unsigned int i;
	int ring_enq;
	uint16_t d_pkts = 0;
	struct rte_mbuf *dup_bufs[nb_pkts];	/* VLA: copies awaiting enqueue */
	uint64_t ts;
	struct rte_ring *ring;
	struct rte_mempool *mp;
	struct rte_mbuf *p;
	uint64_t rcs[nb_pkts];			/* VLA: per-packet BPF verdicts */

	/* Run the BPF program once over the whole burst. */
	if (cbs->filter)
		rte_bpf_exec_burst(cbs->filter, (void **)pkts, rcs, nb_pkts);

	/* One TSC timestamp is shared by all packets of the burst. */
	ts = rte_get_tsc_cycles();
	ring = cbs->ring;
	mp = cbs->mp;
	for (i = 0; i < nb_pkts; i++) {
		/*
		 * This uses same BPF return value convention as socket filter
		 * and pcap_offline_filter.
		 * if program returns zero
		 * then packet doesn't match the filter (will be ignored).
		 */
		if (cbs->filter && rcs[i] == 0) {
			__atomic_fetch_add(&stats->filtered,
					   1, __ATOMIC_RELAXED);
			continue;
		}

		/*
		 * If using pcapng then want to wrap packets
		 * otherwise a simple copy.
		 */
		if (cbs->ver == V2)
			p = rte_pcapng_copy(port_id, queue,
					    pkts[i], mp, cbs->snaplen,
					    ts, direction, NULL);
		else
			p = rte_pktmbuf_copy(pkts[i], mp, 0, cbs->snaplen);

		/* On allocation failure just count it; never stall the datapath. */
		if (unlikely(p == NULL))
			__atomic_fetch_add(&stats->nombuf, 1, __ATOMIC_RELAXED);
		else
			dup_bufs[d_pkts++] = p;
	}

	__atomic_fetch_add(&stats->accepted, d_pkts, __ATOMIC_RELAXED);

	/* Enqueue the copies; free whatever the ring could not take. */
	ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL);
	if (unlikely(ring_enq < d_pkts)) {
		unsigned int drops = d_pkts - ring_enq;

		__atomic_fetch_add(&stats->ringfull, drops, __ATOMIC_RELAXED);
		rte_pktmbuf_free_bulk(&dup_bufs[ring_enq], drops);
	}
}
145 
146 static uint16_t
147 pdump_rx(uint16_t port, uint16_t queue,
148 	struct rte_mbuf **pkts, uint16_t nb_pkts,
149 	uint16_t max_pkts __rte_unused, void *user_params)
150 {
151 	const struct pdump_rxtx_cbs *cbs = user_params;
152 	struct rte_pdump_stats *stats = &pdump_stats->rx[port][queue];
153 
154 	pdump_copy(port, queue, RTE_PCAPNG_DIRECTION_IN,
155 		   pkts, nb_pkts, cbs, stats);
156 	return nb_pkts;
157 }
158 
159 static uint16_t
160 pdump_tx(uint16_t port, uint16_t queue,
161 		struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
162 {
163 	const struct pdump_rxtx_cbs *cbs = user_params;
164 	struct rte_pdump_stats *stats = &pdump_stats->tx[port][queue];
165 
166 	pdump_copy(port, queue, RTE_PCAPNG_DIRECTION_OUT,
167 		   pkts, nb_pkts, cbs, stats);
168 	return nb_pkts;
169 }
170 
171 static int
172 pdump_register_rx_callbacks(enum pdump_version ver,
173 			    uint16_t end_q, uint16_t port, uint16_t queue,
174 			    struct rte_ring *ring, struct rte_mempool *mp,
175 			    struct rte_bpf *filter,
176 			    uint16_t operation, uint32_t snaplen)
177 {
178 	uint16_t qid;
179 
180 	qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
181 	for (; qid < end_q; qid++) {
182 		struct pdump_rxtx_cbs *cbs = &rx_cbs[port][qid];
183 
184 		if (operation == ENABLE) {
185 			if (cbs->cb) {
186 				PDUMP_LOG(ERR,
187 					"rx callback for port=%d queue=%d, already exists\n",
188 					port, qid);
189 				return -EEXIST;
190 			}
191 			cbs->ver = ver;
192 			cbs->ring = ring;
193 			cbs->mp = mp;
194 			cbs->snaplen = snaplen;
195 			cbs->filter = filter;
196 
197 			cbs->cb = rte_eth_add_first_rx_callback(port, qid,
198 								pdump_rx, cbs);
199 			if (cbs->cb == NULL) {
200 				PDUMP_LOG(ERR,
201 					"failed to add rx callback, errno=%d\n",
202 					rte_errno);
203 				return rte_errno;
204 			}
205 		} else if (operation == DISABLE) {
206 			int ret;
207 
208 			if (cbs->cb == NULL) {
209 				PDUMP_LOG(ERR,
210 					"no existing rx callback for port=%d queue=%d\n",
211 					port, qid);
212 				return -EINVAL;
213 			}
214 			ret = rte_eth_remove_rx_callback(port, qid, cbs->cb);
215 			if (ret < 0) {
216 				PDUMP_LOG(ERR,
217 					"failed to remove rx callback, errno=%d\n",
218 					-ret);
219 				return ret;
220 			}
221 			cbs->cb = NULL;
222 		}
223 	}
224 
225 	return 0;
226 }
227 
228 static int
229 pdump_register_tx_callbacks(enum pdump_version ver,
230 			    uint16_t end_q, uint16_t port, uint16_t queue,
231 			    struct rte_ring *ring, struct rte_mempool *mp,
232 			    struct rte_bpf *filter,
233 			    uint16_t operation, uint32_t snaplen)
234 {
235 
236 	uint16_t qid;
237 
238 	qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
239 	for (; qid < end_q; qid++) {
240 		struct pdump_rxtx_cbs *cbs = &tx_cbs[port][qid];
241 
242 		if (operation == ENABLE) {
243 			if (cbs->cb) {
244 				PDUMP_LOG(ERR,
245 					"tx callback for port=%d queue=%d, already exists\n",
246 					port, qid);
247 				return -EEXIST;
248 			}
249 			cbs->ver = ver;
250 			cbs->ring = ring;
251 			cbs->mp = mp;
252 			cbs->snaplen = snaplen;
253 			cbs->filter = filter;
254 
255 			cbs->cb = rte_eth_add_tx_callback(port, qid, pdump_tx,
256 								cbs);
257 			if (cbs->cb == NULL) {
258 				PDUMP_LOG(ERR,
259 					"failed to add tx callback, errno=%d\n",
260 					rte_errno);
261 				return rte_errno;
262 			}
263 		} else if (operation == DISABLE) {
264 			int ret;
265 
266 			if (cbs->cb == NULL) {
267 				PDUMP_LOG(ERR,
268 					"no existing tx callback for port=%d queue=%d\n",
269 					port, qid);
270 				return -EINVAL;
271 			}
272 			ret = rte_eth_remove_tx_callback(port, qid, cbs->cb);
273 			if (ret < 0) {
274 				PDUMP_LOG(ERR,
275 					"failed to remove tx callback, errno=%d\n",
276 					-ret);
277 				return ret;
278 			}
279 			cbs->cb = NULL;
280 		}
281 	}
282 
283 	return 0;
284 }
285 
286 static int
287 set_pdump_rxtx_cbs(const struct pdump_request *p)
288 {
289 	uint16_t nb_rx_q = 0, nb_tx_q = 0, end_q, queue;
290 	uint16_t port;
291 	int ret = 0;
292 	struct rte_bpf *filter = NULL;
293 	uint32_t flags;
294 	uint16_t operation;
295 	struct rte_ring *ring;
296 	struct rte_mempool *mp;
297 
298 	/* Check for possible DPDK version mismatch */
299 	if (!(p->ver == V1 || p->ver == V2)) {
300 		PDUMP_LOG(ERR,
301 			  "incorrect client version %u\n", p->ver);
302 		return -EINVAL;
303 	}
304 
305 	if (p->prm) {
306 		if (p->prm->prog_arg.type != RTE_BPF_ARG_PTR_MBUF) {
307 			PDUMP_LOG(ERR,
308 				  "invalid BPF program type: %u\n",
309 				  p->prm->prog_arg.type);
310 			return -EINVAL;
311 		}
312 
313 		filter = rte_bpf_load(p->prm);
314 		if (filter == NULL) {
315 			PDUMP_LOG(ERR, "cannot load BPF filter: %s\n",
316 				  rte_strerror(rte_errno));
317 			return -rte_errno;
318 		}
319 	}
320 
321 	flags = p->flags;
322 	operation = p->op;
323 	queue = p->queue;
324 	ring = p->ring;
325 	mp = p->mp;
326 
327 	ret = rte_eth_dev_get_port_by_name(p->device, &port);
328 	if (ret < 0) {
329 		PDUMP_LOG(ERR,
330 			  "failed to get port id for device id=%s\n",
331 			  p->device);
332 		return -EINVAL;
333 	}
334 
335 	/* validation if packet capture is for all queues */
336 	if (queue == RTE_PDUMP_ALL_QUEUES) {
337 		struct rte_eth_dev_info dev_info;
338 
339 		ret = rte_eth_dev_info_get(port, &dev_info);
340 		if (ret != 0) {
341 			PDUMP_LOG(ERR,
342 				"Error during getting device (port %u) info: %s\n",
343 				port, strerror(-ret));
344 			return ret;
345 		}
346 
347 		nb_rx_q = dev_info.nb_rx_queues;
348 		nb_tx_q = dev_info.nb_tx_queues;
349 		if (nb_rx_q == 0 && flags & RTE_PDUMP_FLAG_RX) {
350 			PDUMP_LOG(ERR,
351 				"number of rx queues cannot be 0\n");
352 			return -EINVAL;
353 		}
354 		if (nb_tx_q == 0 && flags & RTE_PDUMP_FLAG_TX) {
355 			PDUMP_LOG(ERR,
356 				"number of tx queues cannot be 0\n");
357 			return -EINVAL;
358 		}
359 		if ((nb_tx_q == 0 || nb_rx_q == 0) &&
360 			flags == RTE_PDUMP_FLAG_RXTX) {
361 			PDUMP_LOG(ERR,
362 				"both tx&rx queues must be non zero\n");
363 			return -EINVAL;
364 		}
365 	}
366 
367 	/* register RX callback */
368 	if (flags & RTE_PDUMP_FLAG_RX) {
369 		end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;
370 		ret = pdump_register_rx_callbacks(p->ver, end_q, port, queue,
371 						  ring, mp, filter,
372 						  operation, p->snaplen);
373 		if (ret < 0)
374 			return ret;
375 	}
376 
377 	/* register TX callback */
378 	if (flags & RTE_PDUMP_FLAG_TX) {
379 		end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_tx_q : queue + 1;
380 		ret = pdump_register_tx_callbacks(p->ver, end_q, port, queue,
381 						  ring, mp, filter,
382 						  operation, p->snaplen);
383 		if (ret < 0)
384 			return ret;
385 	}
386 
387 	return ret;
388 }
389 
390 static int
391 pdump_server(const struct rte_mp_msg *mp_msg, const void *peer)
392 {
393 	struct rte_mp_msg mp_resp;
394 	const struct pdump_request *cli_req;
395 	struct pdump_response *resp = (struct pdump_response *)&mp_resp.param;
396 
397 	/* recv client requests */
398 	if (mp_msg->len_param != sizeof(*cli_req)) {
399 		PDUMP_LOG(ERR, "failed to recv from client\n");
400 		resp->err_value = -EINVAL;
401 	} else {
402 		cli_req = (const struct pdump_request *)mp_msg->param;
403 		resp->ver = cli_req->ver;
404 		resp->res_op = cli_req->op;
405 		resp->err_value = set_pdump_rxtx_cbs(cli_req);
406 	}
407 
408 	rte_strscpy(mp_resp.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
409 	mp_resp.len_param = sizeof(*resp);
410 	mp_resp.num_fds = 0;
411 	if (rte_mp_reply(&mp_resp, peer) < 0) {
412 		PDUMP_LOG(ERR, "failed to send to client:%s\n",
413 			  strerror(rte_errno));
414 		return -1;
415 	}
416 
417 	return 0;
418 }
419 
420 int
421 rte_pdump_init(void)
422 {
423 	const struct rte_memzone *mz;
424 	int ret;
425 
426 	mz = rte_memzone_reserve(MZ_RTE_PDUMP_STATS, sizeof(*pdump_stats),
427 				 rte_socket_id(), 0);
428 	if (mz == NULL) {
429 		PDUMP_LOG(ERR, "cannot allocate pdump statistics\n");
430 		rte_errno = ENOMEM;
431 		return -1;
432 	}
433 	pdump_stats = mz->addr;
434 	pdump_stats->mz = mz;
435 
436 	ret = rte_mp_action_register(PDUMP_MP, pdump_server);
437 	if (ret && rte_errno != ENOTSUP)
438 		return -1;
439 	return 0;
440 }
441 
442 int
443 rte_pdump_uninit(void)
444 {
445 	rte_mp_action_unregister(PDUMP_MP);
446 
447 	if (pdump_stats != NULL) {
448 		rte_memzone_free(pdump_stats->mz);
449 		pdump_stats = NULL;
450 	}
451 
452 	return 0;
453 }
454 
455 static int
456 pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
457 {
458 	if (ring == NULL || mp == NULL) {
459 		PDUMP_LOG(ERR, "NULL ring or mempool\n");
460 		rte_errno = EINVAL;
461 		return -1;
462 	}
463 	if (mp->flags & RTE_MEMPOOL_F_SP_PUT ||
464 	    mp->flags & RTE_MEMPOOL_F_SC_GET) {
465 		PDUMP_LOG(ERR,
466 			  "mempool with SP or SC set not valid for pdump,"
467 			  "must have MP and MC set\n");
468 		rte_errno = EINVAL;
469 		return -1;
470 	}
471 	if (rte_ring_is_prod_single(ring) || rte_ring_is_cons_single(ring)) {
472 		PDUMP_LOG(ERR,
473 			  "ring with SP or SC set is not valid for pdump,"
474 			  "must have MP and MC set\n");
475 		rte_errno = EINVAL;
476 		return -1;
477 	}
478 
479 	return 0;
480 }
481 
482 static int
483 pdump_validate_flags(uint32_t flags)
484 {
485 	if ((flags & RTE_PDUMP_FLAG_RXTX) == 0) {
486 		PDUMP_LOG(ERR,
487 			"invalid flags, should be either rx/tx/rxtx\n");
488 		rte_errno = EINVAL;
489 		return -1;
490 	}
491 
492 	/* mask off the flags we know about */
493 	if (flags & ~(RTE_PDUMP_FLAG_RXTX | RTE_PDUMP_FLAG_PCAPNG)) {
494 		PDUMP_LOG(ERR,
495 			  "unknown flags: %#x\n", flags);
496 		rte_errno = ENOTSUP;
497 		return -1;
498 	}
499 
500 	return 0;
501 }
502 
503 static int
504 pdump_validate_port(uint16_t port, char *name)
505 {
506 	int ret = 0;
507 
508 	if (port >= RTE_MAX_ETHPORTS) {
509 		PDUMP_LOG(ERR, "Invalid port id %u\n", port);
510 		rte_errno = EINVAL;
511 		return -1;
512 	}
513 
514 	ret = rte_eth_dev_get_name_by_port(port, name);
515 	if (ret < 0) {
516 		PDUMP_LOG(ERR, "port %u to name mapping failed\n",
517 			  port);
518 		rte_errno = EINVAL;
519 		return -1;
520 	}
521 
522 	return 0;
523 }
524 
/*
 * Build a pdump request and send it synchronously to the server
 * (primary process) over the rte_mp channel, waiting up to 5 seconds
 * for the reply.
 *
 * Returns 0 when the server reports success; otherwise -EINVAL (when
 * called from a primary process) or -1, with rte_errno carrying the
 * server-side error where available.
 */
static int
pdump_prepare_client_request(const char *device, uint16_t queue,
			     uint32_t flags, uint32_t snaplen,
			     uint16_t operation,
			     struct rte_ring *ring,
			     struct rte_mempool *mp,
			     const struct rte_bpf_prm *prm)
{
	int ret = -1;	/* pessimistic default: any early exit is a failure */
	struct rte_mp_msg mp_req, *mp_rep;
	struct rte_mp_reply mp_reply;
	struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};	/* reply timeout */
	struct pdump_request *req = (struct pdump_request *)mp_req.param;
	struct pdump_response *resp;

	/* Capture configuration is driven from a non-primary process only. */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		PDUMP_LOG(ERR,
			  "pdump enable/disable not allowed in primary process\n");
		return -EINVAL;
	}

	memset(req, 0, sizeof(*req));

	/* V2 (pcapng) requests carry the filter and snaplen, V1 does not. */
	req->ver = (flags & RTE_PDUMP_FLAG_PCAPNG) ? V2 : V1;
	req->flags = flags & RTE_PDUMP_FLAG_RXTX;
	req->op = operation;
	req->queue = queue;
	rte_strscpy(req->device, device, sizeof(req->device));

	if ((operation & ENABLE) != 0) {
		req->ring = ring;
		req->mp = mp;
		/*
		 * NOTE(review): prm is sent as a raw pointer and dereferenced
		 * in the server process; this assumes the memory behind it is
		 * visible there — confirm.
		 */
		req->prm = prm;
		req->snaplen = snaplen;
	}

	rte_strscpy(mp_req.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
	mp_req.len_param = sizeof(*req);
	mp_req.num_fds = 0;
	if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0) {
		mp_rep = &mp_reply.msgs[0];
		resp = (struct pdump_response *)mp_rep->param;
		/* Propagate the server-side result through rte_errno. */
		rte_errno = resp->err_value;
		if (!resp->err_value)
			ret = 0;
		free(mp_reply.msgs);	/* reply array is allocated by rte_mp */
	}

	if (ret < 0)
		PDUMP_LOG(ERR,
			"client request for pdump enable/disable failed\n");
	return ret;
}
578 
579 /*
580  * There are two versions of this function, because although original API
581  * left place holder for future filter, it never checked the value.
582  * Therefore the API can't depend on application passing a non
583  * bogus value.
584  */
585 static int
586 pdump_enable(uint16_t port, uint16_t queue,
587 	     uint32_t flags, uint32_t snaplen,
588 	     struct rte_ring *ring, struct rte_mempool *mp,
589 	     const struct rte_bpf_prm *prm)
590 {
591 	int ret;
592 	char name[RTE_DEV_NAME_MAX_LEN];
593 
594 	ret = pdump_validate_port(port, name);
595 	if (ret < 0)
596 		return ret;
597 	ret = pdump_validate_ring_mp(ring, mp);
598 	if (ret < 0)
599 		return ret;
600 	ret = pdump_validate_flags(flags);
601 	if (ret < 0)
602 		return ret;
603 
604 	if (snaplen == 0)
605 		snaplen = UINT32_MAX;
606 
607 	return pdump_prepare_client_request(name, queue, flags, snaplen,
608 					    ENABLE, ring, mp, prm);
609 }
610 
611 int
612 rte_pdump_enable(uint16_t port, uint16_t queue, uint32_t flags,
613 		 struct rte_ring *ring,
614 		 struct rte_mempool *mp,
615 		 void *filter __rte_unused)
616 {
617 	return pdump_enable(port, queue, flags, 0,
618 			    ring, mp, NULL);
619 }
620 
/* Enable capture with optional BPF filter and snapshot length (v2 API). */
int
rte_pdump_enable_bpf(uint16_t port, uint16_t queue,
		     uint32_t flags, uint32_t snaplen,
		     struct rte_ring *ring,
		     struct rte_mempool *mp,
		     const struct rte_bpf_prm *prm)
{
	return pdump_enable(port, queue, flags, snaplen, ring, mp, prm);
}
631 
632 static int
633 pdump_enable_by_deviceid(const char *device_id, uint16_t queue,
634 			 uint32_t flags, uint32_t snaplen,
635 			 struct rte_ring *ring,
636 			 struct rte_mempool *mp,
637 			 const struct rte_bpf_prm *prm)
638 {
639 	int ret;
640 
641 	ret = pdump_validate_ring_mp(ring, mp);
642 	if (ret < 0)
643 		return ret;
644 	ret = pdump_validate_flags(flags);
645 	if (ret < 0)
646 		return ret;
647 
648 	if (snaplen == 0)
649 		snaplen = UINT32_MAX;
650 
651 	return pdump_prepare_client_request(device_id, queue, flags, snaplen,
652 					    ENABLE, ring, mp, prm);
653 }
654 
655 int
656 rte_pdump_enable_by_deviceid(char *device_id, uint16_t queue,
657 			     uint32_t flags,
658 			     struct rte_ring *ring,
659 			     struct rte_mempool *mp,
660 			     void *filter __rte_unused)
661 {
662 	return pdump_enable_by_deviceid(device_id, queue, flags, 0,
663 					ring, mp, NULL);
664 }
665 
/* By-device-name enable with optional BPF filter and snaplen (v2 API). */
int
rte_pdump_enable_bpf_by_deviceid(const char *device_id, uint16_t queue,
				 uint32_t flags, uint32_t snaplen,
				 struct rte_ring *ring,
				 struct rte_mempool *mp,
				 const struct rte_bpf_prm *prm)
{
	return pdump_enable_by_deviceid(device_id, queue, flags, snaplen,
					ring, mp, prm);
}
676 
677 int
678 rte_pdump_disable(uint16_t port, uint16_t queue, uint32_t flags)
679 {
680 	int ret = 0;
681 	char name[RTE_DEV_NAME_MAX_LEN];
682 
683 	ret = pdump_validate_port(port, name);
684 	if (ret < 0)
685 		return ret;
686 	ret = pdump_validate_flags(flags);
687 	if (ret < 0)
688 		return ret;
689 
690 	ret = pdump_prepare_client_request(name, queue, flags, 0,
691 					   DISABLE, NULL, NULL, NULL);
692 
693 	return ret;
694 }
695 
696 int
697 rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue,
698 				uint32_t flags)
699 {
700 	int ret = 0;
701 
702 	ret = pdump_validate_flags(flags);
703 	if (ret < 0)
704 		return ret;
705 
706 	ret = pdump_prepare_client_request(device_id, queue, flags, 0,
707 					   DISABLE, NULL, NULL, NULL);
708 
709 	return ret;
710 }
711 
712 static void
713 pdump_sum_stats(uint16_t port, uint16_t nq,
714 		struct rte_pdump_stats stats[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
715 		struct rte_pdump_stats *total)
716 {
717 	uint64_t *sum = (uint64_t *)total;
718 	unsigned int i;
719 	uint64_t val;
720 	uint16_t qid;
721 
722 	for (qid = 0; qid < nq; qid++) {
723 		const uint64_t *perq = (const uint64_t *)&stats[port][qid];
724 
725 		for (i = 0; i < sizeof(*total) / sizeof(uint64_t); i++) {
726 			val = __atomic_load_n(&perq[i], __ATOMIC_RELAXED);
727 			sum[i] += val;
728 		}
729 	}
730 }
731 
732 int
733 rte_pdump_stats(uint16_t port, struct rte_pdump_stats *stats)
734 {
735 	struct rte_eth_dev_info dev_info;
736 	const struct rte_memzone *mz;
737 	int ret;
738 
739 	memset(stats, 0, sizeof(*stats));
740 	ret = rte_eth_dev_info_get(port, &dev_info);
741 	if (ret != 0) {
742 		PDUMP_LOG(ERR,
743 			  "Error during getting device (port %u) info: %s\n",
744 			  port, strerror(-ret));
745 		return ret;
746 	}
747 
748 	if (pdump_stats == NULL) {
749 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
750 			/* rte_pdump_init was not called */
751 			PDUMP_LOG(ERR, "pdump stats not initialized\n");
752 			rte_errno = EINVAL;
753 			return -1;
754 		}
755 
756 		/* secondary process looks up the memzone */
757 		mz = rte_memzone_lookup(MZ_RTE_PDUMP_STATS);
758 		if (mz == NULL) {
759 			/* rte_pdump_init was not called in primary process?? */
760 			PDUMP_LOG(ERR, "can not find pdump stats\n");
761 			rte_errno = EINVAL;
762 			return -1;
763 		}
764 		pdump_stats = mz->addr;
765 	}
766 
767 	pdump_sum_stats(port, dev_info.nb_rx_queues, pdump_stats->rx, stats);
768 	pdump_sum_stats(port, dev_info.nb_tx_queues, pdump_stats->tx, stats);
769 	return 0;
770 }
771