/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Intel Corporation
 */

#include <stdlib.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_pcapng.h>

#include "rte_pdump.h"

RTE_LOG_REGISTER_DEFAULT(pdump_logtype, NOTICE);
#define RTE_LOGTYPE_PDUMP pdump_logtype

#define PDUMP_LOG_LINE(level, ...) \
	RTE_LOG_LINE_PREFIX(level, PDUMP, "%s(): ", __func__, __VA_ARGS__)

/* Name of the channel used for multi-process communication */
#define PDUMP_MP "mp_pdump"

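/*
 * Control path: a secondary process (e.g. a capture application) sends a
 * pdump_request over the rte_mp channel; the primary process handles it in
 * pdump_server(), (un)registering the ethdev Rx/Tx callbacks that copy
 * packets into the shared ring, and replies with a pdump_response.
 */
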
enum pdump_operation {
	DISABLE = 1,
	ENABLE = 2
};

/* Internal version number in request */
enum pdump_version {
	V1 = 1,		/* no filtering or snap */
	V2 = 2,
};

struct pdump_request {
	uint16_t ver;
	uint16_t op;
	uint32_t flags;
	char device[RTE_DEV_NAME_MAX_LEN];
	uint16_t queue;
	struct rte_ring *ring;
	struct rte_mempool *mp;

	const struct rte_bpf_prm *prm;
	uint32_t snaplen;
};

struct pdump_response {
	uint16_t ver;
	uint16_t res_op;
	int32_t err_value;
};

static struct pdump_rxtx_cbs {
	struct rte_ring *ring;
	struct rte_mempool *mp;
	const struct rte_eth_rxtx_callback *cb;
	const struct rte_bpf *filter;
	enum pdump_version ver;
	uint32_t snaplen;
} rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];

/*
 * The packet capture statistics keep track of packets
 * accepted, filtered and dropped. These are per-queue
 * and shared in memory between primary and secondary processes.
 */
static const char MZ_RTE_PDUMP_STATS[] = "rte_pdump_stats";
static struct {
	struct rte_pdump_stats rx[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
	struct rte_pdump_stats tx[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
	const struct rte_memzone *mz;
} *pdump_stats;

/* Clone the mbufs that match the filter and enqueue the clones onto the ring. */
static void
pdump_copy(uint16_t port_id, uint16_t queue,
	   enum rte_pcapng_direction direction,
	   struct rte_mbuf **pkts, uint16_t nb_pkts,
	   const struct pdump_rxtx_cbs *cbs,
	   struct rte_pdump_stats *stats)
{
	unsigned int i;
	int ring_enq;
	uint16_t d_pkts = 0;
	struct rte_mbuf *dup_bufs[nb_pkts];
	struct rte_ring *ring;
	struct rte_mempool *mp;
	struct rte_mbuf *p;
	uint64_t rcs[nb_pkts];

	if (cbs->filter)
		rte_bpf_exec_burst(cbs->filter, (void **)pkts, rcs, nb_pkts);

	ring = cbs->ring;
	mp = cbs->mp;
	for (i = 0; i < nb_pkts; i++) {
		/*
		 * This uses the same BPF return value convention as the
		 * socket filter and pcap_offline_filter: if the program
		 * returns zero, the packet does not match the filter and
		 * is ignored.
		 */
		if (cbs->filter && rcs[i] == 0) {
			rte_atomic_fetch_add_explicit(&stats->filtered,
						      1, rte_memory_order_relaxed);
			continue;
		}

		/*
		 * If using pcapng, wrap the packet with capture metadata;
		 * otherwise make a simple copy.
		 */
		if (cbs->ver == V2)
			p = rte_pcapng_copy(port_id, queue,
					    pkts[i], mp, cbs->snaplen,
					    direction, NULL);
		else
			p = rte_pktmbuf_copy(pkts[i], mp, 0, cbs->snaplen);

		if (unlikely(p == NULL))
			rte_atomic_fetch_add_explicit(&stats->nombuf, 1, rte_memory_order_relaxed);
		else
			dup_bufs[d_pkts++] = p;
	}

	rte_atomic_fetch_add_explicit(&stats->accepted, d_pkts, rte_memory_order_relaxed);

	ring_enq = rte_ring_enqueue_burst(ring, (void *)&dup_bufs[0], d_pkts, NULL);
	if (unlikely(ring_enq < d_pkts)) {
		unsigned int drops = d_pkts - ring_enq;

		rte_atomic_fetch_add_explicit(&stats->ringfull, drops, rte_memory_order_relaxed);
		rte_pktmbuf_free_bulk(&dup_bufs[ring_enq], drops);
	}
}

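/*
 * Rx callback attached with rte_eth_add_first_rx_callback(); copies
 * matching packets from the received burst and passes the burst on
 * unchanged.
 */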
static uint16_t
pdump_rx(uint16_t port, uint16_t queue,
	 struct rte_mbuf **pkts, uint16_t nb_pkts,
	 uint16_t max_pkts __rte_unused, void *user_params)
{
	const struct pdump_rxtx_cbs *cbs = user_params;
	struct rte_pdump_stats *stats = &pdump_stats->rx[port][queue];

	pdump_copy(port, queue, RTE_PCAPNG_DIRECTION_IN,
		   pkts, nb_pkts, cbs, stats);
	return nb_pkts;
}

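/* Tx counterpart of pdump_rx(), invoked on every transmit burst. */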
static uint16_t
pdump_tx(uint16_t port, uint16_t queue,
	 struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
{
	const struct pdump_rxtx_cbs *cbs = user_params;
	struct rte_pdump_stats *stats = &pdump_stats->tx[port][queue];

	pdump_copy(port, queue, RTE_PCAPNG_DIRECTION_OUT,
		   pkts, nb_pkts, cbs, stats);
	return nb_pkts;
}

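/*
 * Add or remove the Rx capture callback on a single queue, or on all
 * queues [0, end_q) when queue == RTE_PDUMP_ALL_QUEUES.
 */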
static int
pdump_register_rx_callbacks(enum pdump_version ver,
			    uint16_t end_q, uint16_t port, uint16_t queue,
			    struct rte_ring *ring, struct rte_mempool *mp,
			    struct rte_bpf *filter,
			    uint16_t operation, uint32_t snaplen)
{
	uint16_t qid;

	qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
	for (; qid < end_q; qid++) {
		struct pdump_rxtx_cbs *cbs = &rx_cbs[port][qid];

		if (operation == ENABLE) {
			if (cbs->cb) {
				PDUMP_LOG_LINE(ERR,
					"rx callback for port=%d queue=%d already exists",
					port, qid);
				return -EEXIST;
			}
			cbs->ver = ver;
			cbs->ring = ring;
			cbs->mp = mp;
			cbs->snaplen = snaplen;
			cbs->filter = filter;

			cbs->cb = rte_eth_add_first_rx_callback(port, qid,
								pdump_rx, cbs);
			if (cbs->cb == NULL) {
				PDUMP_LOG_LINE(ERR,
					"failed to add rx callback, errno=%d",
					rte_errno);
				return -rte_errno;
			}
		} else if (operation == DISABLE) {
			int ret;

			if (cbs->cb == NULL) {
				PDUMP_LOG_LINE(ERR,
					"no existing rx callback for port=%d queue=%d",
					port, qid);
				return -EINVAL;
			}
			ret = rte_eth_remove_rx_callback(port, qid, cbs->cb);
			if (ret < 0) {
				PDUMP_LOG_LINE(ERR,
					"failed to remove rx callback, errno=%d",
					-ret);
				return ret;
			}
			cbs->cb = NULL;
		}
	}

	return 0;
}

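/* Tx-side twin of pdump_register_rx_callbacks(). */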
static int
pdump_register_tx_callbacks(enum pdump_version ver,
			    uint16_t end_q, uint16_t port, uint16_t queue,
			    struct rte_ring *ring, struct rte_mempool *mp,
			    struct rte_bpf *filter,
			    uint16_t operation, uint32_t snaplen)
{
	uint16_t qid;

	qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
	for (; qid < end_q; qid++) {
		struct pdump_rxtx_cbs *cbs = &tx_cbs[port][qid];

		if (operation == ENABLE) {
			if (cbs->cb) {
				PDUMP_LOG_LINE(ERR,
					"tx callback for port=%d queue=%d already exists",
					port, qid);
				return -EEXIST;
			}
			cbs->ver = ver;
			cbs->ring = ring;
			cbs->mp = mp;
			cbs->snaplen = snaplen;
			cbs->filter = filter;

			cbs->cb = rte_eth_add_tx_callback(port, qid, pdump_tx,
							  cbs);
			if (cbs->cb == NULL) {
				PDUMP_LOG_LINE(ERR,
					"failed to add tx callback, errno=%d",
					rte_errno);
				return -rte_errno;
			}
		} else if (operation == DISABLE) {
			int ret;

			if (cbs->cb == NULL) {
				PDUMP_LOG_LINE(ERR,
					"no existing tx callback for port=%d queue=%d",
					port, qid);
				return -EINVAL;
			}
			ret = rte_eth_remove_tx_callback(port, qid, cbs->cb);
			if (ret < 0) {
				PDUMP_LOG_LINE(ERR,
					"failed to remove tx callback, errno=%d",
					-ret);
				return ret;
			}
			cbs->cb = NULL;
		}
	}

	return 0;
}

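/*
 * Apply a client request in the primary process: check the request
 * version, load the optional BPF filter, resolve the port, then register
 * or remove the Rx/Tx callbacks.
 */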
static int
set_pdump_rxtx_cbs(const struct pdump_request *p)
{
	uint16_t nb_rx_q = 0, nb_tx_q = 0, end_q, queue;
	uint16_t port;
	int ret = 0;
	struct rte_bpf *filter = NULL;
	uint32_t flags;
	uint16_t operation;
	struct rte_ring *ring;
	struct rte_mempool *mp;

	/* Check for possible DPDK version mismatch */
	if (!(p->ver == V1 || p->ver == V2)) {
		PDUMP_LOG_LINE(ERR,
			"incorrect client version %u", p->ver);
		return -EINVAL;
	}

	if (p->prm) {
		if (p->prm->prog_arg.type != RTE_BPF_ARG_PTR_MBUF) {
			PDUMP_LOG_LINE(ERR,
				"invalid BPF program type: %u",
				p->prm->prog_arg.type);
			return -EINVAL;
		}

		filter = rte_bpf_load(p->prm);
		if (filter == NULL) {
			PDUMP_LOG_LINE(ERR, "cannot load BPF filter: %s",
				rte_strerror(rte_errno));
			return -rte_errno;
		}
	}

	flags = p->flags;
	operation = p->op;
	queue = p->queue;
	ring = p->ring;
	mp = p->mp;

	ret = rte_eth_dev_get_port_by_name(p->device, &port);
	if (ret < 0) {
		PDUMP_LOG_LINE(ERR,
			"failed to get port id for device id=%s",
			p->device);
		return -EINVAL;
	}

	/* validate queue counts when capturing on all queues */
	if (queue == RTE_PDUMP_ALL_QUEUES) {
		struct rte_eth_dev_info dev_info;

		ret = rte_eth_dev_info_get(port, &dev_info);
		if (ret != 0) {
			PDUMP_LOG_LINE(ERR,
				"Error during getting device (port %u) info: %s",
				port, strerror(-ret));
			return ret;
		}

		nb_rx_q = dev_info.nb_rx_queues;
		nb_tx_q = dev_info.nb_tx_queues;
		if (nb_rx_q == 0 && flags & RTE_PDUMP_FLAG_RX) {
			PDUMP_LOG_LINE(ERR,
				"number of rx queues cannot be 0");
			return -EINVAL;
		}
		if (nb_tx_q == 0 && flags & RTE_PDUMP_FLAG_TX) {
			PDUMP_LOG_LINE(ERR,
				"number of tx queues cannot be 0");
			return -EINVAL;
		}
		if ((nb_tx_q == 0 || nb_rx_q == 0) &&
		    flags == RTE_PDUMP_FLAG_RXTX) {
			PDUMP_LOG_LINE(ERR,
				"both rx and tx queue counts must be non-zero");
			return -EINVAL;
		}
	}

	/* register or remove RX callbacks */
	if (flags & RTE_PDUMP_FLAG_RX) {
		end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;
		ret = pdump_register_rx_callbacks(p->ver, end_q, port, queue,
						  ring, mp, filter,
						  operation, p->snaplen);
		if (ret < 0)
			return ret;
	}

	/* register or remove TX callbacks */
	if (flags & RTE_PDUMP_FLAG_TX) {
		end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_tx_q : queue + 1;
		ret = pdump_register_tx_callbacks(p->ver, end_q, port, queue,
						  ring, mp, filter,
						  operation, p->snaplen);
		if (ret < 0)
			return ret;
	}

	return ret;
}

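/*
 * rte_mp action handler registered under PDUMP_MP: validates the request
 * size, applies the request and replies with a pdump_response carrying
 * the result.
 */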
static int
pdump_server(const struct rte_mp_msg *mp_msg, const void *peer)
{
	struct rte_mp_msg mp_resp;
	const struct pdump_request *cli_req;
	struct pdump_response *resp = (struct pdump_response *)&mp_resp.param;

	/* receive client request */
	if (mp_msg->len_param != sizeof(*cli_req)) {
		PDUMP_LOG_LINE(ERR, "invalid request size from client");
		resp->err_value = -EINVAL;
	} else {
		cli_req = (const struct pdump_request *)mp_msg->param;
		resp->ver = cli_req->ver;
		resp->res_op = cli_req->op;
		resp->err_value = set_pdump_rxtx_cbs(cli_req);
	}

	rte_strscpy(mp_resp.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
	mp_resp.len_param = sizeof(*resp);
	mp_resp.num_fds = 0;
	if (rte_mp_reply(&mp_resp, peer) < 0) {
		PDUMP_LOG_LINE(ERR, "failed to send to client: %s",
			strerror(rte_errno));
		return -1;
	}

	return 0;
}

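/*
 * Reserve the shared statistics memzone and register the PDUMP_MP request
 * handler; a registration failure with rte_errno == ENOTSUP (no
 * multi-process support) is tolerated.
 */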
int
rte_pdump_init(void)
{
	const struct rte_memzone *mz;
	int ret;

	mz = rte_memzone_reserve(MZ_RTE_PDUMP_STATS, sizeof(*pdump_stats),
				 rte_socket_id(), 0);
	if (mz == NULL) {
		PDUMP_LOG_LINE(ERR, "cannot allocate pdump statistics");
		rte_errno = ENOMEM;
		return -1;
	}
	pdump_stats = mz->addr;
	pdump_stats->mz = mz;

	ret = rte_mp_action_register(PDUMP_MP, pdump_server);
	if (ret && rte_errno != ENOTSUP)
		return -1;
	return 0;
}

int
rte_pdump_uninit(void)
{
	rte_mp_action_unregister(PDUMP_MP);

	if (pdump_stats != NULL) {
		rte_memzone_free(pdump_stats->mz);
		pdump_stats = NULL;
	}

	return 0;
}

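/*
 * The ring is filled by data-path threads in the primary process and
 * drained by the capture process, and mbufs are allocated and freed on
 * both sides, so the ring and mempool must be multi-producer and
 * multi-consumer.
 */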
static int
pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
{
	if (ring == NULL || mp == NULL) {
		PDUMP_LOG_LINE(ERR, "NULL ring or mempool");
		rte_errno = EINVAL;
		return -1;
	}
	if (mp->flags & RTE_MEMPOOL_F_SP_PUT ||
	    mp->flags & RTE_MEMPOOL_F_SC_GET) {
		PDUMP_LOG_LINE(ERR,
			"mempool with SP or SC set not valid for pdump, "
			"must have MP and MC set");
		rte_errno = EINVAL;
		return -1;
	}
	if (rte_ring_is_prod_single(ring) || rte_ring_is_cons_single(ring)) {
		PDUMP_LOG_LINE(ERR,
			"ring with SP or SC set is not valid for pdump, "
			"must have MP and MC set");
		rte_errno = EINVAL;
		return -1;
	}

	return 0;
}

static int
pdump_validate_flags(uint32_t flags)
{
	if ((flags & RTE_PDUMP_FLAG_RXTX) == 0) {
		PDUMP_LOG_LINE(ERR,
			"invalid flags, should be either rx/tx/rxtx");
		rte_errno = EINVAL;
		return -1;
	}

	/* reject any flags other than the ones we know about */
	if (flags & ~(RTE_PDUMP_FLAG_RXTX | RTE_PDUMP_FLAG_PCAPNG)) {
		PDUMP_LOG_LINE(ERR,
			"unknown flags: %#x", flags);
		rte_errno = ENOTSUP;
		return -1;
	}

	return 0;
}

static int
pdump_validate_port(uint16_t port, char *name)
{
	int ret = 0;

	if (port >= RTE_MAX_ETHPORTS) {
		PDUMP_LOG_LINE(ERR, "Invalid port id %u", port);
		rte_errno = EINVAL;
		return -1;
	}

	ret = rte_eth_dev_get_name_by_port(port, name);
	if (ret < 0) {
		PDUMP_LOG_LINE(ERR, "port %u to name mapping failed",
			port);
		rte_errno = EINVAL;
		return -1;
	}

	return 0;
}

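/*
 * Build a pdump_request and send it synchronously (5 second timeout) to
 * the primary process over the multi-process channel. Only valid from a
 * secondary process.
 */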
static int
pdump_prepare_client_request(const char *device, uint16_t queue,
			     uint32_t flags, uint32_t snaplen,
			     uint16_t operation,
			     struct rte_ring *ring,
			     struct rte_mempool *mp,
			     const struct rte_bpf_prm *prm)
{
	int ret = -1;
	struct rte_mp_msg mp_req, *mp_rep;
	struct rte_mp_reply mp_reply;
	struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
	struct pdump_request *req = (struct pdump_request *)mp_req.param;
	struct pdump_response *resp;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		PDUMP_LOG_LINE(ERR,
			"pdump enable/disable not allowed in primary process");
		return -EINVAL;
	}

	memset(req, 0, sizeof(*req));

	req->ver = (flags & RTE_PDUMP_FLAG_PCAPNG) ? V2 : V1;
	req->flags = flags & RTE_PDUMP_FLAG_RXTX;
	req->op = operation;
	req->queue = queue;
	rte_strscpy(req->device, device, sizeof(req->device));

	if ((operation & ENABLE) != 0) {
		req->ring = ring;
		req->mp = mp;
		req->prm = prm;
		req->snaplen = snaplen;
	}

	rte_strscpy(mp_req.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
	mp_req.len_param = sizeof(*req);
	mp_req.num_fds = 0;
	if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0) {
		mp_rep = &mp_reply.msgs[0];
		resp = (struct pdump_response *)mp_rep->param;
		if (resp->err_value == 0)
			ret = 0;
		else
			rte_errno = -resp->err_value;
		free(mp_reply.msgs);
	}

	if (ret < 0)
		PDUMP_LOG_LINE(ERR,
			"client request for pdump enable/disable failed");
	return ret;
}

/*
 * There are two versions of this function because, although the original
 * API left a placeholder for a future filter argument, it never checked
 * the value. Therefore the API cannot depend on applications passing a
 * meaningful value there.
 */
static int
pdump_enable(uint16_t port, uint16_t queue,
	     uint32_t flags, uint32_t snaplen,
	     struct rte_ring *ring, struct rte_mempool *mp,
	     const struct rte_bpf_prm *prm)
{
	int ret;
	char name[RTE_DEV_NAME_MAX_LEN];

	ret = pdump_validate_port(port, name);
	if (ret < 0)
		return ret;
	ret = pdump_validate_ring_mp(ring, mp);
	if (ret < 0)
		return ret;
	ret = pdump_validate_flags(flags);
	if (ret < 0)
		return ret;

	if (snaplen == 0)
		snaplen = UINT32_MAX;

	return pdump_prepare_client_request(name, queue, flags, snaplen,
					    ENABLE, ring, mp, prm);
}

int
rte_pdump_enable(uint16_t port, uint16_t queue, uint32_t flags,
		 struct rte_ring *ring,
		 struct rte_mempool *mp,
		 void *filter __rte_unused)
{
	return pdump_enable(port, queue, flags, 0,
			    ring, mp, NULL);
}

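/*
 * Usage sketch (not part of the library): from a secondary process,
 * create a multi-producer/multi-consumer ring and a mempool, then request
 * capture on all queues of a port. The names "pdump_ring"/"pdump_pool",
 * the sizes and the use of port 0 below are arbitrary examples.
 *
 *	struct rte_ring *ring = rte_ring_create("pdump_ring", 4096,
 *						rte_socket_id(), 0);
 *	struct rte_mempool *pool = rte_pktmbuf_pool_create("pdump_pool",
 *			8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
 *			rte_socket_id());
 *
 *	if (ring == NULL || pool == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create ring/pool\n");
 *
 *	(snaplen 0 means capture full packets; it becomes UINT32_MAX)
 *	if (rte_pdump_enable_bpf(0, RTE_PDUMP_ALL_QUEUES,
 *				 RTE_PDUMP_FLAG_RXTX | RTE_PDUMP_FLAG_PCAPNG,
 *				 0, ring, pool, NULL) < 0)
 *		rte_exit(EXIT_FAILURE, "pdump enable failed: %s\n",
 *			 rte_strerror(rte_errno));
 */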
int
rte_pdump_enable_bpf(uint16_t port, uint16_t queue,
		     uint32_t flags, uint32_t snaplen,
		     struct rte_ring *ring,
		     struct rte_mempool *mp,
		     const struct rte_bpf_prm *prm)
{
	return pdump_enable(port, queue, flags, snaplen,
			    ring, mp, prm);
}

static int
pdump_enable_by_deviceid(const char *device_id, uint16_t queue,
			 uint32_t flags, uint32_t snaplen,
			 struct rte_ring *ring,
			 struct rte_mempool *mp,
			 const struct rte_bpf_prm *prm)
{
	int ret;

	ret = pdump_validate_ring_mp(ring, mp);
	if (ret < 0)
		return ret;
	ret = pdump_validate_flags(flags);
	if (ret < 0)
		return ret;

	if (snaplen == 0)
		snaplen = UINT32_MAX;

	return pdump_prepare_client_request(device_id, queue, flags, snaplen,
					    ENABLE, ring, mp, prm);
}

int
rte_pdump_enable_by_deviceid(char *device_id, uint16_t queue,
			     uint32_t flags,
			     struct rte_ring *ring,
			     struct rte_mempool *mp,
			     void *filter __rte_unused)
{
	return pdump_enable_by_deviceid(device_id, queue, flags, 0,
					ring, mp, NULL);
}

int
rte_pdump_enable_bpf_by_deviceid(const char *device_id, uint16_t queue,
				 uint32_t flags, uint32_t snaplen,
				 struct rte_ring *ring,
				 struct rte_mempool *mp,
				 const struct rte_bpf_prm *prm)
{
	return pdump_enable_by_deviceid(device_id, queue, flags, snaplen,
					ring, mp, prm);
}

int
rte_pdump_disable(uint16_t port, uint16_t queue, uint32_t flags)
{
	int ret = 0;
	char name[RTE_DEV_NAME_MAX_LEN];

	ret = pdump_validate_port(port, name);
	if (ret < 0)
		return ret;
	ret = pdump_validate_flags(flags);
	if (ret < 0)
		return ret;

	ret = pdump_prepare_client_request(name, queue, flags, 0,
					   DISABLE, NULL, NULL, NULL);

	return ret;
}

int
rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue,
			      uint32_t flags)
{
	int ret = 0;

	ret = pdump_validate_flags(flags);
	if (ret < 0)
		return ret;

	ret = pdump_prepare_client_request(device_id, queue, flags, 0,
					   DISABLE, NULL, NULL, NULL);

	return ret;
}

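/*
 * Sum the per-queue counters into *total; treats struct rte_pdump_stats
 * as a flat array of uint64_t counters, so the struct must contain only
 * such fields.
 */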
static void
pdump_sum_stats(uint16_t port, uint16_t nq,
		struct rte_pdump_stats stats[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
		struct rte_pdump_stats *total)
{
	uint64_t *sum = (uint64_t *)total;
	unsigned int i;
	uint64_t val;
	uint16_t qid;

	for (qid = 0; qid < nq; qid++) {
		const RTE_ATOMIC(uint64_t) *perq = (const uint64_t __rte_atomic *)&stats[port][qid];

		for (i = 0; i < sizeof(*total) / sizeof(uint64_t); i++) {
			val = rte_atomic_load_explicit(&perq[i], rte_memory_order_relaxed);
			sum[i] += val;
		}
	}
}

int
rte_pdump_stats(uint16_t port, struct rte_pdump_stats *stats)
{
	struct rte_eth_dev_info dev_info;
	const struct rte_memzone *mz;
	int ret;

	memset(stats, 0, sizeof(*stats));
	ret = rte_eth_dev_info_get(port, &dev_info);
	if (ret != 0) {
		PDUMP_LOG_LINE(ERR,
			"Error during getting device (port %u) info: %s",
			port, strerror(-ret));
		return ret;
	}

	if (pdump_stats == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* rte_pdump_init was not called */
			PDUMP_LOG_LINE(ERR, "pdump stats not initialized");
			rte_errno = EINVAL;
			return -1;
		}

		/* secondary process looks up the memzone */
		mz = rte_memzone_lookup(MZ_RTE_PDUMP_STATS);
		if (mz == NULL) {
			/* rte_pdump_init was not called in the primary process */
			PDUMP_LOG_LINE(ERR, "can not find pdump stats");
			rte_errno = EINVAL;
			return -1;
		}
		pdump_stats = mz->addr;
	}

	pdump_sum_stats(port, dev_info.nb_rx_queues, pdump_stats->rx, stats);
	pdump_sum_stats(port, dev_info.nb_tx_queues, pdump_stats->tx, stats);
	return 0;
}