/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Intel Corporation
 */

#include <stdlib.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_pcapng.h>

#include "rte_pdump.h"

RTE_LOG_REGISTER_DEFAULT(pdump_logtype, NOTICE);

/* Macro for printing using RTE_LOG */
#define PDUMP_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, pdump_logtype, "%s(): " fmt, \
		__func__, ## args)

/* Used for the multi-process communication */
#define PDUMP_MP "mp_pdump"

/* Operation requested by the capture client in a pdump_request. */
enum pdump_operation {
	DISABLE = 1,
	ENABLE = 2
};

/* Internal version number in request */
enum pdump_version {
	V1 = 1, /* no filtering or snap */
	V2 = 2, /* V2 adds BPF filtering, snap length and pcapng-style copy */
};

/*
 * Request sent by the capture client over the rte_mp channel
 * (see pdump_prepare_client_request()) and served by pdump_server().
 */
struct pdump_request {
	uint16_t ver;	/* pdump_version spoken by the client (V1 or V2) */
	uint16_t op;	/* pdump_operation: ENABLE or DISABLE */
	uint32_t flags;	/* RTE_PDUMP_FLAG_RX/TX direction bits */
	char device[RTE_DEV_NAME_MAX_LEN];	/* target ethdev name */
	uint16_t queue;	/* queue id, or RTE_PDUMP_ALL_QUEUES */
	struct rte_ring *ring;	/* ring that receives the cloned mbufs */
	struct rte_mempool *mp;	/* pool the clones are allocated from */

	/* Only used by ENABLE requests (see pdump_prepare_client_request). */
	const struct rte_bpf_prm *prm;	/* optional BPF filter program */
	uint32_t snaplen;	/* max bytes captured per packet */
};

/* Reply sent back to the client; err_value is 0 on success. */
struct pdump_response {
	uint16_t ver;		/* echoes the request version */
	uint16_t res_op;	/* echoes the request operation */
	int32_t err_value;	/* 0 or the set_pdump_rxtx_cbs() result */
};

/*
 * Per-port, per-queue capture state installed by the register helpers;
 * one table for Rx callbacks and one for Tx callbacks.
 */
static struct pdump_rxtx_cbs {
	struct rte_ring *ring;	/* destination ring for cloned packets */
	struct rte_mempool *mp;	/* mempool used for the clones */
	const struct rte_eth_rxtx_callback *cb;	/* non-NULL while enabled */
	const struct rte_bpf *filter;	/* optional loaded BPF filter */
	enum pdump_version ver;	/* selects pcapng copy (V2) vs plain copy */
	uint32_t snaplen;	/* capture length limit per packet */
} rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];


/*
 * The packet capture statistics keep track of packets
 * accepted, filtered and dropped. These are per-queue
 * and in memory between primary and secondary processes.
 */
static const char MZ_RTE_PDUMP_STATS[] = "rte_pdump_stats";
/* Statistics live in a memzone (see rte_pdump_init / rte_pdump_stats). */
static struct {
	struct rte_pdump_stats rx[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
	struct rte_pdump_stats tx[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
	const struct rte_memzone *mz;	/* backing memzone, freed in uninit */
} *pdump_stats;

/*
 * Create a clone of mbuf to be placed into ring.
 *
 * Runs in the datapath callbacks (pdump_rx/pdump_tx): applies the
 * optional BPF filter, copies matching packets (pcapng-wrapped for V2
 * clients, plain pktmbuf copy for V1) and enqueues the clones onto the
 * capture ring. All statistics are updated with relaxed atomics since
 * readers may be in another process.
 */
static void
pdump_copy(uint16_t port_id, uint16_t queue,
	   enum rte_pcapng_direction direction,
	   struct rte_mbuf **pkts, uint16_t nb_pkts,
	   const struct pdump_rxtx_cbs *cbs,
	   struct rte_pdump_stats *stats)
{
	unsigned int i;
	int ring_enq;
	uint16_t d_pkts = 0;
	struct rte_mbuf *dup_bufs[nb_pkts];
	uint64_t ts;
	struct rte_ring *ring;
	struct rte_mempool *mp;
	struct rte_mbuf *p;
	uint64_t rcs[nb_pkts];	/* per-packet BPF return codes */

	if (cbs->filter)
		rte_bpf_exec_burst(cbs->filter, (void **)pkts, rcs, nb_pkts);

	/* One timestamp for the whole burst. */
	ts = rte_get_tsc_cycles();
	ring = cbs->ring;
	mp = cbs->mp;
	for (i = 0; i < nb_pkts; i++) {
		/*
		 * This uses same BPF return value convention as socket filter
		 * and pcap_offline_filter.
		 * if program returns zero
		 * then packet doesn't match the filter (will be ignored).
		 */
		if (cbs->filter && rcs[i] == 0) {
			__atomic_fetch_add(&stats->filtered,
					   1, __ATOMIC_RELAXED);
			continue;
		}

		/*
		 * If using pcapng then want to wrap packets
		 * otherwise a simple copy.
		 */
		if (cbs->ver == V2)
			p = rte_pcapng_copy(port_id, queue,
					    pkts[i], mp, cbs->snaplen,
					    ts, direction);
		else
			p = rte_pktmbuf_copy(pkts[i], mp, 0, cbs->snaplen);

		if (unlikely(p == NULL))
			__atomic_fetch_add(&stats->nombuf, 1, __ATOMIC_RELAXED);
		else
			dup_bufs[d_pkts++] = p;
	}

	__atomic_fetch_add(&stats->accepted, d_pkts, __ATOMIC_RELAXED);

	ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL);
	if (unlikely(ring_enq < d_pkts)) {
		/* Ring full: count and free the clones that did not fit. */
		unsigned int drops = d_pkts - ring_enq;

		__atomic_fetch_add(&stats->ringfull, drops, __ATOMIC_RELAXED);
		rte_pktmbuf_free_bulk(&dup_bufs[ring_enq], drops);
	}
}

/* Rx callback: mirrors the burst into the capture ring, never drops
 * or modifies the original packets.
 */
static uint16_t
pdump_rx(uint16_t port, uint16_t queue,
	 struct rte_mbuf **pkts, uint16_t nb_pkts,
	 uint16_t max_pkts __rte_unused, void *user_params)
{
	const struct pdump_rxtx_cbs *cbs = user_params;
	struct rte_pdump_stats *stats = &pdump_stats->rx[port][queue];

	pdump_copy(port, queue, RTE_PCAPNG_DIRECTION_IN,
		   pkts, nb_pkts, cbs, stats);
	return nb_pkts;
}

/* Tx callback: same as pdump_rx but for the transmit direction. */
static uint16_t
pdump_tx(uint16_t port, uint16_t queue,
	 struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
{
	const struct pdump_rxtx_cbs *cbs = user_params;
	struct rte_pdump_stats *stats = &pdump_stats->tx[port][queue];

	pdump_copy(port, queue, RTE_PCAPNG_DIRECTION_OUT,
		   pkts, nb_pkts, cbs, stats);
	return nb_pkts;
}

/*
 * Install (ENABLE) or remove (DISABLE) the pdump Rx callback on the
 * queue range [queue, end_q) (or [0, end_q) for RTE_PDUMP_ALL_QUEUES).
 * Returns 0 on success, a negative errno on failure.
 */
static int
pdump_register_rx_callbacks(enum pdump_version ver,
			    uint16_t end_q, uint16_t port, uint16_t queue,
			    struct rte_ring *ring, struct rte_mempool *mp,
			    struct rte_bpf *filter,
			    uint16_t operation, uint32_t snaplen)
{
	uint16_t qid;

	qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
	for (; qid < end_q; qid++) {
		struct pdump_rxtx_cbs *cbs = &rx_cbs[port][qid];

		if (operation == ENABLE) {
			if (cbs->cb) {
				PDUMP_LOG(ERR,
					  "rx callback for port=%d queue=%d, already exists\n",
					  port, qid);
				return -EEXIST;
			}
			/* Publish the capture parameters before the callback
			 * is armed, so the datapath never sees stale state.
			 */
			cbs->ver = ver;
			cbs->ring = ring;
			cbs->mp = mp;
			cbs->snaplen = snaplen;
			cbs->filter = filter;

			cbs->cb = rte_eth_add_first_rx_callback(port, qid,
								pdump_rx, cbs);
			if (cbs->cb == NULL) {
				PDUMP_LOG(ERR,
					  "failed to add rx callback, errno=%d\n",
					  rte_errno);
				/* NOTE(review): returns positive rte_errno
				 * while other paths return negative values;
				 * the client only checks non-zero, but verify
				 * before relying on the sign — TODO confirm.
				 */
				return rte_errno;
			}
		} else if (operation == DISABLE) {
			int ret;

			if (cbs->cb == NULL) {
				PDUMP_LOG(ERR,
					  "no existing rx callback for port=%d queue=%d\n",
					  port, qid);
				return -EINVAL;
			}
			ret = rte_eth_remove_rx_callback(port, qid, cbs->cb);
			if (ret < 0) {
				PDUMP_LOG(ERR,
					  "failed to remove rx callback, errno=%d\n",
					  -ret);
				return ret;
			}
			cbs->cb = NULL;
		}
	}

	return 0;
}

/*
 * Tx counterpart of pdump_register_rx_callbacks(); identical structure
 * but operates on tx_cbs[] and the ethdev Tx callback API.
 */
static int
pdump_register_tx_callbacks(enum pdump_version ver,
			    uint16_t end_q, uint16_t port, uint16_t queue,
			    struct rte_ring *ring, struct rte_mempool *mp,
			    struct rte_bpf *filter,
			    uint16_t operation, uint32_t snaplen)
{

	uint16_t qid;

	qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
	for (; qid < end_q; qid++) {
		struct pdump_rxtx_cbs *cbs = &tx_cbs[port][qid];

		if (operation == ENABLE) {
			if (cbs->cb) {
				PDUMP_LOG(ERR,
					  "tx callback for port=%d queue=%d, already exists\n",
					  port, qid);
				return -EEXIST;
			}
			cbs->ver = ver;
			cbs->ring = ring;
			cbs->mp = mp;
			cbs->snaplen = snaplen;
			cbs->filter = filter;

			cbs->cb = rte_eth_add_tx_callback(port, qid, pdump_tx,
							  cbs);
			if (cbs->cb == NULL) {
				PDUMP_LOG(ERR,
					  "failed to add tx callback, errno=%d\n",
					  rte_errno);
				/* NOTE(review): positive rte_errno returned
				 * here as well; see the Rx helper.
				 */
				return rte_errno;
			}
		} else if (operation == DISABLE) {
			int ret;

			if (cbs->cb == NULL) {
				PDUMP_LOG(ERR,
					  "no existing tx callback for port=%d queue=%d\n",
					  port, qid);
				return -EINVAL;
			}
			ret = rte_eth_remove_tx_callback(port, qid, cbs->cb);
			if (ret < 0) {
				PDUMP_LOG(ERR,
					  "failed to remove tx callback, errno=%d\n",
					  -ret);
				return ret;
			}
			cbs->cb = NULL;
		}
	}

	return 0;
}

/*
 * Primary-process handler for a client request: validates the request,
 * loads the optional BPF filter, resolves the port and installs or
 * removes the Rx/Tx callbacks as requested.
 *
 * NOTE(review): when rte_bpf_load() succeeds, later error returns (port
 * lookup, queue-count checks, callback registration) and the DISABLE
 * path do not appear to free `filter` — looks like a leak of the loaded
 * BPF program; confirm against rte_bpf_destroy() usage upstream.
 */
static int
set_pdump_rxtx_cbs(const struct pdump_request *p)
{
	uint16_t nb_rx_q = 0, nb_tx_q = 0, end_q, queue;
	uint16_t port;
	int ret = 0;
	struct rte_bpf *filter = NULL;
	uint32_t flags;
	uint16_t operation;
	struct rte_ring *ring;
	struct rte_mempool *mp;

	/* Check for possible DPDK version mismatch */
	if (!(p->ver == V1 || p->ver == V2)) {
		PDUMP_LOG(ERR,
			  "incorrect client version %u\n", p->ver);
		return -EINVAL;
	}

	if (p->prm) {
		/* The filter must take an mbuf pointer argument. */
		if (p->prm->prog_arg.type != RTE_BPF_ARG_PTR_MBUF) {
			PDUMP_LOG(ERR,
				  "invalid BPF program type: %u\n",
				  p->prm->prog_arg.type);
			return -EINVAL;
		}

		filter = rte_bpf_load(p->prm);
		if (filter == NULL) {
			PDUMP_LOG(ERR, "cannot load BPF filter: %s\n",
				  rte_strerror(rte_errno));
			return -rte_errno;
		}
	}

	flags = p->flags;
	operation = p->op;
	queue = p->queue;
	ring = p->ring;
	mp = p->mp;

	ret = rte_eth_dev_get_port_by_name(p->device, &port);
	if (ret < 0) {
		PDUMP_LOG(ERR,
			  "failed to get port id for device id=%s\n",
			  p->device);
		return -EINVAL;
	}

	/* validation if packet capture is for all queues */
	if (queue == RTE_PDUMP_ALL_QUEUES) {
		struct rte_eth_dev_info dev_info;

		ret = rte_eth_dev_info_get(port, &dev_info);
		if (ret != 0) {
			PDUMP_LOG(ERR,
				  "Error during getting device (port %u) info: %s\n",
				  port, strerror(-ret));
			return ret;
		}

		nb_rx_q = dev_info.nb_rx_queues;
		nb_tx_q = dev_info.nb_tx_queues;
		if (nb_rx_q == 0 && flags & RTE_PDUMP_FLAG_RX) {
			PDUMP_LOG(ERR,
				  "number of rx queues cannot be 0\n");
			return -EINVAL;
		}
		if (nb_tx_q == 0 && flags & RTE_PDUMP_FLAG_TX) {
			PDUMP_LOG(ERR,
				  "number of tx queues cannot be 0\n");
			return -EINVAL;
		}
		if ((nb_tx_q == 0 || nb_rx_q == 0) &&
		    flags == RTE_PDUMP_FLAG_RXTX) {
			PDUMP_LOG(ERR,
				  "both tx&rx queues must be non zero\n");
			return -EINVAL;
		}
	}

	/* register RX callback */
	if (flags & RTE_PDUMP_FLAG_RX) {
		end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;
		ret = pdump_register_rx_callbacks(p->ver, end_q, port, queue,
						  ring, mp, filter,
						  operation, p->snaplen);
		if (ret < 0)
			return ret;
	}

	/* register TX callback */
	if (flags & RTE_PDUMP_FLAG_TX) {
		end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_tx_q : queue + 1;
		ret = pdump_register_tx_callbacks(p->ver, end_q, port, queue,
						  ring, mp, filter,
						  operation, p->snaplen);
		if (ret < 0)
			return ret;
	}

	return ret;
}

/*
 * rte_mp action handler registered under PDUMP_MP: decodes the client
 * request, applies it via set_pdump_rxtx_cbs() and always sends a reply
 * carrying the result in err_value.
 */
static int
pdump_server(const struct rte_mp_msg *mp_msg, const void *peer)
{
	struct rte_mp_msg mp_resp;
	const struct pdump_request *cli_req;
	struct pdump_response *resp = (struct pdump_response *)&mp_resp.param;

	/* recv client requests */
	if (mp_msg->len_param != sizeof(*cli_req)) {
		PDUMP_LOG(ERR, "failed to recv from client\n");
		resp->err_value = -EINVAL;
	} else {
		cli_req = (const struct pdump_request *)mp_msg->param;
		resp->ver = cli_req->ver;
		resp->res_op = cli_req->op;
		resp->err_value = set_pdump_rxtx_cbs(cli_req);
	}

	rte_strscpy(mp_resp.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
	mp_resp.len_param = sizeof(*resp);
	mp_resp.num_fds = 0;
	if (rte_mp_reply(&mp_resp, peer) < 0) {
		PDUMP_LOG(ERR, "failed to send to client:%s\n",
			  strerror(rte_errno));
		return -1;
	}

	return 0;
}

/*
 * Initialize the pdump primary-process side: reserve the shared
 * statistics memzone and register the multi-process request handler.
 * Returns 0 on success, -1 with rte_errno set on failure.
 */
int
rte_pdump_init(void)
{
	const struct rte_memzone *mz;
	int ret;

	mz = rte_memzone_reserve(MZ_RTE_PDUMP_STATS, sizeof(*pdump_stats),
				 rte_socket_id(), 0);
	if (mz == NULL) {
		PDUMP_LOG(ERR, "cannot allocate pdump statistics\n");
		rte_errno = ENOMEM;
		return -1;
	}
	pdump_stats = mz->addr;
	pdump_stats->mz = mz;

	ret = rte_mp_action_register(PDUMP_MP, pdump_server);
	/* ENOTSUP means no multi-process support; not fatal here. */
	if (ret && rte_errno != ENOTSUP)
		return -1;
	return 0;
}

/* Tear down what rte_pdump_init() set up. Always returns 0. */
int
rte_pdump_uninit(void)
{
	rte_mp_action_unregister(PDUMP_MP);

	if (pdump_stats != NULL) {
		rte_memzone_free(pdump_stats->mz);
		pdump_stats = NULL;
	}

	return 0;
}

/*
 * Both the ring and the mempool are shared by multiple lcores/queues,
 * so they must be multi-producer/multi-consumer capable.
 * Returns 0 if valid, -1 with rte_errno = EINVAL otherwise.
 */
static int
pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
{
	if (ring == NULL || mp == NULL) {
		PDUMP_LOG(ERR, "NULL ring or mempool\n");
		rte_errno = EINVAL;
		return -1;
	}
	if (mp->flags & RTE_MEMPOOL_F_SP_PUT ||
	    mp->flags & RTE_MEMPOOL_F_SC_GET) {
		PDUMP_LOG(ERR,
			  "mempool with SP or SC set not valid for pdump,"
			  "must have MP and MC set\n");
		rte_errno = EINVAL;
		return -1;
	}
	if (rte_ring_is_prod_single(ring) || rte_ring_is_cons_single(ring)) {
		PDUMP_LOG(ERR,
			  "ring with SP or SC set is not valid for pdump,"
			  "must have MP and MC set\n");
		rte_errno = EINVAL;
		return -1;
	}

	return 0;
}

/*
 * Require at least one direction bit and reject unknown flags.
 * Returns 0 if valid, -1 with rte_errno set otherwise.
 */
static int
pdump_validate_flags(uint32_t flags)
{
	if ((flags & RTE_PDUMP_FLAG_RXTX) == 0) {
		PDUMP_LOG(ERR,
			  "invalid flags, should be either rx/tx/rxtx\n");
		rte_errno = EINVAL;
		return -1;
	}

	/* mask off the flags we know about */
	if (flags & ~(RTE_PDUMP_FLAG_RXTX | RTE_PDUMP_FLAG_PCAPNG)) {
		PDUMP_LOG(ERR,
			  "unknown flags: %#x\n", flags);
		rte_errno = ENOTSUP;
		return -1;
	}

	return 0;
}

/*
 * Resolve a port id to its device name (written into `name`, which must
 * hold RTE_DEV_NAME_MAX_LEN bytes). Returns 0 on success, -1 with
 * rte_errno = EINVAL on failure.
 */
static int
pdump_validate_port(uint16_t port, char *name)
{
	int ret = 0;

	if (port >= RTE_MAX_ETHPORTS) {
		PDUMP_LOG(ERR, "Invalid port id %u\n", port);
		rte_errno = EINVAL;
		return -1;
	}

	ret = rte_eth_dev_get_name_by_port(port, name);
	if (ret < 0) {
		PDUMP_LOG(ERR, "port %u to name mapping failed\n",
			  port);
		rte_errno = EINVAL;
		return -1;
	}

	return 0;
}

/*
 * Build a pdump_request and send it synchronously (5 second timeout) to
 * the primary process. Returns 0 on success; on failure returns -1 and
 * leaves the server's error code in rte_errno.
 */
static int
pdump_prepare_client_request(const char *device, uint16_t queue,
			     uint32_t flags, uint32_t snaplen,
			     uint16_t operation,
			     struct rte_ring *ring,
			     struct rte_mempool *mp,
			     const struct rte_bpf_prm *prm)
{
	int ret = -1;
	struct rte_mp_msg mp_req, *mp_rep;
	struct rte_mp_reply mp_reply;
	struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
	struct pdump_request *req = (struct pdump_request *)mp_req.param;
	struct pdump_response *resp;

	memset(req, 0, sizeof(*req));

	/* PCAPNG flag selects the V2 protocol (filter + snaplen). */
	req->ver = (flags & RTE_PDUMP_FLAG_PCAPNG) ? V2 : V1;
	req->flags = flags & RTE_PDUMP_FLAG_RXTX;
	req->op = operation;
	req->queue = queue;
	rte_strscpy(req->device, device, sizeof(req->device));

	if ((operation & ENABLE) != 0) {
		req->ring = ring;
		req->mp = mp;
		req->prm = prm;
		req->snaplen = snaplen;
	}

	rte_strscpy(mp_req.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
	mp_req.len_param = sizeof(*req);
	mp_req.num_fds = 0;
	if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0) {
		mp_rep = &mp_reply.msgs[0];
		resp = (struct pdump_response *)mp_rep->param;
		rte_errno = resp->err_value;
		if (!resp->err_value)
			ret = 0;
		free(mp_reply.msgs);
	}

	if (ret < 0)
		PDUMP_LOG(ERR,
			  "client request for pdump enable/disable failed\n");
	return ret;
}

/*
 * There are two versions of this function, because although original API
 * left place holder for future filter, it never checked the value.
 * Therefore the API can't depend on application passing a non
 * bogus value.
578 */ 579 static int 580 pdump_enable(uint16_t port, uint16_t queue, 581 uint32_t flags, uint32_t snaplen, 582 struct rte_ring *ring, struct rte_mempool *mp, 583 const struct rte_bpf_prm *prm) 584 { 585 int ret; 586 char name[RTE_DEV_NAME_MAX_LEN]; 587 588 ret = pdump_validate_port(port, name); 589 if (ret < 0) 590 return ret; 591 ret = pdump_validate_ring_mp(ring, mp); 592 if (ret < 0) 593 return ret; 594 ret = pdump_validate_flags(flags); 595 if (ret < 0) 596 return ret; 597 598 if (snaplen == 0) 599 snaplen = UINT32_MAX; 600 601 return pdump_prepare_client_request(name, queue, flags, snaplen, 602 ENABLE, ring, mp, prm); 603 } 604 605 int 606 rte_pdump_enable(uint16_t port, uint16_t queue, uint32_t flags, 607 struct rte_ring *ring, 608 struct rte_mempool *mp, 609 void *filter __rte_unused) 610 { 611 return pdump_enable(port, queue, flags, 0, 612 ring, mp, NULL); 613 } 614 615 int 616 rte_pdump_enable_bpf(uint16_t port, uint16_t queue, 617 uint32_t flags, uint32_t snaplen, 618 struct rte_ring *ring, 619 struct rte_mempool *mp, 620 const struct rte_bpf_prm *prm) 621 { 622 return pdump_enable(port, queue, flags, snaplen, 623 ring, mp, prm); 624 } 625 626 static int 627 pdump_enable_by_deviceid(const char *device_id, uint16_t queue, 628 uint32_t flags, uint32_t snaplen, 629 struct rte_ring *ring, 630 struct rte_mempool *mp, 631 const struct rte_bpf_prm *prm) 632 { 633 int ret; 634 635 ret = pdump_validate_ring_mp(ring, mp); 636 if (ret < 0) 637 return ret; 638 ret = pdump_validate_flags(flags); 639 if (ret < 0) 640 return ret; 641 642 if (snaplen == 0) 643 snaplen = UINT32_MAX; 644 645 return pdump_prepare_client_request(device_id, queue, flags, snaplen, 646 ENABLE, ring, mp, prm); 647 } 648 649 int 650 rte_pdump_enable_by_deviceid(char *device_id, uint16_t queue, 651 uint32_t flags, 652 struct rte_ring *ring, 653 struct rte_mempool *mp, 654 void *filter __rte_unused) 655 { 656 return pdump_enable_by_deviceid(device_id, queue, flags, 0, 657 ring, mp, NULL); 658 } 
659 660 int 661 rte_pdump_enable_bpf_by_deviceid(const char *device_id, uint16_t queue, 662 uint32_t flags, uint32_t snaplen, 663 struct rte_ring *ring, 664 struct rte_mempool *mp, 665 const struct rte_bpf_prm *prm) 666 { 667 return pdump_enable_by_deviceid(device_id, queue, flags, snaplen, 668 ring, mp, prm); 669 } 670 671 int 672 rte_pdump_disable(uint16_t port, uint16_t queue, uint32_t flags) 673 { 674 int ret = 0; 675 char name[RTE_DEV_NAME_MAX_LEN]; 676 677 ret = pdump_validate_port(port, name); 678 if (ret < 0) 679 return ret; 680 ret = pdump_validate_flags(flags); 681 if (ret < 0) 682 return ret; 683 684 ret = pdump_prepare_client_request(name, queue, flags, 0, 685 DISABLE, NULL, NULL, NULL); 686 687 return ret; 688 } 689 690 int 691 rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue, 692 uint32_t flags) 693 { 694 int ret = 0; 695 696 ret = pdump_validate_flags(flags); 697 if (ret < 0) 698 return ret; 699 700 ret = pdump_prepare_client_request(device_id, queue, flags, 0, 701 DISABLE, NULL, NULL, NULL); 702 703 return ret; 704 } 705 706 static void 707 pdump_sum_stats(uint16_t port, uint16_t nq, 708 struct rte_pdump_stats stats[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT], 709 struct rte_pdump_stats *total) 710 { 711 uint64_t *sum = (uint64_t *)total; 712 unsigned int i; 713 uint64_t val; 714 uint16_t qid; 715 716 for (qid = 0; qid < nq; qid++) { 717 const uint64_t *perq = (const uint64_t *)&stats[port][qid]; 718 719 for (i = 0; i < sizeof(*total) / sizeof(uint64_t); i++) { 720 val = __atomic_load_n(&perq[i], __ATOMIC_RELAXED); 721 sum[i] += val; 722 } 723 } 724 } 725 726 int 727 rte_pdump_stats(uint16_t port, struct rte_pdump_stats *stats) 728 { 729 struct rte_eth_dev_info dev_info; 730 const struct rte_memzone *mz; 731 int ret; 732 733 memset(stats, 0, sizeof(*stats)); 734 ret = rte_eth_dev_info_get(port, &dev_info); 735 if (ret != 0) { 736 PDUMP_LOG(ERR, 737 "Error during getting device (port %u) info: %s\n", 738 port, strerror(-ret)); 739 return 
ret; 740 } 741 742 if (pdump_stats == NULL) { 743 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 744 /* rte_pdump_init was not called */ 745 PDUMP_LOG(ERR, "pdump stats not initialized\n"); 746 rte_errno = EINVAL; 747 return -1; 748 } 749 750 /* secondary process looks up the memzone */ 751 mz = rte_memzone_lookup(MZ_RTE_PDUMP_STATS); 752 if (mz == NULL) { 753 /* rte_pdump_init was not called in primary process?? */ 754 PDUMP_LOG(ERR, "can not find pdump stats\n"); 755 rte_errno = EINVAL; 756 return -1; 757 } 758 pdump_stats = mz->addr; 759 } 760 761 pdump_sum_stats(port, dev_info.nb_rx_queues, pdump_stats->rx, stats); 762 pdump_sum_stats(port, dev_info.nb_tx_queues, pdump_stats->tx, stats); 763 return 0; 764 } 765