/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright(c) 2014 6WIND S.A.
 * All rights reserved.
 */

#include <stdlib.h>
#include <time.h>

#include <pcap.h>

#include <rte_cycles.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#include <bus_vdev_driver.h>
#include <rte_os_shim.h>

#include "pcap_osdep.h"

#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
#define RTE_ETH_PCAP_SNAPLEN RTE_ETHER_MAX_JUMBO_FRAME_LEN
#define RTE_ETH_PCAP_PROMISC 1
#define RTE_ETH_PCAP_TIMEOUT -1

#define ETH_PCAP_RX_PCAP_ARG "rx_pcap"
#define ETH_PCAP_TX_PCAP_ARG "tx_pcap"
#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
#define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in"
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG "iface"
#define ETH_PCAP_PHY_MAC_ARG "phy_mac"
#define ETH_PCAP_INFINITE_RX_ARG "infinite_rx"

#define ETH_PCAP_ARG_MAXLEN 64

#define RTE_PMD_PCAP_MAX_QUEUES 16

static char errbuf[PCAP_ERRBUF_SIZE];
static struct timespec start_time;
static uint64_t start_cycles;
static uint64_t hz;
static uint8_t iface_idx;

static uint64_t timestamp_rx_dynflag;
static int timestamp_dynfield_offset = -1;

struct queue_stat {
	volatile unsigned long pkts;
	volatile unsigned long bytes;
	volatile unsigned long err_pkts;
	volatile unsigned long rx_nombuf;
};

struct queue_missed_stat {
	/* last value retrieved from pcap */
	unsigned int pcap;
	/* stores values lost by pcap stop or rollover */
	unsigned long mnemonic;
	/* value on last reset */
	unsigned long reset;
};

struct pcap_rx_queue {
	uint16_t port_id;
	uint16_t queue_id;
	struct rte_mempool *mb_pool;
	struct queue_stat rx_stat;
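	/* Rx drops reported by libpcap; surfaced as imissed in eth_stats_get() */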
	struct queue_missed_stat missed_stat;
	char name[PATH_MAX];
	char type[ETH_PCAP_ARG_MAXLEN];

	/* Contains pre-generated packets to be looped through */
	struct rte_ring *pkts;
};

struct pcap_tx_queue {
	uint16_t port_id;
	uint16_t queue_id;
	struct queue_stat tx_stat;
	char name[PATH_MAX];
	char type[ETH_PCAP_ARG_MAXLEN];
};

struct pmd_internals {
	struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
	struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
	char devargs[ETH_PCAP_ARG_MAXLEN];
	struct rte_ether_addr eth_addr;
	int if_index;
	int single_iface;
	int phy_mac;
	unsigned int infinite_rx;
};

struct pmd_process_private {
	pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
	pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
	pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES];
};

struct pmd_devargs {
	unsigned int num_of_queue;
	struct devargs_queue {
		pcap_dumper_t *dumper;
		pcap_t *pcap;
		const char *name;
		const char *type;
	} queue[RTE_PMD_PCAP_MAX_QUEUES];
	int phy_mac;
};

struct pmd_devargs_all {
	struct pmd_devargs rx_queues;
	struct pmd_devargs tx_queues;
	int single_iface;
	unsigned int is_tx_pcap;
	unsigned int is_tx_iface;
	unsigned int is_rx_pcap;
	unsigned int is_rx_iface;
	unsigned int infinite_rx;
};

static const char *valid_arguments[] = {
	ETH_PCAP_RX_PCAP_ARG,
	ETH_PCAP_TX_PCAP_ARG,
	ETH_PCAP_RX_IFACE_ARG,
	ETH_PCAP_RX_IFACE_IN_ARG,
	ETH_PCAP_TX_IFACE_ARG,
	ETH_PCAP_IFACE_ARG,
	ETH_PCAP_PHY_MAC_ARG,
	ETH_PCAP_INFINITE_RX_ARG,
	NULL
};

static struct rte_eth_link pmd_link = {
	.link_speed = RTE_ETH_SPEED_NUM_10G,
	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
	.link_status = RTE_ETH_LINK_DOWN,
	.link_autoneg = RTE_ETH_LINK_FIXED,
};
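
/*
 * Example vdev strings (illustrative only; the keys are those listed in
 * valid_arguments above):
 *   --vdev 'net_pcap0,rx_pcap=in.pcap,tx_pcap=out.pcap'
 *   --vdev 'net_pcap1,iface=eth0,phy_mac=1'
 */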

RTE_LOG_REGISTER_DEFAULT(eth_pcap_logtype, NOTICE);

static struct queue_missed_stat*
queue_missed_stat_update(struct rte_eth_dev *dev, unsigned int qid)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct queue_missed_stat *missed_stat =
			&internals->rx_queue[qid].missed_stat;
	const struct pmd_process_private *pp = dev->process_private;
	pcap_t *pcap = pp->rx_pcap[qid];
	struct pcap_stat stat;

	if (!pcap || (pcap_stats(pcap, &stat) != 0))
		return missed_stat;

	/* rollover check - best effort fixup assuming single rollover */
	if (stat.ps_drop < missed_stat->pcap)
		missed_stat->mnemonic += UINT_MAX;
	missed_stat->pcap = stat.ps_drop;

	return missed_stat;
}

static void
queue_missed_stat_on_stop_update(struct rte_eth_dev *dev, unsigned int qid)
{
	struct queue_missed_stat *missed_stat =
			queue_missed_stat_update(dev, qid);

	missed_stat->mnemonic += missed_stat->pcap;
	missed_stat->pcap = 0;
}

static void
queue_missed_stat_reset(struct rte_eth_dev *dev, unsigned int qid)
{
	struct queue_missed_stat *missed_stat =
			queue_missed_stat_update(dev, qid);

	missed_stat->reset = missed_stat->pcap;
	missed_stat->mnemonic = 0;
}

static unsigned long
queue_missed_stat_get(struct rte_eth_dev *dev, unsigned int qid)
{
	const struct queue_missed_stat *missed_stat =
			queue_missed_stat_update(dev, qid);

	return missed_stat->pcap + missed_stat->mnemonic - missed_stat->reset;
}

static int
eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
		const u_char *data, uint16_t data_len)
{
	/* Copy the first segment. */
	uint16_t len = rte_pktmbuf_tailroom(mbuf);
	struct rte_mbuf *m = mbuf;

	rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);
	data_len -= len;
	data += len;

	while (data_len > 0) {
		/* Allocate next mbuf and point to that. */
		m->next = rte_pktmbuf_alloc(mb_pool);

		if (unlikely(!m->next))
			return -1;

		m = m->next;

		/* Headroom is not needed in chained mbufs. */
		rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
		m->pkt_len = 0;
		m->data_len = 0;

		/* Copy next segment. */
		len = RTE_MIN(rte_pktmbuf_tailroom(m), data_len);
		rte_memcpy(rte_pktmbuf_append(m, len), data, len);

		mbuf->nb_segs++;
		data_len -= len;
		data += len;
	}

	return mbuf->nb_segs;
}

static uint16_t
eth_pcap_rx_infinite(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	int i;
	struct pcap_rx_queue *pcap_q = queue;
	uint32_t rx_bytes = 0;

	if (unlikely(nb_pkts == 0))
		return 0;

	if (rte_pktmbuf_alloc_bulk(pcap_q->mb_pool, bufs, nb_pkts) != 0)
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *pcap_buf;
		int err = rte_ring_dequeue(pcap_q->pkts, (void **)&pcap_buf);
		if (err)
			return i;

		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *),
				rte_pktmbuf_mtod(pcap_buf, void *),
				pcap_buf->data_len);
		bufs[i]->data_len = pcap_buf->data_len;
		bufs[i]->pkt_len = pcap_buf->pkt_len;
		bufs[i]->port = pcap_q->port_id;
		rx_bytes += pcap_buf->data_len;

		/* Enqueue packet back on ring to allow infinite rx. */
		rte_ring_enqueue(pcap_q->pkts, pcap_buf);
	}

	pcap_q->rx_stat.pkts += i;
	pcap_q->rx_stat.bytes += rx_bytes;

	return i;
}

static uint16_t
eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	struct pcap_pkthdr *header;
	struct pmd_process_private *pp;
	const u_char *packet;
	struct rte_mbuf *mbuf;
	struct pcap_rx_queue *pcap_q = queue;
	uint16_t num_rx = 0;
	uint32_t rx_bytes = 0;
	pcap_t *pcap;

	pp = rte_eth_devices[pcap_q->port_id].process_private;
	pcap = pp->rx_pcap[pcap_q->queue_id];

	if (unlikely(pcap == NULL || nb_pkts == 0))
		return 0;

	/* Reads the given number of packets from the pcap file one by one
	 * and copies the packet data into a newly allocated mbuf to return.
	 */
	for (i = 0; i < nb_pkts; i++) {
		/* Get the next PCAP packet */
		int ret = pcap_next_ex(pcap, &header, &packet);
		if (ret != 1) {
			if (ret == PCAP_ERROR)
				pcap_q->rx_stat.err_pkts++;

			break;
		}

		mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
		if (unlikely(mbuf == NULL)) {
			pcap_q->rx_stat.rx_nombuf++;
			break;
		}

		uint32_t len = header->caplen;
		if (len <= rte_pktmbuf_tailroom(mbuf)) {
			/* pcap packet will fit in the mbuf, can copy it */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet, len);
			mbuf->data_len = len;
		} else {
			/* Try read jumbo frame into multi mbufs. */
			if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool,
						mbuf, packet, len) == -1)) {
				pcap_q->rx_stat.err_pkts++;
				rte_pktmbuf_free(mbuf);
				break;
			}
		}

		mbuf->pkt_len = len;
		uint64_t us = (uint64_t)header->ts.tv_sec * US_PER_S + header->ts.tv_usec;

		*RTE_MBUF_DYNFIELD(mbuf, timestamp_dynfield_offset, rte_mbuf_timestamp_t *) = us;
		mbuf->ol_flags |= timestamp_rx_dynflag;
		mbuf->port = pcap_q->port_id;
		bufs[num_rx] = mbuf;
		num_rx++;
		rx_bytes += len;
	}
	pcap_q->rx_stat.pkts += num_rx;
	pcap_q->rx_stat.bytes += rx_bytes;

	return num_rx;
}

static uint16_t
eth_null_rx(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

#define NSEC_PER_SEC	1000000000L

/*
 * This function stores nanoseconds in `tv_usec` field of `struct timeval`,
 * because `ts` goes directly to nanosecond-precision dump.
 */
static inline void
calculate_timestamp(struct timeval *ts) {
	uint64_t cycles;
	struct timespec cur_time;

	cycles = rte_get_timer_cycles() - start_cycles;
	cur_time.tv_sec = cycles / hz;
	cur_time.tv_nsec = (cycles % hz) * NSEC_PER_SEC / hz;

	ts->tv_sec = start_time.tv_sec + cur_time.tv_sec;
	ts->tv_usec = start_time.tv_nsec + cur_time.tv_nsec;
	if (ts->tv_usec >= NSEC_PER_SEC) {
		ts->tv_usec -= NSEC_PER_SEC;
		ts->tv_sec += 1;
	}
}

/*
 * Callback to handle writing packets to a pcap file.
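 *
 * Note: multi-segment mbufs larger than the temp_data buffer are
 * truncated: header.caplen is capped at RTE_ETH_PCAP_SNAPLEN while
 * header.len keeps the full packet length.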
 */
static uint16_t
eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	struct rte_mbuf *mbuf;
	struct pmd_process_private *pp;
	struct pcap_tx_queue *dumper_q = queue;
	uint16_t num_tx = 0;
	uint32_t tx_bytes = 0;
	struct pcap_pkthdr header;
	pcap_dumper_t *dumper;
	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
	size_t len, caplen;

	pp = rte_eth_devices[dumper_q->port_id].process_private;
	dumper = pp->tx_dumper[dumper_q->queue_id];

	if (dumper == NULL || nb_pkts == 0)
		return 0;

	/* writes the nb_pkts packets to the previously opened pcap file
	 * dumper */
	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];
		len = caplen = rte_pktmbuf_pkt_len(mbuf);
		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
				len > sizeof(temp_data))) {
			caplen = sizeof(temp_data);
		}

		calculate_timestamp(&header.ts);
		header.len = len;
		header.caplen = caplen;
		/* rte_pktmbuf_read() returns a pointer to the data directly
		 * in the mbuf (when the mbuf is contiguous) or, otherwise,
		 * a pointer to temp_data after copying into it.
		 */
		pcap_dump((u_char *)dumper, &header,
			rte_pktmbuf_read(mbuf, 0, caplen, temp_data));

		num_tx++;
		tx_bytes += caplen;
		rte_pktmbuf_free(mbuf);
	}

	/*
	 * Since there's no place to hook a callback when the forwarding
	 * process stops and to make sure the pcap file is actually written,
	 * we flush the pcap dumper within each burst.
	 */
	pcap_dump_flush(dumper);
	dumper_q->tx_stat.pkts += num_tx;
	dumper_q->tx_stat.bytes += tx_bytes;
	dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;

	return nb_pkts;
}

/*
 * Callback to handle dropping packets in the infinite rx case.
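 *
 * Used as the Tx handler when infinite Rx is enabled: frames are
 * counted in the Tx stats and then freed without being sent anywhere.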
 */
static uint16_t
eth_tx_drop(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	uint32_t tx_bytes = 0;
	struct pcap_tx_queue *tx_queue = queue;

	if (unlikely(nb_pkts == 0))
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		tx_bytes += bufs[i]->pkt_len;
		rte_pktmbuf_free(bufs[i]);
	}

	tx_queue->tx_stat.pkts += nb_pkts;
	tx_queue->tx_stat.bytes += tx_bytes;

	return i;
}

/*
 * Callback to handle sending packets through a real NIC.
 */
static uint16_t
eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	int ret;
	struct rte_mbuf *mbuf;
	struct pmd_process_private *pp;
	struct pcap_tx_queue *tx_queue = queue;
	uint16_t num_tx = 0;
	uint32_t tx_bytes = 0;
	pcap_t *pcap;
	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
	size_t len;

	pp = rte_eth_devices[tx_queue->port_id].process_private;
	pcap = pp->tx_pcap[tx_queue->queue_id];

	if (unlikely(nb_pkts == 0 || pcap == NULL))
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];
		len = rte_pktmbuf_pkt_len(mbuf);
		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
				len > sizeof(temp_data))) {
			PMD_LOG(ERR,
				"Dropping multi segment PCAP packet. Size (%zd) > max size (%zd).",
				len, sizeof(temp_data));
			rte_pktmbuf_free(mbuf);
			continue;
		}

		/* rte_pktmbuf_read() returns a pointer to the data directly
		 * in the mbuf (when the mbuf is contiguous) or, otherwise,
		 * a pointer to temp_data after copying into it.
		 */
		ret = pcap_sendpacket(pcap,
				rte_pktmbuf_read(mbuf, 0, len, temp_data), len);
		if (unlikely(ret != 0))
			break;
		num_tx++;
		tx_bytes += len;
		rte_pktmbuf_free(mbuf);
	}

	tx_queue->tx_stat.pkts += num_tx;
	tx_queue->tx_stat.bytes += tx_bytes;
	tx_queue->tx_stat.err_pkts += i - num_tx;

	return i;
}

/*
 * pcap_open_live wrapper function
 */
static inline int
open_iface_live(const char *iface, pcap_t **pcap) {
	*pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
			RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);

	if (*pcap == NULL) {
		PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf);
		return -1;
	}

	if (pcap_setnonblock(*pcap, 1, errbuf)) {
		PMD_LOG(ERR, "Couldn't set non-blocking on %s: %s", iface, errbuf);
		pcap_close(*pcap);
		return -1;
	}

	return 0;
}

static int
open_single_iface(const char *iface, pcap_t **pcap)
{
	if (open_iface_live(iface, pcap) < 0) {
		PMD_LOG(ERR, "Couldn't open interface %s", iface);
		return -1;
	}

	return 0;
}

static int
open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
{
	pcap_t *tx_pcap;

	/*
	 * We need to create a dummy empty pcap_t to use it
	 * with pcap_dump_open(). We create a big enough Ethernet
	 * pcap holder.
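	 * Nanosecond timestamp precision is requested so that dumped
	 * timestamps match calculate_timestamp(), which stores
	 * nanoseconds in the tv_usec field.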
	 */
	tx_pcap = pcap_open_dead_with_tstamp_precision(DLT_EN10MB,
			RTE_ETH_PCAP_SNAPSHOT_LEN, PCAP_TSTAMP_PRECISION_NANO);
	if (tx_pcap == NULL) {
		PMD_LOG(ERR, "Couldn't create dead pcap");
		return -1;
	}

	/* The dumper is created using the previous pcap_t reference */
	*dumper = pcap_dump_open(tx_pcap, pcap_filename);
	if (*dumper == NULL) {
		pcap_close(tx_pcap);
		PMD_LOG(ERR, "Couldn't open %s for writing.",
			pcap_filename);
		return -1;
	}

	pcap_close(tx_pcap);
	return 0;
}

static int
open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
{
	*pcap = pcap_open_offline(pcap_filename, errbuf);
	if (*pcap == NULL) {
		PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename,
			errbuf);
		return -1;
	}

	return 0;
}

static uint64_t
count_packets_in_pcap(pcap_t **pcap, struct pcap_rx_queue *pcap_q)
{
	const u_char *packet;
	struct pcap_pkthdr header;
	uint64_t pcap_pkt_count = 0;

	while ((packet = pcap_next(*pcap, &header)))
		pcap_pkt_count++;

	/* The pcap is reopened so it can be used as normal later. */
	pcap_close(*pcap);
	*pcap = NULL;
	open_single_rx_pcap(pcap_q->name, pcap);

	return pcap_pkt_count;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *pp = dev->process_private;
	struct pcap_tx_queue *tx;
	struct pcap_rx_queue *rx;

	/* Special iface case. Single pcap is open and shared between tx/rx. */
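	/* The same pcap handle then serves both rx_pcap[0] and tx_pcap[0]. */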
	if (internals->single_iface) {
		tx = &internals->tx_queue[0];
		rx = &internals->rx_queue[0];

		if (!pp->tx_pcap[0] &&
			strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &pp->tx_pcap[0]) < 0)
				return -1;
			pp->rx_pcap[0] = pp->tx_pcap[0];
		}

		goto status_up;
	}

	/* If not open already, open tx pcaps/dumpers */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		tx = &internals->tx_queue[i];

		if (!pp->tx_dumper[i] &&
				strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
			if (open_single_tx_pcap(tx->name,
				&pp->tx_dumper[i]) < 0)
				return -1;
		} else if (!pp->tx_pcap[i] &&
				strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &pp->tx_pcap[i]) < 0)
				return -1;
		}
	}

	/* If not open already, open rx pcaps */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rx = &internals->rx_queue[i];

		if (pp->rx_pcap[i] != NULL)
			continue;

		if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
			if (open_single_rx_pcap(rx->name, &pp->rx_pcap[i]) < 0)
				return -1;
		} else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
			if (open_single_iface(rx->name, &pp->rx_pcap[i]) < 0)
				return -1;
		}
	}

status_up:
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;

	return 0;
}

/*
 * This function gets called when the current port gets stopped.
 * It is the only place for us to close all the tx stream dumpers.
 * If it is not called, the dumpers are still flushed within each tx burst
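 * (see the pcap_dump_flush() call in eth_pcap_tx_dumper()).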
 */
static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *pp = dev->process_private;

	/* Special iface case. Single pcap is open and shared between tx/rx. */
	if (internals->single_iface) {
		queue_missed_stat_on_stop_update(dev, 0);
		if (pp->tx_pcap[0] != NULL) {
			pcap_close(pp->tx_pcap[0]);
			pp->tx_pcap[0] = NULL;
			pp->rx_pcap[0] = NULL;
		}
		goto status_down;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (pp->tx_dumper[i] != NULL) {
			pcap_dump_close(pp->tx_dumper[i]);
			pp->tx_dumper[i] = NULL;
		}

		if (pp->tx_pcap[i] != NULL) {
			pcap_close(pp->tx_pcap[i]);
			pp->tx_pcap[i] = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (pp->rx_pcap[i] != NULL) {
			queue_missed_stat_on_stop_update(dev, i);
			pcap_close(pp->rx_pcap[i]);
			pp->rx_pcap[i] = NULL;
		}
	}

status_down:
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	return 0;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t) -1;
	dev_info->max_rx_queues = dev->data->nb_rx_queues;
	dev_info->max_tx_queues = dev->data->nb_tx_queues;
	dev_info->min_rx_bufsize = 0;

	return 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_packets_total = 0, rx_bytes_total = 0;
	unsigned long rx_missed_total = 0;
	unsigned long rx_nombuf_total = 0, rx_err_total = 0;
	unsigned long tx_packets_total = 0, tx_bytes_total = 0;
	unsigned long tx_packets_err_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts;
		stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
		rx_nombuf_total += internal->rx_queue[i].rx_stat.rx_nombuf;
		rx_err_total += internal->rx_queue[i].rx_stat.err_pkts;
		rx_packets_total += stats->q_ipackets[i];
		rx_bytes_total += stats->q_ibytes[i];
		rx_missed_total += queue_missed_stat_get(dev, i);
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts;
		stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes;
		tx_packets_total += stats->q_opackets[i];
		tx_bytes_total += stats->q_obytes[i];
		tx_packets_err_total += internal->tx_queue[i].tx_stat.err_pkts;
	}

	stats->ipackets = rx_packets_total;
	stats->ibytes = rx_bytes_total;
	stats->imissed = rx_missed_total;
	stats->ierrors = rx_err_total;
	stats->rx_nombuf = rx_nombuf_total;
	stats->opackets = tx_packets_total;
	stats->obytes = tx_bytes_total;
	stats->oerrors = tx_packets_err_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		internal->rx_queue[i].rx_stat.pkts = 0;
		internal->rx_queue[i].rx_stat.bytes = 0;
		internal->rx_queue[i].rx_stat.err_pkts = 0;
		internal->rx_queue[i].rx_stat.rx_nombuf = 0;
		queue_missed_stat_reset(dev, i);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		internal->tx_queue[i].tx_stat.pkts = 0;
		internal->tx_queue[i].tx_stat.bytes = 0;
		internal->tx_queue[i].tx_stat.err_pkts = 0;
	}

	return 0;
}

static inline void
infinite_rx_ring_free(struct rte_ring *pkts)
{
	struct rte_mbuf *bufs;

	while (!rte_ring_dequeue(pkts, (void **)&bufs))
		rte_pktmbuf_free(bufs);

	rte_ring_free(pkts);
}

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internals = dev->data->dev_private;

	PMD_LOG(INFO, "Closing pcap ethdev on NUMA socket %d",
			rte_socket_id());

	eth_dev_stop(dev);

	rte_free(dev->process_private);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Device wide flag, but cleanup must be performed per queue. */
	if (internals->infinite_rx) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			struct pcap_rx_queue *pcap_q = &internals->rx_queue[i];

			/*
			 * 'pcap_q->pkts' can be NULL if 'eth_dev_close()' is
			 * called before 'eth_rx_queue_setup()' has been called
			 */
			if (pcap_q->pkts == NULL)
				continue;

			infinite_rx_ring_free(pcap_q->pkts);
		}
	}

	if (internals->phy_mac == 0)
		/* not dynamically allocated, must not be freed */
		dev->data->mac_addrs = NULL;

	return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];

	pcap_q->mb_pool = mb_pool;
	pcap_q->port_id = dev->data->port_id;
	pcap_q->queue_id = rx_queue_id;
	dev->data->rx_queues[rx_queue_id] = pcap_q;

	if (internals->infinite_rx) {
		struct pmd_process_private *pp;
		char ring_name[RTE_RING_NAMESIZE];
		static uint32_t ring_number;
		uint64_t pcap_pkt_count = 0;
		struct rte_mbuf *bufs[1];
		pcap_t **pcap;

		pp = rte_eth_devices[pcap_q->port_id].process_private;
		pcap = &pp->rx_pcap[pcap_q->queue_id];

		if (unlikely(*pcap == NULL))
			return -ENOENT;

		pcap_pkt_count = count_packets_in_pcap(pcap, pcap_q);

		snprintf(ring_name, sizeof(ring_name), "PCAP_RING%" PRIu32,
				ring_number);

		pcap_q->pkts = rte_ring_create(ring_name,
				rte_align64pow2(pcap_pkt_count + 1), 0,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		ring_number++;
		if (!pcap_q->pkts)
			return -ENOENT;

		/* Fill ring with packets from PCAP file one by one. */
		while (eth_pcap_rx(pcap_q, bufs, 1)) {
			/* Check for multiseg mbufs. */
			if (bufs[0]->nb_segs != 1) {
				infinite_rx_ring_free(pcap_q->pkts);
				PMD_LOG(ERR,
					"Multiseg mbufs are not supported in infinite_rx mode.");
				return -EINVAL;
			}

			rte_ring_enqueue_bulk(pcap_q->pkts,
					(void * const *)bufs, 1, NULL);
		}

		if (rte_ring_count(pcap_q->pkts) < pcap_pkt_count) {
			infinite_rx_ring_free(pcap_q->pkts);
			PMD_LOG(ERR,
				"Not enough mbufs to accommodate packets in pcap file. "
				"At least %" PRIu64 " mbufs per queue is required.",
				pcap_pkt_count);
			return -EINVAL;
		}

		/*
		 * Reset the stats for this queue since eth_pcap_rx calls above
		 * didn't result in the application receiving packets.
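		 * (They were only used here to pre-fill the ring.)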
		 */
		pcap_q->rx_stat.pkts = 0;
		pcap_q->rx_stat.bytes = 0;
	}

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];

	pcap_q->port_id = dev->data->port_id;
	pcap_q->queue_id = tx_queue_id;
	dev->data->tx_queues[tx_queue_id] = pcap_q;

	return 0;
}

static int
eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_start = eth_rx_queue_start,
	.tx_queue_start = eth_tx_queue_start,
	.rx_queue_stop = eth_rx_queue_stop,
	.tx_queue_stop = eth_tx_queue_stop,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};

static int
add_queue(struct pmd_devargs *pmd, const char *name, const char *type,
		pcap_t *pcap, pcap_dumper_t *dumper)
{
	if (pmd->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
		return -1;
	if (pcap)
		pmd->queue[pmd->num_of_queue].pcap = pcap;
	if (dumper)
		pmd->queue[pmd->num_of_queue].dumper = dumper;
	pmd->queue[pmd->num_of_queue].name = name;
	pmd->queue[pmd->num_of_queue].type = type;
	pmd->num_of_queue++;
	return 0;
}

/*
 * Function handler that opens the pcap file for reading and stores a
 * reference to it for later use.
 */
static int
open_rx_pcap(const char *key, const char *value, void *extra_args)
{
	const char *pcap_filename = value;
	struct pmd_devargs *rx = extra_args;
	pcap_t *pcap = NULL;

	if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
		return -1;

	if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) {
		pcap_close(pcap);
		return -1;
	}

	return 0;
}

/*
 * Opens a pcap file for writing and stores a reference to it
 * for later use.
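 * The dumper is eventually closed in eth_dev_stop().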
 */
static int
open_tx_pcap(const char *key, const char *value, void *extra_args)
{
	const char *pcap_filename = value;
	struct pmd_devargs *dumpers = extra_args;
	pcap_dumper_t *dumper;

	if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
		return -1;

	if (add_queue(dumpers, pcap_filename, key, NULL, dumper) < 0) {
		pcap_dump_close(dumper);
		return -1;
	}

	return 0;
}

/*
 * Opens an interface for reading and writing
 */
static inline int
open_rx_tx_iface(const char *key, const char *value, void *extra_args)
{
	const char *iface = value;
	struct pmd_devargs *tx = extra_args;
	pcap_t *pcap = NULL;

	if (open_single_iface(iface, &pcap) < 0)
		return -1;

	tx->queue[0].pcap = pcap;
	tx->queue[0].name = iface;
	tx->queue[0].type = key;

	return 0;
}

static inline int
set_iface_direction(const char *iface, pcap_t *pcap,
		pcap_direction_t direction)
{
	const char *direction_str = (direction == PCAP_D_IN) ? "IN" : "OUT";
	if (pcap_setdirection(pcap, direction) < 0) {
		PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s",
				iface, direction_str, pcap_geterr(pcap));
		return -1;
	}
	PMD_LOG(INFO, "Setting %s pcap direction %s",
			iface, direction_str);
	return 0;
}

static inline int
open_iface(const char *key, const char *value, void *extra_args)
{
	const char *iface = value;
	struct pmd_devargs *pmd = extra_args;
	pcap_t *pcap = NULL;

	if (open_single_iface(iface, &pcap) < 0)
		return -1;
	if (add_queue(pmd, iface, key, pcap, NULL) < 0) {
		pcap_close(pcap);
		return -1;
	}

	return 0;
}

/*
 * Opens a NIC for reading packets from it
 */
static inline int
open_rx_iface(const char *key, const char *value, void *extra_args)
{
	int ret = open_iface(key, value, extra_args);
	if (ret < 0)
		return ret;
	if (strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) {
		struct pmd_devargs *pmd = extra_args;
		unsigned int qid = pmd->num_of_queue - 1;

		set_iface_direction(pmd->queue[qid].name,
				pmd->queue[qid].pcap,
				PCAP_D_IN);
	}

	return 0;
}

static inline int
rx_iface_args_process(const char *key, const char *value, void *extra_args)
{
	if (strcmp(key, ETH_PCAP_RX_IFACE_ARG) == 0 ||
			strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0)
		return open_rx_iface(key, value, extra_args);

	return 0;
}

/*
 * Opens a NIC for writing packets to it
 */
static int
open_tx_iface(const char *key, const char *value, void *extra_args)
{
	return open_iface(key, value, extra_args);
}

static int
select_phy_mac(const char *key __rte_unused, const char *value,
		void *extra_args)
{
	if (extra_args) {
static int
get_infinite_rx_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	if (extra_args) {
		const int infinite_rx = atoi(value);
		int *enable_infinite_rx = extra_args;

		if (infinite_rx > 0)
			*enable_infinite_rx = 1;
	}
	return 0;
}

static int
pmd_init_internals(struct rte_vdev_device *vdev,
		const unsigned int nb_rx_queues,
		const unsigned int nb_tx_queues,
		struct pmd_internals **internals,
		struct rte_eth_dev **eth_dev)
{
	struct rte_eth_dev_data *data;
	struct pmd_process_private *pp;
	unsigned int numa_node = vdev->device.numa_node;

	PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
		numa_node);

	pp = (struct pmd_process_private *)
		rte_zmalloc(NULL, sizeof(struct pmd_process_private),
				RTE_CACHE_LINE_SIZE);

	if (pp == NULL) {
		PMD_LOG(ERR,
			"Failed to allocate memory for process private");
		return -1;
	}

	/* reserve an ethdev entry */
	*eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
	if (!(*eth_dev)) {
		rte_free(pp);
		return -1;
	}
	(*eth_dev)->process_private = pp;
	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	*internals = (*eth_dev)->data->dev_private;
	/*
	 * Interface MAC = 02:70:63:61:70:<iface_idx>,
	 * derived from: 'locally administered':'p':'c':'a':'p':<iface_idx>,
	 * where the middle four bytes are the ASCII codes of "pcap" in hex.
	 */
	(*internals)->eth_addr = (struct rte_ether_addr) {
		.addr_bytes = { 0x02, 0x70, 0x63, 0x61, 0x70, iface_idx++ }
	};
	(*internals)->phy_mac = 0;
	data = (*eth_dev)->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &(*internals)->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;
	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/*
	 * NOTE: we'll replace the data element, of originally allocated
	 * eth_dev so the rings are local per-process
	 */
	(*eth_dev)->dev_ops = &ops;

	strlcpy((*internals)->devargs, rte_vdev_device_args(vdev),
			ETH_PCAP_ARG_MAXLEN);

	return 0;
}
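/*
 * Illustrative note (not part of the upstream sources): with the default
 * (non-phy_mac) addressing above, the first pcap port probed in a process
 * gets MAC 02:70:63:61:70:00, the second 02:70:63:61:70:01, and so on.
 * 0x02 marks the address as locally administered, and 70:63:61:70 spells
 * "pcap" in ASCII.
 */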
static int
eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev,
		const unsigned int numa_node)
{
	void *mac_addrs;
	struct rte_ether_addr mac;

	if (osdep_iface_mac_get(if_name, &mac) < 0)
		return -1;

	mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);
	if (mac_addrs == NULL)
		return -1;

	PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
	rte_memcpy(mac_addrs, mac.addr_bytes, RTE_ETHER_ADDR_LEN);
	eth_dev->data->mac_addrs = mac_addrs;
	return 0;
}

static int
eth_from_pcaps_common(struct rte_vdev_device *vdev,
		struct pmd_devargs_all *devargs_all,
		struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
{
	struct pmd_process_private *pp;
	struct pmd_devargs *rx_queues = &devargs_all->rx_queues;
	struct pmd_devargs *tx_queues = &devargs_all->tx_queues;
	const unsigned int nb_rx_queues = rx_queues->num_of_queue;
	const unsigned int nb_tx_queues = tx_queues->num_of_queue;
	unsigned int i;

	if (pmd_init_internals(vdev, nb_rx_queues, nb_tx_queues, internals,
			eth_dev) < 0)
		return -1;

	pp = (*eth_dev)->process_private;
	for (i = 0; i < nb_rx_queues; i++) {
		struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
		struct devargs_queue *queue = &rx_queues->queue[i];

		pp->rx_pcap[i] = queue->pcap;
		strlcpy(rx->name, queue->name, sizeof(rx->name));
		strlcpy(rx->type, queue->type, sizeof(rx->type));
	}

	for (i = 0; i < nb_tx_queues; i++) {
		struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
		struct devargs_queue *queue = &tx_queues->queue[i];

		pp->tx_dumper[i] = queue->dumper;
		pp->tx_pcap[i] = queue->pcap;
		strlcpy(tx->name, queue->name, sizeof(tx->name));
		strlcpy(tx->type, queue->type, sizeof(tx->type));
	}

	return 0;
}
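/*
 * Illustrative note (not part of the upstream sources): the split above is
 * what makes secondary processes work. Queue names and types are plain data
 * and live in shared memory (struct pmd_internals), while pcap_t and
 * pcap_dumper_t handles are process-local pointers and therefore go into
 * eth_dev->process_private; each process must open its own handles.
 */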
static int
eth_from_pcaps(struct rte_vdev_device *vdev,
		struct pmd_devargs_all *devargs_all)
{
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_devargs *rx_queues = &devargs_all->rx_queues;
	int single_iface = devargs_all->single_iface;
	unsigned int infinite_rx = devargs_all->infinite_rx;
	int ret;

	ret = eth_from_pcaps_common(vdev, devargs_all, &internals, &eth_dev);

	if (ret < 0)
		return ret;

	/* store whether we are using a single interface for rx/tx or not */
	internals->single_iface = single_iface;

	if (single_iface) {
		internals->if_index =
			osdep_iface_index_get(rx_queues->queue[0].name);

		/* phy_mac arg is applied only if "iface" devarg is provided */
		if (rx_queues->phy_mac) {
			if (eth_pcap_update_mac(rx_queues->queue[0].name,
					eth_dev, vdev->device.numa_node) == 0)
				internals->phy_mac = 1;
		}
	}

	internals->infinite_rx = infinite_rx;
	/* Assign rx ops. */
	if (infinite_rx)
		eth_dev->rx_pkt_burst = eth_pcap_rx_infinite;
	else if (devargs_all->is_rx_pcap || devargs_all->is_rx_iface ||
			single_iface)
		eth_dev->rx_pkt_burst = eth_pcap_rx;
	else
		eth_dev->rx_pkt_burst = eth_null_rx;

	/* Assign tx ops. */
	if (devargs_all->is_tx_pcap)
		eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
	else if (devargs_all->is_tx_iface || single_iface)
		eth_dev->tx_pkt_burst = eth_pcap_tx;
	else
		eth_dev->tx_pkt_burst = eth_tx_drop;

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}
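/*
 * Illustrative note (not part of the upstream sources): the selection above
 * means, for example, that
 *   rx_pcap=... with infinite_rx=1 -> eth_pcap_rx_infinite (ring replay),
 *   rx_pcap=... or rx_iface=...    -> eth_pcap_rx,
 *   tx_pcap=...                    -> eth_pcap_tx_dumper (writes a capture),
 *   tx_iface=... or iface=...      -> eth_pcap_tx (injects on the NIC),
 * and a port given only Tx devargs receives nothing (eth_null_rx), while a
 * port given only Rx devargs counts and drops on Tx (eth_tx_drop).
 */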
static void
eth_release_pcaps(struct pmd_devargs *pcaps,
		struct pmd_devargs *dumpers,
		int single_iface)
{
	unsigned int i;

	if (single_iface) {
		if (pcaps->queue[0].pcap)
			pcap_close(pcaps->queue[0].pcap);
		return;
	}

	for (i = 0; i < dumpers->num_of_queue; i++) {
		if (dumpers->queue[i].dumper)
			pcap_dump_close(dumpers->queue[i].dumper);

		if (dumpers->queue[i].pcap)
			pcap_close(dumpers->queue[i].pcap);
	}

	for (i = 0; i < pcaps->num_of_queue; i++) {
		if (pcaps->queue[i].pcap)
			pcap_close(pcaps->queue[i].pcap);
	}
}

static int
pmd_pcap_probe(struct rte_vdev_device *dev)
{
	const char *name;
	struct rte_kvargs *kvlist;
	struct pmd_devargs pcaps = {0};
	struct pmd_devargs dumpers = {0};
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internal;
	int ret = 0;

	struct pmd_devargs_all devargs_all = {
		.single_iface = 0,
		.is_tx_pcap = 0,
		.is_tx_iface = 0,
		.infinite_rx = 0,
	};

	name = rte_vdev_device_name(dev);
	PMD_LOG(INFO, "Initializing pmd_pcap for %s", name);

	timespec_get(&start_time, TIME_UTC);
	start_cycles = rte_get_timer_cycles();
	hz = rte_get_timer_hz();

	ret = rte_mbuf_dyn_rx_timestamp_register(&timestamp_dynfield_offset,
			&timestamp_rx_dynflag);
	if (ret != 0) {
		PMD_LOG(ERR, "Failed to register Rx timestamp field/flag");
		return -1;
	}
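	/*
	 * Illustrative note (not part of the upstream sources): the wall-clock
	 * time captured in start_time, together with the cycle counter snapshot
	 * in start_cycles and the timer frequency in hz, gives the Rx path a
	 * baseline so per-packet mbuf timestamps can be derived from cheap
	 * cycle counts rather than a syscall per packet; the dynamic field
	 * registered above is where each mbuf's timestamp is stored.
	 */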
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}

		internal = eth_dev->data->dev_private;

		kvlist = rte_kvargs_parse(internal->devargs, valid_arguments);
		if (kvlist == NULL)
			return -1;
	} else {
		kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
				valid_arguments);
		if (kvlist == NULL)
			return -1;
	}

	/*
	 * If iface argument is passed we open the NICs and use them for
	 * reading / writing
	 */
	if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {

		ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
				&open_rx_tx_iface, &pcaps);
		if (ret < 0)
			goto free_kvlist;

		dumpers.queue[0] = pcaps.queue[0];

		ret = rte_kvargs_process(kvlist, ETH_PCAP_PHY_MAC_ARG,
				&select_phy_mac, &pcaps.phy_mac);
		if (ret < 0)
			goto free_kvlist;

		dumpers.phy_mac = pcaps.phy_mac;

		devargs_all.single_iface = 1;
		pcaps.num_of_queue = 1;
		dumpers.num_of_queue = 1;

		goto create_eth;
	}

	/*
	 * We check whether we want to open a RX stream from a real NIC, a
	 * pcap file or open a dummy RX stream
	 */
	devargs_all.is_rx_pcap =
		rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
	devargs_all.is_rx_iface =
		(rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_ARG) +
		 rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_IN_ARG)) ? 1 : 0;
	pcaps.num_of_queue = 0;

	devargs_all.is_tx_pcap =
		rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
	devargs_all.is_tx_iface =
		rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG) ? 1 : 0;
	dumpers.num_of_queue = 0;
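	/*
	 * Illustrative note (not part of the upstream sources): as a concrete
	 * example, a hypothetical devargs string
	 *   'net_pcap0,rx_pcap=in.pcap,tx_pcap=out.pcap'
	 * yields is_rx_pcap = 1 and is_tx_pcap = 1 here, while
	 *   'net_pcap0,iface=eth0'
	 * never reaches this point because the single-iface branch above jumps
	 * straight to create_eth.
	 */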
	if (devargs_all.is_rx_pcap) {
		/*
		 * We check whether we want to infinitely rx the pcap file.
		 */
		unsigned int infinite_rx_arg_cnt = rte_kvargs_count(kvlist,
				ETH_PCAP_INFINITE_RX_ARG);

		if (infinite_rx_arg_cnt == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_PCAP_INFINITE_RX_ARG,
					&get_infinite_rx_arg,
					&devargs_all.infinite_rx);
			if (ret < 0)
				goto free_kvlist;
			PMD_LOG(INFO, "infinite_rx has been %s for %s",
					devargs_all.infinite_rx ? "enabled" : "disabled",
					name);

		} else if (infinite_rx_arg_cnt > 1) {
			PMD_LOG(WARNING, "infinite_rx has not been enabled since the "
					"argument has been provided more than once "
					"for %s", name);
		}

		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
				&open_rx_pcap, &pcaps);
	} else if (devargs_all.is_rx_iface) {
		ret = rte_kvargs_process(kvlist, NULL,
				&rx_iface_args_process, &pcaps);
	} else if (devargs_all.is_tx_iface || devargs_all.is_tx_pcap) {
		unsigned int i;

		/* Count number of tx queue args passed before dummy rx queue
		 * creation so a dummy rx queue can be created for each tx queue
		 */
		unsigned int num_tx_queues =
			(rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) +
			rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG));

		PMD_LOG(INFO, "Creating null rx queue since no rx queues were provided.");

		/* Creating a dummy rx queue for each tx queue passed */
		for (i = 0; i < num_tx_queues; i++)
			ret = add_queue(&pcaps, "dummy_rx", "rx_null", NULL,
					NULL);
	} else {
		PMD_LOG(ERR, "Error - No rx or tx queues provided");
		ret = -ENOENT;
	}
	if (ret < 0)
		goto free_kvlist;
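	/*
	 * Illustrative note (not part of the upstream sources): infinite_rx is
	 * only considered inside the is_rx_pcap branch above, so it is silently
	 * ignored when capturing from a live interface, and it must be given
	 * exactly once; a duplicated key only triggers the warning and leaves
	 * the feature disabled.
	 */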
	/*
	 * We check whether we want to open a TX stream to a real NIC,
	 * a pcap file, or drop packets on tx
	 */
	if (devargs_all.is_tx_pcap) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
				&open_tx_pcap, &dumpers);
	} else if (devargs_all.is_tx_iface) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
				&open_tx_iface, &dumpers);
	} else {
		unsigned int i;

		PMD_LOG(INFO, "Dropping packets on tx since no tx queues were provided.");

		/* Add 1 dummy queue per rxq which counts and drops packets. */
		for (i = 0; i < pcaps.num_of_queue; i++)
			ret = add_queue(&dumpers, "dummy_tx", "tx_drop", NULL,
					NULL);
	}

	if (ret < 0)
		goto free_kvlist;

create_eth:
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_process_private *pp;
		unsigned int i;

		internal = eth_dev->data->dev_private;
		pp = (struct pmd_process_private *)
			rte_zmalloc(NULL,
				sizeof(struct pmd_process_private),
				RTE_CACHE_LINE_SIZE);

		if (pp == NULL) {
			PMD_LOG(ERR,
				"Failed to allocate memory for process private");
			ret = -1;
			goto free_kvlist;
		}

		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;

		/* setup process private */
		for (i = 0; i < pcaps.num_of_queue; i++)
			pp->rx_pcap[i] = pcaps.queue[i].pcap;

		for (i = 0; i < dumpers.num_of_queue; i++) {
			pp->tx_dumper[i] = dumpers.queue[i].dumper;
			pp->tx_pcap[i] = dumpers.queue[i].pcap;
		}

		eth_dev->process_private = pp;
		eth_dev->rx_pkt_burst = eth_pcap_rx;
		if (devargs_all.is_tx_pcap)
			eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
		else
			eth_dev->tx_pkt_burst = eth_pcap_tx;

		rte_eth_dev_probing_finish(eth_dev);
		goto free_kvlist;
	}

	devargs_all.rx_queues = pcaps;
	devargs_all.tx_queues = dumpers;

	ret = eth_from_pcaps(dev, &devargs_all);

free_kvlist:
	rte_kvargs_free(kvlist);

	if (ret < 0)
		eth_release_pcaps(&pcaps, &dumpers, devargs_all.single_iface);

	return ret;
}
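/*
 * Illustrative note (not part of the upstream sources): on a failed probe
 * the kvargs list is always freed and eth_release_pcaps() closes every pcap
 * handle and dumper opened so far, so a bad devargs string does not leak
 * file descriptors or leave captures running.
 */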
static int
pmd_pcap_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -1;

	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_pcap_drv = {
	.probe = pmd_pcap_probe,
	.remove = pmd_pcap_remove,
};

RTE_PMD_REGISTER_VDEV(net_pcap, pmd_pcap_drv);
RTE_PMD_REGISTER_ALIAS(net_pcap, eth_pcap);
RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
	ETH_PCAP_RX_PCAP_ARG "=<string> "
	ETH_PCAP_TX_PCAP_ARG "=<string> "
	ETH_PCAP_RX_IFACE_ARG "=<ifc> "
	ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
	ETH_PCAP_TX_IFACE_ARG "=<ifc> "
	ETH_PCAP_IFACE_ARG "=<ifc> "
	ETH_PCAP_PHY_MAC_ARG "=<int> "
	ETH_PCAP_INFINITE_RX_ARG "=<0|1>");
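/*
 * Usage sketch (illustrative, not part of the upstream sources; application
 * and file names are examples):
 *   dpdk-testpmd --vdev 'net_pcap0,rx_pcap=input.pcap,tx_pcap=output.pcap'
 *   dpdk-testpmd --vdev 'net_pcap0,iface=eth0,phy_mac=1'
 * The first replays input.pcap on Rx and dumps transmitted packets to
 * output.pcap; the second binds both directions to eth0 and adopts its
 * physical MAC address.
 */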