/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright(c) 2014 6WIND S.A.
 * All rights reserved.
 */

#include <stdlib.h>
#include <time.h>

#include <pcap.h>

#include <rte_cycles.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#include <bus_vdev_driver.h>
#include <rte_os_shim.h>

#include "pcap_osdep.h"

#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
#define RTE_ETH_PCAP_SNAPLEN RTE_ETHER_MAX_JUMBO_FRAME_LEN
#define RTE_ETH_PCAP_PROMISC 1
#define RTE_ETH_PCAP_TIMEOUT -1

#define ETH_PCAP_RX_PCAP_ARG "rx_pcap"
#define ETH_PCAP_TX_PCAP_ARG "tx_pcap"
#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
#define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in"
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG "iface"
#define ETH_PCAP_PHY_MAC_ARG "phy_mac"
#define ETH_PCAP_INFINITE_RX_ARG "infinite_rx"

#define ETH_PCAP_ARG_MAXLEN 64

#define RTE_PMD_PCAP_MAX_QUEUES 16

static char errbuf[PCAP_ERRBUF_SIZE];
static struct timespec start_time;
static uint64_t start_cycles;
static uint64_t hz;
static uint8_t iface_idx;

static uint64_t timestamp_rx_dynflag;
static int timestamp_dynfield_offset = -1;

struct queue_stat {
	volatile unsigned long pkts;
	volatile unsigned long bytes;
	volatile unsigned long err_pkts;
	volatile unsigned long rx_nombuf;
};

struct queue_missed_stat {
	/* last value retrieved from pcap */
	unsigned int pcap;
	/* stores values lost by pcap stop or rollover */
	unsigned long mnemonic;
	/* value on last reset */
	unsigned long reset;
};

struct pcap_rx_queue {
	uint16_t port_id;
	uint16_t queue_id;
	struct rte_mempool *mb_pool;
	struct queue_stat rx_stat;
	struct queue_missed_stat missed_stat;
	char name[PATH_MAX];
	char type[ETH_PCAP_ARG_MAXLEN];

	/* Contains pre-generated packets to be looped through */
	struct rte_ring *pkts;
};

struct pcap_tx_queue {
	uint16_t port_id;
	uint16_t queue_id;
	struct queue_stat tx_stat;
	char name[PATH_MAX];
	char type[ETH_PCAP_ARG_MAXLEN];
};

struct pmd_internals {
	struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
	struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
	char devargs[ETH_PCAP_ARG_MAXLEN];
	struct rte_ether_addr eth_addr;
	int if_index;
	int single_iface;
	int phy_mac;
	unsigned int infinite_rx;
};

struct pmd_process_private {
	pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
	pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
	pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES];
};

struct pmd_devargs {
	unsigned int num_of_queue;
	struct devargs_queue {
		pcap_dumper_t *dumper;
		pcap_t *pcap;
		const char *name;
		const char *type;
	} queue[RTE_PMD_PCAP_MAX_QUEUES];
	int phy_mac;
};

struct pmd_devargs_all {
	struct pmd_devargs rx_queues;
	struct pmd_devargs tx_queues;
	int single_iface;
	unsigned int is_tx_pcap;
	unsigned int is_tx_iface;
	unsigned int is_rx_pcap;
	unsigned int is_rx_iface;
	unsigned int infinite_rx;
};

static const char *valid_arguments[] = {
	ETH_PCAP_RX_PCAP_ARG,
	ETH_PCAP_TX_PCAP_ARG,
	ETH_PCAP_RX_IFACE_ARG,
	ETH_PCAP_RX_IFACE_IN_ARG,
	ETH_PCAP_TX_IFACE_ARG,
	ETH_PCAP_IFACE_ARG,
	ETH_PCAP_PHY_MAC_ARG,
	ETH_PCAP_INFINITE_RX_ARG,
	NULL
};

static struct rte_eth_link pmd_link = {
	.link_speed = RTE_ETH_SPEED_NUM_10G,
	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
	.link_status = RTE_ETH_LINK_DOWN,
	.link_autoneg = RTE_ETH_LINK_FIXED,
};
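
/*
 * Typical usage, as a sketch (the exact EAL invocation depends on the
 * application; file and interface names below are only examples):
 *   --vdev 'net_pcap0,rx_pcap=input.pcap,tx_pcap=output.pcap'
 *   --vdev 'net_pcap1,iface=eth0,phy_mac=1'
 * Each rx_pcap/rx_iface(_in) and tx_pcap/tx_iface argument creates one RX
 * or TX queue; 'iface' opens a single interface shared by RX and TX.
 */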

RTE_LOG_REGISTER_DEFAULT(eth_pcap_logtype, NOTICE);

static struct queue_missed_stat*
queue_missed_stat_update(struct rte_eth_dev *dev, unsigned int qid)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct queue_missed_stat *missed_stat =
			&internals->rx_queue[qid].missed_stat;
	const struct pmd_process_private *pp = dev->process_private;
	pcap_t *pcap = pp->rx_pcap[qid];
	struct pcap_stat stat;

	if (!pcap || (pcap_stats(pcap, &stat) != 0))
		return missed_stat;

	/* rollover check - best effort fixup assuming single rollover */
	if (stat.ps_drop < missed_stat->pcap)
		missed_stat->mnemonic += UINT_MAX;
	missed_stat->pcap = stat.ps_drop;

	return missed_stat;
}

static void
queue_missed_stat_on_stop_update(struct rte_eth_dev *dev, unsigned int qid)
{
	struct queue_missed_stat *missed_stat =
			queue_missed_stat_update(dev, qid);

	missed_stat->mnemonic += missed_stat->pcap;
	missed_stat->pcap = 0;
}

static void
queue_missed_stat_reset(struct rte_eth_dev *dev, unsigned int qid)
{
	struct queue_missed_stat *missed_stat =
			queue_missed_stat_update(dev, qid);

	missed_stat->reset = missed_stat->pcap;
	missed_stat->mnemonic = 0;
}

static unsigned long
queue_missed_stat_get(struct rte_eth_dev *dev, unsigned int qid)
{
	const struct queue_missed_stat *missed_stat =
			queue_missed_stat_update(dev, qid);

	return missed_stat->pcap + missed_stat->mnemonic - missed_stat->reset;
}

static int
eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
		const u_char *data, uint16_t data_len)
{
	/* Copy the first segment. */
	uint16_t len = rte_pktmbuf_tailroom(mbuf);
	struct rte_mbuf *m = mbuf;

	rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);
	data_len -= len;
	data += len;

	while (data_len > 0) {
		/* Allocate next mbuf and point to that. */
		m->next = rte_pktmbuf_alloc(mb_pool);

		if (unlikely(!m->next))
			return -1;

		m = m->next;

		/* Headroom is not needed in chained mbufs. */
		rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
		m->pkt_len = 0;
		m->data_len = 0;

		/* Copy next segment. */
		len = RTE_MIN(rte_pktmbuf_tailroom(m), data_len);
		rte_memcpy(rte_pktmbuf_append(m, len), data, len);

		mbuf->nb_segs++;
		data_len -= len;
		data += len;
	}

	return mbuf->nb_segs;
}

static uint16_t
eth_pcap_rx_infinite(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	int i;
	struct pcap_rx_queue *pcap_q = queue;
	uint32_t rx_bytes = 0;

	if (unlikely(nb_pkts == 0))
		return 0;

	if (rte_pktmbuf_alloc_bulk(pcap_q->mb_pool, bufs, nb_pkts) != 0)
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *pcap_buf;
		int err = rte_ring_dequeue(pcap_q->pkts, (void **)&pcap_buf);
		if (err)
			return i;

		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *),
				rte_pktmbuf_mtod(pcap_buf, void *),
				pcap_buf->data_len);
		bufs[i]->data_len = pcap_buf->data_len;
		bufs[i]->pkt_len = pcap_buf->pkt_len;
		bufs[i]->port = pcap_q->port_id;
		rx_bytes += pcap_buf->data_len;

		/* Enqueue packet back on ring to allow infinite rx. */
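		/* The ring is single-producer/single-consumer (see
		 * eth_rx_queue_setup()), so this dequeue/enqueue pair from
		 * the polling thread cycles through the pre-generated
		 * packets in order.
		 */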
		rte_ring_enqueue(pcap_q->pkts, pcap_buf);
	}

	pcap_q->rx_stat.pkts += i;
	pcap_q->rx_stat.bytes += rx_bytes;

	return i;
}

static uint16_t
eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	struct pcap_pkthdr header;
	struct pmd_process_private *pp;
	const u_char *packet;
	struct rte_mbuf *mbuf;
	struct pcap_rx_queue *pcap_q = queue;
	uint16_t num_rx = 0;
	uint32_t rx_bytes = 0;
	pcap_t *pcap;

	pp = rte_eth_devices[pcap_q->port_id].process_private;
	pcap = pp->rx_pcap[pcap_q->queue_id];

	if (unlikely(pcap == NULL || nb_pkts == 0))
		return 0;

	/* Reads the given number of packets from the pcap file one by one
	 * and copies the packet data into a newly allocated mbuf to return.
	 */
	for (i = 0; i < nb_pkts; i++) {
		/* Get the next PCAP packet */
		packet = pcap_next(pcap, &header);
		if (unlikely(packet == NULL))
			break;

		mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
		if (unlikely(mbuf == NULL)) {
			pcap_q->rx_stat.rx_nombuf++;
			break;
		}

		if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) {
			/* pcap packet will fit in the mbuf, can copy it */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
					header.caplen);
			mbuf->data_len = (uint16_t)header.caplen;
		} else {
			/* Try to read the jumbo frame into multiple mbufs. */
			if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool,
						mbuf,
						packet,
						header.caplen) == -1)) {
				pcap_q->rx_stat.err_pkts++;
				rte_pktmbuf_free(mbuf);
				break;
			}
		}

		mbuf->pkt_len = (uint16_t)header.caplen;
		*RTE_MBUF_DYNFIELD(mbuf, timestamp_dynfield_offset,
			rte_mbuf_timestamp_t *) =
				(uint64_t)header.ts.tv_sec * 1000000 +
				header.ts.tv_usec;
		mbuf->ol_flags |= timestamp_rx_dynflag;
		mbuf->port = pcap_q->port_id;
		bufs[num_rx] = mbuf;
		num_rx++;
		rx_bytes += header.caplen;
	}
	pcap_q->rx_stat.pkts += num_rx;
	pcap_q->rx_stat.bytes += rx_bytes;

	return num_rx;
}

static uint16_t
eth_null_rx(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

#define NSEC_PER_SEC 1000000000L

/*
 * This function stores nanoseconds in `tv_usec` field of `struct timeval`,
 * because `ts` goes directly to nanosecond-precision dump.
 */
static inline void
calculate_timestamp(struct timeval *ts) {
	uint64_t cycles;
	struct timespec cur_time;

	cycles = rte_get_timer_cycles() - start_cycles;
	cur_time.tv_sec = cycles / hz;
	cur_time.tv_nsec = (cycles % hz) * NSEC_PER_SEC / hz;

	ts->tv_sec = start_time.tv_sec + cur_time.tv_sec;
	ts->tv_usec = start_time.tv_nsec + cur_time.tv_nsec;
	if (ts->tv_usec >= NSEC_PER_SEC) {
		ts->tv_usec -= NSEC_PER_SEC;
		ts->tv_sec += 1;
	}
}
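
/*
 * Worked example for calculate_timestamp(), assuming hz = 2 * 10^9 (a 2 GHz
 * timer): 3 * 10^9 elapsed cycles give cur_time.tv_sec = 1 and
 * cur_time.tv_nsec = (10^9 * 10^9) / (2 * 10^9) = 5 * 10^8, so the dumped
 * timestamp is start_time + 1.5 s.
 */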

/*
 * Callback to handle writing packets to a pcap file.
 */
static uint16_t
eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	struct rte_mbuf *mbuf;
	struct pmd_process_private *pp;
	struct pcap_tx_queue *dumper_q = queue;
	uint16_t num_tx = 0;
	uint32_t tx_bytes = 0;
	struct pcap_pkthdr header;
	pcap_dumper_t *dumper;
	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
	size_t len, caplen;

	pp = rte_eth_devices[dumper_q->port_id].process_private;
	dumper = pp->tx_dumper[dumper_q->queue_id];

	if (dumper == NULL || nb_pkts == 0)
		return 0;

	/* writes the nb_pkts packets to the previously opened pcap file
	 * dumper */
	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];
		len = caplen = rte_pktmbuf_pkt_len(mbuf);
		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
				len > sizeof(temp_data))) {
			caplen = sizeof(temp_data);
		}

		calculate_timestamp(&header.ts);
		header.len = len;
		header.caplen = caplen;
		/* rte_pktmbuf_read() returns a pointer to the data directly
		 * in the mbuf (when the mbuf is contiguous) or, otherwise,
		 * a pointer to temp_data after copying into it.
		 */
		pcap_dump((u_char *)dumper, &header,
				rte_pktmbuf_read(mbuf, 0, caplen, temp_data));

		num_tx++;
		tx_bytes += caplen;
		rte_pktmbuf_free(mbuf);
	}

	/*
	 * Since there's no place to hook a callback when the forwarding
	 * process stops and to make sure the pcap file is actually written,
	 * we flush the pcap dumper within each burst.
	 */
	pcap_dump_flush(dumper);
	dumper_q->tx_stat.pkts += num_tx;
	dumper_q->tx_stat.bytes += tx_bytes;
	dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;

	return nb_pkts;
}
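
/*
 * Note on the dumper above: a multi-segment mbuf whose total length exceeds
 * temp_data is recorded with header.caplen < header.len, i.e. as a truncated
 * snapshot, much like a capture taken with a short snap length.
 */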

/*
 * Callback to handle dropping packets in the infinite rx case.
 */
static uint16_t
eth_tx_drop(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	uint32_t tx_bytes = 0;
	struct pcap_tx_queue *tx_queue = queue;

	if (unlikely(nb_pkts == 0))
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		tx_bytes += bufs[i]->pkt_len;
		rte_pktmbuf_free(bufs[i]);
	}

	tx_queue->tx_stat.pkts += nb_pkts;
	tx_queue->tx_stat.bytes += tx_bytes;

	return i;
}

/*
 * Callback to handle sending packets through a real NIC.
 */
static uint16_t
eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	int ret;
	struct rte_mbuf *mbuf;
	struct pmd_process_private *pp;
	struct pcap_tx_queue *tx_queue = queue;
	uint16_t num_tx = 0;
	uint32_t tx_bytes = 0;
	pcap_t *pcap;
	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
	size_t len;

	pp = rte_eth_devices[tx_queue->port_id].process_private;
	pcap = pp->tx_pcap[tx_queue->queue_id];

	if (unlikely(nb_pkts == 0 || pcap == NULL))
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];
		len = rte_pktmbuf_pkt_len(mbuf);
		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
				len > sizeof(temp_data))) {
			PMD_LOG(ERR,
				"Dropping multi segment PCAP packet. Size (%zd) > max size (%zd).",
				len, sizeof(temp_data));
			rte_pktmbuf_free(mbuf);
			continue;
		}

		/* rte_pktmbuf_read() returns a pointer to the data directly
		 * in the mbuf (when the mbuf is contiguous) or, otherwise,
		 * a pointer to temp_data after copying into it.
		 */
		ret = pcap_sendpacket(pcap,
				rte_pktmbuf_read(mbuf, 0, len, temp_data), len);
		if (unlikely(ret != 0))
			break;
		num_tx++;
		tx_bytes += len;
		rte_pktmbuf_free(mbuf);
	}

	tx_queue->tx_stat.pkts += num_tx;
	tx_queue->tx_stat.bytes += tx_bytes;
	tx_queue->tx_stat.err_pkts += i - num_tx;

	return i;
}

/*
 * pcap_open_live wrapper function
 */
static inline int
open_iface_live(const char *iface, pcap_t **pcap) {
	*pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
			RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);

	if (*pcap == NULL) {
		PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf);
		return -1;
	}

	return 0;
}

static int
open_single_iface(const char *iface, pcap_t **pcap)
{
	if (open_iface_live(iface, pcap) < 0) {
		PMD_LOG(ERR, "Couldn't open interface %s", iface);
		return -1;
	}

	return 0;
}

static int
open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
{
	pcap_t *tx_pcap;

	/*
	 * We need to create a dummy empty pcap_t to use it
	 * with pcap_dump_open(). We create an Ethernet pcap
	 * holder that is big enough.
	 */
	tx_pcap = pcap_open_dead_with_tstamp_precision(DLT_EN10MB,
			RTE_ETH_PCAP_SNAPSHOT_LEN, PCAP_TSTAMP_PRECISION_NANO);
	if (tx_pcap == NULL) {
		PMD_LOG(ERR, "Couldn't create dead pcap");
		return -1;
	}

	/* The dumper is created using the previous pcap_t reference */
	*dumper = pcap_dump_open(tx_pcap, pcap_filename);
	if (*dumper == NULL) {
		pcap_close(tx_pcap);
		PMD_LOG(ERR, "Couldn't open %s for writing.",
			pcap_filename);
		return -1;
	}

	pcap_close(tx_pcap);
	return 0;
}

static int
open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
{
	*pcap = pcap_open_offline(pcap_filename, errbuf);
	if (*pcap == NULL) {
		PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename,
			errbuf);
		return -1;
	}

	return 0;
}

static uint64_t
count_packets_in_pcap(pcap_t **pcap, struct pcap_rx_queue *pcap_q)
{
	const u_char *packet;
	struct pcap_pkthdr header;
	uint64_t pcap_pkt_count = 0;

	while ((packet = pcap_next(*pcap, &header)))
		pcap_pkt_count++;

	/* The pcap is reopened so it can be used as normal later. */
	pcap_close(*pcap);
	*pcap = NULL;
	open_single_rx_pcap(pcap_q->name, pcap);

	return pcap_pkt_count;
}
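
/*
 * Note: libpcap offline handles are read-forward only; once
 * count_packets_in_pcap() above has walked the file to EOF there is no way
 * to rewind, which is why the handle is closed and reopened before the
 * queue reads packets from it normally.
 */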

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *pp = dev->process_private;
	struct pcap_tx_queue *tx;
	struct pcap_rx_queue *rx;

	/* Special iface case. Single pcap is open and shared between tx/rx. */
	if (internals->single_iface) {
		tx = &internals->tx_queue[0];
		rx = &internals->rx_queue[0];

		if (!pp->tx_pcap[0] &&
			strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &pp->tx_pcap[0]) < 0)
				return -1;
			pp->rx_pcap[0] = pp->tx_pcap[0];
		}

		goto status_up;
	}

	/* If not open already, open tx pcaps/dumpers */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		tx = &internals->tx_queue[i];

		if (!pp->tx_dumper[i] &&
				strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
			if (open_single_tx_pcap(tx->name,
				&pp->tx_dumper[i]) < 0)
				return -1;
		} else if (!pp->tx_pcap[i] &&
				strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &pp->tx_pcap[i]) < 0)
				return -1;
		}
	}

	/* If not open already, open rx pcaps */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rx = &internals->rx_queue[i];

		if (pp->rx_pcap[i] != NULL)
			continue;

		if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
			if (open_single_rx_pcap(rx->name, &pp->rx_pcap[i]) < 0)
				return -1;
		} else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
			if (open_single_iface(rx->name, &pp->rx_pcap[i]) < 0)
				return -1;
		}
	}

status_up:
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;

	return 0;
}

/*
 * This function gets called when the current port gets stopped.
 * It is the only place for us to close all the tx stream dumpers.
 * If it is not called, the dumpers are still flushed within each tx burst.
 */
static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *pp = dev->process_private;

	/* Special iface case. Single pcap is open and shared between tx/rx. */
	if (internals->single_iface) {
		queue_missed_stat_on_stop_update(dev, 0);
		if (pp->tx_pcap[0] != NULL) {
			pcap_close(pp->tx_pcap[0]);
			pp->tx_pcap[0] = NULL;
			pp->rx_pcap[0] = NULL;
		}
		goto status_down;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (pp->tx_dumper[i] != NULL) {
			pcap_dump_close(pp->tx_dumper[i]);
			pp->tx_dumper[i] = NULL;
		}

		if (pp->tx_pcap[i] != NULL) {
			pcap_close(pp->tx_pcap[i]);
			pp->tx_pcap[i] = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (pp->rx_pcap[i] != NULL) {
			queue_missed_stat_on_stop_update(dev, i);
			pcap_close(pp->rx_pcap[i]);
			pp->rx_pcap[i] = NULL;
		}
	}

status_down:
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	return 0;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t) -1;
	dev_info->max_rx_queues = dev->data->nb_rx_queues;
	dev_info->max_tx_queues = dev->data->nb_tx_queues;
	dev_info->min_rx_bufsize = 0;

	return 0;
}
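
/*
 * Stats note: ipackets/ibytes below are accumulated from the PMD's own
 * per-queue counters, while imissed is derived from libpcap's pcap_stats()
 * ps_drop counter via queue_missed_stat_get().
 */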
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_packets_total = 0, rx_bytes_total = 0;
	unsigned long rx_missed_total = 0;
	unsigned long rx_nombuf_total = 0, rx_err_total = 0;
	unsigned long tx_packets_total = 0, tx_bytes_total = 0;
	unsigned long tx_packets_err_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts;
		stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
		rx_nombuf_total += internal->rx_queue[i].rx_stat.rx_nombuf;
		rx_err_total += internal->rx_queue[i].rx_stat.err_pkts;
		rx_packets_total += stats->q_ipackets[i];
		rx_bytes_total += stats->q_ibytes[i];
		rx_missed_total += queue_missed_stat_get(dev, i);
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts;
		stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes;
		tx_packets_total += stats->q_opackets[i];
		tx_bytes_total += stats->q_obytes[i];
		tx_packets_err_total += internal->tx_queue[i].tx_stat.err_pkts;
	}

	stats->ipackets = rx_packets_total;
	stats->ibytes = rx_bytes_total;
	stats->imissed = rx_missed_total;
	stats->ierrors = rx_err_total;
	stats->rx_nombuf = rx_nombuf_total;
	stats->opackets = tx_packets_total;
	stats->obytes = tx_bytes_total;
	stats->oerrors = tx_packets_err_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		internal->rx_queue[i].rx_stat.pkts = 0;
		internal->rx_queue[i].rx_stat.bytes = 0;
		internal->rx_queue[i].rx_stat.err_pkts = 0;
		internal->rx_queue[i].rx_stat.rx_nombuf = 0;
		queue_missed_stat_reset(dev, i);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		internal->tx_queue[i].tx_stat.pkts = 0;
		internal->tx_queue[i].tx_stat.bytes = 0;
		internal->tx_queue[i].tx_stat.err_pkts = 0;
	}

	return 0;
}

static inline void
infinite_rx_ring_free(struct rte_ring *pkts)
{
	struct rte_mbuf *bufs;

	while (!rte_ring_dequeue(pkts, (void **)&bufs))
		rte_pktmbuf_free(bufs);

	rte_ring_free(pkts);
}

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internals = dev->data->dev_private;

	PMD_LOG(INFO, "Closing pcap ethdev on NUMA socket %d",
			rte_socket_id());

	eth_dev_stop(dev);

	rte_free(dev->process_private);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Device wide flag, but cleanup must be performed per queue. */
	if (internals->infinite_rx) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			struct pcap_rx_queue *pcap_q = &internals->rx_queue[i];

			/*
			 * 'pcap_q->pkts' can be NULL if 'eth_dev_close()' is
			 * called before 'eth_rx_queue_setup()' has been called
			 */
			if (pcap_q->pkts == NULL)
				continue;

			infinite_rx_ring_free(pcap_q->pkts);
		}
	}

	if (internals->phy_mac == 0)
		/* not dynamically allocated, must not be freed */
		dev->data->mac_addrs = NULL;

	return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];

	pcap_q->mb_pool = mb_pool;
	pcap_q->port_id = dev->data->port_id;
	pcap_q->queue_id = rx_queue_id;
	dev->data->rx_queues[rx_queue_id] = pcap_q;

	if (internals->infinite_rx) {
		struct pmd_process_private *pp;
		char ring_name[RTE_RING_NAMESIZE];
		static uint32_t ring_number;
		uint64_t pcap_pkt_count = 0;
		struct rte_mbuf *bufs[1];
		pcap_t **pcap;

		pp = rte_eth_devices[pcap_q->port_id].process_private;
		pcap = &pp->rx_pcap[pcap_q->queue_id];

		if (unlikely(*pcap == NULL))
			return -ENOENT;

		pcap_pkt_count = count_packets_in_pcap(pcap, pcap_q);

		snprintf(ring_name, sizeof(ring_name), "PCAP_RING%" PRIu32,
				ring_number);

		/* The ring size is rounded up to a power of two, and its
		 * usable capacity is one less than that, so the +1 below
		 * guarantees room for every packet in the file.
		 */
		pcap_q->pkts = rte_ring_create(ring_name,
				rte_align64pow2(pcap_pkt_count + 1), 0,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		ring_number++;
		if (!pcap_q->pkts)
			return -ENOENT;

		/* Fill ring with packets from PCAP file one by one. */
		while (eth_pcap_rx(pcap_q, bufs, 1)) {
			/* Check for multiseg mbufs. */
			if (bufs[0]->nb_segs != 1) {
				infinite_rx_ring_free(pcap_q->pkts);
				PMD_LOG(ERR,
					"Multiseg mbufs are not supported in infinite_rx mode.");
				return -EINVAL;
			}

			rte_ring_enqueue_bulk(pcap_q->pkts,
					(void * const *)bufs, 1, NULL);
		}

		if (rte_ring_count(pcap_q->pkts) < pcap_pkt_count) {
			infinite_rx_ring_free(pcap_q->pkts);
			PMD_LOG(ERR,
				"Not enough mbufs to accommodate packets in pcap file. "
				"At least %" PRIu64 " mbufs per queue is required.",
				pcap_pkt_count);
			return -EINVAL;
		}

		/*
		 * Reset the stats for this queue since eth_pcap_rx calls above
		 * didn't result in the application receiving packets.
		 */
		pcap_q->rx_stat.pkts = 0;
		pcap_q->rx_stat.bytes = 0;
	}

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];

	pcap_q->port_id = dev->data->port_id;
	pcap_q->queue_id = tx_queue_id;
	dev->data->tx_queues[tx_queue_id] = pcap_q;

	return 0;
}

static int
eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_start = eth_rx_queue_start,
	.tx_queue_start = eth_tx_queue_start,
	.rx_queue_stop = eth_rx_queue_stop,
	.tx_queue_stop = eth_tx_queue_stop,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};

static int
add_queue(struct pmd_devargs *pmd, const char *name, const char *type,
		pcap_t *pcap, pcap_dumper_t *dumper)
{
	if (pmd->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
		return -1;
	if (pcap)
		pmd->queue[pmd->num_of_queue].pcap = pcap;
	if (dumper)
		pmd->queue[pmd->num_of_queue].dumper = dumper;
	pmd->queue[pmd->num_of_queue].name = name;
	pmd->queue[pmd->num_of_queue].type = type;
	pmd->num_of_queue++;
	return 0;
}

/*
 * Function handler that opens the pcap file for reading and stores a
 * reference to it for later use.
 */
static int
open_rx_pcap(const char *key, const char *value, void *extra_args)
{
	const char *pcap_filename = value;
	struct pmd_devargs *rx = extra_args;
	pcap_t *pcap = NULL;

	if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
		return -1;

	if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) {
		pcap_close(pcap);
		return -1;
	}

	return 0;
}

/*
 * Opens a pcap file for writing and stores a reference to it
 * for later use.
 */
static int
open_tx_pcap(const char *key, const char *value, void *extra_args)
{
	const char *pcap_filename = value;
	struct pmd_devargs *dumpers = extra_args;
	pcap_dumper_t *dumper;

	if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
		return -1;

	if (add_queue(dumpers, pcap_filename, key, NULL, dumper) < 0) {
		pcap_dump_close(dumper);
		return -1;
	}

	return 0;
}

/*
 * Opens an interface for reading and writing
 */
static inline int
open_rx_tx_iface(const char *key, const char *value, void *extra_args)
{
	const char *iface = value;
	struct pmd_devargs *tx = extra_args;
	pcap_t *pcap = NULL;

	if (open_single_iface(iface, &pcap) < 0)
		return -1;

	tx->queue[0].pcap = pcap;
	tx->queue[0].name = iface;
	tx->queue[0].type = key;

	return 0;
}
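
/*
 * Note: open_rx_tx_iface() fills queue[0] directly because the 'iface'
 * argument creates a single-interface device where one pcap handle is
 * shared between RX and TX (see the single_iface path in eth_dev_start()).
 */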
"IN" : "OUT"; 1095436c089aSDmitry Kozlyuk if (pcap_setdirection(pcap, direction) < 0) { 1096436c089aSDmitry Kozlyuk PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s\n", 1097436c089aSDmitry Kozlyuk iface, direction_str, pcap_geterr(pcap)); 1098436c089aSDmitry Kozlyuk return -1; 1099436c089aSDmitry Kozlyuk } 1100436c089aSDmitry Kozlyuk PMD_LOG(INFO, "Setting %s pcap direction %s\n", 1101436c089aSDmitry Kozlyuk iface, direction_str); 1102436c089aSDmitry Kozlyuk return 0; 1103436c089aSDmitry Kozlyuk } 1104436c089aSDmitry Kozlyuk 1105436c089aSDmitry Kozlyuk static inline int 1106436c089aSDmitry Kozlyuk open_iface(const char *key, const char *value, void *extra_args) 1107436c089aSDmitry Kozlyuk { 1108436c089aSDmitry Kozlyuk const char *iface = value; 1109436c089aSDmitry Kozlyuk struct pmd_devargs *pmd = extra_args; 1110436c089aSDmitry Kozlyuk pcap_t *pcap = NULL; 1111436c089aSDmitry Kozlyuk 1112436c089aSDmitry Kozlyuk if (open_single_iface(iface, &pcap) < 0) 1113436c089aSDmitry Kozlyuk return -1; 1114436c089aSDmitry Kozlyuk if (add_queue(pmd, iface, key, pcap, NULL) < 0) { 1115436c089aSDmitry Kozlyuk pcap_close(pcap); 1116436c089aSDmitry Kozlyuk return -1; 1117436c089aSDmitry Kozlyuk } 1118436c089aSDmitry Kozlyuk 1119436c089aSDmitry Kozlyuk return 0; 1120436c089aSDmitry Kozlyuk } 1121436c089aSDmitry Kozlyuk 1122436c089aSDmitry Kozlyuk /* 1123436c089aSDmitry Kozlyuk * Opens a NIC for reading packets from it 1124436c089aSDmitry Kozlyuk */ 1125436c089aSDmitry Kozlyuk static inline int 1126436c089aSDmitry Kozlyuk open_rx_iface(const char *key, const char *value, void *extra_args) 1127436c089aSDmitry Kozlyuk { 1128436c089aSDmitry Kozlyuk int ret = open_iface(key, value, extra_args); 1129436c089aSDmitry Kozlyuk if (ret < 0) 1130436c089aSDmitry Kozlyuk return ret; 1131436c089aSDmitry Kozlyuk if (strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) { 1132436c089aSDmitry Kozlyuk struct pmd_devargs *pmd = extra_args; 1133436c089aSDmitry Kozlyuk unsigned int qid = pmd->num_of_queue - 1; 1134436c089aSDmitry Kozlyuk 1135436c089aSDmitry Kozlyuk set_iface_direction(pmd->queue[qid].name, 1136436c089aSDmitry Kozlyuk pmd->queue[qid].pcap, 1137436c089aSDmitry Kozlyuk PCAP_D_IN); 1138436c089aSDmitry Kozlyuk } 1139436c089aSDmitry Kozlyuk 1140436c089aSDmitry Kozlyuk return 0; 1141436c089aSDmitry Kozlyuk } 1142436c089aSDmitry Kozlyuk 1143436c089aSDmitry Kozlyuk static inline int 1144436c089aSDmitry Kozlyuk rx_iface_args_process(const char *key, const char *value, void *extra_args) 1145436c089aSDmitry Kozlyuk { 1146436c089aSDmitry Kozlyuk if (strcmp(key, ETH_PCAP_RX_IFACE_ARG) == 0 || 1147436c089aSDmitry Kozlyuk strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) 1148436c089aSDmitry Kozlyuk return open_rx_iface(key, value, extra_args); 1149436c089aSDmitry Kozlyuk 1150436c089aSDmitry Kozlyuk return 0; 1151436c089aSDmitry Kozlyuk } 1152436c089aSDmitry Kozlyuk 1153436c089aSDmitry Kozlyuk /* 1154436c089aSDmitry Kozlyuk * Opens a NIC for writing packets to it 1155436c089aSDmitry Kozlyuk */ 1156436c089aSDmitry Kozlyuk static int 1157436c089aSDmitry Kozlyuk open_tx_iface(const char *key, const char *value, void *extra_args) 1158436c089aSDmitry Kozlyuk { 1159436c089aSDmitry Kozlyuk return open_iface(key, value, extra_args); 1160436c089aSDmitry Kozlyuk } 1161436c089aSDmitry Kozlyuk 1162436c089aSDmitry Kozlyuk static int 1163436c089aSDmitry Kozlyuk select_phy_mac(const char *key __rte_unused, const char *value, 1164436c089aSDmitry Kozlyuk void *extra_args) 1165436c089aSDmitry Kozlyuk { 1166436c089aSDmitry Kozlyuk if (extra_args) { 
static int
select_phy_mac(const char *key __rte_unused, const char *value,
		void *extra_args)
{
	if (extra_args) {
		const int phy_mac = atoi(value);
		int *enable_phy_mac = extra_args;

		if (phy_mac)
			*enable_phy_mac = 1;
	}
	return 0;
}

static int
get_infinite_rx_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	if (extra_args) {
		const int infinite_rx = atoi(value);
		int *enable_infinite_rx = extra_args;

		if (infinite_rx > 0)
			*enable_infinite_rx = 1;
	}
	return 0;
}

static int
pmd_init_internals(struct rte_vdev_device *vdev,
		const unsigned int nb_rx_queues,
		const unsigned int nb_tx_queues,
		struct pmd_internals **internals,
		struct rte_eth_dev **eth_dev)
{
	struct rte_eth_dev_data *data;
	struct pmd_process_private *pp;
	unsigned int numa_node = vdev->device.numa_node;

	PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
		numa_node);

	pp = (struct pmd_process_private *)
		rte_zmalloc(NULL, sizeof(struct pmd_process_private),
				RTE_CACHE_LINE_SIZE);

	if (pp == NULL) {
		PMD_LOG(ERR,
			"Failed to allocate memory for process private");
		return -1;
	}

	/* reserve an ethdev entry */
	*eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
	if (!(*eth_dev)) {
		rte_free(pp);
		return -1;
	}
	(*eth_dev)->process_private = pp;
	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	*internals = (*eth_dev)->data->dev_private;
	/*
	 * Interface MAC = 02:70:63:61:70:<iface_idx>
	 * derived from: 'locally administered':'p':'c':'a':'p':'iface_idx'
	 * where the middle 4 characters are converted to hex.
	 */
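	/*
	 * For example (worked out from the scheme above): the first port
	 * created gets iface_idx 0 and thus MAC 02:70:63:61:70:00; 0x70,
	 * 0x63, 0x61 and 0x70 are the ASCII codes of 'p', 'c', 'a', 'p',
	 * and the 0x02 bit of the first octet marks the address as
	 * locally administered.
	 */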
	(*internals)->eth_addr = (struct rte_ether_addr) {
		.addr_bytes = { 0x02, 0x70, 0x63, 0x61, 0x70, iface_idx++ }
	};
	(*internals)->phy_mac = 0;
	data = (*eth_dev)->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &(*internals)->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;
	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/*
	 * NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the rings are local per-process
	 */
	(*eth_dev)->dev_ops = &ops;

	strlcpy((*internals)->devargs, rte_vdev_device_args(vdev),
			ETH_PCAP_ARG_MAXLEN);

	return 0;
}

static int
eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev,
		const unsigned int numa_node)
{
	void *mac_addrs;
	struct rte_ether_addr mac;

	if (osdep_iface_mac_get(if_name, &mac) < 0)
		return -1;

	mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);
	if (mac_addrs == NULL)
		return -1;

	PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
	rte_memcpy(mac_addrs, mac.addr_bytes, RTE_ETHER_ADDR_LEN);
	eth_dev->data->mac_addrs = mac_addrs;
	return 0;
}
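/*
 * Note on layout (a descriptive comment, derived from the structures
 * above): the pcap_t and pcap_dumper_t handles live in
 * pmd_process_private rather than in dev_private because they are only
 * valid in the process that opened them; a secondary process builds
 * its own copies in pmd_pcap_probe() below.
 */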
		struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
		struct devargs_queue *queue = &rx_queues->queue[i];

		pp->rx_pcap[i] = queue->pcap;
		strlcpy(rx->name, queue->name, sizeof(rx->name));
		strlcpy(rx->type, queue->type, sizeof(rx->type));
	}

	for (i = 0; i < nb_tx_queues; i++) {
		struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
		struct devargs_queue *queue = &tx_queues->queue[i];

		pp->tx_dumper[i] = queue->dumper;
		pp->tx_pcap[i] = queue->pcap;
		strlcpy(tx->name, queue->name, sizeof(tx->name));
		strlcpy(tx->type, queue->type, sizeof(tx->type));
	}

	return 0;
}
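/*
 * eth_from_pcaps_common() above only copies queue metadata; the
 * wrapper below additionally selects the rx/tx burst functions and,
 * for the single-iface case, optionally applies the physical MAC.
 */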
static int
eth_from_pcaps(struct rte_vdev_device *vdev,
		struct pmd_devargs_all *devargs_all)
{
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_devargs *rx_queues = &devargs_all->rx_queues;
	int single_iface = devargs_all->single_iface;
	unsigned int infinite_rx = devargs_all->infinite_rx;
	int ret;

	ret = eth_from_pcaps_common(vdev, devargs_all, &internals, &eth_dev);
	if (ret < 0)
		return ret;

	/* store whether we are using a single interface for rx/tx or not */
	internals->single_iface = single_iface;

	if (single_iface) {
		internals->if_index =
			osdep_iface_index_get(rx_queues->queue[0].name);

		/* phy_mac arg is applied only if "iface" devarg is provided */
		if (rx_queues->phy_mac) {
			if (eth_pcap_update_mac(rx_queues->queue[0].name,
					eth_dev, vdev->device.numa_node) == 0)
				internals->phy_mac = 1;
		}
	}

	internals->infinite_rx = infinite_rx;

	/* Assign rx ops. */
	if (infinite_rx)
		eth_dev->rx_pkt_burst = eth_pcap_rx_infinite;
	else if (devargs_all->is_rx_pcap || devargs_all->is_rx_iface ||
			single_iface)
		eth_dev->rx_pkt_burst = eth_pcap_rx;
	else
		eth_dev->rx_pkt_burst = eth_null_rx;

	/* Assign tx ops. */
	if (devargs_all->is_tx_pcap)
		eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
	else if (devargs_all->is_tx_iface || single_iface)
		eth_dev->tx_pkt_burst = eth_pcap_tx;
	else
		eth_dev->tx_pkt_burst = eth_tx_drop;

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

static void
eth_release_pcaps(struct pmd_devargs *pcaps,
		struct pmd_devargs *dumpers,
		int single_iface)
{
	unsigned int i;

	if (single_iface) {
		if (pcaps->queue[0].pcap)
			pcap_close(pcaps->queue[0].pcap);
		return;
	}

	for (i = 0; i < dumpers->num_of_queue; i++) {
		if (dumpers->queue[i].dumper)
			pcap_dump_close(dumpers->queue[i].dumper);

		if (dumpers->queue[i].pcap)
			pcap_close(dumpers->queue[i].pcap);
	}

	for (i = 0; i < pcaps->num_of_queue; i++) {
		if (pcaps->queue[i].pcap)
			pcap_close(pcaps->queue[i].pcap);
	}
}
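/*
 * Sketch of the probe flow below (derived from the code, for
 * reference): an "iface" devarg short-circuits to create_eth with one
 * rx and one tx queue sharing a pcap handle; otherwise the
 * rx_pcap/rx_iface[_in] and tx_pcap/tx_iface devargs are processed
 * independently, with dummy queues filling whichever side was not
 * supplied.
 */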
static int
pmd_pcap_probe(struct rte_vdev_device *dev)
{
	const char *name;
	struct rte_kvargs *kvlist;
	struct pmd_devargs pcaps = {0};
	struct pmd_devargs dumpers = {0};
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internal;
	int ret = 0;

	struct pmd_devargs_all devargs_all = {
		.single_iface = 0,
		.is_tx_pcap = 0,
		.is_tx_iface = 0,
		.infinite_rx = 0,
	};

	name = rte_vdev_device_name(dev);
	PMD_LOG(INFO, "Initializing pmd_pcap for %s", name);

	timespec_get(&start_time, TIME_UTC);
	start_cycles = rte_get_timer_cycles();
	hz = rte_get_timer_hz();

	ret = rte_mbuf_dyn_rx_timestamp_register(&timestamp_dynfield_offset,
			&timestamp_rx_dynflag);
	if (ret != 0) {
		PMD_LOG(ERR, "Failed to register Rx timestamp field/flag");
		return -1;
	}

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}

		internal = eth_dev->data->dev_private;

		kvlist = rte_kvargs_parse(internal->devargs, valid_arguments);
		if (kvlist == NULL)
			return -1;
	} else {
		kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
				valid_arguments);
		if (kvlist == NULL)
			return -1;
	}

	/*
	 * If iface argument is passed we open the NICs and use them for
	 * reading / writing
	 */
	if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {

		ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
				&open_rx_tx_iface, &pcaps);
		if (ret < 0)
			goto free_kvlist;

		dumpers.queue[0] = pcaps.queue[0];

		ret = rte_kvargs_process(kvlist, ETH_PCAP_PHY_MAC_ARG,
				&select_phy_mac, &pcaps.phy_mac);
		if (ret < 0)
			goto free_kvlist;

		dumpers.phy_mac = pcaps.phy_mac;

		devargs_all.single_iface = 1;
		pcaps.num_of_queue = 1;
		dumpers.num_of_queue = 1;

		goto create_eth;
	}

	/*
	 * We check whether we want to open a RX stream from a real NIC, a
	 * pcap file or open a dummy RX stream
	 */
	devargs_all.is_rx_pcap =
		rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
	devargs_all.is_rx_iface =
		(rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_ARG) +
		 rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_IN_ARG)) ? 1 : 0;
	pcaps.num_of_queue = 0;

	devargs_all.is_tx_pcap =
		rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
	devargs_all.is_tx_iface =
		rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG) ? 1 : 0;
	dumpers.num_of_queue = 0;

	if (devargs_all.is_rx_pcap) {
		/*
		 * We check whether we want to infinitely rx the pcap file.
		 */
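		/*
		 * (For reference, assuming the behaviour described for the
		 * pkts ring in pcap_rx_queue: with infinite_rx=1 the file
		 * contents are pre-copied into an rte_ring at queue setup
		 * and replayed from memory, so the file itself is not read
		 * in the rx hot path.)
		 */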
		unsigned int infinite_rx_arg_cnt = rte_kvargs_count(kvlist,
				ETH_PCAP_INFINITE_RX_ARG);

		if (infinite_rx_arg_cnt == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_PCAP_INFINITE_RX_ARG,
					&get_infinite_rx_arg,
					&devargs_all.infinite_rx);
			if (ret < 0)
				goto free_kvlist;
			PMD_LOG(INFO, "infinite_rx has been %s for %s",
					devargs_all.infinite_rx ? "enabled" : "disabled",
					name);
		} else if (infinite_rx_arg_cnt > 1) {
			PMD_LOG(WARNING, "infinite_rx has not been enabled since the "
					"argument has been provided more than once "
					"for %s", name);
		}

		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
				&open_rx_pcap, &pcaps);
	} else if (devargs_all.is_rx_iface) {
		ret = rte_kvargs_process(kvlist, NULL,
				&rx_iface_args_process, &pcaps);
	} else if (devargs_all.is_tx_iface || devargs_all.is_tx_pcap) {
		unsigned int i;

		/* Count number of tx queue args passed before dummy rx queue
		 * creation so a dummy rx queue can be created for each tx queue
		 */
		unsigned int num_tx_queues =
			(rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) +
			 rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG));

		PMD_LOG(INFO, "Creating null rx queue since no rx queues were provided.");

		/* Creating a dummy rx queue for each tx queue passed */
		for (i = 0; i < num_tx_queues; i++)
			ret = add_queue(&pcaps, "dummy_rx", "rx_null", NULL,
					NULL);
	} else {
		PMD_LOG(ERR, "Error - No rx or tx queues provided");
		ret = -ENOENT;
	}
	if (ret < 0)
		goto free_kvlist;

	/*
	 * We check whether we want to open a TX stream to a real NIC,
	 * a pcap file, or drop packets on tx
	 */
	if (devargs_all.is_tx_pcap) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
				&open_tx_pcap, &dumpers);
	} else if (devargs_all.is_tx_iface) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
				&open_tx_iface, &dumpers);
	} else {
		unsigned int i;

		PMD_LOG(INFO, "Dropping packets on tx since no tx queues were provided.");

		/* Add 1 dummy queue per rxq which counts and drops packets. */
		for (i = 0; i < pcaps.num_of_queue; i++)
			ret = add_queue(&dumpers, "dummy_tx", "tx_drop", NULL,
					NULL);
	}

	if (ret < 0)
		goto free_kvlist;

create_eth:
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_process_private *pp;
		unsigned int i;

		internal = eth_dev->data->dev_private;
		pp = (struct pmd_process_private *)
			rte_zmalloc(NULL,
				sizeof(struct pmd_process_private),
				RTE_CACHE_LINE_SIZE);

		if (pp == NULL) {
			PMD_LOG(ERR,
				"Failed to allocate memory for process private");
			ret = -1;
			goto free_kvlist;
		}

		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;

		/* setup process private */
		for (i = 0; i < pcaps.num_of_queue; i++)
			pp->rx_pcap[i] = pcaps.queue[i].pcap;

		for (i = 0; i < dumpers.num_of_queue; i++) {
			pp->tx_dumper[i] = dumpers.queue[i].dumper;
			pp->tx_pcap[i] = dumpers.queue[i].pcap;
		}

		eth_dev->process_private = pp;
		eth_dev->rx_pkt_burst = eth_pcap_rx;
		if (devargs_all.is_tx_pcap)
			eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
		else
			eth_dev->tx_pkt_burst = eth_pcap_tx;

		rte_eth_dev_probing_finish(eth_dev);
		goto free_kvlist;
	}

	devargs_all.rx_queues = pcaps;
	devargs_all.tx_queues = dumpers;

	ret = eth_from_pcaps(dev, &devargs_all);

free_kvlist:
	rte_kvargs_free(kvlist);

	if (ret < 0)
		eth_release_pcaps(&pcaps, &dumpers, devargs_all.single_iface);

	return ret;
}
static int
pmd_pcap_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -1;

	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_pcap_drv = {
	.probe = pmd_pcap_probe,
	.remove = pmd_pcap_remove,
};

RTE_PMD_REGISTER_VDEV(net_pcap, pmd_pcap_drv);
RTE_PMD_REGISTER_ALIAS(net_pcap, eth_pcap);
RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
	ETH_PCAP_RX_PCAP_ARG "=<string> "
	ETH_PCAP_TX_PCAP_ARG "=<string> "
	ETH_PCAP_RX_IFACE_ARG "=<ifc> "
	ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
	ETH_PCAP_TX_IFACE_ARG "=<ifc> "
	ETH_PCAP_IFACE_ARG "=<ifc> "
	ETH_PCAP_PHY_MAC_ARG "=<int> "
	ETH_PCAP_INFINITE_RX_ARG "=<0|1>");
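/*
 * Illustrative invocation (an assumed typical usage, consistent with
 * the parameter string above; see the DPDK pcap PMD guide for the
 * authoritative syntax):
 *   dpdk-testpmd --vdev 'net_pcap0,rx_pcap=input.pcap,tx_pcap=output.pcap' -- -i
 */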