/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdarg.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_string_fns.h>
#include <rte_flow.h>

#include "testpmd.h"

/*
 * Forwarding of packets in MAC mode.
 * Change the source and the destination Ethernet addresses of packets
 * before forwarding them.
 */
static void
pkt_burst_mac_forward(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_port *txp;
	struct rte_mbuf *mb;
	struct ether_hdr *eth_hdr;
	uint32_t retry;
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t i;
	uint64_t ol_flags = 0;
	uint64_t tx_offloads;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/*
	 * Receive a burst of packets and forward them.
	 */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
	fs->rx_packets += nb_rx;
	txp = &ports[fs->tx_port];

	/* Derive per-packet offload flags from the TX port configuration. */
	tx_offloads = txp->dev_conf.txmode.offloads;
	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
		ol_flags = PKT_TX_VLAN_PKT;
	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
		ol_flags |= PKT_TX_QINQ_PKT;
	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
		ol_flags |= PKT_TX_MACSEC;

	/*
	 * Rewrite the source and destination MAC addresses of each packet
	 * and attach the TX metadata, prefetching the next header while
	 * the current one is being modified.
	 */
	for (i = 0; i < nb_rx; i++) {
		if (likely(i < nb_rx - 1))
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
						       void *));
		mb = pkts_burst[i];
		eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
				&eth_hdr->d_addr);
		ether_addr_copy(&ports[fs->tx_port].eth_addr,
				&eth_hdr->s_addr);
		mb->ol_flags = ol_flags;
		mb->l2_len = sizeof(struct ether_hdr);
		mb->l3_len = sizeof(struct ipv4_hdr);
		mb->vlan_tci = txp->tx_vlan_id;
		mb->vlan_tci_outer = txp->tx_vlan_id_outer;
	}
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
	/*
	 * Retry if necessary
	 */
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}

	fs->tx_packets += nb_tx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	/* Account for and free any packets the TX path did not accept. */
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
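
/*
 * Engine descriptor dispatched by the testpmd forwarding lcores. For
 * context, a minimal sketch (abridged from testpmd.c, not part of this
 * file) of how the engine is registered in the fwd_engines[] table:
 *
 *	struct fwd_engine *fwd_engines[] = {
 *		&io_fwd_engine,
 *		&mac_fwd_engine,
 *		...
 *	};
 *
 * Once "mac" mode is selected, each forwarding lcore calls .packet_fwd,
 * i.e. pkt_burst_mac_forward(), once per enabled stream in its polling
 * loop.
 */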
struct fwd_engine mac_fwd_engine = {
	.fwd_mode_name  = "mac",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_mac_forward,
};
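
/*
 * Usage note (testpmd runtime; see the testpmd user guide): this engine is
 * selected with "set fwd mac" at the testpmd> prompt, or with
 * --forward-mode=mac on the command line. The TX retry knobs used above,
 * burst_tx_delay_time and burst_tx_retry_num, can be tuned at runtime with
 * "set burst tx delay (microseconds) retry (num)".
 */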