/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2014-2020 Mellanox Technologies, Ltd
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_flow.h>

#include "macswap_common.h"
#include "testpmd.h"

static inline void
swap_mac(struct rte_ether_hdr *eth_hdr)
{
	struct rte_ether_addr addr;

	/* Swap destination and source MAC addresses. */
	rte_ether_addr_copy(&eth_hdr->d_addr, &addr);
	rte_ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr);
	rte_ether_addr_copy(&addr, &eth_hdr->s_addr);
}

static inline void
swap_ipv4(struct rte_ipv4_hdr *ipv4_hdr)
{
	rte_be32_t addr;

	/* Swap destination and source IPv4 addresses. */
	addr = ipv4_hdr->src_addr;
	ipv4_hdr->src_addr = ipv4_hdr->dst_addr;
	ipv4_hdr->dst_addr = addr;
}

static inline void
swap_ipv6(struct rte_ipv6_hdr *ipv6_hdr)
{
	uint8_t addr[16];

	/* Swap destination and source IPv6 addresses. */
	memcpy(&addr, &ipv6_hdr->src_addr, 16);
	memcpy(&ipv6_hdr->src_addr, &ipv6_hdr->dst_addr, 16);
	memcpy(&ipv6_hdr->dst_addr, &addr, 16);
}

static inline void
swap_tcp(struct rte_tcp_hdr *tcp_hdr)
{
	rte_be16_t port;

	/* Swap destination and source TCP ports. */
	port = tcp_hdr->src_port;
	tcp_hdr->src_port = tcp_hdr->dst_port;
	tcp_hdr->dst_port = port;
}

static inline void
swap_udp(struct rte_udp_hdr *udp_hdr)
{
	rte_be16_t port;

	/* Swap destination and source UDP ports. */
	port = udp_hdr->src_port;
	udp_hdr->src_port = udp_hdr->dst_port;
	udp_hdr->dst_port = port;
}

/*
 * 5-tuple swap forwarding mode: swap source and destination at layers
 * 2, 3 and 4 (MAC addresses, IPv4/IPv6 addresses, TCP/UDP ports).
 * Each layer is parsed and swapped in turn; parsing stops at the first
 * layer whose protocol is not recognized.
 */
static void
pkt_burst_5tuple_swap(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_port *txp;
	struct rte_mbuf *mb;
	uint16_t next_proto;
	uint64_t ol_flags;
	uint16_t proto;
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint32_t retry;

	int i;
	/* One pointer, several typed views: used to walk the headers. */
	union {
		struct rte_ether_hdr *eth;
		struct rte_vlan_hdr *vlan;
		struct rte_ipv4_hdr *ipv4;
		struct rte_ipv6_hdr *ipv6;
		struct rte_tcp_hdr *tcp;
		struct rte_udp_hdr *udp;
		uint8_t *byte;
	} h;

	uint64_t start_tsc = 0;

	get_start_cycles(&start_tsc);

	/*
	 * Receive a burst of packets and forward them.
	 */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	inc_rx_burst_stats(fs, nb_rx);
	if (unlikely(nb_rx == 0))
		return;

	fs->rx_packets += nb_rx;
	txp = &ports[fs->tx_port];
	ol_flags = ol_flags_init(txp->dev_conf.txmode.offloads);
	vlan_qinq_set(pkts_burst, nb_rx, ol_flags,
		      txp->tx_vlan_id, txp->tx_vlan_id_outer);
	for (i = 0; i < nb_rx; i++) {
		if (likely(i < nb_rx - 1))
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i+1],
						       void *));
		mb = pkts_burst[i];
		h.eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
		proto = h.eth->ether_type;
		swap_mac(h.eth);
		mb->l2_len = sizeof(struct rte_ether_hdr);
		h.eth++;
		/* Skip over any VLAN/QinQ tags to reach the L3 header. */
		while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
		       proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
			proto = h.vlan->eth_proto;
			h.vlan++;
			mb->l2_len += sizeof(struct rte_vlan_hdr);
		}
		if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
			swap_ipv4(h.ipv4);
			next_proto = h.ipv4->next_proto_id;
			mb->l3_len = rte_ipv4_hdr_len(h.ipv4);
			h.byte += mb->l3_len;
		} else if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV6)) {
			swap_ipv6(h.ipv6);
			next_proto = h.ipv6->proto;
			h.ipv6++;
			mb->l3_len = sizeof(struct rte_ipv6_hdr);
		} else {
			/* Unknown L3 protocol: stop parsing this packet. */
			mbuf_field_set(mb, ol_flags);
			continue;
		}
		if (next_proto == IPPROTO_UDP) {
			swap_udp(h.udp);
			mb->l4_len = sizeof(struct rte_udp_hdr);
		} else if (next_proto == IPPROTO_TCP) {
			swap_tcp(h.tcp);
			/* TCP data offset is in 32-bit words, upper nibble. */
			mb->l4_len = (h.tcp->data_off & 0xf0) >> 2;
		}
		mbuf_field_set(mb, ol_flags);
	}
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
	/*
	 * Retry if necessary
	 */
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;
	inc_tx_burst_stats(fs, nb_tx);
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
	get_end_cycles(fs, start_tsc);
}

struct fwd_engine five_tuple_swap_fwd_engine = {
	.fwd_mode_name = "5tswap",
	.port_fwd_begin = NULL,
	.port_fwd_end = NULL,
	.packet_fwd = pkt_burst_5tuple_swap,
};
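
/*
 * Usage sketch (illustrative, not part of this file's logic, and assuming
 * the standard testpmd conventions): the engine above takes effect once it
 * is referenced from the fwd_engines[] table in testpmd.c and is then
 * selected at the testpmd prompt by its fwd_mode_name:
 *
 *   testpmd> set fwd 5tswap
 *   testpmd> start
 *
 * With this mode active, each received packet is bounced back to the wire
 * with its L2/L3/L4 sources and destinations exchanged, which is useful for
 * round-trip latency and loopback-style testing.
 */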