/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2014-2020 Mellanox Technologies, Ltd
 */

#include <stdarg.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>
#include <rte_flow.h>

#include "testpmd.h"

/* hardcoded configuration (for now) */
static unsigned cfg_n_flows = 1024;
static uint32_t cfg_ip_src = RTE_IPV4(10, 254, 0, 0);
static uint32_t cfg_ip_dst = RTE_IPV4(10, 253, 0, 0);
static uint16_t cfg_udp_src = 1000;
static uint16_t cfg_udp_dst = 1001;
static struct rte_ether_addr cfg_ether_src =
	{{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x00 }};
static struct rte_ether_addr cfg_ether_dst =
	{{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x01 }};

#define IP_DEFTTL  64   /* from RFC 1340. */

static inline uint16_t
ip_sum(const unaligned_uint16_t *hdr, int hdr_len)
{
	uint32_t sum = 0;

	while (hdr_len > 1) {
		sum += *hdr++;
		if (sum & 0x80000000)
			sum = (sum & 0xFFFF) + (sum >> 16);
		hdr_len -= 2;
	}

	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);

	return ~sum;
}
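/*
 * Usage sketch for ip_sum() (illustrative only, not part of the build):
 * sum the header with hdr_checksum zeroed, then store the result. As a
 * check, re-summing the completed header folds to 0xFFFF, so ip_sum()
 * returns 0 -- the usual RFC 1071 verification identity.
 *
 *	struct rte_ipv4_hdr ih;
 *
 *	memset(&ih, 0, sizeof(ih));
 *	ih.version_ihl = RTE_IPV4_VHL_DEF;
 *	ih.hdr_checksum = 0;
 *	ih.hdr_checksum = ip_sum((unaligned_uint16_t *)&ih, sizeof(ih));
 *	RTE_ASSERT(ip_sum((unaligned_uint16_t *)&ih, sizeof(ih)) == 0);
 */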
/*
 * Multi-flow generation mode.
 *
 * We originate a bunch of flows (varying destination IP addresses), and
 * terminate receive traffic. Received traffic is simply discarded, but
 * it is still counted so that traffic statistics remain accurate.
 */
static void
pkt_burst_flow_gen(struct fwd_stream *fs)
{
	unsigned pkt_size = tx_pkt_length - 4;	/* Adjust for the 4-byte FCS. */
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_mempool *mbp;
	struct rte_mbuf *pkt;
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ip_hdr;
	struct rte_udp_hdr *udp_hdr;
	uint16_t vlan_tci, vlan_tci_outer;
	uint64_t ol_flags = 0;
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t nb_pkt;
	uint16_t i;
	uint32_t retry;
	uint64_t tx_offloads;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif
	/* Note: shared by all lcores running this engine. */
	static int next_flow = 0;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/* Receive a burst of packets and discard them. */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	fs->rx_packets += nb_rx;

	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts_burst[i]);

	mbp = current_fwd_lcore()->mbp;
	vlan_tci = ports[fs->tx_port].tx_vlan_id;
	vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer;

	tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
		ol_flags |= PKT_TX_VLAN_PKT;
	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
		ol_flags |= PKT_TX_QINQ_PKT;
	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
		ol_flags |= PKT_TX_MACSEC;

	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
		pkt = rte_mbuf_raw_alloc(mbp);
		if (!pkt)
			break;

		pkt->data_len = pkt_size;
		pkt->next = NULL;

		/* Initialize Ethernet header. */
		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		rte_ether_addr_copy(&cfg_ether_dst, &eth_hdr->d_addr);
		rte_ether_addr_copy(&cfg_ether_src, &eth_hdr->s_addr);
		eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);

		/* Initialize IP header. */
		ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
		memset(ip_hdr, 0, sizeof(*ip_hdr));
		ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
		ip_hdr->type_of_service = 0;
		ip_hdr->fragment_offset = 0;
		ip_hdr->time_to_live = IP_DEFTTL;
		ip_hdr->next_proto_id = IPPROTO_UDP;
		ip_hdr->packet_id = 0;
		ip_hdr->src_addr = rte_cpu_to_be_32(cfg_ip_src);
		ip_hdr->dst_addr = rte_cpu_to_be_32(cfg_ip_dst +
						    next_flow);
		ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_size -
							sizeof(*eth_hdr));
		ip_hdr->hdr_checksum = ip_sum((unaligned_uint16_t *)ip_hdr,
					      sizeof(*ip_hdr));

		/* Initialize UDP header. */
		udp_hdr = (struct rte_udp_hdr *)(ip_hdr + 1);
		udp_hdr->src_port = rte_cpu_to_be_16(cfg_udp_src);
		udp_hdr->dst_port = rte_cpu_to_be_16(cfg_udp_dst);
		udp_hdr->dgram_cksum = 0; /* No UDP checksum. */
		udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_size -
						      sizeof(*eth_hdr) -
						      sizeof(*ip_hdr));
		pkt->nb_segs = 1;
		pkt->pkt_len = pkt_size;
		pkt->ol_flags &= EXT_ATTACHED_MBUF;
		pkt->ol_flags |= ol_flags;
		pkt->vlan_tci = vlan_tci;
		pkt->vlan_tci_outer = vlan_tci_outer;
		pkt->l2_len = sizeof(struct rte_ether_hdr);
		pkt->l3_len = sizeof(struct rte_ipv4_hdr);
		pkts_burst[nb_pkt] = pkt;

		next_flow = (next_flow + 1) % cfg_n_flows;
	}

	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
	/*
	 * Retry if necessary. Retries are bounded by the number of packets
	 * actually generated (nb_pkt), not by the receive count.
	 */
	if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts_burst[nb_tx], nb_pkt - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	if (unlikely(nb_tx < nb_pkt)) {
		/* Back out the flow counter for the unsent packets. */
		next_flow -= (nb_pkt - nb_tx);
		while (next_flow < 0)
			next_flow += cfg_n_flows;

		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_pkt);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
struct fwd_engine flow_gen_engine = {
	.fwd_mode_name  = "flowgen",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_flow_gen,
};
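/*
 * Selecting the engine (illustrative): flow_gen_engine is registered in
 * the fwd_engines[] table in testpmd.c, so the mode is enabled at the
 * testpmd prompt with:
 *
 *	testpmd> set fwd flowgen
 *	testpmd> start
 *
 * Each burst then cycles the destination IP address over cfg_n_flows
 * consecutive values starting at cfg_ip_dst.
 */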