/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <getopt.h>
#include <stdint.h>
#include <stdio.h>
#include <signal.h>
#include <sched.h>

#include "pipeline_common.h"

struct config_data cdata = {
	.num_packets = (1L << 25), /* do ~32M packets */
	.num_fids = 512,
	.queue_type = RTE_SCHED_TYPE_ATOMIC,
	.next_qid = {-1},
	.qid = {-1},
	.num_stages = 1,
	.worker_cq_depth = 16
};

static bool
core_in_use(unsigned int lcore_id) {
	return (fdata->rx_core[lcore_id] || fdata->sched_core[lcore_id] ||
		fdata->tx_core[lcore_id] || fdata->worker_core[lcore_id]);
}

static void
eth_tx_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
			void *userdata)
{
	int port_id = (uintptr_t) userdata;
	unsigned int _sent = 0;

	do {
		/* Note: hard-coded TX queue */
		_sent += rte_eth_tx_burst(port_id, 0, &pkts[_sent],
					  unsent - _sent);
	} while (_sent != unsent);
}

/*
 * Parse the coremask given as argument (hexadecimal string) and fill
 * the global configuration (core role and core count) with the parsed
 * value.
 */
static int xdigit2val(unsigned char c)
{
	int val;

	if (isdigit(c))
		val = c - '0';
	else if (isupper(c))
		val = c - 'A' + 10;
	else
		val = c - 'a' + 10;
	return val;
}

static uint64_t
parse_coremask(const char *coremask)
{
	int i, j, idx = 0;
	unsigned int count = 0;
	char c;
	int val;
	uint64_t mask = 0;
	const int32_t BITS_HEX = 4;

	if (coremask == NULL)
		return -1;
	/* Remove all blank characters ahead and after.
	 * Remove 0x/0X if exists.
	 */
	while (isblank(*coremask))
		coremask++;
	if (coremask[0] == '0' && ((coremask[1] == 'x')
		|| (coremask[1] == 'X')))
		coremask += 2;
	i = strlen(coremask);
	while ((i > 0) && isblank(coremask[i - 1]))
		i--;
	if (i == 0)
		return -1;

	for (i = i - 1; i >= 0 && idx < MAX_NUM_CORE; i--) {
		c = coremask[i];
		if (isxdigit(c) == 0) {
			/* invalid characters */
			return -1;
		}
		val = xdigit2val(c);
		for (j = 0; j < BITS_HEX && idx < MAX_NUM_CORE; j++, idx++) {
			if ((1 << j) & val) {
				mask |= (1UL << idx);
				count++;
			}
		}
	}
	for (; i >= 0; i--)
		if (coremask[i] != '0')
			return -1;
	if (count == 0)
		return -1;
	return mask;
}

static struct option long_options[] = {
	{"workers", required_argument, 0, 'w'},
	{"packets", required_argument, 0, 'n'},
	{"atomic-flows", required_argument, 0, 'f'},
	{"num_stages", required_argument, 0, 's'},
	{"rx-mask", required_argument, 0, 'r'},
	{"tx-mask", required_argument, 0, 't'},
	{"sched-mask", required_argument, 0, 'e'},
	{"cq-depth", required_argument, 0, 'c'},
	{"work-cycles", required_argument, 0, 'W'},
	{"mempool-size", required_argument, 0, 'm'},
	{"queue-priority", no_argument, 0, 'P'},
	{"parallel", no_argument, 0, 'p'},
	{"ordered", no_argument, 0, 'o'},
	{"quiet", no_argument, 0, 'q'},
	{"use-atq", no_argument, 0, 'a'},
	{"dump", no_argument, 0, 'D'},
	{0, 0, 0, 0}
};

static void
usage(void)
{
	const char *usage_str =
		"  Usage: eventdev_demo [options]\n"
		"  Options:\n"
		"  -n, --packets=N              Send N packets (default ~32M), 0 implies no limit\n"
		"  -f, --atomic-flows=N         Use N random flows from 1 to N (default 512)\n"
		"  -s, --num_stages=N           Use N atomic stages (default 1)\n"
		"  -r, --rx-mask=core mask      Run NIC rx on CPUs in core mask\n"
		"  -w, --workers=core mask      Run worker on CPUs in core mask\n"
		"  -t, --tx-mask=core mask      Run NIC tx on CPUs in core mask\n"
		"  -e, --sched-mask=core mask   Run scheduler on CPUs in core mask\n"
		"  -c, --cq-depth=N             Worker CQ depth (default 16)\n"
		"  -W, --work-cycles=N          Worker cycles (default 0)\n"
		"  -P, --queue-priority         Enable scheduler queue prioritization\n"
		"  -o, --ordered                Use ordered scheduling\n"
		"  -p, --parallel               Use parallel scheduling\n"
		"  -q, --quiet                  Minimize printed output\n"
		"  -a, --use-atq                Use all type queues\n"
		"  -m, --mempool-size=N         Dictate the mempool size\n"
		"  -D, --dump                   Print detailed statistics before exit"
		"\n";
	fprintf(stderr, "%s", usage_str);
	exit(1);
}

static void
parse_app_args(int argc, char **argv)
{
	/* Parse cli options */
	int option_index;
	int c;
	opterr = 0;
	uint64_t rx_lcore_mask = 0;
	uint64_t tx_lcore_mask = 0;
	uint64_t sched_lcore_mask = 0;
	uint64_t worker_lcore_mask = 0;
	int i;

	for (;;) {
		c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:m:paoPqDW:",
				long_options, &option_index);
		if (c == -1)
			break;

		int popcnt = 0;
		switch (c) {
		case 'n':
			cdata.num_packets = (int64_t)atol(optarg);
			if (cdata.num_packets == 0)
				cdata.num_packets = INT64_MAX;
			break;
		case 'f':
			cdata.num_fids = (unsigned int)atoi(optarg);
			break;
		case 's':
			cdata.num_stages = (unsigned int)atoi(optarg);
			break;
		case 'c':
			cdata.worker_cq_depth = (unsigned int)atoi(optarg);
			break;
		case 'W':
			cdata.worker_cycles = (unsigned int)atoi(optarg);
			break;
		case 'P':
			cdata.enable_queue_priorities = 1;
			break;
		case 'o':
			cdata.queue_type = RTE_SCHED_TYPE_ORDERED;
			break;
		case 'p':
			cdata.queue_type = RTE_SCHED_TYPE_PARALLEL;
			break;
		case 'a':
			cdata.all_type_queues = 1;
			break;
		case 'q':
			cdata.quiet = 1;
			break;
		case 'D':
			cdata.dump_dev = 1;
			break;
		case 'w':
			worker_lcore_mask = parse_coremask(optarg);
			break;
		case 'r':
			rx_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(rx_lcore_mask);
			fdata->rx_single = (popcnt == 1);
			break;
		case 't':
			tx_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(tx_lcore_mask);
			fdata->tx_single = (popcnt == 1);
			break;
		case 'e':
			sched_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(sched_lcore_mask);
			fdata->sched_single = (popcnt == 1);
			break;
		case 'm':
			cdata.num_mbuf = (uint64_t)atol(optarg);
			break;
		default:
			usage();
		}
	}

	cdata.worker_lcore_mask = worker_lcore_mask;
	cdata.sched_lcore_mask = sched_lcore_mask;
	cdata.rx_lcore_mask = rx_lcore_mask;
	cdata.tx_lcore_mask = tx_lcore_mask;

	if (cdata.num_stages == 0 || cdata.num_stages > MAX_NUM_STAGES)
		usage();

	for (i = 0; i < MAX_NUM_CORE; i++) {
		fdata->rx_core[i] = !!(rx_lcore_mask & (1UL << i));
		fdata->tx_core[i] = !!(tx_lcore_mask & (1UL << i));
		fdata->sched_core[i] = !!(sched_lcore_mask & (1UL << i));
		fdata->worker_core[i] = !!(worker_lcore_mask & (1UL << i));

		if (fdata->worker_core[i])
			cdata.num_workers++;
		if (core_in_use(i))
			cdata.active_cores++;
	}
}

/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	static const struct rte_eth_conf port_conf_default = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = ETHER_MAX_LEN,
			.ignore_offload_bitfield = 1,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_hf = ETH_RSS_IP |
					  ETH_RSS_TCP |
					  ETH_RSS_UDP,
			}
		}
	};
	const uint16_t rx_rings = 1, tx_rings = 1;
	const uint16_t rx_ring_size = 512, tx_ring_size = 512;
	struct rte_eth_conf port_conf = port_conf_default;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	if (port >= rte_eth_dev_count())
		return -1;

	rte_eth_dev_info_get(port, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), NULL, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	txconf = dev_info.default_txconf;
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txconf.offloads = port_conf_default.txmode.offloads;
	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), &txconf);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address.
	 */
	struct ether_addr addr;
	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			(unsigned int)port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	rte_eth_promiscuous_enable(port);

	return 0;
}

static int
init_ports(unsigned int num_ports)
{
	uint8_t portid;
	unsigned int i;

	if (!cdata.num_mbuf)
		cdata.num_mbuf = 16384 * num_ports;

	struct rte_mempool *mp = rte_pktmbuf_pool_create("packet_pool",
			/* mbufs */ cdata.num_mbuf,
			/* cache_size */ 512,
			/* priv_size */ 0,
			/* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());

	for (portid = 0; portid < num_ports; portid++)
		if (port_init(portid, mp) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
					portid);

	for (i = 0; i < num_ports; i++) {
		void *userdata = (void *)(uintptr_t) i;
		fdata->tx_buf[i] =
			rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(32), 0);
		if (fdata->tx_buf[i] == NULL)
			rte_panic("Out of memory\n");
		rte_eth_tx_buffer_init(fdata->tx_buf[i], 32);
		rte_eth_tx_buffer_set_err_callback(fdata->tx_buf[i],
				eth_tx_buffer_retry,
				userdata);
	}

	return 0;
}

/* Pick the worker model from device capabilities: if any ethdev lacks
 * lock-free multi-thread TX, fall back to the generic workers that hand
 * packets to a dedicated TX core; otherwise workers transmit directly.
 */
static void
do_capability_setup(uint16_t nb_ethdev, uint8_t eventdev_id)
{
	int i;
	uint8_t mt_unsafe = 0;
	uint8_t burst = 0;

	for (i = 0; i < nb_ethdev; i++) {
		struct rte_eth_dev_info dev_info;
		memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));

		rte_eth_dev_info_get(i, &dev_info);
		/* Check if it is safe to ask the worker to tx.
		 */
		mt_unsafe |= !(dev_info.tx_offload_capa &
				DEV_TX_OFFLOAD_MT_LOCKFREE);
	}

	struct rte_event_dev_info eventdev_info;
	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));

	rte_event_dev_info_get(eventdev_id, &eventdev_info);
	burst = eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE ? 1 :
			0;

	if (mt_unsafe)
		set_worker_generic_setup_data(&fdata->cap, burst);
	else
		set_worker_tx_setup_data(&fdata->cap, burst);
}

/* SIGINT/SIGTERM request a clean shutdown (a second signal forces exit);
 * SIGTSTP dumps the eventdev state to stdout.
 */
static void
signal_handler(int signum)
{
	if (fdata->done)
		rte_exit(1, "Exiting on signal %d\n", signum);
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
				signum);
		fdata->done = 1;
	}
	if (signum == SIGTSTP)
		rte_event_dev_dump(0, stdout);
}

/* Read the "port_<p>_rx" xstat for an eventdev port. */
static inline uint64_t
port_stat(int dev_id, int32_t p)
{
	char statname[64];
	snprintf(statname, sizeof(statname), "port_%u_rx", p);
	return rte_event_dev_xstats_by_name_get(dev_id, statname, NULL);
}

int
main(int argc, char **argv)
{
	struct worker_data *worker_data;
	unsigned int num_ports;
	int lcore_id;
	int err;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGTSTP, signal_handler);

	err = rte_eal_init(argc, argv);
	if (err < 0)
		rte_panic("Invalid EAL arguments\n");

	argc -= err;
	argv += err;

	fdata = rte_malloc(NULL, sizeof(struct fastpath_data), 0);
	if (fdata == NULL)
		rte_panic("Out of memory\n");

	/* Parse cli options */
	parse_app_args(argc, argv);

	num_ports = rte_eth_dev_count();
	if (num_ports == 0)
		rte_panic("No ethernet ports found\n");

	const unsigned int cores_needed = cdata.active_cores;

	if (!cdata.quiet) {
		printf("  Config:\n");
		printf("\tports: %u\n", num_ports);
		printf("\tworkers: %u\n", cdata.num_workers);
		printf("\tpackets: %"PRIi64"\n", cdata.num_packets);
		printf("\tQueue-prio: %u\n", cdata.enable_queue_priorities);
		if (cdata.queue_type == RTE_SCHED_TYPE_ORDERED)
			printf("\tqid0 type: ordered\n");
		if (cdata.queue_type == RTE_SCHED_TYPE_ATOMIC)
			printf("\tqid0 type: atomic\n");
		printf("\tCores available: %u\n", rte_lcore_count());
		printf("\tCores used: %u\n", cores_needed);
	}

	if (rte_lcore_count() < cores_needed)
		rte_panic("Too few cores (%d < %d)\n", rte_lcore_count(),
				cores_needed);

	const unsigned int ndevs = rte_event_dev_count();
	if (ndevs == 0)
		rte_panic("No dev_id devs found. Pass in a --vdev eventdev.\n");
	if (ndevs > 1)
		fprintf(stderr, "Warning: More than one eventdev, using idx 0");

	do_capability_setup(num_ports, 0);
	fdata->cap.check_opt();

	worker_data = rte_calloc(0, cdata.num_workers,
			sizeof(worker_data[0]), 0);
	if (worker_data == NULL)
		rte_panic("rte_calloc failed\n");

	int dev_id = fdata->cap.evdev_setup(&cons_data, worker_data);
	if (dev_id < 0)
		rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");

	init_ports(num_ports);
	fdata->cap.adptr_setup(num_ports);

	int worker_idx = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (lcore_id >= MAX_NUM_CORE)
			break;

		if (!fdata->rx_core[lcore_id] &&
				!fdata->worker_core[lcore_id] &&
				!fdata->tx_core[lcore_id] &&
				!fdata->sched_core[lcore_id])
			continue;

		if (fdata->rx_core[lcore_id])
			printf(
				"[%s()] lcore %d executing NIC Rx\n",
				__func__, lcore_id);

		if (fdata->tx_core[lcore_id])
			printf(
				"[%s()] lcore %d executing NIC Tx, and using eventdev port %u\n",
				__func__, lcore_id, cons_data.port_id);

		if (fdata->sched_core[lcore_id])
			printf("[%s()] lcore %d executing scheduler\n",
					__func__, lcore_id);

		if (fdata->worker_core[lcore_id])
			printf(
				"[%s()] lcore %d executing worker, using eventdev port %u\n",
				__func__, lcore_id,
				worker_data[worker_idx].port_id);

		err = rte_eal_remote_launch(fdata->cap.worker,
				&worker_data[worker_idx], lcore_id);
		if (err) {
			rte_panic("Failed to launch worker on core %d\n",
					lcore_id);
			continue;
		}
		if (fdata->worker_core[lcore_id])
			worker_idx++;
	}

	lcore_id = rte_lcore_id();

	if (core_in_use(lcore_id))
		fdata->cap.worker(&worker_data[worker_idx++]);

	rte_eal_mp_wait_lcore();

	if (cdata.dump_dev)
		rte_event_dev_dump(dev_id, stdout);

	if (!cdata.quiet && (port_stat(dev_id, worker_data[0].port_id) !=
			(uint64_t)-ENOTSUP)) {
		printf("\nPort Workload distribution:\n");
		uint32_t i;
		uint64_t tot_pkts = 0;
		uint64_t pkts_per_wkr[RTE_MAX_LCORE] = {0};
		for (i = 0; i < cdata.num_workers; i++) {
			pkts_per_wkr[i] =
				port_stat(dev_id, worker_data[i].port_id);
			tot_pkts += pkts_per_wkr[i];
		}
		for (i = 0; i < cdata.num_workers; i++) {
			float pc = pkts_per_wkr[i] * 100 /
				((float)tot_pkts);
			printf("worker %i :\t%.1f %% (%"PRIu64" pkts)\n",
					i, pc, pkts_per_wkr[i]);
		}

	}

	return 0;
}