/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation
 */

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <signal.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_acl.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>
#include <rte_security.h>
#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_alarm.h>
#include <rte_telemetry.h>

#include "event_helper.h"
#include "flow.h"
#include "ipsec.h"
#include "ipsec_worker.h"
#include "parser.h"
#include "sad.h"

#if defined(__ARM_NEON)
#include "ipsec_lpm_neon.h"
#endif

volatile bool force_quit;

#define MAX_JUMBO_PKT_LEN 9600

#define MEMPOOL_CACHE_SIZE 256

#define CDEV_MAP_ENTRIES 16384
#define CDEV_MP_CACHE_SZ 64
#define CDEV_MP_CACHE_MULTIPLIER 1.5 /* from rte_mempool.c */
#define MAX_QUEUE_PAIRS 1

#define MAX_LCORE_PARAMS 1024

/*
 * Configurable number of RX/TX ring descriptors
 */
#define IPSEC_SECGW_RX_DESC_DEFAULT 1024
#define IPSEC_SECGW_TX_DESC_DEFAULT 1024
static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;

/*
 * Configurable number of descriptors per queue pair
 */
uint32_t qp_desc_nb = 2048;

#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
		(addr)->addr_bytes[0], (addr)->addr_bytes[1], \
		(addr)->addr_bytes[2], (addr)->addr_bytes[3], \
		(addr)->addr_bytes[4], (addr)->addr_bytes[5], \
		0, 0)

#define FRAG_TBL_BUCKET_ENTRIES 4
#define MAX_FRAG_TTL_NS (10LL * NS_PER_S)

#define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)

struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
	{ {{0}}, {{0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a}} },
	{ {{0}}, {{0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9}} },
	{ {{0}}, {{0x00, 0x16, 0x3e, 0x08, 0x69, 0x26}} },
	{ {{0}}, {{0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd}} }
};

struct offloads tx_offloads;

/*
 * To hold ethernet header per port, which will be applied
 * to outgoing packets.
 */
xmm_t val_eth[RTE_MAX_ETHPORTS];

struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS];

#define CMD_LINE_OPT_CONFIG "config"
#define CMD_LINE_OPT_SINGLE_SA "single-sa"
#define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask"
#define CMD_LINE_OPT_TRANSFER_MODE "transfer-mode"
#define CMD_LINE_OPT_SCHEDULE_TYPE "event-schedule-type"
#define CMD_LINE_OPT_RX_OFFLOAD "rxoffload"
#define CMD_LINE_OPT_TX_OFFLOAD "txoffload"
#define CMD_LINE_OPT_REASSEMBLE "reassemble"
#define CMD_LINE_OPT_MTU "mtu"
#define CMD_LINE_OPT_FRAG_TTL "frag-ttl"
#define CMD_LINE_OPT_EVENT_VECTOR "event-vector"
#define CMD_LINE_OPT_VECTOR_SIZE "vector-size"
#define CMD_LINE_OPT_VECTOR_TIMEOUT "vector-tmo"
#define CMD_LINE_OPT_VECTOR_POOL_SZ "vector-pool-sz"
#define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
#define CMD_LINE_OPT_QP_DESC_NB "desc-nb"

#define CMD_LINE_ARG_EVENT "event"
#define CMD_LINE_ARG_POLL "poll"
#define CMD_LINE_ARG_ORDERED "ordered"
#define CMD_LINE_ARG_ATOMIC "atomic"
#define CMD_LINE_ARG_PARALLEL "parallel"

enum {
	/* long options mapped to a short option */

	/* first long only option value must be >= 256, so that we won't
	 * conflict with short options
	 */
	CMD_LINE_OPT_MIN_NUM = 256,
	CMD_LINE_OPT_CONFIG_NUM,
	CMD_LINE_OPT_SINGLE_SA_NUM,
	CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
	CMD_LINE_OPT_TRANSFER_MODE_NUM,
	CMD_LINE_OPT_SCHEDULE_TYPE_NUM,
	CMD_LINE_OPT_RX_OFFLOAD_NUM,
	CMD_LINE_OPT_TX_OFFLOAD_NUM,
	CMD_LINE_OPT_REASSEMBLE_NUM,
	CMD_LINE_OPT_MTU_NUM,
	CMD_LINE_OPT_FRAG_TTL_NUM,
	CMD_LINE_OPT_EVENT_VECTOR_NUM,
	CMD_LINE_OPT_VECTOR_SIZE_NUM,
	CMD_LINE_OPT_VECTOR_TIMEOUT_NUM,
	CMD_LINE_OPT_VECTOR_POOL_SZ_NUM,
	CMD_LINE_OPT_PER_PORT_POOL_NUM,
	CMD_LINE_OPT_QP_DESC_NB_NUM,
};

static const struct option lgopts[] = {
	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
	{CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
	{CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
	{CMD_LINE_OPT_TRANSFER_MODE, 1, 0, CMD_LINE_OPT_TRANSFER_MODE_NUM},
	{CMD_LINE_OPT_SCHEDULE_TYPE, 1, 0, CMD_LINE_OPT_SCHEDULE_TYPE_NUM},
	{CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
	{CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
	{CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},
	{CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM},
	{CMD_LINE_OPT_FRAG_TTL, 1, 0, CMD_LINE_OPT_FRAG_TTL_NUM},
	{CMD_LINE_OPT_EVENT_VECTOR, 0, 0, CMD_LINE_OPT_EVENT_VECTOR_NUM},
	{CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
	{CMD_LINE_OPT_VECTOR_TIMEOUT, 1, 0, CMD_LINE_OPT_VECTOR_TIMEOUT_NUM},
	{CMD_LINE_OPT_VECTOR_POOL_SZ, 1, 0, CMD_LINE_OPT_VECTOR_POOL_SZ_NUM},
	{CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PER_PORT_POOL_NUM},
	{CMD_LINE_OPT_QP_DESC_NB, 1, 0, CMD_LINE_OPT_QP_DESC_NB_NUM},
	{NULL, 0, 0, 0}
};

uint32_t unprotected_port_mask;
uint32_t single_sa_idx;
/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint64_t enabled_cryptodev_mask = UINT64_MAX;
static int32_t promiscuous_on;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
uint32_t single_sa;
uint32_t nb_bufs_in_pool;

/*
 * RX/TX HW offload capabilities to enable/use on ethernet ports.
 * By default all capabilities are enabled.
 */
static uint64_t dev_rx_offload = UINT64_MAX;
static uint64_t dev_tx_offload = UINT64_MAX;

/*
 * global values that determine multi-seg policy
 */
uint32_t frag_tbl_sz;
static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
uint32_t mtu_size = RTE_ETHER_MTU;
static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
static uint32_t stats_interval;

/* application wide librte_ipsec/SA parameters */
struct app_sa_prm app_sa_prm = {
	.enable = 0,
	.cache_sz = SA_CACHE_SZ,
	.udp_encap = 0
};
static const char *cfgfile;

struct __rte_cache_aligned lcore_params {
	uint16_t port_id;
	uint16_t queue_id;
	uint32_t lcore_id;
};

static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];

static struct lcore_params *lcore_params;
static uint16_t nb_lcore_params;

static struct rte_hash *cdev_map_in;
static struct rte_hash *cdev_map_out;

struct lcore_conf lcore_conf[RTE_MAX_LCORE];

static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
		},
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
	},
};

struct socket_ctx socket_ctx[NB_SOCKETS];

bool per_port_pool;

uint16_t wrkr_flags;
/*
 * Determine if multi-segment support is required:
 *  - either frame buffer size is smaller than mtu
 *  - or reassemble support is requested
 */
static int
multi_seg_required(void)
{
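	/*
	 * Worked example (assuming the default RTE_PKTMBUF_HEADROOM of 128
	 * bytes and frame_buf_size left at RTE_MBUF_DEFAULT_BUF_SIZE):
	 * with mtu_size = 1500, MTU_TO_FRAMELEN(1500) = 1500 + 14 + 4 = 1518,
	 * which together with the headroom still fits in a single default
	 * mbuf, so multi-segment mbufs are only needed for larger MTUs or
	 * when a reassembly table is configured.
	 */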
	return (MTU_TO_FRAMELEN(mtu_size) + RTE_PKTMBUF_HEADROOM >
		frame_buf_size || frag_tbl_sz != 0);
}


struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];

/* Print out statistics on packet distribution */
static void
print_stats_cb(__rte_unused void *param)
{
	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
	uint64_t total_frag_packets_dropped = 0;
	float burst_percent, rx_per_call, tx_per_call;
	unsigned int coreid;

	total_packets_dropped = 0;
	total_packets_tx = 0;
	total_packets_rx = 0;

	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, topLeft);

	printf("\nCore statistics ====================================");

	for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
		/* skip disabled cores */
		if (rte_lcore_is_enabled(coreid) == 0)
			continue;
		burst_percent = (float)(core_statistics[coreid].burst_rx * 100)/
					core_statistics[coreid].rx;
		rx_per_call = (float)(core_statistics[coreid].rx)/
					core_statistics[coreid].rx_call;
		tx_per_call = (float)(core_statistics[coreid].tx)/
					core_statistics[coreid].tx_call;
		printf("\nStatistics for core %u ------------------------------"
			"\nPackets received: %20"PRIu64
			"\nPackets sent: %24"PRIu64
			"\nPackets dropped: %21"PRIu64
			"\nFrag Packets dropped: %16"PRIu64
			"\nBurst percent: %23.2f"
			"\nPackets per Rx call: %17.2f"
			"\nPackets per Tx call: %17.2f",
			coreid,
			core_statistics[coreid].rx,
			core_statistics[coreid].tx,
			core_statistics[coreid].dropped,
			core_statistics[coreid].frag_dropped,
			burst_percent,
			rx_per_call,
			tx_per_call);

		total_packets_dropped += core_statistics[coreid].dropped;
		total_frag_packets_dropped += core_statistics[coreid].frag_dropped;
		total_packets_tx += core_statistics[coreid].tx;
		total_packets_rx += core_statistics[coreid].rx;
	}
	printf("\nAggregate statistics ==============================="
		"\nTotal packets received: %14"PRIu64
		"\nTotal packets sent: %18"PRIu64
		"\nTotal packets dropped: %15"PRIu64
		"\nTotal frag packets dropped: %10"PRIu64,
		total_packets_rx,
		total_packets_tx,
		total_packets_dropped,
		total_frag_packets_dropped);
	printf("\n====================================================\n");

	rte_eal_alarm_set(stats_interval * US_PER_S, print_stats_cb, NULL);
}

static void
split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
{
	uint32_t i, n4, n6;
	struct ip *ip;
	struct rte_mbuf *m;

	n4 = trf->ip4.num;
	n6 = trf->ip6.num;

	for (i = 0; i < num; i++) {

		m = mb[i];
		ip = rte_pktmbuf_mtod(m, struct ip *);

		if (ip->ip_v == IPVERSION) {
			trf->ip4.pkts[n4] = m;
			trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
					uint8_t *, offsetof(struct ip, ip_p));
			n4++;
		} else if (ip->ip_v == IP6_VERSION) {
			trf->ip6.pkts[n6] = m;
			trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
					uint8_t *,
					offsetof(struct ip6_hdr, ip6_nxt));
			n6++;
		} else
			free_pkts(&m, 1);
	}

	trf->ip4.num = n4;
	trf->ip6.num = n6;
}


static inline void
process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	unsigned int lcoreid = rte_lcore_id();
	uint16_t nb_pkts_in, n_ip4, n_ip6;

	n_ip4 = traffic->ip4.num;
	n_ip6 = traffic->ip6.num;

	if (app_sa_prm.enable == 0) {
		nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.num, MAX_PKT_BURST);
		split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
	} else {
		inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
			traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
	}

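	/*
	 * Inbound SP check for the IPv4/IPv6 lists, which at this point hold
	 * both the packets that arrived in clear and the packets decrypted
	 * above; per-lcore SPD statistics are updated accordingly.
	 */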
	inbound_sp_sa(ipsec_ctx->sp4_ctx,
		ipsec_ctx->sa_ctx, &traffic->ip4, n_ip4,
		&core_statistics[lcoreid].inbound.spd4);

	inbound_sp_sa(ipsec_ctx->sp6_ctx,
		ipsec_ctx->sa_ctx, &traffic->ip6, n_ip6,
		&core_statistics[lcoreid].inbound.spd6);
}

static inline void
outbound_spd_lookup(struct sp_ctx *sp,
		struct traffic_type *ip,
		struct traffic_type *ipsec,
		struct ipsec_spd_stats *stats)
{
	struct rte_mbuf *m;
	uint32_t i, j, sa_idx;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	for (i = 0, j = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		sa_idx = ip->res[i] - 1;

		if (unlikely(ip->res[i] == DISCARD)) {
			free_pkts(&m, 1);

			stats->discard++;
		} else if (unlikely(ip->res[i] == BYPASS)) {
			ip->pkts[j++] = m;

			stats->bypass++;
		} else {
			ipsec->res[ipsec->num] = sa_idx;
			ipsec->pkts[ipsec->num++] = m;

			stats->protect++;
		}
	}
	ip->num = j;
}

static inline void
process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint16_t idx, nb_pkts_out, i;
	unsigned int lcoreid = rte_lcore_id();

	/* Drop any IPsec traffic from protected ports */
	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);

	traffic->ipsec.num = 0;

	outbound_spd_lookup(ipsec_ctx->sp4_ctx,
		&traffic->ip4, &traffic->ipsec,
		&core_statistics[lcoreid].outbound.spd4);

	outbound_spd_lookup(ipsec_ctx->sp6_ctx,
		&traffic->ip6, &traffic->ipsec,
		&core_statistics[lcoreid].outbound.spd6);

	if (app_sa_prm.enable == 0) {
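		/*
		 * Legacy (non librte_ipsec) code path: ipsec_outbound()
		 * pushes the packets through the crypto devices and the
		 * returned packets are re-binned by IP version below.
		 */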

		nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.res, traffic->ipsec.num,
				MAX_PKT_BURST);

		for (i = 0; i < nb_pkts_out; i++) {
			m = traffic->ipsec.pkts[i];
			struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
			if (ip->ip_v == IPVERSION) {
				idx = traffic->ip4.num++;
				traffic->ip4.pkts[idx] = m;
			} else {
				idx = traffic->ip6.num++;
				traffic->ip6.pkts[idx] = m;
			}
		}
	} else {
		outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
			traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
	}
}

static inline void
process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint32_t nb_pkts_in, i, idx;

	if (app_sa_prm.enable == 0) {

		nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.num, MAX_PKT_BURST);

		for (i = 0; i < nb_pkts_in; i++) {
			m = traffic->ipsec.pkts[i];
			struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
			if (ip->ip_v == IPVERSION) {
				idx = traffic->ip4.num++;
				traffic->ip4.pkts[idx] = m;
			} else {
				idx = traffic->ip6.num++;
				traffic->ip6.pkts[idx] = m;
			}
		}
	} else {
		inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
			traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
	}
}

static inline void
process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint32_t nb_pkts_out, i, n;
	struct ip *ip;

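	/*
	 * Single-SA (--single-sa) outbound submode: every IPv4/IPv6 packet
	 * is assigned the same SA index (single_sa_idx), so no SP lookup
	 * is performed.
	 */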
	/* Drop any IPsec traffic from protected ports */
	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);

	n = 0;

	for (i = 0; i < traffic->ip4.num; i++) {
		traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
		traffic->ipsec.res[n++] = single_sa_idx;
	}

	for (i = 0; i < traffic->ip6.num; i++) {
		traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
		traffic->ipsec.res[n++] = single_sa_idx;
	}

	traffic->ip4.num = 0;
	traffic->ip6.num = 0;
	traffic->ipsec.num = n;

	if (app_sa_prm.enable == 0) {

		nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.res, traffic->ipsec.num,
				MAX_PKT_BURST);

		/* They all use the same SA (ip4 or ip6 tunnel) */
		m = traffic->ipsec.pkts[0];
		ip = rte_pktmbuf_mtod(m, struct ip *);
		if (ip->ip_v == IPVERSION) {
			traffic->ip4.num = nb_pkts_out;
			for (i = 0; i < nb_pkts_out; i++)
				traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
		} else {
			traffic->ip6.num = nb_pkts_out;
			for (i = 0; i < nb_pkts_out; i++)
				traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
		}
	} else {
		outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
			traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
	}
}

static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
		uint16_t nb_pkts, uint16_t portid, void *ctx)
{
	struct ipsec_traffic traffic;

	prepare_traffic(ctx, pkts, &traffic, nb_pkts);

	if (unlikely(single_sa)) {
		if (is_unprotected_port(portid))
			process_pkts_inbound_nosp(&qconf->inbound, &traffic);
		else
			process_pkts_outbound_nosp(&qconf->outbound, &traffic);
	} else {
		if (is_unprotected_port(portid))
			process_pkts_inbound(&qconf->inbound, &traffic);
		else
			process_pkts_outbound(&qconf->outbound, &traffic);
	}

#if defined __ARM_NEON
	/* Neon optimized packet routing */
	route4_pkts_neon(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num,
			qconf->outbound.ipv4_offloads, true);
	route6_pkts_neon(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
#else
	route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num,
			qconf->outbound.ipv4_offloads, true);
	route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
#endif
}

static inline void
drain_crypto_buffers(struct lcore_conf *qconf)
{
	uint32_t i;
	struct ipsec_ctx *ctx;

	/* drain inbound buffers */
	ctx = &qconf->inbound;
	for (i = 0; i != ctx->nb_qps; i++) {
		if (ctx->tbl[i].len != 0)
			enqueue_cop_burst(ctx->tbl + i);
	}

	/* drain outbound buffers */
	ctx = &qconf->outbound;
	for (i = 0; i != ctx->nb_qps; i++) {
		if (ctx->tbl[i].len != 0)
			enqueue_cop_burst(ctx->tbl + i);
	}
}

static void
drain_inbound_crypto_queues(const struct lcore_conf *qconf,
		struct ipsec_ctx *ctx)
{
	uint32_t n;
	struct ipsec_traffic trf;
	unsigned int lcoreid = rte_lcore_id();
	const int nb_pkts = RTE_DIM(trf.ipsec.pkts);

	if (app_sa_prm.enable == 0) {

		/* dequeue packets from crypto-queue */
		n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
			RTE_MIN(MAX_PKT_BURST, nb_pkts));

		trf.ip4.num = 0;
		trf.ip6.num = 0;

		/* split traffic by ipv4-ipv6 */
		split46_traffic(&trf, trf.ipsec.pkts, n);
	} else
		ipsec_cqp_process(ctx, &trf);

	/* process ipv4 packets */
	if (trf.ip4.num != 0) {
		inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0,
			&core_statistics[lcoreid].inbound.spd4);
		route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
			qconf->outbound.ipv4_offloads, true);
	}

	/* process ipv6 packets */
	if (trf.ip6.num != 0) {
		inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0,
			&core_statistics[lcoreid].inbound.spd6);
		route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
	}
}

static void
drain_outbound_crypto_queues(const struct lcore_conf *qconf,
		struct ipsec_ctx *ctx)
{
	uint32_t n;
	struct ipsec_traffic trf;
	const int nb_pkts = RTE_DIM(trf.ipsec.pkts);

	if (app_sa_prm.enable == 0) {

		/* dequeue packets from crypto-queue */
		n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
			RTE_MIN(MAX_PKT_BURST, nb_pkts));

		trf.ip4.num = 0;
		trf.ip6.num = 0;

		/* split traffic by ipv4-ipv6 */
		split46_traffic(&trf, trf.ipsec.pkts, n);
	} else
		ipsec_cqp_process(ctx, &trf);

	/* process ipv4 packets */
	if (trf.ip4.num != 0)
		route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
			qconf->outbound.ipv4_offloads, true);

	/* process ipv6 packets */
	if (trf.ip6.num != 0)
		route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
}

/* main processing loop */
void
ipsec_poll_mode_worker(void)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint32_t lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	uint16_t i, nb_rx, portid, queueid;
	struct lcore_conf *qconf;
	int32_t rc, socket_id;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
	struct lcore_rx_queue *rxql;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);

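	/*
	 * Bind the per-socket routing, SP and SA contexts (plus the crypto
	 * device maps and the indirect mbuf pool used for fragmentation) to
	 * this lcore's inbound/outbound processing contexts.
	 */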
	qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
	qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
	qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	qconf->inbound.cdev_map = cdev_map_in;
	qconf->inbound.lcore_id = lcore_id;
	qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	qconf->outbound.cdev_map = cdev_map_out;
	qconf->outbound.lcore_id = lcore_id;
	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;

	rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
	if (rc != 0) {
		RTE_LOG(ERR, IPSEC,
			"SAD cache init on lcore %u, failed with code: %d\n",
			lcore_id, rc);
		return;
	}

	if (qconf->nb_rx_queue == 0) {
		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
			lcore_id);
		return;
	}

	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->nb_rx_queue; i++) {
		portid = rxql[i].port_id;
		queueid = rxql[i].queue_id;
		RTE_LOG(INFO, IPSEC,
			" -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
			lcore_id, portid, queueid);
	}

	while (!force_quit) {
		cur_tsc = rte_rdtsc();

		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;

		if (unlikely(diff_tsc > drain_tsc)) {
			drain_tx_buffers(qconf);
			drain_crypto_buffers(qconf);
			prev_tsc = cur_tsc;
		}

		for (i = 0; i < qconf->nb_rx_queue; ++i) {

			/* Read packets from RX queues */
			portid = rxql[i].port_id;
			queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
					pkts, MAX_PKT_BURST);

			if (nb_rx > 0) {
				core_stats_update_rx(nb_rx);
				process_pkts(qconf, pkts, nb_rx, portid,
						rxql->sec_ctx);
			}

			/* dequeue and process completed crypto-ops */
			if (is_unprotected_port(portid))
				drain_inbound_crypto_queues(qconf,
					&qconf->inbound);
			else
				drain_outbound_crypto_queues(qconf,
					&qconf->outbound);
		}
	}
}

int
check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
{
	uint16_t i;
	uint16_t portid, queueid;

	for (i = 0; i < nb_lcore_params; ++i) {
		portid = lcore_params_array[i].port_id;
		if (portid == fdir_portid) {
			queueid = lcore_params_array[i].queue_id;
			if (queueid == fdir_qid)
				break;
		}

		if (i == nb_lcore_params - 1)
			return -1;
	}

	return 1;
}

static int32_t
check_poll_mode_params(struct eh_conf *eh_conf)
{
	uint32_t lcore;
	uint16_t portid;
	uint16_t i;
	int32_t socket_id;

	if (!eh_conf)
		return -EINVAL;

	if (eh_conf->mode != EH_PKT_TRANSFER_MODE_POLL)
		return 0;

	if (lcore_params == NULL) {
		printf("Error: No port/queue/core mappings\n");
		return -1;
	}

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		if (!rte_lcore_is_enabled(lcore)) {
			printf("error: lcore %u is not enabled in "
				"lcore mask\n", lcore);
			return -1;
		}
		socket_id = rte_lcore_to_socket_id(lcore);
		if (socket_id != 0 && numa_on == 0) {
			printf("warning: lcore %u is on socket %d "
				"with numa off\n",
				lcore, socket_id);
		}
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (!rte_eth_dev_is_valid_port(portid)) {
			printf("port %u is not present on the board\n", portid);
			return -1;
		}
	}
	return 0;
}

static uint16_t
get_port_nb_rx_queues(const uint16_t port)
{
	int32_t queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port &&
				lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint16_t)(++queue);
}

static int32_t
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint32_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
					nb_rx_queue + 1, lcore);
			return -1;
		}
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].nb_rx_queue++;
	}
	return 0;
}

/* display usage */
static void
print_usage(const char *prgname)
{
	fprintf(stderr, "%s [EAL options] --"
		" -p PORTMASK"
		" [-P]"
		" [-u PORTMASK]"
		" [-j FRAMESIZE]"
		" [-l]"
		" [-w REPLAY_WINDOW_SIZE]"
		" [-e]"
		" [-a]"
		" [-c]"
		" [-t STATS_INTERVAL]"
		" [-s NUMBER_OF_MBUFS_IN_PKT_POOL]"
		" -f CONFIG_FILE"
		" --config (port,queue,lcore)[,(port,queue,lcore)]"
		" [--single-sa SAIDX]"
		" [--cryptodev_mask MASK]"
		" [--transfer-mode MODE]"
		" [--event-schedule-type TYPE]"
		" [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
		" [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
		" [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]"
		" [--" CMD_LINE_OPT_MTU " MTU]"
		" [--event-vector]"
		" [--vector-size SIZE]"
		" [--vector-tmo TIMEOUT in ns]"
		" [--" CMD_LINE_OPT_QP_DESC_NB " NUMBER_OF_DESC]"
		"\n\n"
		" -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
		" -P : Enable promiscuous mode\n"
		" -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
		" -j FRAMESIZE: Data buffer size, minimum (and default)\n"
		"    value: RTE_MBUF_DEFAULT_BUF_SIZE\n"
		" -l enables code-path that uses librte_ipsec\n"
		" -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\n"
		"    size for each SA\n"
		" -e enables ESN\n"
		" -a enables SA SQN atomic behaviour\n"
		" -c specifies inbound SAD cache size,\n"
		"    zero value disables the cache (default value: 128)\n"
		" -t specifies statistics screen update interval,\n"
		"    zero disables statistics screen (default value: 0)\n"
		" -s number of mbufs in packet pool, if not specified number\n"
		"    of mbufs will be calculated based on number of cores,\n"
		"    ports and crypto queues\n"
		" -f CONFIG_FILE: Configuration file\n"
		" --config (port,queue,lcore): Rx queue configuration. In poll\n"
		"   mode determines which queues from\n"
		"   which ports are mapped to which cores.\n"
		"   In event mode this option is not used\n"
		"   as packets are dynamically scheduled\n"
		"   to cores by HW.\n"
		" --single-sa SAIDX: In poll mode use single SA index for\n"
		"   outbound traffic, bypassing the SP\n"
		"   In event mode selects driver submode,\n"
		"   SA index value is ignored\n"
		" --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
		"   devices to configure\n"
		" --transfer-mode MODE\n"
		"   \"poll\"  : Packet transfer via polling (default)\n"
		"   \"event\" : Packet transfer via event device\n"
		" --event-schedule-type TYPE queue schedule type, used only when\n"
		"   transfer mode is set to event\n"
		"   \"ordered\"  : Ordered (default)\n"
		"   \"atomic\"   : Atomic\n"
		"   \"parallel\" : Parallel\n"
		" --" CMD_LINE_OPT_RX_OFFLOAD
		": bitmask of the RX HW offload capabilities to enable/use\n"
		"   (RTE_ETH_RX_OFFLOAD_*)\n"
		" --" CMD_LINE_OPT_TX_OFFLOAD
		": bitmask of the TX HW offload capabilities to enable/use\n"
		"   (RTE_ETH_TX_OFFLOAD_*)\n"
		" --" CMD_LINE_OPT_REASSEMBLE " NUM"
		": max number of entries in reassemble(fragment) table\n"
		"   (zero (default value) disables reassembly)\n"
		" --" CMD_LINE_OPT_MTU " MTU"
		": MTU value on all ports (default value: 1500)\n"
		"   outgoing packets with bigger size will be fragmented\n"
		"   incoming packets with bigger size will be discarded\n"
		" --" CMD_LINE_OPT_FRAG_TTL " FRAG_TTL_NS"
		": fragments lifetime in nanoseconds, default\n"
		"   and maximum value is 10.000.000.000 ns (10 s)\n"
		" --event-vector enables event vectorization\n"
		" --vector-size Max vector size (default value: 16)\n"
		" --vector-tmo Max vector timeout in nanoseconds"
		"   (default value: 102400)\n"
		" --" CMD_LINE_OPT_PER_PORT_POOL " Enable per port mbuf pool\n"
		" --" CMD_LINE_OPT_VECTOR_POOL_SZ " Vector pool size\n"
		"   (default value is based on mbuf count)\n"
		" --" CMD_LINE_OPT_QP_DESC_NB " DESC_NB"
		": Number of descriptors per queue pair (default value: 2048)\n"
		"\n",
		prgname);
}

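/*
 * Illustrative invocation (the port mask, lcore ids and configuration file
 * below are hypothetical, not defaults):
 *
 *   ./dpdk-ipsec-secgw -l 20,21 -n 4 -- -p 0x3 -P -u 0x1 \
 *           -f /path/to/ep0.cfg --config="(0,0,20),(1,0,21)"
 *
 * i.e. ports 0 and 1 are enabled, port 0 is unprotected, and RX queue 0 of
 * each port is polled by lcores 20 and 21 respectively.
 */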
static int
parse_mask(const char *str, uint64_t *val)
{
	char *end;
	unsigned long t;

	errno = 0;
	t = strtoul(str, &end, 0);
	if (errno != 0 || end[0] != 0)
		return -EINVAL;

	*val = t;
	return 0;
}

static int32_t
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if ((pm == 0) && errno)
		return -1;

	return pm;
}

static int64_t
parse_decimal(const char *str)
{
	char *end = NULL;
	uint64_t num;

	num = strtoull(str, &end, 10);
	if ((str[0] == '\0') || (end == NULL) || (*end != '\0')
		|| num > INT64_MAX)
		return -1;

	return num;
}

static int32_t
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p0 = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int32_t i;
	uint32_t size;
	uint32_t max_fld[_NUM_FLD] = {
		RTE_MAX_ETHPORTS,
		RTE_MAX_QUEUES_PER_PORT,
		RTE_MAX_LCORE
	};

	nb_lcore_params = 0;

	while ((p = strchr(p0, '(')) != NULL) {
		++p;
		p0 = strchr(p, ')');
		if (p0 == NULL)
1065d299106eSSergio Gonzalez Monroy return -1; 1066d299106eSSergio Gonzalez Monroy 1067d299106eSSergio Gonzalez Monroy size = p0 - p; 1068d299106eSSergio Gonzalez Monroy if (size >= sizeof(s)) 1069d299106eSSergio Gonzalez Monroy return -1; 1070d299106eSSergio Gonzalez Monroy 1071d299106eSSergio Gonzalez Monroy snprintf(s, sizeof(s), "%.*s", size, p); 1072d299106eSSergio Gonzalez Monroy if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != 1073d299106eSSergio Gonzalez Monroy _NUM_FLD) 1074d299106eSSergio Gonzalez Monroy return -1; 1075d299106eSSergio Gonzalez Monroy for (i = 0; i < _NUM_FLD; i++) { 1076d299106eSSergio Gonzalez Monroy errno = 0; 1077d299106eSSergio Gonzalez Monroy int_fld[i] = strtoul(str_fld[i], &end, 0); 1078b23c5bd7SSivaprasad Tummala if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i]) 1079d299106eSSergio Gonzalez Monroy return -1; 1080d299106eSSergio Gonzalez Monroy } 1081d299106eSSergio Gonzalez Monroy if (nb_lcore_params >= MAX_LCORE_PARAMS) { 1082d299106eSSergio Gonzalez Monroy printf("exceeded max number of lcore params: %hu\n", 1083d299106eSSergio Gonzalez Monroy nb_lcore_params); 1084d299106eSSergio Gonzalez Monroy return -1; 1085d299106eSSergio Gonzalez Monroy } 1086d299106eSSergio Gonzalez Monroy lcore_params_array[nb_lcore_params].port_id = 1087548de909SSivaprasad Tummala (uint16_t)int_fld[FLD_PORT]; 1088d299106eSSergio Gonzalez Monroy lcore_params_array[nb_lcore_params].queue_id = 1089b23c5bd7SSivaprasad Tummala (uint16_t)int_fld[FLD_QUEUE]; 1090d299106eSSergio Gonzalez Monroy lcore_params_array[nb_lcore_params].lcore_id = 10914b978938SSivaprasad Tummala (uint32_t)int_fld[FLD_LCORE]; 1092d299106eSSergio Gonzalez Monroy ++nb_lcore_params; 1093d299106eSSergio Gonzalez Monroy } 1094d299106eSSergio Gonzalez Monroy lcore_params = lcore_params_array; 1095d299106eSSergio Gonzalez Monroy return 0; 1096d299106eSSergio Gonzalez Monroy } 1097d299106eSSergio Gonzalez Monroy 10985a032a71SKonstantin Ananyev static void 10995a032a71SKonstantin Ananyev print_app_sa_prm(const struct app_sa_prm *prm) 11005a032a71SKonstantin Ananyev { 11015a032a71SKonstantin Ananyev printf("librte_ipsec usage: %s\n", 11025a032a71SKonstantin Ananyev (prm->enable == 0) ? "disabled" : "enabled"); 11035a032a71SKonstantin Ananyev 11045a032a71SKonstantin Ananyev printf("replay window size: %u\n", prm->window_size); 11055a032a71SKonstantin Ananyev printf("ESN: %s\n", (prm->enable_esn == 0) ? 
"disabled" : "enabled"); 11065a032a71SKonstantin Ananyev printf("SA flags: %#" PRIx64 "\n", prm->flags); 1107bba1db35SMarcin Smoczynski printf("Frag TTL: %" PRIu64 " ns\n", frag_ttl_ns); 11085a032a71SKonstantin Ananyev } 11095a032a71SKonstantin Ananyev 111065e3a202SLukasz Bartosik static int 111165e3a202SLukasz Bartosik parse_transfer_mode(struct eh_conf *conf, const char *optarg) 111265e3a202SLukasz Bartosik { 111365e3a202SLukasz Bartosik if (!strcmp(CMD_LINE_ARG_POLL, optarg)) 111465e3a202SLukasz Bartosik conf->mode = EH_PKT_TRANSFER_MODE_POLL; 111565e3a202SLukasz Bartosik else if (!strcmp(CMD_LINE_ARG_EVENT, optarg)) 111665e3a202SLukasz Bartosik conf->mode = EH_PKT_TRANSFER_MODE_EVENT; 111765e3a202SLukasz Bartosik else { 111865e3a202SLukasz Bartosik printf("Unsupported packet transfer mode\n"); 111965e3a202SLukasz Bartosik return -EINVAL; 112065e3a202SLukasz Bartosik } 112165e3a202SLukasz Bartosik 112265e3a202SLukasz Bartosik return 0; 112365e3a202SLukasz Bartosik } 112465e3a202SLukasz Bartosik 112565e3a202SLukasz Bartosik static int 112665e3a202SLukasz Bartosik parse_schedule_type(struct eh_conf *conf, const char *optarg) 112765e3a202SLukasz Bartosik { 112865e3a202SLukasz Bartosik struct eventmode_conf *em_conf = NULL; 112965e3a202SLukasz Bartosik 113065e3a202SLukasz Bartosik /* Get eventmode conf */ 113165e3a202SLukasz Bartosik em_conf = conf->mode_params; 113265e3a202SLukasz Bartosik 113365e3a202SLukasz Bartosik if (!strcmp(CMD_LINE_ARG_ORDERED, optarg)) 113465e3a202SLukasz Bartosik em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED; 113565e3a202SLukasz Bartosik else if (!strcmp(CMD_LINE_ARG_ATOMIC, optarg)) 113665e3a202SLukasz Bartosik em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ATOMIC; 113765e3a202SLukasz Bartosik else if (!strcmp(CMD_LINE_ARG_PARALLEL, optarg)) 113865e3a202SLukasz Bartosik em_conf->ext_params.sched_type = RTE_SCHED_TYPE_PARALLEL; 113965e3a202SLukasz Bartosik else { 114065e3a202SLukasz Bartosik printf("Unsupported queue schedule type\n"); 114165e3a202SLukasz Bartosik return -EINVAL; 114265e3a202SLukasz Bartosik } 114365e3a202SLukasz Bartosik 114465e3a202SLukasz Bartosik return 0; 114565e3a202SLukasz Bartosik } 114665e3a202SLukasz Bartosik 1147d299106eSSergio Gonzalez Monroy static int32_t 114865e3a202SLukasz Bartosik parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf) 1149d299106eSSergio Gonzalez Monroy { 1150bba1db35SMarcin Smoczynski int opt; 1151bba1db35SMarcin Smoczynski int64_t ret; 1152d299106eSSergio Gonzalez Monroy char **argvopt; 1153d299106eSSergio Gonzalez Monroy int32_t option_index; 1154d299106eSSergio Gonzalez Monroy char *prgname = argv[0]; 11550d547ed0SFan Zhang int32_t f_present = 0; 115686738ebeSSrujana Challa struct eventmode_conf *em_conf = NULL; 1157d299106eSSergio Gonzalez Monroy 1158d299106eSSergio Gonzalez Monroy argvopt = argv; 1159d299106eSSergio Gonzalez Monroy 116028008936SRadu Nicolau while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:t:s:", 1161d299106eSSergio Gonzalez Monroy lgopts, &option_index)) != EOF) { 1162d299106eSSergio Gonzalez Monroy 1163d299106eSSergio Gonzalez Monroy switch (opt) { 1164d299106eSSergio Gonzalez Monroy case 'p': 1165d299106eSSergio Gonzalez Monroy enabled_port_mask = parse_portmask(optarg); 1166d299106eSSergio Gonzalez Monroy if (enabled_port_mask == 0) { 1167d299106eSSergio Gonzalez Monroy printf("invalid portmask\n"); 1168d299106eSSergio Gonzalez Monroy print_usage(prgname); 1169d299106eSSergio Gonzalez Monroy return -1; 1170d299106eSSergio Gonzalez Monroy } 1171d299106eSSergio 
Gonzalez Monroy break; 1172d299106eSSergio Gonzalez Monroy case 'P': 1173d299106eSSergio Gonzalez Monroy printf("Promiscuous mode selected\n"); 1174d299106eSSergio Gonzalez Monroy promiscuous_on = 1; 1175d299106eSSergio Gonzalez Monroy break; 1176d299106eSSergio Gonzalez Monroy case 'u': 1177d299106eSSergio Gonzalez Monroy unprotected_port_mask = parse_portmask(optarg); 1178d299106eSSergio Gonzalez Monroy if (unprotected_port_mask == 0) { 1179d299106eSSergio Gonzalez Monroy printf("invalid unprotected portmask\n"); 1180d299106eSSergio Gonzalez Monroy print_usage(prgname); 1181d299106eSSergio Gonzalez Monroy return -1; 1182d299106eSSergio Gonzalez Monroy } 1183d299106eSSergio Gonzalez Monroy break; 11840d547ed0SFan Zhang case 'f': 11850d547ed0SFan Zhang if (f_present == 1) { 11860d547ed0SFan Zhang printf("\"-f\" option present more than " 11870d547ed0SFan Zhang "once!\n"); 11880d547ed0SFan Zhang print_usage(prgname); 11890d547ed0SFan Zhang return -1; 11900d547ed0SFan Zhang } 1191ba66534fSMarcin Smoczynski cfgfile = optarg; 11920d547ed0SFan Zhang f_present = 1; 11930d547ed0SFan Zhang break; 11947338a34eSLukasz Bartosik 11957338a34eSLukasz Bartosik case 's': 11967338a34eSLukasz Bartosik ret = parse_decimal(optarg); 11977338a34eSLukasz Bartosik if (ret < 0) { 11987338a34eSLukasz Bartosik printf("Invalid number of buffers in a pool: " 11997338a34eSLukasz Bartosik "%s\n", optarg); 12007338a34eSLukasz Bartosik print_usage(prgname); 12017338a34eSLukasz Bartosik return -1; 12027338a34eSLukasz Bartosik } 12037338a34eSLukasz Bartosik 12047338a34eSLukasz Bartosik nb_bufs_in_pool = ret; 12057338a34eSLukasz Bartosik break; 12067338a34eSLukasz Bartosik 1207bbabfe6eSRadu Nicolau case 'j': 1208b01d1cd2SKonstantin Ananyev ret = parse_decimal(optarg); 1209b01d1cd2SKonstantin Ananyev if (ret < RTE_MBUF_DEFAULT_BUF_SIZE || 1210b01d1cd2SKonstantin Ananyev ret > UINT16_MAX) { 1211b01d1cd2SKonstantin Ananyev printf("Invalid frame buffer size value: %s\n", 1212b01d1cd2SKonstantin Ananyev optarg); 1213bbabfe6eSRadu Nicolau print_usage(prgname); 1214bbabfe6eSRadu Nicolau return -1; 1215bbabfe6eSRadu Nicolau } 1216b01d1cd2SKonstantin Ananyev frame_buf_size = ret; 1217b01d1cd2SKonstantin Ananyev printf("Custom frame buffer size %u\n", frame_buf_size); 1218bbabfe6eSRadu Nicolau break; 12195a032a71SKonstantin Ananyev case 'l': 12205a032a71SKonstantin Ananyev app_sa_prm.enable = 1; 12215a032a71SKonstantin Ananyev break; 12225a032a71SKonstantin Ananyev case 'w': 12235a032a71SKonstantin Ananyev app_sa_prm.window_size = parse_decimal(optarg); 12245a032a71SKonstantin Ananyev break; 12255a032a71SKonstantin Ananyev case 'e': 12265a032a71SKonstantin Ananyev app_sa_prm.enable_esn = 1; 12275a032a71SKonstantin Ananyev break; 12285a032a71SKonstantin Ananyev case 'a': 12295a032a71SKonstantin Ananyev app_sa_prm.enable = 1; 12305a032a71SKonstantin Ananyev app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM; 12315a032a71SKonstantin Ananyev break; 12322cf67788SVladimir Medvedkin case 'c': 12332cf67788SVladimir Medvedkin ret = parse_decimal(optarg); 12342cf67788SVladimir Medvedkin if (ret < 0) { 12352cf67788SVladimir Medvedkin printf("Invalid SA cache size: %s\n", optarg); 12362cf67788SVladimir Medvedkin print_usage(prgname); 12372cf67788SVladimir Medvedkin return -1; 12382cf67788SVladimir Medvedkin } 12392cf67788SVladimir Medvedkin app_sa_prm.cache_sz = ret; 12402cf67788SVladimir Medvedkin break; 124128008936SRadu Nicolau case 't': 124228008936SRadu Nicolau ret = parse_decimal(optarg); 124328008936SRadu Nicolau if (ret < 0) { 
124428008936SRadu Nicolau printf("Invalid interval value: %s\n", optarg); 124528008936SRadu Nicolau print_usage(prgname); 124628008936SRadu Nicolau return -1; 124728008936SRadu Nicolau } 124828008936SRadu Nicolau stats_interval = ret; 124928008936SRadu Nicolau break; 1250c5227350SAnoob Joseph case CMD_LINE_OPT_CONFIG_NUM: 1251c5227350SAnoob Joseph ret = parse_config(optarg); 1252c5227350SAnoob Joseph if (ret) { 1253c5227350SAnoob Joseph printf("Invalid config\n"); 1254d299106eSSergio Gonzalez Monroy print_usage(prgname); 1255d299106eSSergio Gonzalez Monroy return -1; 1256d299106eSSergio Gonzalez Monroy } 1257d299106eSSergio Gonzalez Monroy break; 1258c5227350SAnoob Joseph case CMD_LINE_OPT_SINGLE_SA_NUM: 1259c5227350SAnoob Joseph ret = parse_decimal(optarg); 1260bba1db35SMarcin Smoczynski if (ret == -1 || ret > UINT32_MAX) { 1261c5227350SAnoob Joseph printf("Invalid argument[sa_idx]\n"); 1262c5227350SAnoob Joseph print_usage(prgname); 1263c5227350SAnoob Joseph return -1; 1264c5227350SAnoob Joseph } 1265c5227350SAnoob Joseph 1266c5227350SAnoob Joseph /* else */ 1267c5227350SAnoob Joseph single_sa = 1; 1268c5227350SAnoob Joseph single_sa_idx = ret; 126965e3a202SLukasz Bartosik eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER; 12700d76e22dSNithin Dabilpuram wrkr_flags |= SS_F; 1271c5227350SAnoob Joseph printf("Configured with single SA index %u\n", 1272c5227350SAnoob Joseph single_sa_idx); 1273c5227350SAnoob Joseph break; 1274c5227350SAnoob Joseph case CMD_LINE_OPT_CRYPTODEV_MASK_NUM: 1275c5227350SAnoob Joseph ret = parse_portmask(optarg); 1276c5227350SAnoob Joseph if (ret == -1) { 1277c5227350SAnoob Joseph printf("Invalid argument[portmask]\n"); 1278c5227350SAnoob Joseph print_usage(prgname); 1279c5227350SAnoob Joseph return -1; 1280c5227350SAnoob Joseph } 1281c5227350SAnoob Joseph 1282c5227350SAnoob Joseph /* else */ 1283c5227350SAnoob Joseph enabled_cryptodev_mask = ret; 1284c5227350SAnoob Joseph break; 128565e3a202SLukasz Bartosik 128665e3a202SLukasz Bartosik case CMD_LINE_OPT_TRANSFER_MODE_NUM: 128765e3a202SLukasz Bartosik ret = parse_transfer_mode(eh_conf, optarg); 128865e3a202SLukasz Bartosik if (ret < 0) { 128965e3a202SLukasz Bartosik printf("Invalid packet transfer mode\n"); 129065e3a202SLukasz Bartosik print_usage(prgname); 129165e3a202SLukasz Bartosik return -1; 129265e3a202SLukasz Bartosik } 129365e3a202SLukasz Bartosik break; 129465e3a202SLukasz Bartosik 129565e3a202SLukasz Bartosik case CMD_LINE_OPT_SCHEDULE_TYPE_NUM: 129665e3a202SLukasz Bartosik ret = parse_schedule_type(eh_conf, optarg); 129765e3a202SLukasz Bartosik if (ret < 0) { 129865e3a202SLukasz Bartosik printf("Invalid queue schedule type\n"); 129965e3a202SLukasz Bartosik print_usage(prgname); 130065e3a202SLukasz Bartosik return -1; 130165e3a202SLukasz Bartosik } 130265e3a202SLukasz Bartosik break; 130365e3a202SLukasz Bartosik 130403128be4SKonstantin Ananyev case CMD_LINE_OPT_RX_OFFLOAD_NUM: 130503128be4SKonstantin Ananyev ret = parse_mask(optarg, &dev_rx_offload); 130603128be4SKonstantin Ananyev if (ret != 0) { 130703128be4SKonstantin Ananyev printf("Invalid argument for \'%s\': %s\n", 130803128be4SKonstantin Ananyev CMD_LINE_OPT_RX_OFFLOAD, optarg); 130903128be4SKonstantin Ananyev print_usage(prgname); 131003128be4SKonstantin Ananyev return -1; 131103128be4SKonstantin Ananyev } 131203128be4SKonstantin Ananyev break; 131303128be4SKonstantin Ananyev case CMD_LINE_OPT_TX_OFFLOAD_NUM: 131403128be4SKonstantin Ananyev ret = parse_mask(optarg, &dev_tx_offload); 131503128be4SKonstantin Ananyev if (ret != 0) { 
131603128be4SKonstantin Ananyev 				printf("Invalid argument for \'%s\': %s\n",
131703128be4SKonstantin Ananyev 					CMD_LINE_OPT_TX_OFFLOAD, optarg);
131803128be4SKonstantin Ananyev 				print_usage(prgname);
131903128be4SKonstantin Ananyev 				return -1;
132003128be4SKonstantin Ananyev 			}
132103128be4SKonstantin Ananyev 			break;
1322b01d1cd2SKonstantin Ananyev 		case CMD_LINE_OPT_REASSEMBLE_NUM:
1323b01d1cd2SKonstantin Ananyev 			ret = parse_decimal(optarg);
1324bba1db35SMarcin Smoczynski 			if (ret < 0 || ret > UINT32_MAX) {
1325b01d1cd2SKonstantin Ananyev 				printf("Invalid argument for \'%s\': %s\n",
1326b01d1cd2SKonstantin Ananyev 					CMD_LINE_OPT_REASSEMBLE, optarg);
1327b01d1cd2SKonstantin Ananyev 				print_usage(prgname);
1328b01d1cd2SKonstantin Ananyev 				return -1;
1329b01d1cd2SKonstantin Ananyev 			}
1330b01d1cd2SKonstantin Ananyev 			frag_tbl_sz = ret;
1331b01d1cd2SKonstantin Ananyev 			break;
1332b01d1cd2SKonstantin Ananyev 		case CMD_LINE_OPT_MTU_NUM:
1333b01d1cd2SKonstantin Ananyev 			ret = parse_decimal(optarg);
1334b01d1cd2SKonstantin Ananyev 			if (ret < 0 || ret > RTE_IPV4_MAX_PKT_LEN) {
1335b01d1cd2SKonstantin Ananyev 				printf("Invalid argument for \'%s\': %s\n",
1336b01d1cd2SKonstantin Ananyev 					CMD_LINE_OPT_MTU, optarg);
1337b01d1cd2SKonstantin Ananyev 				print_usage(prgname);
1338b01d1cd2SKonstantin Ananyev 				return -1;
1339b01d1cd2SKonstantin Ananyev 			}
1340b01d1cd2SKonstantin Ananyev 			mtu_size = ret;
1341b01d1cd2SKonstantin Ananyev 			break;
1342bba1db35SMarcin Smoczynski 		case CMD_LINE_OPT_FRAG_TTL_NUM:
1343bba1db35SMarcin Smoczynski 			ret = parse_decimal(optarg);
1344bba1db35SMarcin Smoczynski 			if (ret < 0 || ret > MAX_FRAG_TTL_NS) {
1345bba1db35SMarcin Smoczynski 				printf("Invalid argument for \'%s\': %s\n",
1346bba1db35SMarcin Smoczynski 					CMD_LINE_OPT_FRAG_TTL, optarg);
1347bba1db35SMarcin Smoczynski 				print_usage(prgname);
1348bba1db35SMarcin Smoczynski 				return -1;
1349bba1db35SMarcin Smoczynski 			}
1350bba1db35SMarcin Smoczynski 			frag_ttl_ns = ret;
1351bba1db35SMarcin Smoczynski 			break;
135286738ebeSSrujana Challa 		case CMD_LINE_OPT_EVENT_VECTOR_NUM:
135386738ebeSSrujana Challa 			em_conf = eh_conf->mode_params;
135486738ebeSSrujana Challa 			em_conf->ext_params.event_vector = 1;
135586738ebeSSrujana Challa 			break;
135686738ebeSSrujana Challa 		case CMD_LINE_OPT_VECTOR_SIZE_NUM:
135786738ebeSSrujana Challa 			ret = parse_decimal(optarg);
135886738ebeSSrujana Challa 
1359a2b445b8SNithin Dabilpuram 			if (ret > MAX_PKT_BURST_VEC) {
136086738ebeSSrujana Challa 				printf("Invalid argument for \'%s\': %s\n",
136186738ebeSSrujana Challa 					CMD_LINE_OPT_VECTOR_SIZE, optarg);
136286738ebeSSrujana Challa 				print_usage(prgname);
136386738ebeSSrujana Challa 				return -1;
136486738ebeSSrujana Challa 			}
136586738ebeSSrujana Challa 			em_conf = eh_conf->mode_params;
136686738ebeSSrujana Challa 			em_conf->ext_params.vector_size = ret;
136786738ebeSSrujana Challa 			break;
136886738ebeSSrujana Challa 		case CMD_LINE_OPT_VECTOR_TIMEOUT_NUM:
136986738ebeSSrujana Challa 			ret = parse_decimal(optarg);
137086738ebeSSrujana Challa 
137186738ebeSSrujana Challa 			em_conf = eh_conf->mode_params;
137286738ebeSSrujana Challa 			em_conf->vector_tmo_ns = ret;
137386738ebeSSrujana Challa 			break;
137448a39871SNithin Dabilpuram 		case CMD_LINE_OPT_VECTOR_POOL_SZ_NUM:
137548a39871SNithin Dabilpuram 			ret = parse_decimal(optarg);
137648a39871SNithin Dabilpuram 
137748a39871SNithin Dabilpuram 			em_conf = eh_conf->mode_params;
137848a39871SNithin Dabilpuram 			em_conf->vector_pool_sz = ret;
137948a39871SNithin Dabilpuram 			break;
138048a39871SNithin Dabilpuram 		case CMD_LINE_OPT_PER_PORT_POOL_NUM:
138148a39871SNithin Dabilpuram 			per_port_pool =
1; 138248a39871SNithin Dabilpuram break; 13835401bdc1SVolodymyr Fialko case CMD_LINE_OPT_QP_DESC_NB_NUM: 13845401bdc1SVolodymyr Fialko qp_desc_nb = parse_decimal(optarg); 13855401bdc1SVolodymyr Fialko break; 1386d299106eSSergio Gonzalez Monroy default: 1387d299106eSSergio Gonzalez Monroy print_usage(prgname); 1388d299106eSSergio Gonzalez Monroy return -1; 1389d299106eSSergio Gonzalez Monroy } 1390d299106eSSergio Gonzalez Monroy } 1391d299106eSSergio Gonzalez Monroy 13920d547ed0SFan Zhang if (f_present == 0) { 13930d547ed0SFan Zhang printf("Mandatory option \"-f\" not present\n"); 13940d547ed0SFan Zhang return -1; 13950d547ed0SFan Zhang } 13960d547ed0SFan Zhang 1397b01d1cd2SKonstantin Ananyev /* check do we need to enable multi-seg support */ 1398b01d1cd2SKonstantin Ananyev if (multi_seg_required()) { 1399b01d1cd2SKonstantin Ananyev /* legacy mode doesn't support multi-seg */ 1400b01d1cd2SKonstantin Ananyev app_sa_prm.enable = 1; 1401b01d1cd2SKonstantin Ananyev printf("frame buf size: %u, mtu: %u, " 1402b01d1cd2SKonstantin Ananyev "number of reassemble entries: %u\n" 1403b01d1cd2SKonstantin Ananyev "multi-segment support is required\n", 1404b01d1cd2SKonstantin Ananyev frame_buf_size, mtu_size, frag_tbl_sz); 1405b01d1cd2SKonstantin Ananyev } 1406b01d1cd2SKonstantin Ananyev 14075a032a71SKonstantin Ananyev print_app_sa_prm(&app_sa_prm); 14085a032a71SKonstantin Ananyev 1409d299106eSSergio Gonzalez Monroy if (optind >= 0) 1410d299106eSSergio Gonzalez Monroy argv[optind-1] = prgname; 1411d299106eSSergio Gonzalez Monroy 1412d299106eSSergio Gonzalez Monroy ret = optind-1; 14139d5ca532SKeith Wiles optind = 1; /* reset getopt lib */ 1414d299106eSSergio Gonzalez Monroy return ret; 1415d299106eSSergio Gonzalez Monroy } 1416d299106eSSergio Gonzalez Monroy 1417d299106eSSergio Gonzalez Monroy static void 14186d13ea8eSOlivier Matz print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr) 1419d299106eSSergio Gonzalez Monroy { 142035b2d13fSOlivier Matz char buf[RTE_ETHER_ADDR_FMT_SIZE]; 142135b2d13fSOlivier Matz rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 1422d299106eSSergio Gonzalez Monroy printf("%s%s", name, buf); 1423d299106eSSergio Gonzalez Monroy } 1424d299106eSSergio Gonzalez Monroy 14257622291bSKonstantin Ananyev /* 14267622291bSKonstantin Ananyev * Update destination ethaddr for the port. 
14277622291bSKonstantin Ananyev */ 14287622291bSKonstantin Ananyev int 14296d13ea8eSOlivier Matz add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr) 14307622291bSKonstantin Ananyev { 14317831bcf0SKonstantin Ananyev if (port >= RTE_DIM(ethaddr_tbl)) 14327622291bSKonstantin Ananyev return -EINVAL; 14337622291bSKonstantin Ananyev 1434fbe58580SRahul Bhansali rte_ether_addr_copy(addr, ðaddr_tbl[port].dst); 1435fbe58580SRahul Bhansali rte_ether_addr_copy(addr, (struct rte_ether_addr *)(val_eth + port)); 14367622291bSKonstantin Ananyev return 0; 14377622291bSKonstantin Ananyev } 14387622291bSKonstantin Ananyev 1439d299106eSSergio Gonzalez Monroy /* Check the link status of all ports in up to 9s, and print them finally */ 1440d299106eSSergio Gonzalez Monroy static void 14418728ccf3SThomas Monjalon check_all_ports_link_status(uint32_t port_mask) 1442d299106eSSergio Gonzalez Monroy { 1443d299106eSSergio Gonzalez Monroy #define CHECK_INTERVAL 100 /* 100ms */ 1444d299106eSSergio Gonzalez Monroy #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 144547523597SZhiyong Yang uint16_t portid; 144647523597SZhiyong Yang uint8_t count, all_ports_up, print_flag = 0; 1447d299106eSSergio Gonzalez Monroy struct rte_eth_link link; 144822e5c73bSIgor Romanov int ret; 1449db4e8135SIvan Dyukov char link_status_text[RTE_ETH_LINK_MAX_STR_LEN]; 1450d299106eSSergio Gonzalez Monroy 1451d299106eSSergio Gonzalez Monroy printf("\nChecking link status"); 1452d299106eSSergio Gonzalez Monroy fflush(stdout); 1453d299106eSSergio Gonzalez Monroy for (count = 0; count <= MAX_CHECK_TIME; count++) { 1454d299106eSSergio Gonzalez Monroy all_ports_up = 1; 14558728ccf3SThomas Monjalon RTE_ETH_FOREACH_DEV(portid) { 1456d299106eSSergio Gonzalez Monroy if ((port_mask & (1 << portid)) == 0) 1457d299106eSSergio Gonzalez Monroy continue; 1458d299106eSSergio Gonzalez Monroy memset(&link, 0, sizeof(link)); 145922e5c73bSIgor Romanov ret = rte_eth_link_get_nowait(portid, &link); 146022e5c73bSIgor Romanov if (ret < 0) { 146122e5c73bSIgor Romanov all_ports_up = 0; 146222e5c73bSIgor Romanov if (print_flag == 1) 146322e5c73bSIgor Romanov printf("Port %u link get failed: %s\n", 146422e5c73bSIgor Romanov portid, rte_strerror(-ret)); 146522e5c73bSIgor Romanov continue; 146622e5c73bSIgor Romanov } 1467d299106eSSergio Gonzalez Monroy /* print link status if flag set */ 1468d299106eSSergio Gonzalez Monroy if (print_flag == 1) { 1469db4e8135SIvan Dyukov rte_eth_link_to_str(link_status_text, 1470db4e8135SIvan Dyukov sizeof(link_status_text), &link); 1471db4e8135SIvan Dyukov printf("Port %d %s\n", portid, 1472db4e8135SIvan Dyukov link_status_text); 1473d299106eSSergio Gonzalez Monroy continue; 1474d299106eSSergio Gonzalez Monroy } 1475d299106eSSergio Gonzalez Monroy /* clear all_ports_up flag if any link down */ 1476295968d1SFerruh Yigit if (link.link_status == RTE_ETH_LINK_DOWN) { 1477d299106eSSergio Gonzalez Monroy all_ports_up = 0; 1478d299106eSSergio Gonzalez Monroy break; 1479d299106eSSergio Gonzalez Monroy } 1480d299106eSSergio Gonzalez Monroy } 1481d299106eSSergio Gonzalez Monroy /* after finally printing all link status, get out */ 1482d299106eSSergio Gonzalez Monroy if (print_flag == 1) 1483d299106eSSergio Gonzalez Monroy break; 1484d299106eSSergio Gonzalez Monroy 1485d299106eSSergio Gonzalez Monroy if (all_ports_up == 0) { 1486d299106eSSergio Gonzalez Monroy printf("."); 1487d299106eSSergio Gonzalez Monroy fflush(stdout); 1488d299106eSSergio Gonzalez Monroy rte_delay_ms(CHECK_INTERVAL); 1489d299106eSSergio Gonzalez Monroy } 
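		/*
		 * Clarifying note on the timing (derived from the macros above):
		 * the loop sleeps CHECK_INTERVAL (100 ms) between polls and
		 * retries at most MAX_CHECK_TIME (90) times, so the worst-case
		 * wait before the final status is printed is roughly
		 * 100 ms * 90 = 9 s, matching the comment above this function.
		 */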
1490d299106eSSergio Gonzalez Monroy 1491d299106eSSergio Gonzalez Monroy /* set the print_flag if all ports up or timeout */ 1492d299106eSSergio Gonzalez Monroy if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { 1493d299106eSSergio Gonzalez Monroy print_flag = 1; 1494d299106eSSergio Gonzalez Monroy printf("done\n"); 1495d299106eSSergio Gonzalez Monroy } 1496d299106eSSergio Gonzalez Monroy } 1497d299106eSSergio Gonzalez Monroy } 1498d299106eSSergio Gonzalez Monroy 1499d299106eSSergio Gonzalez Monroy static int32_t 1500a8ade121SVolodymyr Fialko add_mapping(const char *str, uint16_t cdev_id, 1501d299106eSSergio Gonzalez Monroy uint16_t qp, struct lcore_params *params, 1502d299106eSSergio Gonzalez Monroy struct ipsec_ctx *ipsec_ctx, 1503d299106eSSergio Gonzalez Monroy const struct rte_cryptodev_capabilities *cipher, 150415f81cbfSAviad Yehezkel const struct rte_cryptodev_capabilities *auth, 150515f81cbfSAviad Yehezkel const struct rte_cryptodev_capabilities *aead) 1506d299106eSSergio Gonzalez Monroy { 1507d299106eSSergio Gonzalez Monroy int32_t ret = 0; 1508d299106eSSergio Gonzalez Monroy unsigned long i; 1509d299106eSSergio Gonzalez Monroy struct cdev_key key = { 0 }; 1510d299106eSSergio Gonzalez Monroy 1511d299106eSSergio Gonzalez Monroy key.lcore_id = params->lcore_id; 1512d299106eSSergio Gonzalez Monroy if (cipher) 1513d299106eSSergio Gonzalez Monroy key.cipher_algo = cipher->sym.cipher.algo; 1514d299106eSSergio Gonzalez Monroy if (auth) 1515d299106eSSergio Gonzalez Monroy key.auth_algo = auth->sym.auth.algo; 151615f81cbfSAviad Yehezkel if (aead) 151715f81cbfSAviad Yehezkel key.aead_algo = aead->sym.aead.algo; 1518d299106eSSergio Gonzalez Monroy 1519a8ade121SVolodymyr Fialko ret = rte_hash_lookup(ipsec_ctx->cdev_map, &key); 1520d299106eSSergio Gonzalez Monroy if (ret != -ENOENT) 1521d299106eSSergio Gonzalez Monroy return 0; 1522d299106eSSergio Gonzalez Monroy 1523d299106eSSergio Gonzalez Monroy for (i = 0; i < ipsec_ctx->nb_qps; i++) 1524d299106eSSergio Gonzalez Monroy if (ipsec_ctx->tbl[i].id == cdev_id) 1525d299106eSSergio Gonzalez Monroy break; 1526d299106eSSergio Gonzalez Monroy 1527d299106eSSergio Gonzalez Monroy if (i == ipsec_ctx->nb_qps) { 1528d299106eSSergio Gonzalez Monroy if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) { 1529d299106eSSergio Gonzalez Monroy printf("Maximum number of crypto devices assigned to " 1530d299106eSSergio Gonzalez Monroy "a core, increase MAX_QP_PER_LCORE value\n"); 1531d299106eSSergio Gonzalez Monroy return 0; 1532d299106eSSergio Gonzalez Monroy } 1533d299106eSSergio Gonzalez Monroy ipsec_ctx->tbl[i].id = cdev_id; 1534d299106eSSergio Gonzalez Monroy ipsec_ctx->tbl[i].qp = qp; 1535d299106eSSergio Gonzalez Monroy ipsec_ctx->nb_qps++; 1536d299106eSSergio Gonzalez Monroy printf("%s cdev mapping: lcore %u using cdev %u qp %u " 1537d299106eSSergio Gonzalez Monroy "(cdev_id_qp %lu)\n", str, key.lcore_id, 1538d299106eSSergio Gonzalez Monroy cdev_id, qp, i); 1539d299106eSSergio Gonzalez Monroy } 1540d299106eSSergio Gonzalez Monroy 1541a8ade121SVolodymyr Fialko ret = rte_hash_add_key_data(ipsec_ctx->cdev_map, &key, (void *)i); 1542d299106eSSergio Gonzalez Monroy if (ret < 0) { 15437be78d02SJosh Soref printf("Failed to insert cdev mapping for (lcore %u, " 1544d299106eSSergio Gonzalez Monroy "cdev %u, qp %u), errno %d\n", 1545d299106eSSergio Gonzalez Monroy key.lcore_id, ipsec_ctx->tbl[i].id, 1546d299106eSSergio Gonzalez Monroy ipsec_ctx->tbl[i].qp, ret); 1547d299106eSSergio Gonzalez Monroy return 0; 1548d299106eSSergio Gonzalez Monroy } 1549d299106eSSergio 
Gonzalez Monroy 1550d299106eSSergio Gonzalez Monroy return 1; 1551d299106eSSergio Gonzalez Monroy } 1552d299106eSSergio Gonzalez Monroy 1553d299106eSSergio Gonzalez Monroy static int32_t 1554253265f8SVolodymyr Fialko add_cdev_mapping(const struct rte_cryptodev_info *dev_info, uint16_t cdev_id, 1555d299106eSSergio Gonzalez Monroy uint16_t qp, struct lcore_params *params) 1556d299106eSSergio Gonzalez Monroy { 1557d299106eSSergio Gonzalez Monroy int32_t ret = 0; 1558d299106eSSergio Gonzalez Monroy const struct rte_cryptodev_capabilities *i, *j; 1559d299106eSSergio Gonzalez Monroy struct lcore_conf *qconf; 1560d299106eSSergio Gonzalez Monroy struct ipsec_ctx *ipsec_ctx; 1561d299106eSSergio Gonzalez Monroy const char *str; 156232d76fd7SAkhil Goyal void *sec_ctx; 156332d76fd7SAkhil Goyal const struct rte_security_capability *sec_cap; 1564d299106eSSergio Gonzalez Monroy 1565d299106eSSergio Gonzalez Monroy qconf = &lcore_conf[params->lcore_id]; 1566d299106eSSergio Gonzalez Monroy 1567a8ade121SVolodymyr Fialko if (!is_unprotected_port(params->port_id)) { 1568d299106eSSergio Gonzalez Monroy ipsec_ctx = &qconf->outbound; 1569a8ade121SVolodymyr Fialko ipsec_ctx->cdev_map = cdev_map_out; 1570d299106eSSergio Gonzalez Monroy str = "Outbound"; 1571d299106eSSergio Gonzalez Monroy } else { 1572d299106eSSergio Gonzalez Monroy ipsec_ctx = &qconf->inbound; 1573a8ade121SVolodymyr Fialko ipsec_ctx->cdev_map = cdev_map_in; 1574d299106eSSergio Gonzalez Monroy str = "Inbound"; 1575d299106eSSergio Gonzalez Monroy } 1576d299106eSSergio Gonzalez Monroy 15777be78d02SJosh Soref /* Required cryptodevs with operation chaining */ 157832d76fd7SAkhil Goyal if (!(dev_info->feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING) && 157932d76fd7SAkhil Goyal !(dev_info->feature_flags & RTE_CRYPTODEV_FF_SECURITY)) 1580d299106eSSergio Gonzalez Monroy return ret; 1581d299106eSSergio Gonzalez Monroy 1582d299106eSSergio Gonzalez Monroy for (i = dev_info->capabilities; 1583d299106eSSergio Gonzalez Monroy i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) { 1584d299106eSSergio Gonzalez Monroy if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) 1585d299106eSSergio Gonzalez Monroy continue; 1586d299106eSSergio Gonzalez Monroy 158715f81cbfSAviad Yehezkel if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) { 1588a8ade121SVolodymyr Fialko ret |= add_mapping(str, cdev_id, qp, params, 158915f81cbfSAviad Yehezkel ipsec_ctx, NULL, NULL, i); 159015f81cbfSAviad Yehezkel continue; 159115f81cbfSAviad Yehezkel } 159215f81cbfSAviad Yehezkel 1593d299106eSSergio Gonzalez Monroy if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER) 1594d299106eSSergio Gonzalez Monroy continue; 1595d299106eSSergio Gonzalez Monroy 1596d299106eSSergio Gonzalez Monroy for (j = dev_info->capabilities; 1597d299106eSSergio Gonzalez Monroy j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) { 1598d299106eSSergio Gonzalez Monroy if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) 1599d299106eSSergio Gonzalez Monroy continue; 1600d299106eSSergio Gonzalez Monroy 1601d299106eSSergio Gonzalez Monroy if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH) 1602d299106eSSergio Gonzalez Monroy continue; 1603d299106eSSergio Gonzalez Monroy 1604a8ade121SVolodymyr Fialko ret |= add_mapping(str, cdev_id, qp, params, 160515f81cbfSAviad Yehezkel ipsec_ctx, i, j, NULL); 1606d299106eSSergio Gonzalez Monroy } 1607d299106eSSergio Gonzalez Monroy } 1608d299106eSSergio Gonzalez Monroy 160932d76fd7SAkhil Goyal sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id); 161032d76fd7SAkhil Goyal if (sec_ctx == NULL) 161132d76fd7SAkhil Goyal return 
ret; 161232d76fd7SAkhil Goyal 161332d76fd7SAkhil Goyal sec_cap = rte_security_capabilities_get(sec_ctx); 161432d76fd7SAkhil Goyal if (sec_cap == NULL) 161532d76fd7SAkhil Goyal return ret; 161632d76fd7SAkhil Goyal 161732d76fd7SAkhil Goyal for (i = sec_cap->crypto_capabilities; 161832d76fd7SAkhil Goyal i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) { 161932d76fd7SAkhil Goyal if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) 162032d76fd7SAkhil Goyal continue; 162132d76fd7SAkhil Goyal 162232d76fd7SAkhil Goyal if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) { 162332d76fd7SAkhil Goyal ret |= add_mapping(str, cdev_id, qp, params, 162432d76fd7SAkhil Goyal ipsec_ctx, NULL, NULL, i); 162532d76fd7SAkhil Goyal continue; 162632d76fd7SAkhil Goyal } 162732d76fd7SAkhil Goyal 162832d76fd7SAkhil Goyal if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER) 162932d76fd7SAkhil Goyal continue; 163032d76fd7SAkhil Goyal 163132d76fd7SAkhil Goyal for (j = sec_cap->crypto_capabilities; 163232d76fd7SAkhil Goyal j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) { 163332d76fd7SAkhil Goyal if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) 163432d76fd7SAkhil Goyal continue; 163532d76fd7SAkhil Goyal 163632d76fd7SAkhil Goyal if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH) 163732d76fd7SAkhil Goyal continue; 163832d76fd7SAkhil Goyal 163932d76fd7SAkhil Goyal ret |= add_mapping(str, cdev_id, qp, params, 164032d76fd7SAkhil Goyal ipsec_ctx, i, j, NULL); 164132d76fd7SAkhil Goyal } 164232d76fd7SAkhil Goyal } 164332d76fd7SAkhil Goyal 1644d299106eSSergio Gonzalez Monroy return ret; 1645d299106eSSergio Gonzalez Monroy } 1646d299106eSSergio Gonzalez Monroy 1647253265f8SVolodymyr Fialko static uint16_t 1648253265f8SVolodymyr Fialko map_cdev_to_cores_from_config(enum eh_pkt_transfer_mode mode, int16_t cdev_id, 1649253265f8SVolodymyr Fialko const struct rte_cryptodev_info *cdev_info, 1650253265f8SVolodymyr Fialko uint16_t *last_used_lcore_id) 1651253265f8SVolodymyr Fialko { 1652253265f8SVolodymyr Fialko uint16_t nb_qp = 0, i = 0, max_nb_qps; 1653253265f8SVolodymyr Fialko 1654253265f8SVolodymyr Fialko /* For event lookaside mode all sessions are bound to single qp. 1655253265f8SVolodymyr Fialko * It's enough to bind one core, since all cores will share same qp 1656253265f8SVolodymyr Fialko * Event inline mode do not use this functionality. 
1657253265f8SVolodymyr Fialko */ 1658253265f8SVolodymyr Fialko if (mode == EH_PKT_TRANSFER_MODE_EVENT) { 1659253265f8SVolodymyr Fialko add_cdev_mapping(cdev_info, cdev_id, nb_qp, &lcore_params[0]); 1660253265f8SVolodymyr Fialko return 1; 1661253265f8SVolodymyr Fialko } 1662253265f8SVolodymyr Fialko 1663253265f8SVolodymyr Fialko /* Check if there are enough queue pairs for all configured cores */ 1664253265f8SVolodymyr Fialko max_nb_qps = RTE_MIN(nb_lcore_params, cdev_info->max_nb_queue_pairs); 1665253265f8SVolodymyr Fialko 1666253265f8SVolodymyr Fialko while (nb_qp < max_nb_qps && i < nb_lcore_params) { 1667253265f8SVolodymyr Fialko if (add_cdev_mapping(cdev_info, cdev_id, nb_qp, 1668253265f8SVolodymyr Fialko &lcore_params[*last_used_lcore_id])) 1669253265f8SVolodymyr Fialko nb_qp++; 1670253265f8SVolodymyr Fialko (*last_used_lcore_id)++; 1671253265f8SVolodymyr Fialko *last_used_lcore_id %= nb_lcore_params; 1672253265f8SVolodymyr Fialko i++; 1673253265f8SVolodymyr Fialko } 1674253265f8SVolodymyr Fialko 1675253265f8SVolodymyr Fialko return nb_qp; 1676253265f8SVolodymyr Fialko } 1677253265f8SVolodymyr Fialko 16782c68fe79SAkhil Goyal /* Check if the device is enabled by cryptodev_mask */ 16792c68fe79SAkhil Goyal static int 16802c68fe79SAkhil Goyal check_cryptodev_mask(uint8_t cdev_id) 16812c68fe79SAkhil Goyal { 16822c68fe79SAkhil Goyal if (enabled_cryptodev_mask & (1 << cdev_id)) 16832c68fe79SAkhil Goyal return 0; 16842c68fe79SAkhil Goyal 16852c68fe79SAkhil Goyal return -1; 16862c68fe79SAkhil Goyal } 16872c68fe79SAkhil Goyal 16887338a34eSLukasz Bartosik static uint16_t 1689253265f8SVolodymyr Fialko cryptodevs_init(enum eh_pkt_transfer_mode mode) 1690d299106eSSergio Gonzalez Monroy { 1691253265f8SVolodymyr Fialko struct rte_hash_parameters params = { 0 }; 1692d299106eSSergio Gonzalez Monroy struct rte_cryptodev_config dev_conf; 1693d299106eSSergio Gonzalez Monroy struct rte_cryptodev_qp_conf qp_conf; 1694253265f8SVolodymyr Fialko uint16_t idx, qp, total_nb_qps; 16953a690d5aSBernard Iremonger int16_t cdev_id; 1696d299106eSSergio Gonzalez Monroy 1697b01d1cd2SKonstantin Ananyev const uint64_t mseg_flag = multi_seg_required() ? 
1698b01d1cd2SKonstantin Ananyev RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0; 1699b01d1cd2SKonstantin Ananyev 1700d299106eSSergio Gonzalez Monroy params.entries = CDEV_MAP_ENTRIES; 1701d299106eSSergio Gonzalez Monroy params.key_len = sizeof(struct cdev_key); 1702d299106eSSergio Gonzalez Monroy params.hash_func = rte_jhash; 1703d299106eSSergio Gonzalez Monroy params.hash_func_init_val = 0; 1704d299106eSSergio Gonzalez Monroy params.socket_id = rte_socket_id(); 1705d299106eSSergio Gonzalez Monroy 1706d299106eSSergio Gonzalez Monroy params.name = "cdev_map_in"; 1707d299106eSSergio Gonzalez Monroy cdev_map_in = rte_hash_create(¶ms); 1708d299106eSSergio Gonzalez Monroy if (cdev_map_in == NULL) 1709d299106eSSergio Gonzalez Monroy rte_panic("Failed to create cdev_map hash table, errno = %d\n", 1710d299106eSSergio Gonzalez Monroy rte_errno); 1711d299106eSSergio Gonzalez Monroy 1712d299106eSSergio Gonzalez Monroy params.name = "cdev_map_out"; 1713d299106eSSergio Gonzalez Monroy cdev_map_out = rte_hash_create(¶ms); 1714d299106eSSergio Gonzalez Monroy if (cdev_map_out == NULL) 1715d299106eSSergio Gonzalez Monroy rte_panic("Failed to create cdev_map hash table, errno = %d\n", 1716d299106eSSergio Gonzalez Monroy rte_errno); 1717d299106eSSergio Gonzalez Monroy 1718d299106eSSergio Gonzalez Monroy printf("lcore/cryptodev/qp mappings:\n"); 1719d299106eSSergio Gonzalez Monroy 1720d299106eSSergio Gonzalez Monroy idx = 0; 17217338a34eSLukasz Bartosik total_nb_qps = 0; 17222c68fe79SAkhil Goyal for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) { 1723d299106eSSergio Gonzalez Monroy struct rte_cryptodev_info cdev_info; 1724d299106eSSergio Gonzalez Monroy 17252c68fe79SAkhil Goyal if (check_cryptodev_mask((uint8_t)cdev_id)) 17262c68fe79SAkhil Goyal continue; 17272c68fe79SAkhil Goyal 1728d299106eSSergio Gonzalez Monroy rte_cryptodev_info_get(cdev_id, &cdev_info); 1729d299106eSSergio Gonzalez Monroy 1730b01d1cd2SKonstantin Ananyev if ((mseg_flag & cdev_info.feature_flags) != mseg_flag) 1731b01d1cd2SKonstantin Ananyev rte_exit(EXIT_FAILURE, 1732b01d1cd2SKonstantin Ananyev "Device %hd does not support \'%s\' feature\n", 1733b01d1cd2SKonstantin Ananyev cdev_id, 1734b01d1cd2SKonstantin Ananyev rte_cryptodev_get_feature_name(mseg_flag)); 1735b01d1cd2SKonstantin Ananyev 1736d299106eSSergio Gonzalez Monroy 1737253265f8SVolodymyr Fialko qp = map_cdev_to_cores_from_config(mode, cdev_id, &cdev_info, &idx); 1738d299106eSSergio Gonzalez Monroy if (qp == 0) 1739d299106eSSergio Gonzalez Monroy continue; 1740d299106eSSergio Gonzalez Monroy 17417338a34eSLukasz Bartosik total_nb_qps += qp; 1742d299106eSSergio Gonzalez Monroy dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id); 174311fa5850SKai Ji /* Use the first socket if SOCKET_ID_ANY is returned. 
*/ 174411fa5850SKai Ji if (dev_conf.socket_id == SOCKET_ID_ANY) 174511fa5850SKai Ji dev_conf.socket_id = 0; 1746d299106eSSergio Gonzalez Monroy dev_conf.nb_queue_pairs = qp; 1747c9030ae3SAnoob Joseph dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO; 1748d299106eSSergio Gonzalez Monroy 17490f95a8c7SPablo de Lara uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions; 175004fa1906SVladimir Medvedkin if (dev_max_sess != 0 && 175104fa1906SVladimir Medvedkin dev_max_sess < get_nb_crypto_sessions()) 17520f95a8c7SPablo de Lara rte_exit(EXIT_FAILURE, 17530f95a8c7SPablo de Lara "Device does not support at least %u " 175404fa1906SVladimir Medvedkin "sessions", get_nb_crypto_sessions()); 17550f95a8c7SPablo de Lara 1756f7db6f82SPablo de Lara if (rte_cryptodev_configure(cdev_id, &dev_conf)) 17572c59bd32SSlawomir Mrozowicz rte_panic("Failed to initialize cryptodev %u\n", 1758d299106eSSergio Gonzalez Monroy cdev_id); 1759d299106eSSergio Gonzalez Monroy 17605401bdc1SVolodymyr Fialko qp_conf.nb_descriptors = qp_desc_nb; 1761725d2a7fSFan Zhang qp_conf.mp_session = 1762725d2a7fSFan Zhang socket_ctx[dev_conf.socket_id].session_pool; 1763d299106eSSergio Gonzalez Monroy for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++) 1764d299106eSSergio Gonzalez Monroy if (rte_cryptodev_queue_pair_setup(cdev_id, qp, 1765725d2a7fSFan Zhang &qp_conf, dev_conf.socket_id)) 1766d299106eSSergio Gonzalez Monroy rte_panic("Failed to setup queue %u for " 1767d299106eSSergio Gonzalez Monroy "cdev_id %u\n", 0, cdev_id); 1768b86eeb2aSHemant Agrawal 1769b86eeb2aSHemant Agrawal if (rte_cryptodev_start(cdev_id)) 1770b86eeb2aSHemant Agrawal rte_panic("Failed to start cryptodev %u\n", 1771b86eeb2aSHemant Agrawal cdev_id); 1772d299106eSSergio Gonzalez Monroy } 1773d299106eSSergio Gonzalez Monroy 1774d299106eSSergio Gonzalez Monroy printf("\n"); 1775d299106eSSergio Gonzalez Monroy 17767338a34eSLukasz Bartosik return total_nb_qps; 1777d299106eSSergio Gonzalez Monroy } 1778d299106eSSergio Gonzalez Monroy 1779d04bb1c5SNithin Dabilpuram static int 1780d04bb1c5SNithin Dabilpuram check_ptype(int portid) 1781d04bb1c5SNithin Dabilpuram { 1782d04bb1c5SNithin Dabilpuram int l3_ipv4 = 0, l3_ipv6 = 0, l4_udp = 0, tunnel_esp = 0; 1783d04bb1c5SNithin Dabilpuram int i, nb_ptypes; 1784d04bb1c5SNithin Dabilpuram uint32_t mask; 1785d04bb1c5SNithin Dabilpuram 1786d04bb1c5SNithin Dabilpuram mask = (RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK | 1787d04bb1c5SNithin Dabilpuram RTE_PTYPE_TUNNEL_MASK); 1788d04bb1c5SNithin Dabilpuram 1789d04bb1c5SNithin Dabilpuram nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, NULL, 0); 1790d04bb1c5SNithin Dabilpuram if (nb_ptypes <= 0) 1791d04bb1c5SNithin Dabilpuram return 0; 1792d04bb1c5SNithin Dabilpuram 1793d04bb1c5SNithin Dabilpuram uint32_t ptypes[nb_ptypes]; 1794d04bb1c5SNithin Dabilpuram 1795d04bb1c5SNithin Dabilpuram nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, ptypes, nb_ptypes); 1796d04bb1c5SNithin Dabilpuram for (i = 0; i < nb_ptypes; ++i) { 1797d04bb1c5SNithin Dabilpuram if (RTE_ETH_IS_IPV4_HDR(ptypes[i])) 1798d04bb1c5SNithin Dabilpuram l3_ipv4 = 1; 1799d04bb1c5SNithin Dabilpuram if (RTE_ETH_IS_IPV6_HDR(ptypes[i])) 1800d04bb1c5SNithin Dabilpuram l3_ipv6 = 1; 1801d04bb1c5SNithin Dabilpuram if ((ptypes[i] & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP) 1802d04bb1c5SNithin Dabilpuram tunnel_esp = 1; 1803d04bb1c5SNithin Dabilpuram if ((ptypes[i] & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP) 1804d04bb1c5SNithin Dabilpuram l4_udp = 1; 1805d04bb1c5SNithin Dabilpuram } 1806d04bb1c5SNithin Dabilpuram 
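	/*
	 * Summary of the scan above: each flag records whether the PMD can
	 * classify the corresponding header type in hardware. If any of them
	 * is still 0, this function returns 0 and port_init() falls back to
	 * software classification by registering parse_ptype_cb() as an Rx
	 * callback on each Rx queue of the port.
	 */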
1807d04bb1c5SNithin Dabilpuram if (l3_ipv4 == 0) 1808d04bb1c5SNithin Dabilpuram printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid); 1809d04bb1c5SNithin Dabilpuram 1810d04bb1c5SNithin Dabilpuram if (l3_ipv6 == 0) 1811d04bb1c5SNithin Dabilpuram printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid); 1812d04bb1c5SNithin Dabilpuram 1813d04bb1c5SNithin Dabilpuram if (l4_udp == 0) 1814d04bb1c5SNithin Dabilpuram printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid); 1815d04bb1c5SNithin Dabilpuram 1816d04bb1c5SNithin Dabilpuram if (tunnel_esp == 0) 1817d04bb1c5SNithin Dabilpuram printf("port %d cannot parse RTE_PTYPE_TUNNEL_ESP\n", portid); 1818d04bb1c5SNithin Dabilpuram 1819d04bb1c5SNithin Dabilpuram if (l3_ipv4 && l3_ipv6 && l4_udp && tunnel_esp) 1820d04bb1c5SNithin Dabilpuram return 1; 1821d04bb1c5SNithin Dabilpuram 1822d04bb1c5SNithin Dabilpuram return 0; 1823d04bb1c5SNithin Dabilpuram 1824d04bb1c5SNithin Dabilpuram } 1825d04bb1c5SNithin Dabilpuram 1826d04bb1c5SNithin Dabilpuram static inline void 1827d04bb1c5SNithin Dabilpuram parse_ptype(struct rte_mbuf *m) 1828d04bb1c5SNithin Dabilpuram { 1829d04bb1c5SNithin Dabilpuram uint32_t packet_type = RTE_PTYPE_UNKNOWN; 1830d04bb1c5SNithin Dabilpuram const struct rte_ipv4_hdr *iph4; 1831d04bb1c5SNithin Dabilpuram const struct rte_ipv6_hdr *iph6; 1832d04bb1c5SNithin Dabilpuram const struct rte_ether_hdr *eth; 1833d04bb1c5SNithin Dabilpuram const struct rte_udp_hdr *udp; 1834d04bb1c5SNithin Dabilpuram uint16_t nat_port, ether_type; 1835d04bb1c5SNithin Dabilpuram int next_proto = 0; 1836d04bb1c5SNithin Dabilpuram size_t ext_len = 0; 1837d04bb1c5SNithin Dabilpuram const uint8_t *p; 1838d04bb1c5SNithin Dabilpuram uint32_t l3len; 1839d04bb1c5SNithin Dabilpuram 1840d04bb1c5SNithin Dabilpuram eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); 1841d04bb1c5SNithin Dabilpuram ether_type = eth->ether_type; 1842d04bb1c5SNithin Dabilpuram 1843d04bb1c5SNithin Dabilpuram if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { 1844d04bb1c5SNithin Dabilpuram iph4 = (const struct rte_ipv4_hdr *)(eth + 1); 1845d04bb1c5SNithin Dabilpuram l3len = ((iph4->version_ihl & RTE_IPV4_HDR_IHL_MASK) * 1846d04bb1c5SNithin Dabilpuram RTE_IPV4_IHL_MULTIPLIER); 1847d04bb1c5SNithin Dabilpuram 1848d04bb1c5SNithin Dabilpuram if (l3len == sizeof(struct rte_ipv4_hdr)) 1849d04bb1c5SNithin Dabilpuram packet_type |= RTE_PTYPE_L3_IPV4; 1850d04bb1c5SNithin Dabilpuram else 1851d04bb1c5SNithin Dabilpuram packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; 1852d04bb1c5SNithin Dabilpuram 1853d04bb1c5SNithin Dabilpuram next_proto = iph4->next_proto_id; 1854d04bb1c5SNithin Dabilpuram p = (const uint8_t *)iph4; 1855d04bb1c5SNithin Dabilpuram } else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { 1856d04bb1c5SNithin Dabilpuram iph6 = (const struct rte_ipv6_hdr *)(eth + 1); 1857d04bb1c5SNithin Dabilpuram l3len = sizeof(struct ip6_hdr); 1858d04bb1c5SNithin Dabilpuram 1859d04bb1c5SNithin Dabilpuram /* determine l3 header size up to ESP extension */ 1860d04bb1c5SNithin Dabilpuram next_proto = iph6->proto; 1861d04bb1c5SNithin Dabilpuram p = (const uint8_t *)iph6; 1862d04bb1c5SNithin Dabilpuram while (next_proto != IPPROTO_ESP && l3len < m->data_len && 1863d04bb1c5SNithin Dabilpuram (next_proto = rte_ipv6_get_next_ext(p + l3len, 1864d04bb1c5SNithin Dabilpuram next_proto, &ext_len)) >= 0) 1865d04bb1c5SNithin Dabilpuram l3len += ext_len; 1866d04bb1c5SNithin Dabilpuram 1867d04bb1c5SNithin Dabilpuram /* Skip IPv6 header exceeds first segment length */ 1868d04bb1c5SNithin Dabilpuram if 
(unlikely(l3len + RTE_ETHER_HDR_LEN > m->data_len)) 1869d04bb1c5SNithin Dabilpuram goto exit; 1870d04bb1c5SNithin Dabilpuram 1871d04bb1c5SNithin Dabilpuram if (l3len == sizeof(struct ip6_hdr)) 1872d04bb1c5SNithin Dabilpuram packet_type |= RTE_PTYPE_L3_IPV6; 1873d04bb1c5SNithin Dabilpuram else 1874d04bb1c5SNithin Dabilpuram packet_type |= RTE_PTYPE_L3_IPV6_EXT; 1875d04bb1c5SNithin Dabilpuram } 1876d04bb1c5SNithin Dabilpuram 1877d04bb1c5SNithin Dabilpuram switch (next_proto) { 1878d04bb1c5SNithin Dabilpuram case IPPROTO_ESP: 1879d04bb1c5SNithin Dabilpuram packet_type |= RTE_PTYPE_TUNNEL_ESP; 1880d04bb1c5SNithin Dabilpuram break; 1881d04bb1c5SNithin Dabilpuram case IPPROTO_UDP: 1882d04bb1c5SNithin Dabilpuram if (app_sa_prm.udp_encap == 1) { 1883d04bb1c5SNithin Dabilpuram udp = (const struct rte_udp_hdr *)(p + l3len); 1884d04bb1c5SNithin Dabilpuram nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT); 1885d04bb1c5SNithin Dabilpuram if (udp->src_port == nat_port || 1886d04bb1c5SNithin Dabilpuram udp->dst_port == nat_port) 1887d04bb1c5SNithin Dabilpuram packet_type |= 1888d04bb1c5SNithin Dabilpuram MBUF_PTYPE_TUNNEL_ESP_IN_UDP; 1889d04bb1c5SNithin Dabilpuram } 1890d04bb1c5SNithin Dabilpuram break; 1891d04bb1c5SNithin Dabilpuram default: 1892d04bb1c5SNithin Dabilpuram break; 1893d04bb1c5SNithin Dabilpuram } 1894d04bb1c5SNithin Dabilpuram exit: 189582df99aaSRadu Nicolau m->packet_type |= packet_type; 1896d04bb1c5SNithin Dabilpuram } 1897d04bb1c5SNithin Dabilpuram 1898d04bb1c5SNithin Dabilpuram static uint16_t 1899d04bb1c5SNithin Dabilpuram parse_ptype_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused, 1900d04bb1c5SNithin Dabilpuram struct rte_mbuf *pkts[], uint16_t nb_pkts, 1901d04bb1c5SNithin Dabilpuram uint16_t max_pkts __rte_unused, 1902d04bb1c5SNithin Dabilpuram void *user_param __rte_unused) 1903d04bb1c5SNithin Dabilpuram { 1904d04bb1c5SNithin Dabilpuram uint32_t i; 1905d04bb1c5SNithin Dabilpuram 1906d04bb1c5SNithin Dabilpuram if (unlikely(nb_pkts == 0)) 1907d04bb1c5SNithin Dabilpuram return nb_pkts; 1908d04bb1c5SNithin Dabilpuram 1909d04bb1c5SNithin Dabilpuram rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct ether_hdr *)); 1910d04bb1c5SNithin Dabilpuram for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) { 1911d04bb1c5SNithin Dabilpuram rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1], 1912d04bb1c5SNithin Dabilpuram struct ether_hdr *)); 1913d04bb1c5SNithin Dabilpuram parse_ptype(pkts[i]); 1914d04bb1c5SNithin Dabilpuram } 1915d04bb1c5SNithin Dabilpuram parse_ptype(pkts[i]); 1916d04bb1c5SNithin Dabilpuram 1917d04bb1c5SNithin Dabilpuram return nb_pkts; 1918d04bb1c5SNithin Dabilpuram } 1919d04bb1c5SNithin Dabilpuram 1920d299106eSSergio Gonzalez Monroy static void 1921d8d51d4fSRahul Bhansali port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, 1922d8d51d4fSRahul Bhansali uint8_t hw_reassembly) 1923d299106eSSergio Gonzalez Monroy { 1924d299106eSSergio Gonzalez Monroy struct rte_eth_dev_info dev_info; 1925d299106eSSergio Gonzalez Monroy struct rte_eth_txconf *txconf; 1926d299106eSSergio Gonzalez Monroy uint16_t nb_tx_queue, nb_rx_queue; 19274b978938SSivaprasad Tummala uint16_t tx_queueid, rx_queueid, queue; 19284b978938SSivaprasad Tummala uint32_t lcore_id; 1929d299106eSSergio Gonzalez Monroy int32_t ret, socket_id; 1930d299106eSSergio Gonzalez Monroy struct lcore_conf *qconf; 19316d13ea8eSOlivier Matz struct rte_ether_addr ethaddr; 19326162b8b3SShahaf Shuler struct rte_eth_conf local_port_conf = port_conf; 1933d8d51d4fSRahul Bhansali struct rte_eth_ip_reassembly_params reass_capa 
= {0}; 1934d04bb1c5SNithin Dabilpuram int ptype_supported; 1935d299106eSSergio Gonzalez Monroy 193603ad0e5cSIvan Ilchenko ret = rte_eth_dev_info_get(portid, &dev_info); 193703ad0e5cSIvan Ilchenko if (ret != 0) 193803ad0e5cSIvan Ilchenko rte_exit(EXIT_FAILURE, 193903ad0e5cSIvan Ilchenko "Error during getting device (port %u) info: %s\n", 194003ad0e5cSIvan Ilchenko portid, strerror(-ret)); 1941d299106eSSergio Gonzalez Monroy 19427be78d02SJosh Soref /* limit allowed HW offloads, as user requested */ 194303128be4SKonstantin Ananyev dev_info.rx_offload_capa &= dev_rx_offload; 194403128be4SKonstantin Ananyev dev_info.tx_offload_capa &= dev_tx_offload; 194503128be4SKonstantin Ananyev 1946d299106eSSergio Gonzalez Monroy printf("Configuring device port %u:\n", portid); 1947d299106eSSergio Gonzalez Monroy 194870febdcfSIgor Romanov ret = rte_eth_macaddr_get(portid, ðaddr); 194970febdcfSIgor Romanov if (ret != 0) 195070febdcfSIgor Romanov rte_exit(EXIT_FAILURE, 195170febdcfSIgor Romanov "Error getting MAC address (port %u): %s\n", 195270febdcfSIgor Romanov portid, rte_strerror(-ret)); 195370febdcfSIgor Romanov 1954fbe58580SRahul Bhansali rte_ether_addr_copy(ðaddr, ðaddr_tbl[portid].src); 19556eb3ba03SRahul Bhansali 1956fbe58580SRahul Bhansali rte_ether_addr_copy(ðaddr_tbl[portid].dst, 19576eb3ba03SRahul Bhansali (struct rte_ether_addr *)(val_eth + portid)); 1958fbe58580SRahul Bhansali 1959fbe58580SRahul Bhansali rte_ether_addr_copy(ðaddr_tbl[portid].src, 19606eb3ba03SRahul Bhansali (struct rte_ether_addr *)(val_eth + portid) + 1); 19616eb3ba03SRahul Bhansali 1962d299106eSSergio Gonzalez Monroy print_ethaddr("Address: ", ðaddr); 1963d299106eSSergio Gonzalez Monroy printf("\n"); 1964d299106eSSergio Gonzalez Monroy 1965d299106eSSergio Gonzalez Monroy nb_rx_queue = get_port_nb_rx_queues(portid); 1966d299106eSSergio Gonzalez Monroy nb_tx_queue = nb_lcores; 1967d299106eSSergio Gonzalez Monroy 1968d299106eSSergio Gonzalez Monroy if (nb_rx_queue > dev_info.max_rx_queues) 1969d299106eSSergio Gonzalez Monroy rte_exit(EXIT_FAILURE, "Error: queue %u not available " 1970d299106eSSergio Gonzalez Monroy "(max rx queue is %u)\n", 1971d299106eSSergio Gonzalez Monroy nb_rx_queue, dev_info.max_rx_queues); 1972d299106eSSergio Gonzalez Monroy 1973d299106eSSergio Gonzalez Monroy if (nb_tx_queue > dev_info.max_tx_queues) 1974d299106eSSergio Gonzalez Monroy rte_exit(EXIT_FAILURE, "Error: queue %u not available " 1975d299106eSSergio Gonzalez Monroy "(max tx queue is %u)\n", 1976d299106eSSergio Gonzalez Monroy nb_tx_queue, dev_info.max_tx_queues); 1977d299106eSSergio Gonzalez Monroy 1978d299106eSSergio Gonzalez Monroy printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n", 1979d299106eSSergio Gonzalez Monroy nb_rx_queue, nb_tx_queue); 1980d299106eSSergio Gonzalez Monroy 19811bb4a528SFerruh Yigit local_port_conf.rxmode.mtu = mtu_size; 1982b01d1cd2SKonstantin Ananyev 1983b01d1cd2SKonstantin Ananyev if (multi_seg_required()) { 1984295968d1SFerruh Yigit local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER; 1985295968d1SFerruh Yigit local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; 1986bbabfe6eSRadu Nicolau } 1987bbabfe6eSRadu Nicolau 198803128be4SKonstantin Ananyev local_port_conf.rxmode.offloads |= req_rx_offloads; 198903128be4SKonstantin Ananyev local_port_conf.txmode.offloads |= req_tx_offloads; 199003128be4SKonstantin Ananyev 199103128be4SKonstantin Ananyev /* Check that all required capabilities are supported */ 199203128be4SKonstantin Ananyev if ((local_port_conf.rxmode.offloads & 
dev_info.rx_offload_capa) != 199303128be4SKonstantin Ananyev local_port_conf.rxmode.offloads) 199403128be4SKonstantin Ananyev rte_exit(EXIT_FAILURE, 199503128be4SKonstantin Ananyev "Error: port %u required RX offloads: 0x%" PRIx64 19967be78d02SJosh Soref ", available RX offloads: 0x%" PRIx64 "\n", 199703128be4SKonstantin Ananyev portid, local_port_conf.rxmode.offloads, 199803128be4SKonstantin Ananyev dev_info.rx_offload_capa); 199903128be4SKonstantin Ananyev 200003128be4SKonstantin Ananyev if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) != 200103128be4SKonstantin Ananyev local_port_conf.txmode.offloads) 200203128be4SKonstantin Ananyev rte_exit(EXIT_FAILURE, 200303128be4SKonstantin Ananyev "Error: port %u required TX offloads: 0x%" PRIx64 20047be78d02SJosh Soref ", available TX offloads: 0x%" PRIx64 "\n", 200503128be4SKonstantin Ananyev portid, local_port_conf.txmode.offloads, 200603128be4SKonstantin Ananyev dev_info.tx_offload_capa); 200703128be4SKonstantin Ananyev 2008295968d1SFerruh Yigit if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) 20096162b8b3SShahaf Shuler local_port_conf.txmode.offloads |= 2010295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 20114f5701f2SFerruh Yigit 20127be78d02SJosh Soref printf("port %u configuring rx_offloads=0x%" PRIx64 201303128be4SKonstantin Ananyev ", tx_offloads=0x%" PRIx64 "\n", 201403128be4SKonstantin Ananyev portid, local_port_conf.rxmode.offloads, 201503128be4SKonstantin Ananyev local_port_conf.txmode.offloads); 201603128be4SKonstantin Ananyev 20174f5701f2SFerruh Yigit local_port_conf.rx_adv_conf.rss_conf.rss_hf &= 20184f5701f2SFerruh Yigit dev_info.flow_type_rss_offloads; 20194f5701f2SFerruh Yigit if (local_port_conf.rx_adv_conf.rss_conf.rss_hf != 20204f5701f2SFerruh Yigit port_conf.rx_adv_conf.rss_conf.rss_hf) { 20214f5701f2SFerruh Yigit printf("Port %u modified RSS hash function based on hardware support," 20224f5701f2SFerruh Yigit "requested:%#"PRIx64" configured:%#"PRIx64"\n", 20234f5701f2SFerruh Yigit portid, 20244f5701f2SFerruh Yigit port_conf.rx_adv_conf.rss_conf.rss_hf, 20254f5701f2SFerruh Yigit local_port_conf.rx_adv_conf.rss_conf.rss_hf); 20264f5701f2SFerruh Yigit } 20274f5701f2SFerruh Yigit 2028d299106eSSergio Gonzalez Monroy ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue, 20296162b8b3SShahaf Shuler &local_port_conf); 2030d299106eSSergio Gonzalez Monroy if (ret < 0) 2031d299106eSSergio Gonzalez Monroy rte_exit(EXIT_FAILURE, "Cannot configure device: " 2032d299106eSSergio Gonzalez Monroy "err=%d, port=%d\n", ret, portid); 2033d299106eSSergio Gonzalez Monroy 203460efb44fSRoman Zhukov ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd); 203560efb44fSRoman Zhukov if (ret < 0) 203660efb44fSRoman Zhukov rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: " 203760efb44fSRoman Zhukov "err=%d, port=%d\n", ret, portid); 203860efb44fSRoman Zhukov 2039d04bb1c5SNithin Dabilpuram /* Check if required ptypes are supported */ 2040d04bb1c5SNithin Dabilpuram ptype_supported = check_ptype(portid); 2041d04bb1c5SNithin Dabilpuram if (!ptype_supported) 2042d04bb1c5SNithin Dabilpuram printf("Port %d: softly parse packet type info\n", portid); 2043d04bb1c5SNithin Dabilpuram 2044d299106eSSergio Gonzalez Monroy /* init one TX queue per lcore */ 2045d299106eSSergio Gonzalez Monroy tx_queueid = 0; 2046d299106eSSergio Gonzalez Monroy for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { 2047d299106eSSergio Gonzalez Monroy if (rte_lcore_is_enabled(lcore_id) == 0) 2048d299106eSSergio 
Gonzalez Monroy continue; 2049d299106eSSergio Gonzalez Monroy 2050d299106eSSergio Gonzalez Monroy if (numa_on) 2051d299106eSSergio Gonzalez Monroy socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id); 2052d299106eSSergio Gonzalez Monroy else 2053d299106eSSergio Gonzalez Monroy socket_id = 0; 2054d299106eSSergio Gonzalez Monroy 2055d299106eSSergio Gonzalez Monroy /* init TX queue */ 2056d299106eSSergio Gonzalez Monroy printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id); 2057d299106eSSergio Gonzalez Monroy 2058d299106eSSergio Gonzalez Monroy txconf = &dev_info.default_txconf; 20596162b8b3SShahaf Shuler txconf->offloads = local_port_conf.txmode.offloads; 2060d299106eSSergio Gonzalez Monroy 2061d299106eSSergio Gonzalez Monroy ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd, 2062d299106eSSergio Gonzalez Monroy socket_id, txconf); 2063d299106eSSergio Gonzalez Monroy if (ret < 0) 2064d299106eSSergio Gonzalez Monroy rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: " 2065d299106eSSergio Gonzalez Monroy "err=%d, port=%d\n", ret, portid); 2066d299106eSSergio Gonzalez Monroy 2067d299106eSSergio Gonzalez Monroy qconf = &lcore_conf[lcore_id]; 2068d299106eSSergio Gonzalez Monroy qconf->tx_queue_id[portid] = tx_queueid; 206903128be4SKonstantin Ananyev 2070d299106eSSergio Gonzalez Monroy tx_queueid++; 2071d299106eSSergio Gonzalez Monroy 2072d299106eSSergio Gonzalez Monroy /* init RX queues */ 2073d299106eSSergio Gonzalez Monroy for (queue = 0; queue < qconf->nb_rx_queue; ++queue) { 20746162b8b3SShahaf Shuler struct rte_eth_rxconf rxq_conf; 207548a39871SNithin Dabilpuram struct rte_mempool *pool; 20766162b8b3SShahaf Shuler 2077d299106eSSergio Gonzalez Monroy if (portid != qconf->rx_queue_list[queue].port_id) 2078d299106eSSergio Gonzalez Monroy continue; 2079d299106eSSergio Gonzalez Monroy 2080d299106eSSergio Gonzalez Monroy rx_queueid = qconf->rx_queue_list[queue].queue_id; 2081d299106eSSergio Gonzalez Monroy 2082d299106eSSergio Gonzalez Monroy printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid, 2083d299106eSSergio Gonzalez Monroy socket_id); 2084d299106eSSergio Gonzalez Monroy 20856162b8b3SShahaf Shuler rxq_conf = dev_info.default_rxconf; 20866162b8b3SShahaf Shuler rxq_conf.offloads = local_port_conf.rxmode.offloads; 208748a39871SNithin Dabilpuram 208848a39871SNithin Dabilpuram if (per_port_pool) 208948a39871SNithin Dabilpuram pool = socket_ctx[socket_id].mbuf_pool[portid]; 209048a39871SNithin Dabilpuram else 209148a39871SNithin Dabilpuram pool = socket_ctx[socket_id].mbuf_pool[0]; 209248a39871SNithin Dabilpuram 2093d299106eSSergio Gonzalez Monroy ret = rte_eth_rx_queue_setup(portid, rx_queueid, 209448a39871SNithin Dabilpuram nb_rxd, socket_id, &rxq_conf, pool); 2095d299106eSSergio Gonzalez Monroy if (ret < 0) 2096d299106eSSergio Gonzalez Monroy rte_exit(EXIT_FAILURE, 2097d299106eSSergio Gonzalez Monroy "rte_eth_rx_queue_setup: err=%d, " 2098d299106eSSergio Gonzalez Monroy "port=%d\n", ret, portid); 2099d04bb1c5SNithin Dabilpuram 2100d04bb1c5SNithin Dabilpuram /* Register Rx callback if ptypes are not supported */ 2101d04bb1c5SNithin Dabilpuram if (!ptype_supported && 2102179e9b44SShihong Wang !rte_eth_add_rx_callback(portid, rx_queueid, 2103d04bb1c5SNithin Dabilpuram parse_ptype_cb, NULL)) { 2104d04bb1c5SNithin Dabilpuram printf("Failed to add rx callback: port=%d, " 2105179e9b44SShihong Wang "rx_queueid=%d\n", portid, rx_queueid); 2106d04bb1c5SNithin Dabilpuram } 2107d04bb1c5SNithin Dabilpuram 2108d04bb1c5SNithin Dabilpuram 2109d299106eSSergio Gonzalez Monroy } 2110d299106eSSergio 
Gonzalez Monroy } 2111d8d51d4fSRahul Bhansali 2112d8d51d4fSRahul Bhansali if (hw_reassembly) { 2113d8d51d4fSRahul Bhansali rte_eth_ip_reassembly_capability_get(portid, &reass_capa); 2114d8d51d4fSRahul Bhansali reass_capa.timeout_ms = frag_ttl_ns; 2115d8d51d4fSRahul Bhansali rte_eth_ip_reassembly_conf_set(portid, &reass_capa); 2116d8d51d4fSRahul Bhansali } 2117d299106eSSergio Gonzalez Monroy printf("\n"); 2118d299106eSSergio Gonzalez Monroy } 2119d299106eSSergio Gonzalez Monroy 21203a690d5aSBernard Iremonger static size_t 21213a690d5aSBernard Iremonger max_session_size(void) 21223a690d5aSBernard Iremonger { 21233a690d5aSBernard Iremonger size_t max_sz, sz; 21243a690d5aSBernard Iremonger void *sec_ctx; 21253a690d5aSBernard Iremonger int16_t cdev_id, port_id, n; 21263a690d5aSBernard Iremonger 21273a690d5aSBernard Iremonger max_sz = 0; 21283a690d5aSBernard Iremonger n = rte_cryptodev_count(); 21293a690d5aSBernard Iremonger for (cdev_id = 0; cdev_id != n; cdev_id++) { 21303a690d5aSBernard Iremonger sz = rte_cryptodev_sym_get_private_session_size(cdev_id); 21313a690d5aSBernard Iremonger if (sz > max_sz) 21323a690d5aSBernard Iremonger max_sz = sz; 21333a690d5aSBernard Iremonger /* 21343a690d5aSBernard Iremonger * If crypto device is security capable, need to check the 21353a690d5aSBernard Iremonger * size of security session as well. 21363a690d5aSBernard Iremonger */ 21373a690d5aSBernard Iremonger 21383a690d5aSBernard Iremonger /* Get security context of the crypto device */ 21393a690d5aSBernard Iremonger sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id); 21403a690d5aSBernard Iremonger if (sec_ctx == NULL) 21413a690d5aSBernard Iremonger continue; 21423a690d5aSBernard Iremonger 21433a690d5aSBernard Iremonger /* Get size of security session */ 21443a690d5aSBernard Iremonger sz = rte_security_session_get_size(sec_ctx); 21453a690d5aSBernard Iremonger if (sz > max_sz) 21463a690d5aSBernard Iremonger max_sz = sz; 21473a690d5aSBernard Iremonger } 21483a690d5aSBernard Iremonger 21493a690d5aSBernard Iremonger RTE_ETH_FOREACH_DEV(port_id) { 21503a690d5aSBernard Iremonger if ((enabled_port_mask & (1 << port_id)) == 0) 21513a690d5aSBernard Iremonger continue; 21523a690d5aSBernard Iremonger 21533a690d5aSBernard Iremonger sec_ctx = rte_eth_dev_get_sec_ctx(port_id); 21543a690d5aSBernard Iremonger if (sec_ctx == NULL) 21553a690d5aSBernard Iremonger continue; 21563a690d5aSBernard Iremonger 21573a690d5aSBernard Iremonger sz = rte_security_session_get_size(sec_ctx); 21583a690d5aSBernard Iremonger if (sz > max_sz) 21593a690d5aSBernard Iremonger max_sz = sz; 21603a690d5aSBernard Iremonger } 21613a690d5aSBernard Iremonger 21623a690d5aSBernard Iremonger return max_sz; 21633a690d5aSBernard Iremonger } 21643a690d5aSBernard Iremonger 21653a690d5aSBernard Iremonger static void 21663a690d5aSBernard Iremonger session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz) 21673a690d5aSBernard Iremonger { 21683a690d5aSBernard Iremonger char mp_name[RTE_MEMPOOL_NAMESIZE]; 21693a690d5aSBernard Iremonger struct rte_mempool *sess_mp; 217004fa1906SVladimir Medvedkin uint32_t nb_sess; 21713a690d5aSBernard Iremonger 21723a690d5aSBernard Iremonger snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, 21733a690d5aSBernard Iremonger "sess_mp_%u", socket_id); 217404fa1906SVladimir Medvedkin nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ * 2175e30b2833SAkhil Goyal rte_lcore_count()); 217657ddbf7eSVladimir Medvedkin nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ * 217757ddbf7eSVladimir Medvedkin CDEV_MP_CACHE_MULTIPLIER); 
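	/*
	 * Sizing rationale for the pool created below: nb_sess is the number
	 * of crypto sessions the SA configuration is expected to need
	 * (get_nb_crypto_sessions()) plus CDEV_MP_CACHE_SZ entries of cache
	 * headroom per lcore, clamped to a minimum so the per-lcore mempool
	 * caches can be populated. One such pool is created per NUMA socket
	 * and shared by the crypto devices allocating sessions on that socket.
	 */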
21783a690d5aSBernard Iremonger 	sess_mp = rte_cryptodev_sym_session_pool_create(
21790dbe550aSVolodymyr Fialko 			mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ,
21800dbe550aSVolodymyr Fialko 			0, socket_id);
21813a690d5aSBernard Iremonger 	ctx->session_pool = sess_mp;
21823a690d5aSBernard Iremonger 
21833a690d5aSBernard Iremonger 	if (ctx->session_pool == NULL)
21843a690d5aSBernard Iremonger 		rte_exit(EXIT_FAILURE,
21853a690d5aSBernard Iremonger 			"Cannot init session pool on socket %d\n", socket_id);
21863a690d5aSBernard Iremonger 	else
21873a690d5aSBernard Iremonger 		printf("Allocated session pool on socket %d\n", socket_id);
21883a690d5aSBernard Iremonger }
21893a690d5aSBernard Iremonger 
21903a690d5aSBernard Iremonger static void
219148a39871SNithin Dabilpuram pool_init(struct socket_ctx *ctx, int32_t socket_id, int portid,
219248a39871SNithin Dabilpuram 	uint32_t nb_mbuf)
2193d299106eSSergio Gonzalez Monroy {
2194d299106eSSergio Gonzalez Monroy 	char s[64];
2195b01d1cd2SKonstantin Ananyev 	int32_t ms;
2196d299106eSSergio Gonzalez Monroy 
219748a39871SNithin Dabilpuram 
219848a39871SNithin Dabilpuram 	/* Skip if the mbuf pool was already initialised by an earlier pool_init() call */
219948a39871SNithin Dabilpuram 	if (socket_ctx[socket_id].mbuf_pool[portid])
220048a39871SNithin Dabilpuram 		return;
220148a39871SNithin Dabilpuram 
220248a39871SNithin Dabilpuram 	snprintf(s, sizeof(s), "mbuf_pool_%d_%d", socket_id, portid);
220348a39871SNithin Dabilpuram 	ctx->mbuf_pool[portid] = rte_pktmbuf_pool_create(s, nb_mbuf,
220448a39871SNithin Dabilpuram 			MEMPOOL_CACHE_SIZE,
220548a39871SNithin Dabilpuram 			ipsec_metadata_size(),
220648a39871SNithin Dabilpuram 			frame_buf_size,
220748a39871SNithin Dabilpuram 			socket_id);
2208b01d1cd2SKonstantin Ananyev 
2209b01d1cd2SKonstantin Ananyev 	/*
2210b01d1cd2SKonstantin Ananyev 	 * if multi-segment support is enabled, then create a pool
221148a39871SNithin Dabilpuram 	 * for indirect mbufs. This is not per-port but global.
2212b01d1cd2SKonstantin Ananyev */ 2213b01d1cd2SKonstantin Ananyev ms = multi_seg_required(); 221448a39871SNithin Dabilpuram if (ms != 0 && !ctx->mbuf_pool_indir) { 2215b01d1cd2SKonstantin Ananyev snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id); 2216b01d1cd2SKonstantin Ananyev ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf, 2217b01d1cd2SKonstantin Ananyev MEMPOOL_CACHE_SIZE, 0, 0, socket_id); 2218b01d1cd2SKonstantin Ananyev } 2219b01d1cd2SKonstantin Ananyev 222048a39871SNithin Dabilpuram if (ctx->mbuf_pool[portid] == NULL || 222148a39871SNithin Dabilpuram (ms != 0 && ctx->mbuf_pool_indir == NULL)) 2222d299106eSSergio Gonzalez Monroy rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n", 2223d299106eSSergio Gonzalez Monroy socket_id); 2224d299106eSSergio Gonzalez Monroy else 2225d299106eSSergio Gonzalez Monroy printf("Allocated mbuf pool on socket %d\n", socket_id); 2226d299106eSSergio Gonzalez Monroy } 2227d299106eSSergio Gonzalez Monroy 2228fa4de2ccSAnoob Joseph static int 2229fa4de2ccSAnoob Joseph inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type, 2230fa4de2ccSAnoob Joseph void *param, void *ret_param) 2231fa4de2ccSAnoob Joseph { 2232fa4de2ccSAnoob Joseph uint64_t md; 2233fa4de2ccSAnoob Joseph struct rte_eth_event_ipsec_desc *event_desc = NULL; 2234fa4de2ccSAnoob Joseph 2235fa4de2ccSAnoob Joseph RTE_SET_USED(param); 223668d25915SSrujana Challa RTE_SET_USED(port_id); 2237fa4de2ccSAnoob Joseph 2238fa4de2ccSAnoob Joseph if (type != RTE_ETH_EVENT_IPSEC) 2239fa4de2ccSAnoob Joseph return -1; 2240fa4de2ccSAnoob Joseph 2241fa4de2ccSAnoob Joseph event_desc = ret_param; 2242fa4de2ccSAnoob Joseph if (event_desc == NULL) { 2243fa4de2ccSAnoob Joseph printf("Event descriptor not set\n"); 2244fa4de2ccSAnoob Joseph return -1; 2245fa4de2ccSAnoob Joseph } 2246fa4de2ccSAnoob Joseph 2247fa4de2ccSAnoob Joseph md = event_desc->metadata; 2248fa4de2ccSAnoob Joseph 224968d25915SSrujana Challa if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW) { 225068d25915SSrujana Challa if (md == 0) 225168d25915SSrujana Challa return -1; 225268d25915SSrujana Challa } 2253fa4de2ccSAnoob Joseph else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) { 2254fa4de2ccSAnoob Joseph printf("Invalid IPsec event reported\n"); 2255fa4de2ccSAnoob Joseph return -1; 2256fa4de2ccSAnoob Joseph } 2257fa4de2ccSAnoob Joseph 2258fa4de2ccSAnoob Joseph return -1; 2259fa4de2ccSAnoob Joseph } 2260fa4de2ccSAnoob Joseph 2261fe105decSRadu Nicolau static int 2262fe105decSRadu Nicolau ethdev_reset_event_callback(uint16_t port_id, 2263fe105decSRadu Nicolau enum rte_eth_event_type type, 2264fe105decSRadu Nicolau void *param __rte_unused, void *ret_param __rte_unused) 2265fe105decSRadu Nicolau { 2266fe105decSRadu Nicolau printf("Reset Event on port id %d type %d\n", port_id, type); 2267fe105decSRadu Nicolau printf("Force quit application"); 2268fe105decSRadu Nicolau force_quit = true; 2269fe105decSRadu Nicolau return 0; 2270fe105decSRadu Nicolau } 2271fe105decSRadu Nicolau 2272b01d1cd2SKonstantin Ananyev static uint16_t 2273b01d1cd2SKonstantin Ananyev rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue, 2274b01d1cd2SKonstantin Ananyev struct rte_mbuf *pkt[], uint16_t nb_pkts, 2275b01d1cd2SKonstantin Ananyev __rte_unused uint16_t max_pkts, void *user_param) 2276b01d1cd2SKonstantin Ananyev { 2277b01d1cd2SKonstantin Ananyev uint64_t tm; 2278b01d1cd2SKonstantin Ananyev uint32_t i, k; 2279b01d1cd2SKonstantin Ananyev struct lcore_conf *lc; 2280b01d1cd2SKonstantin Ananyev struct rte_mbuf 
*mb; 2281b01d1cd2SKonstantin Ananyev struct rte_ether_hdr *eth; 2282b01d1cd2SKonstantin Ananyev 2283b01d1cd2SKonstantin Ananyev lc = user_param; 2284b01d1cd2SKonstantin Ananyev k = 0; 2285b01d1cd2SKonstantin Ananyev tm = 0; 2286b01d1cd2SKonstantin Ananyev 2287b01d1cd2SKonstantin Ananyev for (i = 0; i != nb_pkts; i++) { 2288b01d1cd2SKonstantin Ananyev 2289b01d1cd2SKonstantin Ananyev mb = pkt[i]; 2290b01d1cd2SKonstantin Ananyev eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *); 2291b01d1cd2SKonstantin Ananyev if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { 2292b01d1cd2SKonstantin Ananyev 2293b01d1cd2SKonstantin Ananyev struct rte_ipv4_hdr *iph; 2294b01d1cd2SKonstantin Ananyev 2295b01d1cd2SKonstantin Ananyev iph = (struct rte_ipv4_hdr *)(eth + 1); 2296b01d1cd2SKonstantin Ananyev if (rte_ipv4_frag_pkt_is_fragmented(iph)) { 2297b01d1cd2SKonstantin Ananyev 2298b01d1cd2SKonstantin Ananyev mb->l2_len = sizeof(*eth); 2299b01d1cd2SKonstantin Ananyev mb->l3_len = sizeof(*iph); 2300b01d1cd2SKonstantin Ananyev tm = (tm != 0) ? tm : rte_rdtsc(); 2301b01d1cd2SKonstantin Ananyev mb = rte_ipv4_frag_reassemble_packet( 2302b01d1cd2SKonstantin Ananyev lc->frag.tbl, &lc->frag.dr, 2303b01d1cd2SKonstantin Ananyev mb, tm, iph); 2304b01d1cd2SKonstantin Ananyev 2305b01d1cd2SKonstantin Ananyev if (mb != NULL) { 2306b01d1cd2SKonstantin Ananyev /* fix ip cksum after reassemble. */ 2307b01d1cd2SKonstantin Ananyev iph = rte_pktmbuf_mtod_offset(mb, 2308b01d1cd2SKonstantin Ananyev struct rte_ipv4_hdr *, 2309b01d1cd2SKonstantin Ananyev mb->l2_len); 2310b01d1cd2SKonstantin Ananyev iph->hdr_checksum = 0; 2311b01d1cd2SKonstantin Ananyev iph->hdr_checksum = rte_ipv4_cksum(iph); 2312b01d1cd2SKonstantin Ananyev } 2313b01d1cd2SKonstantin Ananyev } 2314b01d1cd2SKonstantin Ananyev } else if (eth->ether_type == 2315b01d1cd2SKonstantin Ananyev rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { 2316b01d1cd2SKonstantin Ananyev 2317b01d1cd2SKonstantin Ananyev struct rte_ipv6_hdr *iph; 2318b7fc82ecSKonstantin Ananyev struct rte_ipv6_fragment_ext *fh; 2319b01d1cd2SKonstantin Ananyev 2320b01d1cd2SKonstantin Ananyev iph = (struct rte_ipv6_hdr *)(eth + 1); 2321b01d1cd2SKonstantin Ananyev fh = rte_ipv6_frag_get_ipv6_fragment_header(iph); 2322b01d1cd2SKonstantin Ananyev if (fh != NULL) { 2323b01d1cd2SKonstantin Ananyev mb->l2_len = sizeof(*eth); 2324b01d1cd2SKonstantin Ananyev mb->l3_len = (uintptr_t)fh - (uintptr_t)iph + 2325b01d1cd2SKonstantin Ananyev sizeof(*fh); 2326b01d1cd2SKonstantin Ananyev tm = (tm != 0) ? tm : rte_rdtsc(); 2327b01d1cd2SKonstantin Ananyev mb = rte_ipv6_frag_reassemble_packet( 2328b01d1cd2SKonstantin Ananyev lc->frag.tbl, &lc->frag.dr, 2329b01d1cd2SKonstantin Ananyev mb, tm, iph, fh); 2330b01d1cd2SKonstantin Ananyev if (mb != NULL) 2331b01d1cd2SKonstantin Ananyev /* fix l3_len after reassemble. 
*/ 2332b01d1cd2SKonstantin Ananyev mb->l3_len = mb->l3_len - sizeof(*fh); 2333b01d1cd2SKonstantin Ananyev } 2334b01d1cd2SKonstantin Ananyev } 2335b01d1cd2SKonstantin Ananyev 2336b01d1cd2SKonstantin Ananyev pkt[k] = mb; 2337b01d1cd2SKonstantin Ananyev k += (mb != NULL); 2338b01d1cd2SKonstantin Ananyev } 2339b01d1cd2SKonstantin Ananyev 2340b01d1cd2SKonstantin Ananyev /* some fragments were encountered, drain death row */ 2341b01d1cd2SKonstantin Ananyev if (tm != 0) 2342b01d1cd2SKonstantin Ananyev rte_ip_frag_free_death_row(&lc->frag.dr, 0); 2343b01d1cd2SKonstantin Ananyev 2344b01d1cd2SKonstantin Ananyev return k; 2345b01d1cd2SKonstantin Ananyev } 2346b01d1cd2SKonstantin Ananyev 2347b01d1cd2SKonstantin Ananyev 2348b01d1cd2SKonstantin Ananyev static int 2349b01d1cd2SKonstantin Ananyev reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid) 2350b01d1cd2SKonstantin Ananyev { 2351b01d1cd2SKonstantin Ananyev int32_t sid; 2352b01d1cd2SKonstantin Ananyev uint32_t i; 2353b01d1cd2SKonstantin Ananyev uint64_t frag_cycles; 2354b01d1cd2SKonstantin Ananyev const struct lcore_rx_queue *rxq; 2355b01d1cd2SKonstantin Ananyev const struct rte_eth_rxtx_callback *cb; 2356b01d1cd2SKonstantin Ananyev 2357b01d1cd2SKonstantin Ananyev /* create fragment table */ 2358b01d1cd2SKonstantin Ananyev sid = rte_lcore_to_socket_id(cid); 2359bba1db35SMarcin Smoczynski frag_cycles = (rte_get_tsc_hz() + NS_PER_S - 1) / 2360bba1db35SMarcin Smoczynski NS_PER_S * frag_ttl_ns; 2361b01d1cd2SKonstantin Ananyev 2362b01d1cd2SKonstantin Ananyev lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz, 2363b01d1cd2SKonstantin Ananyev FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid); 2364b01d1cd2SKonstantin Ananyev if (lc->frag.tbl == NULL) { 2365b01d1cd2SKonstantin Ananyev printf("%s(%u): failed to create fragment table of size: %u, " 2366b01d1cd2SKonstantin Ananyev "error code: %d\n", 2367b01d1cd2SKonstantin Ananyev __func__, cid, frag_tbl_sz, rte_errno); 2368b01d1cd2SKonstantin Ananyev return -ENOMEM; 2369b01d1cd2SKonstantin Ananyev } 2370b01d1cd2SKonstantin Ananyev 2371b01d1cd2SKonstantin Ananyev /* setup reassemble RX callbacks for all queues */ 2372b01d1cd2SKonstantin Ananyev for (i = 0; i != lc->nb_rx_queue; i++) { 2373b01d1cd2SKonstantin Ananyev 2374b01d1cd2SKonstantin Ananyev rxq = lc->rx_queue_list + i; 2375b01d1cd2SKonstantin Ananyev cb = rte_eth_add_rx_callback(rxq->port_id, rxq->queue_id, 2376b01d1cd2SKonstantin Ananyev rx_callback, lc); 2377b01d1cd2SKonstantin Ananyev if (cb == NULL) { 2378b01d1cd2SKonstantin Ananyev printf("%s(%u): failed to install RX callback for " 2379b01d1cd2SKonstantin Ananyev "portid=%u, queueid=%u, error code: %d\n", 2380b01d1cd2SKonstantin Ananyev __func__, cid, 2381b01d1cd2SKonstantin Ananyev rxq->port_id, rxq->queue_id, rte_errno); 2382b01d1cd2SKonstantin Ananyev return -ENOMEM; 2383b01d1cd2SKonstantin Ananyev } 2384b01d1cd2SKonstantin Ananyev } 2385b01d1cd2SKonstantin Ananyev 2386b01d1cd2SKonstantin Ananyev return 0; 2387b01d1cd2SKonstantin Ananyev } 2388b01d1cd2SKonstantin Ananyev 2389b01d1cd2SKonstantin Ananyev static int 2390b01d1cd2SKonstantin Ananyev reassemble_init(void) 2391b01d1cd2SKonstantin Ananyev { 2392b01d1cd2SKonstantin Ananyev int32_t rc; 2393b01d1cd2SKonstantin Ananyev uint32_t i, lc; 2394b01d1cd2SKonstantin Ananyev 2395b01d1cd2SKonstantin Ananyev rc = 0; 2396b01d1cd2SKonstantin Ananyev for (i = 0; i != nb_lcore_params; i++) { 2397b01d1cd2SKonstantin Ananyev lc = lcore_params[i].lcore_id; 2398b01d1cd2SKonstantin Ananyev rc = reassemble_lcore_init(lcore_conf + lc, lc); 
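		/*
		 * reassemble_lcore_init() returns -ENOMEM if either the
		 * fragment table or one of the per-queue RX callbacks could
		 * not be created; stop at the first failing lcore.
		 */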
2399b01d1cd2SKonstantin Ananyev if (rc != 0) 2400b01d1cd2SKonstantin Ananyev break; 2401b01d1cd2SKonstantin Ananyev } 2402b01d1cd2SKonstantin Ananyev 2403b01d1cd2SKonstantin Ananyev return rc; 2404b01d1cd2SKonstantin Ananyev } 2405b01d1cd2SKonstantin Ananyev 2406513f192bSAnkur Dwivedi static void 2407513f192bSAnkur Dwivedi create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads) 2408513f192bSAnkur Dwivedi { 2409513f192bSAnkur Dwivedi struct rte_flow_action action[2]; 2410513f192bSAnkur Dwivedi struct rte_flow_item pattern[2]; 2411513f192bSAnkur Dwivedi struct rte_flow_attr attr = {0}; 2412513f192bSAnkur Dwivedi struct rte_flow_error err; 2413513f192bSAnkur Dwivedi struct rte_flow *flow; 2414513f192bSAnkur Dwivedi int ret; 2415513f192bSAnkur Dwivedi 2416295968d1SFerruh Yigit if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)) 2417513f192bSAnkur Dwivedi return; 2418513f192bSAnkur Dwivedi 2419513f192bSAnkur Dwivedi /* Add the default rte_flow to enable SECURITY for all ESP packets */ 2420513f192bSAnkur Dwivedi 2421513f192bSAnkur Dwivedi pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP; 2422513f192bSAnkur Dwivedi pattern[0].spec = NULL; 2423513f192bSAnkur Dwivedi pattern[0].mask = NULL; 2424513f192bSAnkur Dwivedi pattern[0].last = NULL; 2425513f192bSAnkur Dwivedi pattern[1].type = RTE_FLOW_ITEM_TYPE_END; 2426513f192bSAnkur Dwivedi 2427513f192bSAnkur Dwivedi action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY; 2428513f192bSAnkur Dwivedi action[0].conf = NULL; 2429513f192bSAnkur Dwivedi action[1].type = RTE_FLOW_ACTION_TYPE_END; 2430513f192bSAnkur Dwivedi action[1].conf = NULL; 2431513f192bSAnkur Dwivedi 2432513f192bSAnkur Dwivedi attr.ingress = 1; 2433513f192bSAnkur Dwivedi 2434513f192bSAnkur Dwivedi ret = rte_flow_validate(port_id, &attr, pattern, action, &err); 2435513f192bSAnkur Dwivedi if (ret) 2436513f192bSAnkur Dwivedi return; 2437513f192bSAnkur Dwivedi 2438513f192bSAnkur Dwivedi flow = rte_flow_create(port_id, &attr, pattern, action, &err); 2439513f192bSAnkur Dwivedi if (flow == NULL) 2440513f192bSAnkur Dwivedi return; 2441513f192bSAnkur Dwivedi 2442513f192bSAnkur Dwivedi flow_info_tbl[port_id].rx_def_flow = flow; 2443513f192bSAnkur Dwivedi RTE_LOG(INFO, IPSEC, 2444513f192bSAnkur Dwivedi "Created default flow enabling SECURITY for all ESP traffic on port %d\n", 2445513f192bSAnkur Dwivedi port_id); 2446513f192bSAnkur Dwivedi } 2447513f192bSAnkur Dwivedi 244865e3a202SLukasz Bartosik static void 244965e3a202SLukasz Bartosik signal_handler(int signum) 245065e3a202SLukasz Bartosik { 245165e3a202SLukasz Bartosik if (signum == SIGINT || signum == SIGTERM) { 245265e3a202SLukasz Bartosik printf("\n\nSignal %d received, preparing to exit...\n", 245365e3a202SLukasz Bartosik signum); 245465e3a202SLukasz Bartosik force_quit = true; 245565e3a202SLukasz Bartosik } 245665e3a202SLukasz Bartosik } 245765e3a202SLukasz Bartosik 245865e3a202SLukasz Bartosik static void 24590dbe550aSVolodymyr Fialko ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa, 24600dbe550aSVolodymyr Fialko struct eventmode_conf *em_conf) 246165e3a202SLukasz Bartosik { 246265e3a202SLukasz Bartosik struct rte_ipsec_session *ips; 246365e3a202SLukasz Bartosik int32_t i; 246465e3a202SLukasz Bartosik 246565e3a202SLukasz Bartosik if (!sa || !nb_sa) 246665e3a202SLukasz Bartosik return; 246765e3a202SLukasz Bartosik 246865e3a202SLukasz Bartosik for (i = 0; i < nb_sa; i++) { 246965e3a202SLukasz Bartosik ips = ipsec_get_primary_session(&sa[i]); 24700dbe550aSVolodymyr Fialko if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) 
24710dbe550aSVolodymyr Fialko em_conf->enable_event_crypto_adapter = true; 24720dbe550aSVolodymyr Fialko else if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) 24730dbe550aSVolodymyr Fialko rte_exit(EXIT_FAILURE, "Event mode supports inline " 24740dbe550aSVolodymyr Fialko "and lookaside protocol sessions\n"); 247565e3a202SLukasz Bartosik } 247665e3a202SLukasz Bartosik 247765e3a202SLukasz Bartosik } 247865e3a202SLukasz Bartosik 247965e3a202SLukasz Bartosik static int32_t 248065e3a202SLukasz Bartosik check_event_mode_params(struct eh_conf *eh_conf) 248165e3a202SLukasz Bartosik { 248265e3a202SLukasz Bartosik struct eventmode_conf *em_conf = NULL; 248365e3a202SLukasz Bartosik struct lcore_params *params; 248465e3a202SLukasz Bartosik uint16_t portid; 248565e3a202SLukasz Bartosik 248665e3a202SLukasz Bartosik if (!eh_conf || !eh_conf->mode_params) 248765e3a202SLukasz Bartosik return -EINVAL; 248865e3a202SLukasz Bartosik 248965e3a202SLukasz Bartosik /* Get eventmode conf */ 249065e3a202SLukasz Bartosik em_conf = eh_conf->mode_params; 249165e3a202SLukasz Bartosik 249265e3a202SLukasz Bartosik if (eh_conf->mode == EH_PKT_TRANSFER_MODE_POLL && 249365e3a202SLukasz Bartosik em_conf->ext_params.sched_type != SCHED_TYPE_NOT_SET) { 249465e3a202SLukasz Bartosik printf("error: option --event-schedule-type applies only to " 249565e3a202SLukasz Bartosik "event mode\n"); 249665e3a202SLukasz Bartosik return -EINVAL; 249765e3a202SLukasz Bartosik } 249865e3a202SLukasz Bartosik 249965e3a202SLukasz Bartosik if (eh_conf->mode != EH_PKT_TRANSFER_MODE_EVENT) 250065e3a202SLukasz Bartosik return 0; 250165e3a202SLukasz Bartosik 250265e3a202SLukasz Bartosik /* Set schedule type to ORDERED if it wasn't explicitly set by user */ 250365e3a202SLukasz Bartosik if (em_conf->ext_params.sched_type == SCHED_TYPE_NOT_SET) 250465e3a202SLukasz Bartosik em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED; 250565e3a202SLukasz Bartosik 250665e3a202SLukasz Bartosik /* 25070dbe550aSVolodymyr Fialko * Event mode currently supports inline and lookaside protocol 25080dbe550aSVolodymyr Fialko * sessions. If there are other types of sessions configured then exit 25090dbe550aSVolodymyr Fialko * with error. 
251065e3a202SLukasz Bartosik */ 25110dbe550aSVolodymyr Fialko ev_mode_sess_verify(sa_in, nb_sa_in, em_conf); 25120dbe550aSVolodymyr Fialko ev_mode_sess_verify(sa_out, nb_sa_out, em_conf); 251365e3a202SLukasz Bartosik 251465e3a202SLukasz Bartosik /* Option --config does not apply to event mode */ 251565e3a202SLukasz Bartosik if (nb_lcore_params > 0) { 251665e3a202SLukasz Bartosik printf("error: option --config applies only to poll mode\n"); 251765e3a202SLukasz Bartosik return -EINVAL; 251865e3a202SLukasz Bartosik } 251965e3a202SLukasz Bartosik 252065e3a202SLukasz Bartosik /* 252165e3a202SLukasz Bartosik * In order to use the same port_init routine for both poll and event 252265e3a202SLukasz Bartosik * modes initialize lcore_params with one queue for each eth port 252365e3a202SLukasz Bartosik */ 252465e3a202SLukasz Bartosik lcore_params = lcore_params_array; 252565e3a202SLukasz Bartosik RTE_ETH_FOREACH_DEV(portid) { 252665e3a202SLukasz Bartosik if ((enabled_port_mask & (1 << portid)) == 0) 252765e3a202SLukasz Bartosik continue; 252865e3a202SLukasz Bartosik 252965e3a202SLukasz Bartosik params = &lcore_params[nb_lcore_params++]; 253065e3a202SLukasz Bartosik params->port_id = portid; 253165e3a202SLukasz Bartosik params->queue_id = 0; 253265e3a202SLukasz Bartosik params->lcore_id = rte_get_next_lcore(0, 0, 1); 253365e3a202SLukasz Bartosik } 253465e3a202SLukasz Bartosik 253565e3a202SLukasz Bartosik return 0; 253665e3a202SLukasz Bartosik } 253765e3a202SLukasz Bartosik 25388e814e18SVolodymyr Fialko static int 25398e814e18SVolodymyr Fialko one_session_free(struct rte_ipsec_session *ips) 25408e814e18SVolodymyr Fialko { 25418e814e18SVolodymyr Fialko int32_t ret = 0; 25428e814e18SVolodymyr Fialko 25438e814e18SVolodymyr Fialko if (ips->type == RTE_SECURITY_ACTION_TYPE_NONE || 25448e814e18SVolodymyr Fialko ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) { 25458e814e18SVolodymyr Fialko /* Session has not been created */ 25468e814e18SVolodymyr Fialko if (ips->crypto.ses == NULL) 25478e814e18SVolodymyr Fialko return 0; 25488e814e18SVolodymyr Fialko 2549bdce2564SAkhil Goyal ret = rte_cryptodev_sym_session_free(ips->crypto.dev_id, 25508e814e18SVolodymyr Fialko ips->crypto.ses); 25518e814e18SVolodymyr Fialko } else { 25528e814e18SVolodymyr Fialko /* Session has not been created */ 25538e814e18SVolodymyr Fialko if (ips->security.ctx == NULL || ips->security.ses == NULL) 25548e814e18SVolodymyr Fialko return 0; 25558e814e18SVolodymyr Fialko 25568e814e18SVolodymyr Fialko ret = rte_security_session_destroy(ips->security.ctx, 25578e814e18SVolodymyr Fialko ips->security.ses); 25588e814e18SVolodymyr Fialko } 25598e814e18SVolodymyr Fialko 25608e814e18SVolodymyr Fialko return ret; 25618e814e18SVolodymyr Fialko } 25628e814e18SVolodymyr Fialko 256365e3a202SLukasz Bartosik static void 25648e814e18SVolodymyr Fialko sessions_free(struct sa_ctx *sa_ctx) 256565e3a202SLukasz Bartosik { 256665e3a202SLukasz Bartosik struct rte_ipsec_session *ips; 256765e3a202SLukasz Bartosik struct ipsec_sa *sa; 256865e3a202SLukasz Bartosik int32_t ret; 256965e3a202SLukasz Bartosik uint32_t i; 257065e3a202SLukasz Bartosik 257165e3a202SLukasz Bartosik if (!sa_ctx) 257265e3a202SLukasz Bartosik return; 257365e3a202SLukasz Bartosik 257465e3a202SLukasz Bartosik for (i = 0; i < sa_ctx->nb_sa; i++) { 257565e3a202SLukasz Bartosik 257665e3a202SLukasz Bartosik sa = &sa_ctx->sa[i]; 257765e3a202SLukasz Bartosik if (!sa->spi) 257865e3a202SLukasz Bartosik continue; 257965e3a202SLukasz Bartosik 258065e3a202SLukasz Bartosik ips = ipsec_get_primary_session(sa); 
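		/*
		 * Only the primary session of the SA is released here;
		 * one_session_free() frees either the cryptodev symmetric
		 * session or the security session, depending on the session
		 * action type.
		 */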
25818e814e18SVolodymyr Fialko ret = one_session_free(ips); 258265e3a202SLukasz Bartosik if (ret) 258365e3a202SLukasz Bartosik RTE_LOG(ERR, IPSEC, "Failed to destroy security " 258465e3a202SLukasz Bartosik "session type %d, spi %d\n", 258565e3a202SLukasz Bartosik ips->type, sa->spi); 258665e3a202SLukasz Bartosik } 258765e3a202SLukasz Bartosik } 258865e3a202SLukasz Bartosik 25897338a34eSLukasz Bartosik static uint32_t 25907338a34eSLukasz Bartosik calculate_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq, 25917338a34eSLukasz Bartosik uint32_t nb_txq) 25927338a34eSLukasz Bartosik { 25937338a34eSLukasz Bartosik return RTE_MAX((nb_rxq * nb_rxd + 25947338a34eSLukasz Bartosik nb_ports * nb_lcores * MAX_PKT_BURST + 25957338a34eSLukasz Bartosik nb_ports * nb_txq * nb_txd + 25967338a34eSLukasz Bartosik nb_lcores * MEMPOOL_CACHE_SIZE + 25975401bdc1SVolodymyr Fialko nb_crypto_qp * qp_desc_nb + 25987338a34eSLukasz Bartosik nb_lcores * frag_tbl_sz * 25997338a34eSLukasz Bartosik FRAG_TBL_BUCKET_ENTRIES), 26007338a34eSLukasz Bartosik 8192U); 26017338a34eSLukasz Bartosik } 26027338a34eSLukasz Bartosik 26033e7b7dd8SRadu Nicolau 26043e7b7dd8SRadu Nicolau static int 26053e7b7dd8SRadu Nicolau handle_telemetry_cmd_ipsec_secgw_stats(const char *cmd __rte_unused, 26063e7b7dd8SRadu Nicolau const char *params, struct rte_tel_data *data) 26073e7b7dd8SRadu Nicolau { 26083e7b7dd8SRadu Nicolau uint64_t total_pkts_dropped = 0, total_pkts_tx = 0, total_pkts_rx = 0; 26093e7b7dd8SRadu Nicolau unsigned int coreid; 26103e7b7dd8SRadu Nicolau 26113e7b7dd8SRadu Nicolau rte_tel_data_start_dict(data); 26123e7b7dd8SRadu Nicolau 26133e7b7dd8SRadu Nicolau if (params) { 26143e7b7dd8SRadu Nicolau coreid = (uint32_t)atoi(params); 26153e7b7dd8SRadu Nicolau if (rte_lcore_is_enabled(coreid) == 0) 26163e7b7dd8SRadu Nicolau return -EINVAL; 26173e7b7dd8SRadu Nicolau 26183e7b7dd8SRadu Nicolau total_pkts_dropped = core_statistics[coreid].dropped; 26193e7b7dd8SRadu Nicolau total_pkts_tx = core_statistics[coreid].tx; 26203e7b7dd8SRadu Nicolau total_pkts_rx = core_statistics[coreid].rx; 26213e7b7dd8SRadu Nicolau 26223e7b7dd8SRadu Nicolau } else { 26233e7b7dd8SRadu Nicolau for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) { 26243e7b7dd8SRadu Nicolau 26253e7b7dd8SRadu Nicolau /* skip disabled cores */ 26263e7b7dd8SRadu Nicolau if (rte_lcore_is_enabled(coreid) == 0) 26273e7b7dd8SRadu Nicolau continue; 26283e7b7dd8SRadu Nicolau 26293e7b7dd8SRadu Nicolau total_pkts_dropped += core_statistics[coreid].dropped; 26303e7b7dd8SRadu Nicolau total_pkts_tx += core_statistics[coreid].tx; 26313e7b7dd8SRadu Nicolau total_pkts_rx += core_statistics[coreid].rx; 26323e7b7dd8SRadu Nicolau } 26333e7b7dd8SRadu Nicolau } 26343e7b7dd8SRadu Nicolau 26353e7b7dd8SRadu Nicolau /* add telemetry key/values pairs */ 2636af0785a2SBruce Richardson rte_tel_data_add_dict_uint(data, "packets received", total_pkts_rx); 26373e7b7dd8SRadu Nicolau 2638af0785a2SBruce Richardson rte_tel_data_add_dict_uint(data, "packets transmitted", total_pkts_tx); 26393e7b7dd8SRadu Nicolau 2640af0785a2SBruce Richardson rte_tel_data_add_dict_uint(data, "packets dropped", 26413e7b7dd8SRadu Nicolau total_pkts_dropped); 26423e7b7dd8SRadu Nicolau 26433e7b7dd8SRadu Nicolau 26443e7b7dd8SRadu Nicolau return 0; 26453e7b7dd8SRadu Nicolau } 26463e7b7dd8SRadu Nicolau 26473e7b7dd8SRadu Nicolau static void 26483e7b7dd8SRadu Nicolau update_lcore_statistics(struct ipsec_core_statistics *total, uint32_t coreid) 26493e7b7dd8SRadu Nicolau { 26503e7b7dd8SRadu Nicolau struct ipsec_core_statistics 
*lcore_stats; 26513e7b7dd8SRadu Nicolau 26523e7b7dd8SRadu Nicolau /* skip disabled cores */ 26533e7b7dd8SRadu Nicolau if (rte_lcore_is_enabled(coreid) == 0) 26543e7b7dd8SRadu Nicolau return; 26553e7b7dd8SRadu Nicolau 26563e7b7dd8SRadu Nicolau lcore_stats = &core_statistics[coreid]; 26573e7b7dd8SRadu Nicolau 26583e7b7dd8SRadu Nicolau total->rx = lcore_stats->rx; 26593e7b7dd8SRadu Nicolau total->dropped = lcore_stats->dropped; 2660d8d51d4fSRahul Bhansali total->frag_dropped = lcore_stats->frag_dropped; 26613e7b7dd8SRadu Nicolau total->tx = lcore_stats->tx; 26623e7b7dd8SRadu Nicolau 26633e7b7dd8SRadu Nicolau /* outbound stats */ 26643e7b7dd8SRadu Nicolau total->outbound.spd6.protect += lcore_stats->outbound.spd6.protect; 26653e7b7dd8SRadu Nicolau total->outbound.spd6.bypass += lcore_stats->outbound.spd6.bypass; 26663e7b7dd8SRadu Nicolau total->outbound.spd6.discard += lcore_stats->outbound.spd6.discard; 26673e7b7dd8SRadu Nicolau 26683e7b7dd8SRadu Nicolau total->outbound.spd4.protect += lcore_stats->outbound.spd4.protect; 26693e7b7dd8SRadu Nicolau total->outbound.spd4.bypass += lcore_stats->outbound.spd4.bypass; 26703e7b7dd8SRadu Nicolau total->outbound.spd4.discard += lcore_stats->outbound.spd4.discard; 26713e7b7dd8SRadu Nicolau 26723e7b7dd8SRadu Nicolau total->outbound.sad.miss += lcore_stats->outbound.sad.miss; 26733e7b7dd8SRadu Nicolau 26743e7b7dd8SRadu Nicolau /* inbound stats */ 26753e7b7dd8SRadu Nicolau total->inbound.spd6.protect += lcore_stats->inbound.spd6.protect; 26763e7b7dd8SRadu Nicolau total->inbound.spd6.bypass += lcore_stats->inbound.spd6.bypass; 26773e7b7dd8SRadu Nicolau total->inbound.spd6.discard += lcore_stats->inbound.spd6.discard; 26783e7b7dd8SRadu Nicolau 26793e7b7dd8SRadu Nicolau total->inbound.spd4.protect += lcore_stats->inbound.spd4.protect; 26803e7b7dd8SRadu Nicolau total->inbound.spd4.bypass += lcore_stats->inbound.spd4.bypass; 26813e7b7dd8SRadu Nicolau total->inbound.spd4.discard += lcore_stats->inbound.spd4.discard; 26823e7b7dd8SRadu Nicolau 26833e7b7dd8SRadu Nicolau total->inbound.sad.miss += lcore_stats->inbound.sad.miss; 26843e7b7dd8SRadu Nicolau 26853e7b7dd8SRadu Nicolau 26863e7b7dd8SRadu Nicolau /* routing stats */ 26873e7b7dd8SRadu Nicolau total->lpm4.miss += lcore_stats->lpm4.miss; 26883e7b7dd8SRadu Nicolau total->lpm6.miss += lcore_stats->lpm6.miss; 26893e7b7dd8SRadu Nicolau } 26903e7b7dd8SRadu Nicolau 26913e7b7dd8SRadu Nicolau static void 26923e7b7dd8SRadu Nicolau update_statistics(struct ipsec_core_statistics *total, uint32_t coreid) 26933e7b7dd8SRadu Nicolau { 26943e7b7dd8SRadu Nicolau memset(total, 0, sizeof(*total)); 26953e7b7dd8SRadu Nicolau 26963e7b7dd8SRadu Nicolau if (coreid != UINT32_MAX) { 26973e7b7dd8SRadu Nicolau update_lcore_statistics(total, coreid); 26983e7b7dd8SRadu Nicolau } else { 26993e7b7dd8SRadu Nicolau for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) 27003e7b7dd8SRadu Nicolau update_lcore_statistics(total, coreid); 27013e7b7dd8SRadu Nicolau } 27023e7b7dd8SRadu Nicolau } 27033e7b7dd8SRadu Nicolau 27043e7b7dd8SRadu Nicolau static int 27053e7b7dd8SRadu Nicolau handle_telemetry_cmd_ipsec_secgw_stats_outbound(const char *cmd __rte_unused, 27063e7b7dd8SRadu Nicolau const char *params, struct rte_tel_data *data) 27073e7b7dd8SRadu Nicolau { 27083e7b7dd8SRadu Nicolau struct ipsec_core_statistics total_stats; 27093e7b7dd8SRadu Nicolau 27103e7b7dd8SRadu Nicolau struct rte_tel_data *spd4_data = rte_tel_data_alloc(); 27113e7b7dd8SRadu Nicolau struct rte_tel_data *spd6_data = rte_tel_data_alloc(); 27123e7b7dd8SRadu Nicolau struct rte_tel_data 
*sad_data = rte_tel_data_alloc(); 27133e7b7dd8SRadu Nicolau unsigned int coreid = UINT32_MAX; 27145d1a17e6SRadu Nicolau int rc = 0; 27153e7b7dd8SRadu Nicolau 27163e7b7dd8SRadu Nicolau /* verify allocated telemetry data structures */ 27175d1a17e6SRadu Nicolau if (!spd4_data || !spd6_data || !sad_data) { 27185d1a17e6SRadu Nicolau rc = -ENOMEM; 27195d1a17e6SRadu Nicolau goto exit; 27205d1a17e6SRadu Nicolau } 27213e7b7dd8SRadu Nicolau 27223e7b7dd8SRadu Nicolau /* initialize telemetry data structs as dicts */ 27233e7b7dd8SRadu Nicolau rte_tel_data_start_dict(data); 27243e7b7dd8SRadu Nicolau 27253e7b7dd8SRadu Nicolau rte_tel_data_start_dict(spd4_data); 27263e7b7dd8SRadu Nicolau rte_tel_data_start_dict(spd6_data); 27273e7b7dd8SRadu Nicolau rte_tel_data_start_dict(sad_data); 27283e7b7dd8SRadu Nicolau 27293e7b7dd8SRadu Nicolau if (params) { 27303e7b7dd8SRadu Nicolau coreid = (uint32_t)atoi(params); 27315d1a17e6SRadu Nicolau if (rte_lcore_is_enabled(coreid) == 0) { 27325d1a17e6SRadu Nicolau rc = -EINVAL; 27335d1a17e6SRadu Nicolau goto exit; 27345d1a17e6SRadu Nicolau } 27353e7b7dd8SRadu Nicolau } 27363e7b7dd8SRadu Nicolau 27373e7b7dd8SRadu Nicolau update_statistics(&total_stats, coreid); 27383e7b7dd8SRadu Nicolau 27393e7b7dd8SRadu Nicolau /* add spd 4 telemetry key/values pairs */ 27403e7b7dd8SRadu Nicolau 2741af0785a2SBruce Richardson rte_tel_data_add_dict_uint(spd4_data, "protect", 27423e7b7dd8SRadu Nicolau total_stats.outbound.spd4.protect); 2743af0785a2SBruce Richardson rte_tel_data_add_dict_uint(spd4_data, "bypass", 27443e7b7dd8SRadu Nicolau total_stats.outbound.spd4.bypass); 2745af0785a2SBruce Richardson rte_tel_data_add_dict_uint(spd4_data, "discard", 27463e7b7dd8SRadu Nicolau total_stats.outbound.spd4.discard); 27473e7b7dd8SRadu Nicolau 27483e7b7dd8SRadu Nicolau rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0); 27493e7b7dd8SRadu Nicolau 27503e7b7dd8SRadu Nicolau /* add spd 6 telemetry key/values pairs */ 27513e7b7dd8SRadu Nicolau 2752af0785a2SBruce Richardson rte_tel_data_add_dict_uint(spd6_data, "protect", 27533e7b7dd8SRadu Nicolau total_stats.outbound.spd6.protect); 2754af0785a2SBruce Richardson rte_tel_data_add_dict_uint(spd6_data, "bypass", 27553e7b7dd8SRadu Nicolau total_stats.outbound.spd6.bypass); 2756af0785a2SBruce Richardson rte_tel_data_add_dict_uint(spd6_data, "discard", 27573e7b7dd8SRadu Nicolau total_stats.outbound.spd6.discard); 27583e7b7dd8SRadu Nicolau 27593e7b7dd8SRadu Nicolau rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0); 27603e7b7dd8SRadu Nicolau 27613e7b7dd8SRadu Nicolau /* add sad telemetry key/values pairs */ 27623e7b7dd8SRadu Nicolau 2763af0785a2SBruce Richardson rte_tel_data_add_dict_uint(sad_data, "miss", 27643e7b7dd8SRadu Nicolau total_stats.outbound.sad.miss); 27653e7b7dd8SRadu Nicolau 27663e7b7dd8SRadu Nicolau rte_tel_data_add_dict_container(data, "sad", sad_data, 0); 27673e7b7dd8SRadu Nicolau 27685d1a17e6SRadu Nicolau exit: 27695d1a17e6SRadu Nicolau if (rc) { 27705d1a17e6SRadu Nicolau rte_tel_data_free(spd4_data); 27715d1a17e6SRadu Nicolau rte_tel_data_free(spd6_data); 27725d1a17e6SRadu Nicolau rte_tel_data_free(sad_data); 27735d1a17e6SRadu Nicolau } 27745d1a17e6SRadu Nicolau return rc; 27753e7b7dd8SRadu Nicolau } 27763e7b7dd8SRadu Nicolau 27773e7b7dd8SRadu Nicolau static int 27783e7b7dd8SRadu Nicolau handle_telemetry_cmd_ipsec_secgw_stats_inbound(const char *cmd __rte_unused, 27793e7b7dd8SRadu Nicolau const char *params, struct rte_tel_data *data) 27803e7b7dd8SRadu Nicolau { 27813e7b7dd8SRadu Nicolau struct ipsec_core_statistics 
total_stats; 27823e7b7dd8SRadu Nicolau 27833e7b7dd8SRadu Nicolau struct rte_tel_data *spd4_data = rte_tel_data_alloc(); 27843e7b7dd8SRadu Nicolau struct rte_tel_data *spd6_data = rte_tel_data_alloc(); 27853e7b7dd8SRadu Nicolau struct rte_tel_data *sad_data = rte_tel_data_alloc(); 27863e7b7dd8SRadu Nicolau unsigned int coreid = UINT32_MAX; 27875d1a17e6SRadu Nicolau int rc = 0; 27883e7b7dd8SRadu Nicolau 27893e7b7dd8SRadu Nicolau /* verify allocated telemetry data structures */ 27905d1a17e6SRadu Nicolau if (!spd4_data || !spd6_data || !sad_data) { 27915d1a17e6SRadu Nicolau rc = -ENOMEM; 27925d1a17e6SRadu Nicolau goto exit; 27935d1a17e6SRadu Nicolau } 27943e7b7dd8SRadu Nicolau 27953e7b7dd8SRadu Nicolau /* initialize telemetry data structs as dicts */ 27963e7b7dd8SRadu Nicolau rte_tel_data_start_dict(data); 27973e7b7dd8SRadu Nicolau rte_tel_data_start_dict(spd4_data); 27983e7b7dd8SRadu Nicolau rte_tel_data_start_dict(spd6_data); 27993e7b7dd8SRadu Nicolau rte_tel_data_start_dict(sad_data); 28003e7b7dd8SRadu Nicolau 28013e7b7dd8SRadu Nicolau /* add children dicts to parent dict */ 28023e7b7dd8SRadu Nicolau 28033e7b7dd8SRadu Nicolau if (params) { 28043e7b7dd8SRadu Nicolau coreid = (uint32_t)atoi(params); 28055d1a17e6SRadu Nicolau if (rte_lcore_is_enabled(coreid) == 0) { 28065d1a17e6SRadu Nicolau rc = -EINVAL; 28075d1a17e6SRadu Nicolau goto exit; 28085d1a17e6SRadu Nicolau } 28093e7b7dd8SRadu Nicolau } 28103e7b7dd8SRadu Nicolau 28113e7b7dd8SRadu Nicolau update_statistics(&total_stats, coreid); 28123e7b7dd8SRadu Nicolau 28133e7b7dd8SRadu Nicolau /* add sad telemetry key/values pairs */ 28143e7b7dd8SRadu Nicolau 2815af0785a2SBruce Richardson rte_tel_data_add_dict_uint(sad_data, "miss", 28163e7b7dd8SRadu Nicolau total_stats.inbound.sad.miss); 28173e7b7dd8SRadu Nicolau 28183e7b7dd8SRadu Nicolau rte_tel_data_add_dict_container(data, "sad", sad_data, 0); 28193e7b7dd8SRadu Nicolau 28203e7b7dd8SRadu Nicolau /* add spd 4 telemetry key/values pairs */ 28213e7b7dd8SRadu Nicolau 2822af0785a2SBruce Richardson rte_tel_data_add_dict_uint(spd4_data, "protect", 28233e7b7dd8SRadu Nicolau total_stats.inbound.spd4.protect); 2824af0785a2SBruce Richardson rte_tel_data_add_dict_uint(spd4_data, "bypass", 28253e7b7dd8SRadu Nicolau total_stats.inbound.spd4.bypass); 2826af0785a2SBruce Richardson rte_tel_data_add_dict_uint(spd4_data, "discard", 28273e7b7dd8SRadu Nicolau total_stats.inbound.spd4.discard); 28283e7b7dd8SRadu Nicolau 28293e7b7dd8SRadu Nicolau rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0); 28303e7b7dd8SRadu Nicolau 28313e7b7dd8SRadu Nicolau /* add spd 6 telemetry key/values pairs */ 28323e7b7dd8SRadu Nicolau 2833af0785a2SBruce Richardson rte_tel_data_add_dict_uint(spd6_data, "protect", 28343e7b7dd8SRadu Nicolau total_stats.inbound.spd6.protect); 2835af0785a2SBruce Richardson rte_tel_data_add_dict_uint(spd6_data, "bypass", 28363e7b7dd8SRadu Nicolau total_stats.inbound.spd6.bypass); 2837af0785a2SBruce Richardson rte_tel_data_add_dict_uint(spd6_data, "discard", 28383e7b7dd8SRadu Nicolau total_stats.inbound.spd6.discard); 28393e7b7dd8SRadu Nicolau 28403e7b7dd8SRadu Nicolau rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0); 28413e7b7dd8SRadu Nicolau 28425d1a17e6SRadu Nicolau exit: 28435d1a17e6SRadu Nicolau if (rc) { 28445d1a17e6SRadu Nicolau rte_tel_data_free(spd4_data); 28455d1a17e6SRadu Nicolau rte_tel_data_free(spd6_data); 28465d1a17e6SRadu Nicolau rte_tel_data_free(sad_data); 28475d1a17e6SRadu Nicolau } 28485d1a17e6SRadu Nicolau return rc; 28493e7b7dd8SRadu Nicolau } 28503e7b7dd8SRadu Nicolau 
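/*
 * The outbound, inbound and routing telemetry handlers all follow the same
 * pattern: allocate child rte_tel_data dicts, fill them from the aggregated
 * core statistics and attach them to the parent dict with
 * rte_tel_data_add_dict_container() (children are freed explicitly only on
 * the error path). A minimal sketch of the same pattern for a hypothetical
 * single-counter command (the name below is illustrative only, not part of
 * this application):
 *
 *	static int
 *	handle_example_stat(const char *cmd __rte_unused,
 *		const char *params __rte_unused, struct rte_tel_data *data)
 *	{
 *		rte_tel_data_start_dict(data);
 *		rte_tel_data_add_dict_uint(data, "example", 0);
 *		return 0;
 *	}
 *
 * Once registered, such commands can typically be queried at runtime over
 * the DPDK telemetry socket (e.g. with usertools/dpdk-telemetry.py), with an
 * optional logical core id passed as the parameter after a comma.
 */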
28513e7b7dd8SRadu Nicolau static int 28523e7b7dd8SRadu Nicolau handle_telemetry_cmd_ipsec_secgw_stats_routing(const char *cmd __rte_unused, 28533e7b7dd8SRadu Nicolau const char *params, struct rte_tel_data *data) 28543e7b7dd8SRadu Nicolau { 28553e7b7dd8SRadu Nicolau struct ipsec_core_statistics total_stats; 28563e7b7dd8SRadu Nicolau 28573e7b7dd8SRadu Nicolau struct rte_tel_data *lpm4_data = rte_tel_data_alloc(); 28583e7b7dd8SRadu Nicolau struct rte_tel_data *lpm6_data = rte_tel_data_alloc(); 28593e7b7dd8SRadu Nicolau unsigned int coreid = UINT32_MAX; 28605d1a17e6SRadu Nicolau int rc = 0; 28615d1a17e6SRadu Nicolau 28625d1a17e6SRadu Nicolau /* verify allocated telemetry data structures */ 28635d1a17e6SRadu Nicolau if (!lpm4_data || !lpm6_data) { 28645d1a17e6SRadu Nicolau rc = -ENOMEM; 28655d1a17e6SRadu Nicolau goto exit; 28665d1a17e6SRadu Nicolau } 28673e7b7dd8SRadu Nicolau 28683e7b7dd8SRadu Nicolau /* initialize telemetry data structs as dicts */ 28693e7b7dd8SRadu Nicolau rte_tel_data_start_dict(data); 28703e7b7dd8SRadu Nicolau rte_tel_data_start_dict(lpm4_data); 28713e7b7dd8SRadu Nicolau rte_tel_data_start_dict(lpm6_data); 28723e7b7dd8SRadu Nicolau 28733e7b7dd8SRadu Nicolau 28743e7b7dd8SRadu Nicolau if (params) { 28753e7b7dd8SRadu Nicolau coreid = (uint32_t)atoi(params); 28765d1a17e6SRadu Nicolau if (rte_lcore_is_enabled(coreid) == 0) { 28775d1a17e6SRadu Nicolau rc = -EINVAL; 28785d1a17e6SRadu Nicolau goto exit; 28795d1a17e6SRadu Nicolau } 28803e7b7dd8SRadu Nicolau } 28813e7b7dd8SRadu Nicolau 28823e7b7dd8SRadu Nicolau update_statistics(&total_stats, coreid); 28833e7b7dd8SRadu Nicolau 28843e7b7dd8SRadu Nicolau /* add lpm 4 telemetry key/values pairs */ 2885af0785a2SBruce Richardson rte_tel_data_add_dict_uint(lpm4_data, "miss", total_stats.lpm4.miss); 28863e7b7dd8SRadu Nicolau 28873e7b7dd8SRadu Nicolau rte_tel_data_add_dict_container(data, "IPv4 LPM", lpm4_data, 0); 28883e7b7dd8SRadu Nicolau 28893e7b7dd8SRadu Nicolau /* add lpm 6 telemetry key/values pairs */ 2890af0785a2SBruce Richardson rte_tel_data_add_dict_uint(lpm6_data, "miss", total_stats.lpm6.miss); 28913e7b7dd8SRadu Nicolau 28923e7b7dd8SRadu Nicolau rte_tel_data_add_dict_container(data, "IPv6 LPM", lpm6_data, 0); 28933e7b7dd8SRadu Nicolau 28945d1a17e6SRadu Nicolau exit: 28955d1a17e6SRadu Nicolau if (rc) { 28965d1a17e6SRadu Nicolau rte_tel_data_free(lpm4_data); 28975d1a17e6SRadu Nicolau rte_tel_data_free(lpm6_data); 28985d1a17e6SRadu Nicolau } 28995d1a17e6SRadu Nicolau return rc; 29003e7b7dd8SRadu Nicolau } 29013e7b7dd8SRadu Nicolau 29023e7b7dd8SRadu Nicolau static void 29033e7b7dd8SRadu Nicolau ipsec_secgw_telemetry_init(void) 29043e7b7dd8SRadu Nicolau { 29053e7b7dd8SRadu Nicolau rte_telemetry_register_cmd("/examples/ipsec-secgw/stats", 29063e7b7dd8SRadu Nicolau handle_telemetry_cmd_ipsec_secgw_stats, 29073e7b7dd8SRadu Nicolau "Returns global stats. " 29083e7b7dd8SRadu Nicolau "Optional Parameters: int <logical core id>"); 29093e7b7dd8SRadu Nicolau 29103e7b7dd8SRadu Nicolau rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/outbound", 29113e7b7dd8SRadu Nicolau handle_telemetry_cmd_ipsec_secgw_stats_outbound, 29123e7b7dd8SRadu Nicolau "Returns outbound global stats. " 29133e7b7dd8SRadu Nicolau "Optional Parameters: int <logical core id>"); 29143e7b7dd8SRadu Nicolau 29153e7b7dd8SRadu Nicolau rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/inbound", 29163e7b7dd8SRadu Nicolau handle_telemetry_cmd_ipsec_secgw_stats_inbound, 29173e7b7dd8SRadu Nicolau "Returns inbound global stats. 
" 29183e7b7dd8SRadu Nicolau "Optional Parameters: int <logical core id>"); 29193e7b7dd8SRadu Nicolau 29203e7b7dd8SRadu Nicolau rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/routing", 29213e7b7dd8SRadu Nicolau handle_telemetry_cmd_ipsec_secgw_stats_routing, 29223e7b7dd8SRadu Nicolau "Returns routing stats. " 29233e7b7dd8SRadu Nicolau "Optional Parameters: int <logical core id>"); 29243e7b7dd8SRadu Nicolau } 29253e7b7dd8SRadu Nicolau 2926d299106eSSergio Gonzalez Monroy int32_t 2927d299106eSSergio Gonzalez Monroy main(int32_t argc, char **argv) 2928d299106eSSergio Gonzalez Monroy { 2929d299106eSSergio Gonzalez Monroy int32_t ret; 29307338a34eSLukasz Bartosik uint32_t lcore_id, nb_txq, nb_rxq = 0; 293165e3a202SLukasz Bartosik uint32_t cdev_id; 29323a690d5aSBernard Iremonger uint32_t i; 293347523597SZhiyong Yang uint8_t socket_id; 29347338a34eSLukasz Bartosik uint16_t portid, nb_crypto_qp, nb_ports = 0; 2935513f192bSAnkur Dwivedi uint64_t req_rx_offloads[RTE_MAX_ETHPORTS]; 2936513f192bSAnkur Dwivedi uint64_t req_tx_offloads[RTE_MAX_ETHPORTS]; 2937d8d51d4fSRahul Bhansali uint8_t req_hw_reassembly[RTE_MAX_ETHPORTS]; 293865e3a202SLukasz Bartosik struct eh_conf *eh_conf = NULL; 29394edcee19SNithin Dabilpuram uint32_t ipv4_cksum_port_mask = 0; 29403a690d5aSBernard Iremonger size_t sess_sz; 2941d299106eSSergio Gonzalez Monroy 29427338a34eSLukasz Bartosik nb_bufs_in_pool = 0; 29437338a34eSLukasz Bartosik 2944d299106eSSergio Gonzalez Monroy /* init EAL */ 2945d299106eSSergio Gonzalez Monroy ret = rte_eal_init(argc, argv); 2946d299106eSSergio Gonzalez Monroy if (ret < 0) 2947d299106eSSergio Gonzalez Monroy rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n"); 2948d299106eSSergio Gonzalez Monroy argc -= ret; 2949d299106eSSergio Gonzalez Monroy argv += ret; 2950d299106eSSergio Gonzalez Monroy 295165e3a202SLukasz Bartosik force_quit = false; 295265e3a202SLukasz Bartosik signal(SIGINT, signal_handler); 295365e3a202SLukasz Bartosik signal(SIGTERM, signal_handler); 295465e3a202SLukasz Bartosik 295565e3a202SLukasz Bartosik /* initialize event helper configuration */ 295665e3a202SLukasz Bartosik eh_conf = eh_conf_init(); 295765e3a202SLukasz Bartosik if (eh_conf == NULL) 295865e3a202SLukasz Bartosik rte_exit(EXIT_FAILURE, "Failed to init event helper config"); 295965e3a202SLukasz Bartosik 2960d299106eSSergio Gonzalez Monroy /* parse application arguments (after the EAL ones) */ 296165e3a202SLukasz Bartosik ret = parse_args(argc, argv, eh_conf); 2962d299106eSSergio Gonzalez Monroy if (ret < 0) 2963d299106eSSergio Gonzalez Monroy rte_exit(EXIT_FAILURE, "Invalid parameters\n"); 2964d299106eSSergio Gonzalez Monroy 29653e7b7dd8SRadu Nicolau ipsec_secgw_telemetry_init(); 29663e7b7dd8SRadu Nicolau 2967ba66534fSMarcin Smoczynski /* parse configuration file */ 2968ba66534fSMarcin Smoczynski if (parse_cfg_file(cfgfile) < 0) { 2969ba66534fSMarcin Smoczynski printf("parsing file \"%s\" failed\n", 2970ba66534fSMarcin Smoczynski optarg); 2971ba66534fSMarcin Smoczynski print_usage(argv[0]); 2972ba66534fSMarcin Smoczynski return -1; 2973ba66534fSMarcin Smoczynski } 2974ba66534fSMarcin Smoczynski 2975d299106eSSergio Gonzalez Monroy if ((unprotected_port_mask & enabled_port_mask) != 2976d299106eSSergio Gonzalez Monroy unprotected_port_mask) 2977d299106eSSergio Gonzalez Monroy rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n", 2978d299106eSSergio Gonzalez Monroy unprotected_port_mask); 2979d299106eSSergio Gonzalez Monroy 2980a15f7b7dSVolodymyr Fialko if (unprotected_port_mask && !nb_sa_in) 2981a15f7b7dSVolodymyr 
Fialko rte_exit(EXIT_FAILURE, "Cannot use unprotected portmask without configured SA inbound\n"); 2982a15f7b7dSVolodymyr Fialko 298365e3a202SLukasz Bartosik if (check_poll_mode_params(eh_conf) < 0) 298465e3a202SLukasz Bartosik rte_exit(EXIT_FAILURE, "check_poll_mode_params failed\n"); 298565e3a202SLukasz Bartosik 298665e3a202SLukasz Bartosik if (check_event_mode_params(eh_conf) < 0) 298765e3a202SLukasz Bartosik rte_exit(EXIT_FAILURE, "check_event_mode_params failed\n"); 2988d299106eSSergio Gonzalez Monroy 2989d299106eSSergio Gonzalez Monroy ret = init_lcore_rx_queues(); 2990d299106eSSergio Gonzalez Monroy if (ret < 0) 2991d299106eSSergio Gonzalez Monroy rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n"); 2992d299106eSSergio Gonzalez Monroy 2993d299106eSSergio Gonzalez Monroy nb_lcores = rte_lcore_count(); 2994d299106eSSergio Gonzalez Monroy 29953a690d5aSBernard Iremonger sess_sz = max_session_size(); 29963a690d5aSBernard Iremonger 2997844baa10SLukasz Bartosik /* 2998844baa10SLukasz Bartosik * In event mode request minimum number of crypto queues 2999844baa10SLukasz Bartosik * to be reserved equal to number of ports. 3000844baa10SLukasz Bartosik */ 3001844baa10SLukasz Bartosik if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT) 3002844baa10SLukasz Bartosik nb_crypto_qp = rte_eth_dev_count_avail(); 3003844baa10SLukasz Bartosik else 3004844baa10SLukasz Bartosik nb_crypto_qp = 0; 3005844baa10SLukasz Bartosik 3006844baa10SLukasz Bartosik nb_crypto_qp = cryptodevs_init(nb_crypto_qp); 30077338a34eSLukasz Bartosik 30087338a34eSLukasz Bartosik if (nb_bufs_in_pool == 0) { 30097338a34eSLukasz Bartosik RTE_ETH_FOREACH_DEV(portid) { 30107338a34eSLukasz Bartosik if ((enabled_port_mask & (1 << portid)) == 0) 30117338a34eSLukasz Bartosik continue; 30127338a34eSLukasz Bartosik nb_ports++; 30137338a34eSLukasz Bartosik nb_rxq += get_port_nb_rx_queues(portid); 30147338a34eSLukasz Bartosik } 30157338a34eSLukasz Bartosik 30167338a34eSLukasz Bartosik nb_txq = nb_lcores; 30177338a34eSLukasz Bartosik 30187338a34eSLukasz Bartosik nb_bufs_in_pool = calculate_nb_mbufs(nb_ports, nb_crypto_qp, 30197338a34eSLukasz Bartosik nb_rxq, nb_txq); 30207338a34eSLukasz Bartosik } 30217338a34eSLukasz Bartosik 3022d299106eSSergio Gonzalez Monroy for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { 3023d299106eSSergio Gonzalez Monroy if (rte_lcore_is_enabled(lcore_id) == 0) 3024d299106eSSergio Gonzalez Monroy continue; 3025d299106eSSergio Gonzalez Monroy 3026d299106eSSergio Gonzalez Monroy if (numa_on) 3027d299106eSSergio Gonzalez Monroy socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id); 3028d299106eSSergio Gonzalez Monroy else 3029d299106eSSergio Gonzalez Monroy socket_id = 0; 3030d299106eSSergio Gonzalez Monroy 303148a39871SNithin Dabilpuram if (per_port_pool) { 303248a39871SNithin Dabilpuram RTE_ETH_FOREACH_DEV(portid) { 303348a39871SNithin Dabilpuram if ((enabled_port_mask & (1 << portid)) == 0) 3034d299106eSSergio Gonzalez Monroy continue; 3035d299106eSSergio Gonzalez Monroy 303648a39871SNithin Dabilpuram pool_init(&socket_ctx[socket_id], socket_id, 303748a39871SNithin Dabilpuram portid, nb_bufs_in_pool); 303848a39871SNithin Dabilpuram } 303948a39871SNithin Dabilpuram } else { 304048a39871SNithin Dabilpuram pool_init(&socket_ctx[socket_id], socket_id, 0, 304148a39871SNithin Dabilpuram nb_bufs_in_pool); 304248a39871SNithin Dabilpuram } 304348a39871SNithin Dabilpuram 304448a39871SNithin Dabilpuram if (socket_ctx[socket_id].session_pool) 304548a39871SNithin Dabilpuram continue; 304648a39871SNithin Dabilpuram 
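		/*
		 * Mbuf pools are created per NUMA socket (or per port and
		 * socket when per_port_pool is enabled), and the crypto
		 * session pool below is created once per socket; lcores on
		 * the same socket share these pools.
		 */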
30473a690d5aSBernard Iremonger session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz); 3048d299106eSSergio Gonzalez Monroy } 30497338a34eSLukasz Bartosik printf("Number of mbufs in packet pool %d\n", nb_bufs_in_pool); 3050d299106eSSergio Gonzalez Monroy 30518728ccf3SThomas Monjalon RTE_ETH_FOREACH_DEV(portid) { 3052d299106eSSergio Gonzalez Monroy if ((enabled_port_mask & (1 << portid)) == 0) 3053d299106eSSergio Gonzalez Monroy continue; 3054d299106eSSergio Gonzalez Monroy 3055d8d51d4fSRahul Bhansali sa_check_offloads(portid, &req_rx_offloads[portid], &req_tx_offloads[portid], 3056d8d51d4fSRahul Bhansali &req_hw_reassembly[portid]); 3057d8d51d4fSRahul Bhansali port_init(portid, req_rx_offloads[portid], req_tx_offloads[portid], 3058d8d51d4fSRahul Bhansali req_hw_reassembly[portid]); 30594edcee19SNithin Dabilpuram if ((req_tx_offloads[portid] & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) 3060c05f2e96SRadu Nicolau ipv4_cksum_port_mask |= 1U << portid; 30614edcee19SNithin Dabilpuram } 30624edcee19SNithin Dabilpuram 3063b1a6b1e9SVolodymyr Fialko tx_offloads.ipv4_offloads = RTE_MBUF_F_TX_IPV4; 3064b1a6b1e9SVolodymyr Fialko tx_offloads.ipv6_offloads = RTE_MBUF_F_TX_IPV6; 30654edcee19SNithin Dabilpuram /* Update per lcore checksum offload support only if all ports support it */ 30664edcee19SNithin Dabilpuram if (ipv4_cksum_port_mask == enabled_port_mask) 3067b1a6b1e9SVolodymyr Fialko tx_offloads.ipv4_offloads |= RTE_MBUF_F_TX_IP_CKSUM; 3068b1a6b1e9SVolodymyr Fialko 3069b1a6b1e9SVolodymyr Fialko lcore_id = 0; 3070b1a6b1e9SVolodymyr Fialko RTE_LCORE_FOREACH(lcore_id) { 3071b1a6b1e9SVolodymyr Fialko /* Pre-populate pkt offloads based on capabilities */ 3072b1a6b1e9SVolodymyr Fialko lcore_conf[lcore_id].outbound.ipv4_offloads = tx_offloads.ipv4_offloads; 3073b1a6b1e9SVolodymyr Fialko lcore_conf[lcore_id].outbound.ipv6_offloads = tx_offloads.ipv6_offloads; 3074d299106eSSergio Gonzalez Monroy } 3075d299106eSSergio Gonzalez Monroy 307665e3a202SLukasz Bartosik /* 307765e3a202SLukasz Bartosik * Set the enabled port mask in helper config for use by helper 307865e3a202SLukasz Bartosik * sub-system. This will be used while initializing devices using 307965e3a202SLukasz Bartosik * helper sub-system. 308065e3a202SLukasz Bartosik */ 308165e3a202SLukasz Bartosik eh_conf->eth_portmask = enabled_port_mask; 308265e3a202SLukasz Bartosik 308365e3a202SLukasz Bartosik /* Initialize eventmode components */ 308465e3a202SLukasz Bartosik ret = eh_devs_init(eh_conf); 308565e3a202SLukasz Bartosik if (ret < 0) 308665e3a202SLukasz Bartosik rte_exit(EXIT_FAILURE, "eh_devs_init failed, err=%d\n", ret); 308765e3a202SLukasz Bartosik 3088d299106eSSergio Gonzalez Monroy /* start ports */ 30898728ccf3SThomas Monjalon RTE_ETH_FOREACH_DEV(portid) { 3090d299106eSSergio Gonzalez Monroy if ((enabled_port_mask & (1 << portid)) == 0) 3091d299106eSSergio Gonzalez Monroy continue; 3092d299106eSSergio Gonzalez Monroy 3093d299106eSSergio Gonzalez Monroy ret = rte_eth_dev_start(portid); 3094d299106eSSergio Gonzalez Monroy if (ret < 0) 3095d299106eSSergio Gonzalez Monroy rte_exit(EXIT_FAILURE, "rte_eth_dev_start: " 3096d299106eSSergio Gonzalez Monroy "err=%d, port=%d\n", ret, portid); 3097b0c6a0f1SNithin Dabilpuram 3098b0c6a0f1SNithin Dabilpuram /* Create flow after starting the device */ 3099b0c6a0f1SNithin Dabilpuram create_default_ipsec_flow(portid, req_rx_offloads[portid]); 3100b0c6a0f1SNithin Dabilpuram 3101d299106eSSergio Gonzalez Monroy /* 3102d299106eSSergio Gonzalez Monroy * If enabled, put device in promiscuous mode. 
3103d299106eSSergio Gonzalez Monroy * This allows IO forwarding mode to forward packets 3104d299106eSSergio Gonzalez Monroy * to itself through 2 cross-connected ports of the 3105d299106eSSergio Gonzalez Monroy * target machine. 3106d299106eSSergio Gonzalez Monroy */ 3107f430bbceSIvan Ilchenko if (promiscuous_on) { 3108f430bbceSIvan Ilchenko ret = rte_eth_promiscuous_enable(portid); 3109f430bbceSIvan Ilchenko if (ret != 0) 3110f430bbceSIvan Ilchenko rte_exit(EXIT_FAILURE, 3111f430bbceSIvan Ilchenko "rte_eth_promiscuous_enable: err=%s, port=%d\n", 3112f430bbceSIvan Ilchenko rte_strerror(-ret), portid); 3113f430bbceSIvan Ilchenko } 3114fa4de2ccSAnoob Joseph 3115fe105decSRadu Nicolau rte_eth_dev_callback_register(portid, RTE_ETH_EVENT_INTR_RESET, 3116fe105decSRadu Nicolau ethdev_reset_event_callback, NULL); 3117fe105decSRadu Nicolau 3118fa4de2ccSAnoob Joseph rte_eth_dev_callback_register(portid, 3119fa4de2ccSAnoob Joseph RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL); 3120d299106eSSergio Gonzalez Monroy } 3121d299106eSSergio Gonzalez Monroy 3122b01d1cd2SKonstantin Ananyev /* fragment reassemble is enabled */ 3123b01d1cd2SKonstantin Ananyev if (frag_tbl_sz != 0) { 3124b01d1cd2SKonstantin Ananyev ret = reassemble_init(); 3125b01d1cd2SKonstantin Ananyev if (ret != 0) 3126b01d1cd2SKonstantin Ananyev rte_exit(EXIT_FAILURE, "failed at reassemble init"); 3127b01d1cd2SKonstantin Ananyev } 3128b01d1cd2SKonstantin Ananyev 31293a690d5aSBernard Iremonger /* Replicate each context per socket */ 31303a690d5aSBernard Iremonger for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) { 31313a690d5aSBernard Iremonger socket_id = rte_socket_id_by_idx(i); 313248a39871SNithin Dabilpuram if ((socket_ctx[socket_id].session_pool != NULL) && 31333a690d5aSBernard Iremonger (socket_ctx[socket_id].sa_in == NULL) && 31343a690d5aSBernard Iremonger (socket_ctx[socket_id].sa_out == NULL)) { 31356938fc92SVolodymyr Fialko sa_init(&socket_ctx[socket_id], socket_id, lcore_conf, 31366938fc92SVolodymyr Fialko eh_conf->mode_params); 31373a690d5aSBernard Iremonger sp4_init(&socket_ctx[socket_id], socket_id); 31383a690d5aSBernard Iremonger sp6_init(&socket_ctx[socket_id], socket_id); 31393a690d5aSBernard Iremonger rt_init(&socket_ctx[socket_id], socket_id); 31403a690d5aSBernard Iremonger } 31413a690d5aSBernard Iremonger } 31423a690d5aSBernard Iremonger 31438e693616SAnoob Joseph flow_init(); 31448e693616SAnoob Joseph 3145c7e6d808SNithin Dabilpuram /* Get security context if available and only if dynamic field is 3146c7e6d808SNithin Dabilpuram * registered for fast path access. 
3147c7e6d808SNithin Dabilpuram */ 3148c7e6d808SNithin Dabilpuram if (!rte_security_dynfield_is_registered()) 3149c7e6d808SNithin Dabilpuram goto skip_sec_ctx; 3150c7e6d808SNithin Dabilpuram 3151c7e6d808SNithin Dabilpuram for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { 3152c7e6d808SNithin Dabilpuram for (i = 0; i < lcore_conf[lcore_id].nb_rx_queue; i++) { 3153c7e6d808SNithin Dabilpuram portid = lcore_conf[lcore_id].rx_queue_list[i].port_id; 3154c7e6d808SNithin Dabilpuram lcore_conf[lcore_id].rx_queue_list[i].sec_ctx = 3155c7e6d808SNithin Dabilpuram rte_eth_dev_get_sec_ctx(portid); 3156c7e6d808SNithin Dabilpuram } 3157c7e6d808SNithin Dabilpuram } 3158c7e6d808SNithin Dabilpuram skip_sec_ctx: 3159c7e6d808SNithin Dabilpuram 31608728ccf3SThomas Monjalon check_all_ports_link_status(enabled_port_mask); 3161d299106eSSergio Gonzalez Monroy 316228008936SRadu Nicolau if (stats_interval > 0) 316328008936SRadu Nicolau rte_eal_alarm_set(stats_interval * US_PER_S, 316428008936SRadu Nicolau print_stats_cb, NULL); 316528008936SRadu Nicolau else 31661329602bSAnoob Joseph RTE_LOG(INFO, IPSEC, "Stats display disabled\n"); 31671329602bSAnoob Joseph 3168d299106eSSergio Gonzalez Monroy /* launch per-lcore init on every lcore */ 3169cb056611SStephen Hemminger rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MAIN); 3170cb056611SStephen Hemminger RTE_LCORE_FOREACH_WORKER(lcore_id) { 3171d299106eSSergio Gonzalez Monroy if (rte_eal_wait_lcore(lcore_id) < 0) 3172d299106eSSergio Gonzalez Monroy return -1; 3173d299106eSSergio Gonzalez Monroy } 3174d299106eSSergio Gonzalez Monroy 317565e3a202SLukasz Bartosik /* Uninitialize eventmode components */ 317665e3a202SLukasz Bartosik ret = eh_devs_uninit(eh_conf); 317765e3a202SLukasz Bartosik if (ret < 0) 317865e3a202SLukasz Bartosik rte_exit(EXIT_FAILURE, "eh_devs_uninit failed, err=%d\n", ret); 317965e3a202SLukasz Bartosik 318065e3a202SLukasz Bartosik /* Free eventmode configuration memory */ 318165e3a202SLukasz Bartosik eh_conf_uninit(eh_conf); 318265e3a202SLukasz Bartosik 31838e814e18SVolodymyr Fialko /* Destroy inbound and outbound sessions */ 318465e3a202SLukasz Bartosik for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) { 318565e3a202SLukasz Bartosik socket_id = rte_socket_id_by_idx(i); 31868e814e18SVolodymyr Fialko sessions_free(socket_ctx[socket_id].sa_in); 31878e814e18SVolodymyr Fialko sessions_free(socket_ctx[socket_id].sa_out); 318865e3a202SLukasz Bartosik } 318965e3a202SLukasz Bartosik 319065e3a202SLukasz Bartosik for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) { 319165e3a202SLukasz Bartosik printf("Closing cryptodev %d...", cdev_id); 319265e3a202SLukasz Bartosik rte_cryptodev_stop(cdev_id); 319365e3a202SLukasz Bartosik rte_cryptodev_close(cdev_id); 319465e3a202SLukasz Bartosik printf(" Done\n"); 319565e3a202SLukasz Bartosik } 319665e3a202SLukasz Bartosik 3197b3a4baf8SSatheesh Paul flow_print_counters(); 3198b3a4baf8SSatheesh Paul 319965e3a202SLukasz Bartosik RTE_ETH_FOREACH_DEV(portid) { 320065e3a202SLukasz Bartosik if ((enabled_port_mask & (1 << portid)) == 0) 320165e3a202SLukasz Bartosik continue; 320265e3a202SLukasz Bartosik 320365e3a202SLukasz Bartosik printf("Closing port %d...", portid); 320465e3a202SLukasz Bartosik if (flow_info_tbl[portid].rx_def_flow) { 320565e3a202SLukasz Bartosik struct rte_flow_error err; 320665e3a202SLukasz Bartosik 320765e3a202SLukasz Bartosik ret = rte_flow_destroy(portid, 320865e3a202SLukasz Bartosik flow_info_tbl[portid].rx_def_flow, &err); 320965e3a202SLukasz Bartosik if (ret) 
321065e3a202SLukasz Bartosik 				RTE_LOG(ERR, IPSEC, "Failed to destroy flow "
321165e3a202SLukasz Bartosik 					" for port %u, err msg: %s\n", portid,
321265e3a202SLukasz Bartosik 					err.message);
321365e3a202SLukasz Bartosik 		}
3214b55efbabSIvan Ilchenko 		ret = rte_eth_dev_stop(portid);
3215b55efbabSIvan Ilchenko 		if (ret != 0)
3216b55efbabSIvan Ilchenko 			RTE_LOG(ERR, IPSEC,
3217b55efbabSIvan Ilchenko 				"rte_eth_dev_stop: err=%s, port=%u\n",
3218b55efbabSIvan Ilchenko 				rte_strerror(-ret), portid);
3219b55efbabSIvan Ilchenko 
322065e3a202SLukasz Bartosik 		rte_eth_dev_close(portid);
322165e3a202SLukasz Bartosik 		printf(" Done\n");
322265e3a202SLukasz Bartosik 	}
322310aa3757SChengchang Tang 
322410aa3757SChengchang Tang 	/* clean up the EAL */
322510aa3757SChengchang Tang 	rte_eal_cleanup();
322665e3a202SLukasz Bartosik 	printf("Bye...\n");
322765e3a202SLukasz Bartosik 
3228d299106eSSergio Gonzalez Monroy 	return 0;
3229d299106eSSergio Gonzalez Monroy }
3230