147db46bbSNithin Dabilpuram /* SPDX-License-Identifier: BSD-3-Clause 247db46bbSNithin Dabilpuram * Copyright(C) 2021 Marvell. 347db46bbSNithin Dabilpuram */ 447db46bbSNithin Dabilpuram #include <cnxk_ethdev.h> 547db46bbSNithin Dabilpuram 682529cffSNithin Dabilpuram #include <rte_eventdev.h> 7*92fa0ac7SSrujana Challa #include <rte_pmd_cnxk.h> 882529cffSNithin Dabilpuram 9022f1c1aSNithin Dabilpuram #define CNXK_NIX_CQ_INL_CLAMP_MAX (64UL * 1024UL) 10022f1c1aSNithin Dabilpuram 11eafb1b9aSSatha Rao #define NIX_TM_DFLT_RR_WT 71 12eafb1b9aSSatha Rao 13*92fa0ac7SSrujana Challa const char * 14*92fa0ac7SSrujana Challa rte_pmd_cnxk_model_str_get(void) 15*92fa0ac7SSrujana Challa { 16*92fa0ac7SSrujana Challa return roc_model->name; 17*92fa0ac7SSrujana Challa } 18*92fa0ac7SSrujana Challa 195a4341c8SNithin Dabilpuram static inline uint64_t 205a4341c8SNithin Dabilpuram nix_get_rx_offload_capa(struct cnxk_eth_dev *dev) 215a4341c8SNithin Dabilpuram { 225a4341c8SNithin Dabilpuram uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA; 235a4341c8SNithin Dabilpuram 244093c5a8SKiran Kumar K if (roc_nix_is_vf_or_sdp(&dev->nix) || 254093c5a8SKiran Kumar K dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) 26295968d1SFerruh Yigit capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP; 275a4341c8SNithin Dabilpuram 285a4341c8SNithin Dabilpuram return capa; 295a4341c8SNithin Dabilpuram } 305a4341c8SNithin Dabilpuram 315a4341c8SNithin Dabilpuram static inline uint64_t 325a4341c8SNithin Dabilpuram nix_get_tx_offload_capa(struct cnxk_eth_dev *dev) 335a4341c8SNithin Dabilpuram { 345a4341c8SNithin Dabilpuram RTE_SET_USED(dev); 355a4341c8SNithin Dabilpuram return CNXK_NIX_TX_OFFLOAD_CAPA; 365a4341c8SNithin Dabilpuram } 375a4341c8SNithin Dabilpuram 385a4341c8SNithin Dabilpuram static inline uint32_t 395a4341c8SNithin Dabilpuram nix_get_speed_capa(struct cnxk_eth_dev *dev) 405a4341c8SNithin Dabilpuram { 415a4341c8SNithin Dabilpuram uint32_t speed_capa; 425a4341c8SNithin Dabilpuram 435a4341c8SNithin Dabilpuram /* Auto 
negotiation disabled */ 44295968d1SFerruh Yigit speed_capa = RTE_ETH_LINK_SPEED_FIXED; 455a4341c8SNithin Dabilpuram if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) { 46295968d1SFerruh Yigit speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G | 47295968d1SFerruh Yigit RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G | 48295968d1SFerruh Yigit RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G; 495a4341c8SNithin Dabilpuram } 505a4341c8SNithin Dabilpuram 515a4341c8SNithin Dabilpuram return speed_capa; 525a4341c8SNithin Dabilpuram } 535a4341c8SNithin Dabilpuram 54022f1c1aSNithin Dabilpuram static uint32_t 55022f1c1aSNithin Dabilpuram nix_inl_cq_sz_clamp_up(struct roc_nix *nix, struct rte_mempool *mp, 56022f1c1aSNithin Dabilpuram uint32_t nb_desc) 57022f1c1aSNithin Dabilpuram { 58022f1c1aSNithin Dabilpuram struct roc_nix_rq *inl_rq; 59022f1c1aSNithin Dabilpuram uint64_t limit; 60022f1c1aSNithin Dabilpuram 618bc924cfSNithin Dabilpuram /* For CN10KB and above, LBP needs minimum CQ size */ 62022f1c1aSNithin Dabilpuram if (!roc_errata_cpt_hang_on_x2p_bp()) 638bc924cfSNithin Dabilpuram return RTE_MAX(nb_desc, (uint32_t)4096); 64022f1c1aSNithin Dabilpuram 65022f1c1aSNithin Dabilpuram /* CQ should be able to hold all buffers in first pass RQ's aura 66022f1c1aSNithin Dabilpuram * this RQ's aura. 
67022f1c1aSNithin Dabilpuram */ 68022f1c1aSNithin Dabilpuram inl_rq = roc_nix_inl_dev_rq(nix); 69022f1c1aSNithin Dabilpuram if (!inl_rq) { 70022f1c1aSNithin Dabilpuram /* This itself is going to be inline RQ's aura */ 71022f1c1aSNithin Dabilpuram limit = roc_npa_aura_op_limit_get(mp->pool_id); 72022f1c1aSNithin Dabilpuram } else { 73022f1c1aSNithin Dabilpuram limit = roc_npa_aura_op_limit_get(inl_rq->aura_handle); 74022f1c1aSNithin Dabilpuram /* Also add this RQ's aura if it is different */ 75022f1c1aSNithin Dabilpuram if (inl_rq->aura_handle != mp->pool_id) 76022f1c1aSNithin Dabilpuram limit += roc_npa_aura_op_limit_get(mp->pool_id); 77022f1c1aSNithin Dabilpuram } 78022f1c1aSNithin Dabilpuram nb_desc = PLT_MAX(limit + 1, nb_desc); 79022f1c1aSNithin Dabilpuram if (nb_desc > CNXK_NIX_CQ_INL_CLAMP_MAX) { 80022f1c1aSNithin Dabilpuram plt_warn("Could not setup CQ size to accommodate" 81022f1c1aSNithin Dabilpuram " all buffers in related auras (%" PRIu64 ")", 82022f1c1aSNithin Dabilpuram limit); 83022f1c1aSNithin Dabilpuram nb_desc = CNXK_NIX_CQ_INL_CLAMP_MAX; 84022f1c1aSNithin Dabilpuram } 85022f1c1aSNithin Dabilpuram return nb_desc; 86022f1c1aSNithin Dabilpuram } 87022f1c1aSNithin Dabilpuram 887eabd6c6SNithin Dabilpuram int 897eabd6c6SNithin Dabilpuram cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev) 907eabd6c6SNithin Dabilpuram { 917eabd6c6SNithin Dabilpuram struct roc_nix *nix = &dev->nix; 927eabd6c6SNithin Dabilpuram 937eabd6c6SNithin Dabilpuram plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!", 947eabd6c6SNithin Dabilpuram dev->inb.nb_sess, !!dev->inb.inl_dev); 957eabd6c6SNithin Dabilpuram 967eabd6c6SNithin Dabilpuram /* Change the mode */ 977eabd6c6SNithin Dabilpuram dev->inb.inl_dev = use_inl_dev; 987eabd6c6SNithin Dabilpuram 997eabd6c6SNithin Dabilpuram /* Update RoC for NPC rule insertion */ 1007eabd6c6SNithin Dabilpuram roc_nix_inb_mode_set(nix, use_inl_dev); 1017eabd6c6SNithin Dabilpuram 1027eabd6c6SNithin Dabilpuram /* Setup 
lookup mem */ 1037eabd6c6SNithin Dabilpuram return cnxk_nix_lookup_mem_sa_base_set(dev); 1047eabd6c6SNithin Dabilpuram } 1057eabd6c6SNithin Dabilpuram 1067eabd6c6SNithin Dabilpuram static int 1077eabd6c6SNithin Dabilpuram nix_security_setup(struct cnxk_eth_dev *dev) 1087eabd6c6SNithin Dabilpuram { 1097eabd6c6SNithin Dabilpuram struct roc_nix *nix = &dev->nix; 1107eabd6c6SNithin Dabilpuram int i, rc = 0; 1117eabd6c6SNithin Dabilpuram 112295968d1SFerruh Yigit if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) { 11361ee9dc8SNithin Dabilpuram /* Setup minimum SA table when inline device is used */ 11461ee9dc8SNithin Dabilpuram nix->ipsec_in_min_spi = dev->inb.no_inl_dev ? dev->inb.min_spi : 0; 11561ee9dc8SNithin Dabilpuram nix->ipsec_in_max_spi = dev->inb.no_inl_dev ? dev->inb.max_spi : 1; 11661ee9dc8SNithin Dabilpuram 1175351a5f0SNithin Dabilpuram /* Enable custom meta aura when multi-chan is used */ 1185351a5f0SNithin Dabilpuram if (nix->local_meta_aura_ena && roc_nix_inl_dev_is_multi_channel() && 1195351a5f0SNithin Dabilpuram !dev->inb.custom_meta_aura_dis) 1205351a5f0SNithin Dabilpuram nix->custom_meta_aura_ena = true; 1215351a5f0SNithin Dabilpuram 1227eabd6c6SNithin Dabilpuram /* Setup Inline Inbound */ 1237eabd6c6SNithin Dabilpuram rc = roc_nix_inl_inb_init(nix); 1247eabd6c6SNithin Dabilpuram if (rc) { 1257eabd6c6SNithin Dabilpuram plt_err("Failed to initialize nix inline inb, rc=%d", 1267eabd6c6SNithin Dabilpuram rc); 1277eabd6c6SNithin Dabilpuram return rc; 1287eabd6c6SNithin Dabilpuram } 1297eabd6c6SNithin Dabilpuram 1307eabd6c6SNithin Dabilpuram /* By default pick using inline device for poll mode. 1317eabd6c6SNithin Dabilpuram * Will be overridden when event mode rq's are setup. 1327eabd6c6SNithin Dabilpuram */ 1338f80a2e3SNithin Dabilpuram cnxk_nix_inb_mode_set(dev, !dev->inb.no_inl_dev); 1343c3ea76cSSrujana Challa 1353c3ea76cSSrujana Challa /* Allocate memory to be used as dptr for CPT ucode 1363c3ea76cSSrujana Challa * WRITE_SA op. 
1373c3ea76cSSrujana Challa */ 1383c3ea76cSSrujana Challa dev->inb.sa_dptr = 1393c3ea76cSSrujana Challa plt_zmalloc(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ, 0); 1403c3ea76cSSrujana Challa if (!dev->inb.sa_dptr) { 1413c3ea76cSSrujana Challa plt_err("Couldn't allocate memory for SA dptr"); 1423c3ea76cSSrujana Challa rc = -ENOMEM; 1433c3ea76cSSrujana Challa goto cleanup; 1443c3ea76cSSrujana Challa } 145de8c60d1SSrujana Challa dev->inb.inl_dev_q = roc_nix_inl_dev_qptr_get(0); 1467eabd6c6SNithin Dabilpuram } 1477eabd6c6SNithin Dabilpuram 148295968d1SFerruh Yigit if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY || 149295968d1SFerruh Yigit dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) { 1507eabd6c6SNithin Dabilpuram struct plt_bitmap *bmap; 1517eabd6c6SNithin Dabilpuram size_t bmap_sz; 1527eabd6c6SNithin Dabilpuram void *mem; 1537eabd6c6SNithin Dabilpuram 1547eabd6c6SNithin Dabilpuram /* Setup enough descriptors for all tx queues */ 1557eabd6c6SNithin Dabilpuram nix->outb_nb_desc = dev->outb.nb_desc; 1567eabd6c6SNithin Dabilpuram nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs; 1577eabd6c6SNithin Dabilpuram 1587eabd6c6SNithin Dabilpuram /* Setup Inline Outbound */ 1597eabd6c6SNithin Dabilpuram rc = roc_nix_inl_outb_init(nix); 1607eabd6c6SNithin Dabilpuram if (rc) { 1617eabd6c6SNithin Dabilpuram plt_err("Failed to initialize nix inline outb, rc=%d", 1627eabd6c6SNithin Dabilpuram rc); 1633c3ea76cSSrujana Challa goto sa_dptr_free; 1647eabd6c6SNithin Dabilpuram } 1657eabd6c6SNithin Dabilpuram 1667eabd6c6SNithin Dabilpuram dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix); 1677eabd6c6SNithin Dabilpuram 1683c3ea76cSSrujana Challa /* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */ 169295968d1SFerruh Yigit if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)) 1703c3ea76cSSrujana Challa return 0; 1713c3ea76cSSrujana Challa 1723c3ea76cSSrujana Challa /* Allocate memory to be used as dptr for CPT ucode 1733c3ea76cSSrujana Challa * WRITE_SA op. 
1743c3ea76cSSrujana Challa */ 1753c3ea76cSSrujana Challa dev->outb.sa_dptr = 1763c3ea76cSSrujana Challa plt_zmalloc(ROC_NIX_INL_OT_IPSEC_OUTB_HW_SZ, 0); 1773c3ea76cSSrujana Challa if (!dev->outb.sa_dptr) { 1783c3ea76cSSrujana Challa plt_err("Couldn't allocate memory for SA dptr"); 1793c3ea76cSSrujana Challa rc = -ENOMEM; 1803c3ea76cSSrujana Challa goto sa_dptr_free; 1813c3ea76cSSrujana Challa } 1827eabd6c6SNithin Dabilpuram 1837eabd6c6SNithin Dabilpuram rc = -ENOMEM; 1847eabd6c6SNithin Dabilpuram /* Allocate a bitmap to alloc and free sa indexes */ 1857eabd6c6SNithin Dabilpuram bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa); 1867eabd6c6SNithin Dabilpuram mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE); 1877eabd6c6SNithin Dabilpuram if (mem == NULL) { 1887eabd6c6SNithin Dabilpuram plt_err("Outbound SA bmap alloc failed"); 1897eabd6c6SNithin Dabilpuram 1907eabd6c6SNithin Dabilpuram rc |= roc_nix_inl_outb_fini(nix); 1913c3ea76cSSrujana Challa goto sa_dptr_free; 1927eabd6c6SNithin Dabilpuram } 1937eabd6c6SNithin Dabilpuram 1947eabd6c6SNithin Dabilpuram rc = -EIO; 1957eabd6c6SNithin Dabilpuram bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz); 1967eabd6c6SNithin Dabilpuram if (!bmap) { 1977eabd6c6SNithin Dabilpuram plt_err("Outbound SA bmap init failed"); 1987eabd6c6SNithin Dabilpuram 1997eabd6c6SNithin Dabilpuram rc |= roc_nix_inl_outb_fini(nix); 2007eabd6c6SNithin Dabilpuram plt_free(mem); 2013c3ea76cSSrujana Challa goto sa_dptr_free; 2027eabd6c6SNithin Dabilpuram } 2037eabd6c6SNithin Dabilpuram 2047eabd6c6SNithin Dabilpuram for (i = 0; i < dev->outb.max_sa; i++) 2057eabd6c6SNithin Dabilpuram plt_bitmap_set(bmap, i); 2067eabd6c6SNithin Dabilpuram 2077eabd6c6SNithin Dabilpuram dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix); 2087eabd6c6SNithin Dabilpuram dev->outb.sa_bmap_mem = mem; 2097eabd6c6SNithin Dabilpuram dev->outb.sa_bmap = bmap; 210358d02d2SNithin Dabilpuram 211358d02d2SNithin Dabilpuram dev->outb.fc_sw_mem = 
plt_zmalloc(dev->outb.nb_crypto_qs * 212358d02d2SNithin Dabilpuram RTE_CACHE_LINE_SIZE, 213358d02d2SNithin Dabilpuram RTE_CACHE_LINE_SIZE); 214358d02d2SNithin Dabilpuram if (!dev->outb.fc_sw_mem) { 215358d02d2SNithin Dabilpuram plt_err("Outbound fc sw mem alloc failed"); 216358d02d2SNithin Dabilpuram goto sa_bmap_free; 217358d02d2SNithin Dabilpuram } 2187c67c489SNithin Dabilpuram 2197c67c489SNithin Dabilpuram dev->outb.cpt_eng_caps = roc_nix_inl_eng_caps_get(nix); 2207eabd6c6SNithin Dabilpuram } 2217eabd6c6SNithin Dabilpuram return 0; 2223c3ea76cSSrujana Challa 223358d02d2SNithin Dabilpuram sa_bmap_free: 224358d02d2SNithin Dabilpuram plt_free(dev->outb.sa_bmap_mem); 2253c3ea76cSSrujana Challa sa_dptr_free: 2263c3ea76cSSrujana Challa if (dev->inb.sa_dptr) 2273c3ea76cSSrujana Challa plt_free(dev->inb.sa_dptr); 2283c3ea76cSSrujana Challa if (dev->outb.sa_dptr) 2293c3ea76cSSrujana Challa plt_free(dev->outb.sa_dptr); 2307eabd6c6SNithin Dabilpuram cleanup: 231295968d1SFerruh Yigit if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) 2327eabd6c6SNithin Dabilpuram rc |= roc_nix_inl_inb_fini(nix); 2337eabd6c6SNithin Dabilpuram return rc; 2347eabd6c6SNithin Dabilpuram } 2357eabd6c6SNithin Dabilpuram 2367eabd6c6SNithin Dabilpuram static int 2376af19a9dSSunil Kumar Kori nix_meter_fini(struct cnxk_eth_dev *dev) 2386af19a9dSSunil Kumar Kori { 2396af19a9dSSunil Kumar Kori struct cnxk_meter_node *next_mtr = NULL; 2406af19a9dSSunil Kumar Kori struct roc_nix_bpf_objs profs = {0}; 2416af19a9dSSunil Kumar Kori struct cnxk_meter_node *mtr = NULL; 2426af19a9dSSunil Kumar Kori struct cnxk_mtr *fms = &dev->mtr; 2436af19a9dSSunil Kumar Kori struct roc_nix *nix = &dev->nix; 2446af19a9dSSunil Kumar Kori struct roc_nix_rq *rq; 2456af19a9dSSunil Kumar Kori uint32_t i; 246e8cda505SGowrishankar Muthukrishnan int rc = 0; 2476af19a9dSSunil Kumar Kori 2486af19a9dSSunil Kumar Kori RTE_TAILQ_FOREACH_SAFE(mtr, fms, next, next_mtr) { 2496af19a9dSSunil Kumar Kori for (i = 0; i < mtr->rq_num; i++) { 
2506af19a9dSSunil Kumar Kori rq = &dev->rqs[mtr->rq_id[i]]; 2516af19a9dSSunil Kumar Kori rc |= roc_nix_bpf_ena_dis(nix, mtr->bpf_id, rq, false); 2526af19a9dSSunil Kumar Kori } 2536af19a9dSSunil Kumar Kori 2546af19a9dSSunil Kumar Kori profs.level = mtr->level; 2556af19a9dSSunil Kumar Kori profs.count = 1; 2566af19a9dSSunil Kumar Kori profs.ids[0] = mtr->bpf_id; 2576af19a9dSSunil Kumar Kori rc = roc_nix_bpf_free(nix, &profs, 1); 2586af19a9dSSunil Kumar Kori 2596af19a9dSSunil Kumar Kori if (rc) 2606af19a9dSSunil Kumar Kori return rc; 2616af19a9dSSunil Kumar Kori 2626af19a9dSSunil Kumar Kori TAILQ_REMOVE(fms, mtr, next); 2636af19a9dSSunil Kumar Kori plt_free(mtr); 2646af19a9dSSunil Kumar Kori } 2656af19a9dSSunil Kumar Kori return 0; 2666af19a9dSSunil Kumar Kori } 2676af19a9dSSunil Kumar Kori 2686af19a9dSSunil Kumar Kori static int 2697eabd6c6SNithin Dabilpuram nix_security_release(struct cnxk_eth_dev *dev) 2707eabd6c6SNithin Dabilpuram { 2717eabd6c6SNithin Dabilpuram struct rte_eth_dev *eth_dev = dev->eth_dev; 2727eabd6c6SNithin Dabilpuram struct cnxk_eth_sec_sess *eth_sec, *tvar; 2737eabd6c6SNithin Dabilpuram struct roc_nix *nix = &dev->nix; 2747eabd6c6SNithin Dabilpuram int rc, ret = 0; 2757eabd6c6SNithin Dabilpuram 2767eabd6c6SNithin Dabilpuram /* Cleanup Inline inbound */ 277295968d1SFerruh Yigit if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) { 2787eabd6c6SNithin Dabilpuram /* Destroy inbound sessions */ 2797eabd6c6SNithin Dabilpuram tvar = NULL; 2807eabd6c6SNithin Dabilpuram RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar) 2817eabd6c6SNithin Dabilpuram cnxk_eth_sec_ops.session_destroy(eth_dev, 2827eabd6c6SNithin Dabilpuram eth_sec->sess); 2837eabd6c6SNithin Dabilpuram 2847eabd6c6SNithin Dabilpuram /* Clear lookup mem */ 2857eabd6c6SNithin Dabilpuram cnxk_nix_lookup_mem_sa_base_clear(dev); 2867eabd6c6SNithin Dabilpuram 2877eabd6c6SNithin Dabilpuram rc = roc_nix_inl_inb_fini(nix); 2887eabd6c6SNithin Dabilpuram if (rc) 2897eabd6c6SNithin Dabilpuram 
plt_err("Failed to cleanup nix inline inb, rc=%d", rc); 2907eabd6c6SNithin Dabilpuram ret |= rc; 2913c3ea76cSSrujana Challa 2924fb24a62SRahul Bhansali cnxk_nix_lookup_mem_metapool_clear(dev); 2934fb24a62SRahul Bhansali 2943c3ea76cSSrujana Challa if (dev->inb.sa_dptr) { 2953c3ea76cSSrujana Challa plt_free(dev->inb.sa_dptr); 2963c3ea76cSSrujana Challa dev->inb.sa_dptr = NULL; 2973c3ea76cSSrujana Challa } 2987eabd6c6SNithin Dabilpuram } 2997eabd6c6SNithin Dabilpuram 3007eabd6c6SNithin Dabilpuram /* Cleanup Inline outbound */ 301295968d1SFerruh Yigit if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY || 302295968d1SFerruh Yigit dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) { 3037eabd6c6SNithin Dabilpuram /* Destroy outbound sessions */ 3047eabd6c6SNithin Dabilpuram tvar = NULL; 3057eabd6c6SNithin Dabilpuram RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar) 3067eabd6c6SNithin Dabilpuram cnxk_eth_sec_ops.session_destroy(eth_dev, 3077eabd6c6SNithin Dabilpuram eth_sec->sess); 3087eabd6c6SNithin Dabilpuram 3097eabd6c6SNithin Dabilpuram rc = roc_nix_inl_outb_fini(nix); 3107eabd6c6SNithin Dabilpuram if (rc) 3117eabd6c6SNithin Dabilpuram plt_err("Failed to cleanup nix inline outb, rc=%d", rc); 3127eabd6c6SNithin Dabilpuram ret |= rc; 3137eabd6c6SNithin Dabilpuram 3147eabd6c6SNithin Dabilpuram plt_bitmap_free(dev->outb.sa_bmap); 3157eabd6c6SNithin Dabilpuram plt_free(dev->outb.sa_bmap_mem); 3167eabd6c6SNithin Dabilpuram dev->outb.sa_bmap = NULL; 3177eabd6c6SNithin Dabilpuram dev->outb.sa_bmap_mem = NULL; 3183c3ea76cSSrujana Challa if (dev->outb.sa_dptr) { 3193c3ea76cSSrujana Challa plt_free(dev->outb.sa_dptr); 3203c3ea76cSSrujana Challa dev->outb.sa_dptr = NULL; 3213c3ea76cSSrujana Challa } 322358d02d2SNithin Dabilpuram 323358d02d2SNithin Dabilpuram plt_free(dev->outb.fc_sw_mem); 324358d02d2SNithin Dabilpuram dev->outb.fc_sw_mem = NULL; 3257eabd6c6SNithin Dabilpuram } 3267eabd6c6SNithin Dabilpuram 3277eabd6c6SNithin Dabilpuram dev->inb.inl_dev = false; 
3287eabd6c6SNithin Dabilpuram roc_nix_inb_mode_set(nix, false); 3297eabd6c6SNithin Dabilpuram dev->nb_rxq_sso = 0; 3307eabd6c6SNithin Dabilpuram dev->inb.nb_sess = 0; 3317eabd6c6SNithin Dabilpuram dev->outb.nb_sess = 0; 3327eabd6c6SNithin Dabilpuram return ret; 3337eabd6c6SNithin Dabilpuram } 3347eabd6c6SNithin Dabilpuram 3358589ec21SSunil Kumar Kori static void 3368589ec21SSunil Kumar Kori nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq) 3378589ec21SSunil Kumar Kori { 3388589ec21SSunil Kumar Kori struct rte_pktmbuf_pool_private *mbp_priv; 3398589ec21SSunil Kumar Kori struct rte_eth_dev *eth_dev; 3408589ec21SSunil Kumar Kori struct cnxk_eth_dev *dev; 3418589ec21SSunil Kumar Kori uint32_t buffsz; 3428589ec21SSunil Kumar Kori 3438589ec21SSunil Kumar Kori dev = rxq->dev; 3448589ec21SSunil Kumar Kori eth_dev = dev->eth_dev; 3458589ec21SSunil Kumar Kori 3468589ec21SSunil Kumar Kori /* Get rx buffer size */ 3478589ec21SSunil Kumar Kori mbp_priv = rte_mempool_get_priv(rxq->qconf.mp); 3488589ec21SSunil Kumar Kori buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; 3498589ec21SSunil Kumar Kori 3501bb4a528SFerruh Yigit if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) { 351295968d1SFerruh Yigit dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER; 352295968d1SFerruh Yigit dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; 3538589ec21SSunil Kumar Kori } 3548589ec21SSunil Kumar Kori } 3558589ec21SSunil Kumar Kori 356c7c7c8edSSunil Kumar Kori int 3578589ec21SSunil Kumar Kori nix_recalc_mtu(struct rte_eth_dev *eth_dev) 3588589ec21SSunil Kumar Kori { 3598589ec21SSunil Kumar Kori struct rte_eth_dev_data *data = eth_dev->data; 3608589ec21SSunil Kumar Kori struct cnxk_eth_rxq_sp *rxq; 3618589ec21SSunil Kumar Kori int rc; 3628589ec21SSunil Kumar Kori 3638589ec21SSunil Kumar Kori rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1; 3648589ec21SSunil Kumar Kori /* Setup scatter mode if needed by jumbo */ 3658589ec21SSunil Kumar Kori 
nix_enable_mseg_on_jumbo(rxq); 3668589ec21SSunil Kumar Kori 3671bb4a528SFerruh Yigit rc = cnxk_nix_mtu_set(eth_dev, data->mtu); 3688589ec21SSunil Kumar Kori if (rc) 3698589ec21SSunil Kumar Kori plt_err("Failed to set default MTU size, rc=%d", rc); 3708589ec21SSunil Kumar Kori 3718589ec21SSunil Kumar Kori return rc; 3728589ec21SSunil Kumar Kori } 3738589ec21SSunil Kumar Kori 374d2bebb1fSSunil Kumar Kori static int 375d2bebb1fSSunil Kumar Kori nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev) 376d2bebb1fSSunil Kumar Kori { 377d2bebb1fSSunil Kumar Kori struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); 378e3a73b0bSNithin Dabilpuram enum roc_nix_fc_mode fc_mode = ROC_NIX_FC_FULL; 379d2bebb1fSSunil Kumar Kori struct cnxk_fc_cfg *fc = &dev->fc_cfg; 380d2bebb1fSSunil Kumar Kori int rc; 381d2bebb1fSSunil Kumar Kori 3822b688664SNithin Dabilpuram if (roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) 383ff1400aaSRadha Mohan Chintakuntla return 0; 384ff1400aaSRadha Mohan Chintakuntla 385e3a73b0bSNithin Dabilpuram /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */ 386e3a73b0bSNithin Dabilpuram if (roc_model_is_cn96_ax() && 387e3a73b0bSNithin Dabilpuram dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG) 388e3a73b0bSNithin Dabilpuram fc_mode = ROC_NIX_FC_TX; 389e3a73b0bSNithin Dabilpuram 390e3a73b0bSNithin Dabilpuram /* By default enable flow control */ 391e3a73b0bSNithin Dabilpuram rc = roc_nix_fc_mode_set(&dev->nix, fc_mode); 392d2bebb1fSSunil Kumar Kori if (rc) 393e3a73b0bSNithin Dabilpuram return rc; 394d2bebb1fSSunil Kumar Kori 395074809b9SNithin Dabilpuram fc->mode = (fc_mode == ROC_NIX_FC_FULL) ? 
RTE_ETH_FC_FULL : RTE_ETH_FC_TX_PAUSE; 396074809b9SNithin Dabilpuram fc->rx_pause = (fc->mode == RTE_ETH_FC_FULL) || (fc->mode == RTE_ETH_FC_RX_PAUSE); 397074809b9SNithin Dabilpuram fc->tx_pause = (fc->mode == RTE_ETH_FC_FULL) || (fc->mode == RTE_ETH_FC_TX_PAUSE); 398d2bebb1fSSunil Kumar Kori return rc; 399d2bebb1fSSunil Kumar Kori } 400d2bebb1fSSunil Kumar Kori 401d2bebb1fSSunil Kumar Kori static int 402d2bebb1fSSunil Kumar Kori nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev) 403d2bebb1fSSunil Kumar Kori { 404d2bebb1fSSunil Kumar Kori struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); 405d2bebb1fSSunil Kumar Kori struct cnxk_fc_cfg *fc = &dev->fc_cfg; 406d2bebb1fSSunil Kumar Kori struct rte_eth_fc_conf fc_cfg = {0}; 407d2bebb1fSSunil Kumar Kori 408722282a4SHarman Kalra if (roc_nix_is_sdp(&dev->nix) || roc_nix_is_esw(&dev->nix)) 4092b688664SNithin Dabilpuram return 0; 4102b688664SNithin Dabilpuram 4112b688664SNithin Dabilpuram /* Don't do anything if PFC is enabled */ 4122b688664SNithin Dabilpuram if (dev->pfc_cfg.rx_pause_en || dev->pfc_cfg.tx_pause_en) 413d2bebb1fSSunil Kumar Kori return 0; 414d2bebb1fSSunil Kumar Kori 415d2bebb1fSSunil Kumar Kori fc_cfg.mode = fc->mode; 416d2bebb1fSSunil Kumar Kori 417d2bebb1fSSunil Kumar Kori /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */ 418d2bebb1fSSunil Kumar Kori if (roc_model_is_cn96_ax() && 4194093c5a8SKiran Kumar K dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG && 420295968d1SFerruh Yigit (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) { 421d2bebb1fSSunil Kumar Kori fc_cfg.mode = 422295968d1SFerruh Yigit (fc_cfg.mode == RTE_ETH_FC_FULL || 423295968d1SFerruh Yigit fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ? 
424295968d1SFerruh Yigit RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE; 425d2bebb1fSSunil Kumar Kori } 426d2bebb1fSSunil Kumar Kori 427d2bebb1fSSunil Kumar Kori return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg); 428d2bebb1fSSunil Kumar Kori } 429d2bebb1fSSunil Kumar Kori 430a86144cdSNithin Dabilpuram uint64_t 431a86144cdSNithin Dabilpuram cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev) 432a86144cdSNithin Dabilpuram { 433a86144cdSNithin Dabilpuram uint16_t port_id = dev->eth_dev->data->port_id; 434a86144cdSNithin Dabilpuram struct rte_mbuf mb_def; 435a86144cdSNithin Dabilpuram uint64_t *tmp; 436a86144cdSNithin Dabilpuram 437a86144cdSNithin Dabilpuram RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0); 438a86144cdSNithin Dabilpuram RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) - 439a86144cdSNithin Dabilpuram offsetof(struct rte_mbuf, data_off) != 440a86144cdSNithin Dabilpuram 2); 441a86144cdSNithin Dabilpuram RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) - 442a86144cdSNithin Dabilpuram offsetof(struct rte_mbuf, data_off) != 443a86144cdSNithin Dabilpuram 4); 444a86144cdSNithin Dabilpuram RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) - 445a86144cdSNithin Dabilpuram offsetof(struct rte_mbuf, data_off) != 446a86144cdSNithin Dabilpuram 6); 447a86144cdSNithin Dabilpuram mb_def.nb_segs = 1; 44876dff638SSunil Kumar Kori mb_def.data_off = RTE_PKTMBUF_HEADROOM + 44976dff638SSunil Kumar Kori (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET); 450a86144cdSNithin Dabilpuram mb_def.port = port_id; 451a86144cdSNithin Dabilpuram rte_mbuf_refcnt_set(&mb_def, 1); 452a86144cdSNithin Dabilpuram 453a86144cdSNithin Dabilpuram /* Prevent compiler reordering: rearm_data covers previous fields */ 454a86144cdSNithin Dabilpuram rte_compiler_barrier(); 455a86144cdSNithin Dabilpuram tmp = (uint64_t *)&mb_def.rearm_data; 456a86144cdSNithin Dabilpuram 457a86144cdSNithin Dabilpuram return *tmp; 458a86144cdSNithin Dabilpuram } 459a86144cdSNithin Dabilpuram 460a24af636SNithin Dabilpuram 
static inline uint8_t 461a24af636SNithin Dabilpuram nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev) 462a24af636SNithin Dabilpuram { 463a24af636SNithin Dabilpuram /* 464a24af636SNithin Dabilpuram * Maximum three segments can be supported with W8, Choose 465a24af636SNithin Dabilpuram * NIX_MAXSQESZ_W16 for multi segment offload. 466a24af636SNithin Dabilpuram */ 467295968d1SFerruh Yigit if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) 468a24af636SNithin Dabilpuram return NIX_MAXSQESZ_W16; 469a24af636SNithin Dabilpuram else 470a24af636SNithin Dabilpuram return NIX_MAXSQESZ_W8; 471a24af636SNithin Dabilpuram } 472a24af636SNithin Dabilpuram 473a24af636SNithin Dabilpuram int 474a24af636SNithin Dabilpuram cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, 475a24af636SNithin Dabilpuram uint16_t nb_desc, uint16_t fp_tx_q_sz, 476a24af636SNithin Dabilpuram const struct rte_eth_txconf *tx_conf) 477a24af636SNithin Dabilpuram { 478a24af636SNithin Dabilpuram struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); 479a24af636SNithin Dabilpuram const struct eth_dev_ops *dev_ops = eth_dev->dev_ops; 480dd944699SRakesh Kudurumalla struct roc_nix *nix = &dev->nix; 481a24af636SNithin Dabilpuram struct cnxk_eth_txq_sp *txq_sp; 482dd944699SRakesh Kudurumalla struct roc_nix_cq *cq; 483a24af636SNithin Dabilpuram struct roc_nix_sq *sq; 484a24af636SNithin Dabilpuram size_t txq_sz; 485a24af636SNithin Dabilpuram int rc; 486a24af636SNithin Dabilpuram 487a24af636SNithin Dabilpuram /* Free memory prior to re-allocation if needed. 
*/ 488a24af636SNithin Dabilpuram if (eth_dev->data->tx_queues[qid] != NULL) { 489a24af636SNithin Dabilpuram plt_nix_dbg("Freeing memory prior to re-allocation %d", qid); 4907483341aSXueming Li dev_ops->tx_queue_release(eth_dev, qid); 491a24af636SNithin Dabilpuram eth_dev->data->tx_queues[qid] = NULL; 492a24af636SNithin Dabilpuram } 493a24af636SNithin Dabilpuram 4947eabd6c6SNithin Dabilpuram /* When Tx Security offload is enabled, increase tx desc count by 4957eabd6c6SNithin Dabilpuram * max possible outbound desc count. 4967eabd6c6SNithin Dabilpuram */ 497295968d1SFerruh Yigit if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) 4987eabd6c6SNithin Dabilpuram nb_desc += dev->outb.nb_desc; 4997eabd6c6SNithin Dabilpuram 500a24af636SNithin Dabilpuram /* Setup ROC SQ */ 501a24af636SNithin Dabilpuram sq = &dev->sqs[qid]; 502a24af636SNithin Dabilpuram sq->qid = qid; 503a24af636SNithin Dabilpuram sq->nb_desc = nb_desc; 504a24af636SNithin Dabilpuram sq->max_sqe_sz = nix_sq_max_sqe_sz(dev); 5056dc507d3SPavan Nikhilesh if (sq->nb_desc >= CNXK_NIX_DEF_SQ_COUNT) 5066dc507d3SPavan Nikhilesh sq->fc_hyst_bits = 0x1; 507a24af636SNithin Dabilpuram 508dd944699SRakesh Kudurumalla if (nix->tx_compl_ena) { 509dd944699SRakesh Kudurumalla sq->cqid = sq->qid + dev->nb_rxq; 510dd944699SRakesh Kudurumalla sq->cq_ena = 1; 511dd944699SRakesh Kudurumalla cq = &dev->cqs[sq->cqid]; 512dd944699SRakesh Kudurumalla cq->qid = sq->cqid; 513dd944699SRakesh Kudurumalla cq->nb_desc = nb_desc; 514dd944699SRakesh Kudurumalla rc = roc_nix_cq_init(&dev->nix, cq); 515dd944699SRakesh Kudurumalla if (rc) { 516dd944699SRakesh Kudurumalla plt_err("Failed to init cq=%d, rc=%d", cq->qid, rc); 517dd944699SRakesh Kudurumalla return rc; 518dd944699SRakesh Kudurumalla } 519dd944699SRakesh Kudurumalla } 520dd944699SRakesh Kudurumalla 521a24af636SNithin Dabilpuram rc = roc_nix_sq_init(&dev->nix, sq); 522a24af636SNithin Dabilpuram if (rc) { 523a24af636SNithin Dabilpuram plt_err("Failed to init sq=%d, rc=%d", qid, rc); 
524a24af636SNithin Dabilpuram return rc; 525a24af636SNithin Dabilpuram } 526a24af636SNithin Dabilpuram 527a24af636SNithin Dabilpuram rc = -ENOMEM; 528a24af636SNithin Dabilpuram txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz; 529a24af636SNithin Dabilpuram txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE); 530a24af636SNithin Dabilpuram if (!txq_sp) { 531a24af636SNithin Dabilpuram plt_err("Failed to alloc tx queue mem"); 532a24af636SNithin Dabilpuram rc |= roc_nix_sq_fini(sq); 533a24af636SNithin Dabilpuram return rc; 534a24af636SNithin Dabilpuram } 535a24af636SNithin Dabilpuram 536a24af636SNithin Dabilpuram txq_sp->dev = dev; 537a24af636SNithin Dabilpuram txq_sp->qid = qid; 538a24af636SNithin Dabilpuram txq_sp->qconf.conf.tx = *tx_conf; 539137fbfc6SNithin Dabilpuram /* Queue config should reflect global offloads */ 540137fbfc6SNithin Dabilpuram txq_sp->qconf.conf.tx.offloads = dev->tx_offloads; 541a24af636SNithin Dabilpuram txq_sp->qconf.nb_desc = nb_desc; 542a24af636SNithin Dabilpuram 543a24af636SNithin Dabilpuram plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p" 544a24af636SNithin Dabilpuram " nb_sqb_bufs=%d sqes_per_sqb_log2=%d", 545a24af636SNithin Dabilpuram qid, sq->fc, dev->tx_offloads, sq->lmt_addr, 546a24af636SNithin Dabilpuram sq->nb_sqb_bufs, sq->sqes_per_sqb_log2); 547a24af636SNithin Dabilpuram 548a24af636SNithin Dabilpuram /* Store start of fast path area */ 549a24af636SNithin Dabilpuram eth_dev->data->tx_queues[qid] = txq_sp + 1; 550a24af636SNithin Dabilpuram eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED; 551a24af636SNithin Dabilpuram return 0; 552a24af636SNithin Dabilpuram } 553a24af636SNithin Dabilpuram 554dd944699SRakesh Kudurumalla void 5557483341aSXueming Li cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid) 556a24af636SNithin Dabilpuram { 5577483341aSXueming Li void *txq = eth_dev->data->tx_queues[qid]; 558a24af636SNithin Dabilpuram struct cnxk_eth_txq_sp *txq_sp; 559a24af636SNithin Dabilpuram 
struct cnxk_eth_dev *dev; 560a24af636SNithin Dabilpuram struct roc_nix_sq *sq; 561a24af636SNithin Dabilpuram int rc; 562a24af636SNithin Dabilpuram 563a24af636SNithin Dabilpuram if (!txq) 564a24af636SNithin Dabilpuram return; 565a24af636SNithin Dabilpuram 566a24af636SNithin Dabilpuram txq_sp = cnxk_eth_txq_to_sp(txq); 5677483341aSXueming Li 568a24af636SNithin Dabilpuram dev = txq_sp->dev; 569a24af636SNithin Dabilpuram 570a24af636SNithin Dabilpuram plt_nix_dbg("Releasing txq %u", qid); 571a24af636SNithin Dabilpuram 572a24af636SNithin Dabilpuram /* Cleanup ROC SQ */ 573a24af636SNithin Dabilpuram sq = &dev->sqs[qid]; 574a24af636SNithin Dabilpuram rc = roc_nix_sq_fini(sq); 575a24af636SNithin Dabilpuram if (rc) 576a24af636SNithin Dabilpuram plt_err("Failed to cleanup sq, rc=%d", rc); 577a24af636SNithin Dabilpuram 578a24af636SNithin Dabilpuram /* Finally free */ 579a24af636SNithin Dabilpuram plt_free(txq_sp); 580a24af636SNithin Dabilpuram } 581a24af636SNithin Dabilpuram 5824daf12f4SHanumanth Pothula static int 5834daf12f4SHanumanth Pothula cnxk_nix_process_rx_conf(const struct rte_eth_rxconf *rx_conf, 5844daf12f4SHanumanth Pothula struct rte_mempool **lpb_pool, 5854daf12f4SHanumanth Pothula struct rte_mempool **spb_pool) 5864daf12f4SHanumanth Pothula { 5874daf12f4SHanumanth Pothula struct rte_mempool *pool0; 5884daf12f4SHanumanth Pothula struct rte_mempool *pool1; 5894daf12f4SHanumanth Pothula struct rte_mempool **mp = rx_conf->rx_mempools; 5904daf12f4SHanumanth Pothula const char *platform_ops; 5914daf12f4SHanumanth Pothula struct rte_mempool_ops *ops; 5924daf12f4SHanumanth Pothula 5934daf12f4SHanumanth Pothula if (*lpb_pool || 5944daf12f4SHanumanth Pothula rx_conf->rx_nmempool != CNXK_NIX_NUM_POOLS_MAX) { 5954daf12f4SHanumanth Pothula plt_err("invalid arguments"); 5964daf12f4SHanumanth Pothula return -EINVAL; 5974daf12f4SHanumanth Pothula } 5984daf12f4SHanumanth Pothula 5994daf12f4SHanumanth Pothula if (mp == NULL || mp[0] == NULL || mp[1] == NULL) { 600f665790aSDavid 
Marchand plt_err("invalid memory pools"); 6014daf12f4SHanumanth Pothula return -EINVAL; 6024daf12f4SHanumanth Pothula } 6034daf12f4SHanumanth Pothula 6044daf12f4SHanumanth Pothula pool0 = mp[0]; 6054daf12f4SHanumanth Pothula pool1 = mp[1]; 6064daf12f4SHanumanth Pothula 6074daf12f4SHanumanth Pothula if (pool0->elt_size > pool1->elt_size) { 6084daf12f4SHanumanth Pothula *lpb_pool = pool0; 6094daf12f4SHanumanth Pothula *spb_pool = pool1; 6104daf12f4SHanumanth Pothula 6114daf12f4SHanumanth Pothula } else { 6124daf12f4SHanumanth Pothula *lpb_pool = pool1; 6134daf12f4SHanumanth Pothula *spb_pool = pool0; 6144daf12f4SHanumanth Pothula } 6154daf12f4SHanumanth Pothula 6164daf12f4SHanumanth Pothula if ((*spb_pool)->pool_id == 0) { 6174daf12f4SHanumanth Pothula plt_err("Invalid pool_id"); 6184daf12f4SHanumanth Pothula return -EINVAL; 6194daf12f4SHanumanth Pothula } 6204daf12f4SHanumanth Pothula 6214daf12f4SHanumanth Pothula platform_ops = rte_mbuf_platform_mempool_ops(); 6224daf12f4SHanumanth Pothula ops = rte_mempool_get_ops((*spb_pool)->ops_index); 6234daf12f4SHanumanth Pothula if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) { 6244daf12f4SHanumanth Pothula plt_err("mempool ops should be of cnxk_npa type"); 6254daf12f4SHanumanth Pothula return -EINVAL; 6264daf12f4SHanumanth Pothula } 6274daf12f4SHanumanth Pothula 628f665790aSDavid Marchand plt_info("spb_pool:%s lpb_pool:%s lpb_len:%u spb_len:%u", (*spb_pool)->name, 6294daf12f4SHanumanth Pothula (*lpb_pool)->name, (*lpb_pool)->elt_size, (*spb_pool)->elt_size); 6304daf12f4SHanumanth Pothula 6314daf12f4SHanumanth Pothula return 0; 6324daf12f4SHanumanth Pothula } 6334daf12f4SHanumanth Pothula 634a86144cdSNithin Dabilpuram int 635a86144cdSNithin Dabilpuram cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, 636022f1c1aSNithin Dabilpuram uint32_t nb_desc, uint16_t fp_rx_q_sz, 637a86144cdSNithin Dabilpuram const struct rte_eth_rxconf *rx_conf, 638a86144cdSNithin Dabilpuram struct rte_mempool *mp) 
639a86144cdSNithin Dabilpuram { 640a86144cdSNithin Dabilpuram struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); 6417eabd6c6SNithin Dabilpuram struct roc_nix *nix = &dev->nix; 642a86144cdSNithin Dabilpuram struct cnxk_eth_rxq_sp *rxq_sp; 643a86144cdSNithin Dabilpuram struct rte_mempool_ops *ops; 644a86144cdSNithin Dabilpuram const char *platform_ops; 645a86144cdSNithin Dabilpuram struct roc_nix_rq *rq; 646a86144cdSNithin Dabilpuram struct roc_nix_cq *cq; 647a86144cdSNithin Dabilpuram uint16_t first_skip; 6488f80a2e3SNithin Dabilpuram uint16_t wqe_skip; 649a86144cdSNithin Dabilpuram int rc = -EINVAL; 650a86144cdSNithin Dabilpuram size_t rxq_sz; 6514daf12f4SHanumanth Pothula struct rte_mempool *lpb_pool = mp; 6524daf12f4SHanumanth Pothula struct rte_mempool *spb_pool = NULL; 653a86144cdSNithin Dabilpuram 654a86144cdSNithin Dabilpuram /* Sanity checks */ 655a86144cdSNithin Dabilpuram if (rx_conf->rx_deferred_start == 1) { 656a86144cdSNithin Dabilpuram plt_err("Deferred Rx start is not supported"); 657a86144cdSNithin Dabilpuram goto fail; 658a86144cdSNithin Dabilpuram } 659a86144cdSNithin Dabilpuram 6604daf12f4SHanumanth Pothula if (rx_conf->rx_nmempool > 0) { 6614daf12f4SHanumanth Pothula rc = cnxk_nix_process_rx_conf(rx_conf, &lpb_pool, &spb_pool); 6624daf12f4SHanumanth Pothula if (rc) 6634daf12f4SHanumanth Pothula goto fail; 6644daf12f4SHanumanth Pothula } 6654daf12f4SHanumanth Pothula 666a86144cdSNithin Dabilpuram platform_ops = rte_mbuf_platform_mempool_ops(); 667a86144cdSNithin Dabilpuram /* This driver needs cnxk_npa mempool ops to work */ 6684daf12f4SHanumanth Pothula ops = rte_mempool_get_ops(lpb_pool->ops_index); 669a86144cdSNithin Dabilpuram if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) { 670a86144cdSNithin Dabilpuram plt_err("mempool ops should be of cnxk_npa type"); 671a86144cdSNithin Dabilpuram goto fail; 672a86144cdSNithin Dabilpuram } 673a86144cdSNithin Dabilpuram 6744daf12f4SHanumanth Pothula if (lpb_pool->pool_id == 0) { 
675a86144cdSNithin Dabilpuram plt_err("Invalid pool_id"); 676a86144cdSNithin Dabilpuram goto fail; 677a86144cdSNithin Dabilpuram } 678a86144cdSNithin Dabilpuram 679a86144cdSNithin Dabilpuram /* Free memory prior to re-allocation if needed */ 680a86144cdSNithin Dabilpuram if (eth_dev->data->rx_queues[qid] != NULL) { 681a86144cdSNithin Dabilpuram const struct eth_dev_ops *dev_ops = eth_dev->dev_ops; 682a86144cdSNithin Dabilpuram 683a86144cdSNithin Dabilpuram plt_nix_dbg("Freeing memory prior to re-allocation %d", qid); 6847483341aSXueming Li dev_ops->rx_queue_release(eth_dev, qid); 685a86144cdSNithin Dabilpuram eth_dev->data->rx_queues[qid] = NULL; 686a86144cdSNithin Dabilpuram } 687a86144cdSNithin Dabilpuram 68816781725SNithin Dabilpuram /* Its a no-op when inline device is not used */ 68916781725SNithin Dabilpuram if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY || 69016781725SNithin Dabilpuram dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) 6914daf12f4SHanumanth Pothula roc_nix_inl_dev_xaq_realloc(lpb_pool->pool_id); 69216781725SNithin Dabilpuram 693022f1c1aSNithin Dabilpuram /* Increase CQ size to Aura size to avoid CQ overflow and 694022f1c1aSNithin Dabilpuram * then CPT buffer leak. 
695022f1c1aSNithin Dabilpuram */ 696022f1c1aSNithin Dabilpuram if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) 6974daf12f4SHanumanth Pothula nb_desc = nix_inl_cq_sz_clamp_up(nix, lpb_pool, nb_desc); 698022f1c1aSNithin Dabilpuram 699a86144cdSNithin Dabilpuram /* Setup ROC CQ */ 700a86144cdSNithin Dabilpuram cq = &dev->cqs[qid]; 701a86144cdSNithin Dabilpuram cq->qid = qid; 702a86144cdSNithin Dabilpuram cq->nb_desc = nb_desc; 703a86144cdSNithin Dabilpuram rc = roc_nix_cq_init(&dev->nix, cq); 704a86144cdSNithin Dabilpuram if (rc) { 705a86144cdSNithin Dabilpuram plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc); 706a86144cdSNithin Dabilpuram goto fail; 707a86144cdSNithin Dabilpuram } 708a86144cdSNithin Dabilpuram 709a86144cdSNithin Dabilpuram /* Setup ROC RQ */ 710a86144cdSNithin Dabilpuram rq = &dev->rqs[qid]; 711a86144cdSNithin Dabilpuram rq->qid = qid; 712b059bbb8SKommula Shiva Shankar rq->cqid = cq->qid; 7134daf12f4SHanumanth Pothula rq->aura_handle = lpb_pool->pool_id; 714a86144cdSNithin Dabilpuram rq->flow_tag_width = 32; 715a86144cdSNithin Dabilpuram rq->sso_ena = false; 716a86144cdSNithin Dabilpuram 717a86144cdSNithin Dabilpuram /* Calculate first mbuf skip */ 718a86144cdSNithin Dabilpuram first_skip = (sizeof(struct rte_mbuf)); 719a86144cdSNithin Dabilpuram first_skip += RTE_PKTMBUF_HEADROOM; 7204daf12f4SHanumanth Pothula first_skip += rte_pktmbuf_priv_size(lpb_pool); 721a86144cdSNithin Dabilpuram rq->first_skip = first_skip; 7228bc924cfSNithin Dabilpuram rq->later_skip = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(lpb_pool); 7234daf12f4SHanumanth Pothula rq->lpb_size = lpb_pool->elt_size; 72493c6b6b2SNithin Dabilpuram if (roc_errata_nix_no_meta_aura()) 725c3bb4ba5SNithin Dabilpuram rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY); 726a86144cdSNithin Dabilpuram 7277eabd6c6SNithin Dabilpuram /* Enable Inline IPSec on RQ, will not be used for Poll mode */ 7288f80a2e3SNithin Dabilpuram if (roc_nix_inl_inb_is_enabled(nix) && 
!dev->inb.inl_dev) { 7297eabd6c6SNithin Dabilpuram rq->ipsech_ena = true; 7308f80a2e3SNithin Dabilpuram /* WQE skip is needed when poll mode is enabled in CN10KA_B0 and above 7318f80a2e3SNithin Dabilpuram * for Inline IPsec traffic to CQ without inline device. 7328f80a2e3SNithin Dabilpuram */ 7338f80a2e3SNithin Dabilpuram wqe_skip = RTE_ALIGN_CEIL(sizeof(struct rte_mbuf), ROC_CACHE_LINE_SZ); 7348f80a2e3SNithin Dabilpuram wqe_skip = wqe_skip / ROC_CACHE_LINE_SZ; 7358f80a2e3SNithin Dabilpuram rq->wqe_skip = wqe_skip; 7368f80a2e3SNithin Dabilpuram } 7377eabd6c6SNithin Dabilpuram 7384daf12f4SHanumanth Pothula if (spb_pool) { 7394daf12f4SHanumanth Pothula rq->spb_ena = 1; 7404daf12f4SHanumanth Pothula rq->spb_aura_handle = spb_pool->pool_id; 7414daf12f4SHanumanth Pothula rq->spb_size = spb_pool->elt_size; 7424daf12f4SHanumanth Pothula } 7434daf12f4SHanumanth Pothula 744a86144cdSNithin Dabilpuram rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started); 745a86144cdSNithin Dabilpuram if (rc) { 746a86144cdSNithin Dabilpuram plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc); 747a86144cdSNithin Dabilpuram goto cq_fini; 748a86144cdSNithin Dabilpuram } 749a86144cdSNithin Dabilpuram 750a86144cdSNithin Dabilpuram /* Allocate and setup fast path rx queue */ 751a86144cdSNithin Dabilpuram rc = -ENOMEM; 752a86144cdSNithin Dabilpuram rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz; 753a86144cdSNithin Dabilpuram rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE); 754a86144cdSNithin Dabilpuram if (!rxq_sp) { 755a86144cdSNithin Dabilpuram plt_err("Failed to alloc rx queue for rq=%d", qid); 756a86144cdSNithin Dabilpuram goto rq_fini; 757a86144cdSNithin Dabilpuram } 758a86144cdSNithin Dabilpuram 759a86144cdSNithin Dabilpuram /* Setup slow path fields */ 760a86144cdSNithin Dabilpuram rxq_sp->dev = dev; 761a86144cdSNithin Dabilpuram rxq_sp->qid = qid; 762a86144cdSNithin Dabilpuram rxq_sp->qconf.conf.rx = *rx_conf; 763137fbfc6SNithin Dabilpuram /* Queue config should 
reflect global offloads */ 764137fbfc6SNithin Dabilpuram rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads; 765a86144cdSNithin Dabilpuram rxq_sp->qconf.nb_desc = nb_desc; 7664daf12f4SHanumanth Pothula rxq_sp->qconf.mp = lpb_pool; 76714f7c27aSSunil Kumar Kori rxq_sp->tc = 0; 76814f7c27aSSunil Kumar Kori rxq_sp->tx_pause = (dev->fc_cfg.mode == RTE_ETH_FC_FULL || 76914f7c27aSSunil Kumar Kori dev->fc_cfg.mode == RTE_ETH_FC_TX_PAUSE); 770a86144cdSNithin Dabilpuram 771295968d1SFerruh Yigit if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) { 77282529cffSNithin Dabilpuram /* Pass a tagmask used to handle error packets in inline device. 77382529cffSNithin Dabilpuram * Ethdev rq's tag_mask field will be overwritten later 77482529cffSNithin Dabilpuram * when sso is setup. 77582529cffSNithin Dabilpuram */ 77682529cffSNithin Dabilpuram rq->tag_mask = 77782529cffSNithin Dabilpuram 0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28); 77882529cffSNithin Dabilpuram 7797eabd6c6SNithin Dabilpuram /* Setup rq reference for inline dev if present */ 780da1ec390SNithin Dabilpuram rc = roc_nix_inl_dev_rq_get(rq, !!eth_dev->data->dev_started); 7817eabd6c6SNithin Dabilpuram if (rc) 7827eabd6c6SNithin Dabilpuram goto free_mem; 7837eabd6c6SNithin Dabilpuram } 7847eabd6c6SNithin Dabilpuram 7854daf12f4SHanumanth Pothula plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, lpb_pool->name, nb_desc, 786a86144cdSNithin Dabilpuram cq->nb_desc); 787a86144cdSNithin Dabilpuram 788a86144cdSNithin Dabilpuram /* Store start of fast path area */ 789a86144cdSNithin Dabilpuram eth_dev->data->rx_queues[qid] = rxq_sp + 1; 790a86144cdSNithin Dabilpuram eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED; 791a86144cdSNithin Dabilpuram 79276dff638SSunil Kumar Kori /* Calculating delta and freq mult between PTP HI clock and tsc. 79376dff638SSunil Kumar Kori * These are needed in deriving raw clock value from tsc counter. 79476dff638SSunil Kumar Kori * read_clock eth op returns raw clock value. 
79576dff638SSunil Kumar Kori */ 796295968d1SFerruh Yigit if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) { 79776dff638SSunil Kumar Kori rc = cnxk_nix_tsc_convert(dev); 79876dff638SSunil Kumar Kori if (rc) { 79976dff638SSunil Kumar Kori plt_err("Failed to calculate delta and freq mult"); 80076dff638SSunil Kumar Kori goto rq_fini; 80176dff638SSunil Kumar Kori } 80276dff638SSunil Kumar Kori } 80376dff638SSunil Kumar Kori 804a86144cdSNithin Dabilpuram return 0; 8057eabd6c6SNithin Dabilpuram free_mem: 8067eabd6c6SNithin Dabilpuram plt_free(rxq_sp); 807a86144cdSNithin Dabilpuram rq_fini: 808a86144cdSNithin Dabilpuram rc |= roc_nix_rq_fini(rq); 809a86144cdSNithin Dabilpuram cq_fini: 810a86144cdSNithin Dabilpuram rc |= roc_nix_cq_fini(cq); 811a86144cdSNithin Dabilpuram fail: 812a86144cdSNithin Dabilpuram return rc; 813a86144cdSNithin Dabilpuram } 814a86144cdSNithin Dabilpuram 815a86144cdSNithin Dabilpuram static void 8167483341aSXueming Li cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid) 817a86144cdSNithin Dabilpuram { 8187483341aSXueming Li void *rxq = eth_dev->data->rx_queues[qid]; 819a86144cdSNithin Dabilpuram struct cnxk_eth_rxq_sp *rxq_sp; 820a86144cdSNithin Dabilpuram struct cnxk_eth_dev *dev; 821a86144cdSNithin Dabilpuram struct roc_nix_rq *rq; 822a86144cdSNithin Dabilpuram struct roc_nix_cq *cq; 823a86144cdSNithin Dabilpuram int rc; 824a86144cdSNithin Dabilpuram 825a86144cdSNithin Dabilpuram if (!rxq) 826a86144cdSNithin Dabilpuram return; 827a86144cdSNithin Dabilpuram 828a86144cdSNithin Dabilpuram rxq_sp = cnxk_eth_rxq_to_sp(rxq); 829a86144cdSNithin Dabilpuram dev = rxq_sp->dev; 8307eabd6c6SNithin Dabilpuram rq = &dev->rqs[qid]; 831a86144cdSNithin Dabilpuram 832a86144cdSNithin Dabilpuram plt_nix_dbg("Releasing rxq %u", qid); 833a86144cdSNithin Dabilpuram 8347eabd6c6SNithin Dabilpuram /* Release rq reference for inline dev if present */ 835295968d1SFerruh Yigit if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) 
8367eabd6c6SNithin Dabilpuram roc_nix_inl_dev_rq_put(rq); 8377eabd6c6SNithin Dabilpuram 838a86144cdSNithin Dabilpuram /* Cleanup ROC RQ */ 839a86144cdSNithin Dabilpuram rc = roc_nix_rq_fini(rq); 840a86144cdSNithin Dabilpuram if (rc) 841a86144cdSNithin Dabilpuram plt_err("Failed to cleanup rq, rc=%d", rc); 842a86144cdSNithin Dabilpuram 843a86144cdSNithin Dabilpuram /* Cleanup ROC CQ */ 844a86144cdSNithin Dabilpuram cq = &dev->cqs[qid]; 845a86144cdSNithin Dabilpuram rc = roc_nix_cq_fini(cq); 846a86144cdSNithin Dabilpuram if (rc) 847a86144cdSNithin Dabilpuram plt_err("Failed to cleanup cq, rc=%d", rc); 848a86144cdSNithin Dabilpuram 849a86144cdSNithin Dabilpuram /* Finally free fast path area */ 850a86144cdSNithin Dabilpuram plt_free(rxq_sp); 851a86144cdSNithin Dabilpuram } 852a86144cdSNithin Dabilpuram 853b75e0acaSNithin Dabilpuram uint32_t 854b75e0acaSNithin Dabilpuram cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss, 855b75e0acaSNithin Dabilpuram uint8_t rss_level) 856b75e0acaSNithin Dabilpuram { 857b75e0acaSNithin Dabilpuram uint32_t flow_key_type[RSS_MAX_LEVELS][6] = { 858b75e0acaSNithin Dabilpuram {FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP, 859b75e0acaSNithin Dabilpuram FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC}, 860b75e0acaSNithin Dabilpuram {FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6, 861b75e0acaSNithin Dabilpuram FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP, 862b75e0acaSNithin Dabilpuram FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC}, 863b75e0acaSNithin Dabilpuram {FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4, 864b75e0acaSNithin Dabilpuram FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6, 865b75e0acaSNithin Dabilpuram FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP, 866b75e0acaSNithin Dabilpuram FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP, 867b75e0acaSNithin Dabilpuram FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP, 868b75e0acaSNithin Dabilpuram FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC} 
869b75e0acaSNithin Dabilpuram }; 870b75e0acaSNithin Dabilpuram uint32_t flowkey_cfg = 0; 871b75e0acaSNithin Dabilpuram 872b75e0acaSNithin Dabilpuram dev->ethdev_rss_hf = ethdev_rss; 873b75e0acaSNithin Dabilpuram 874295968d1SFerruh Yigit if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD && 8754093c5a8SKiran Kumar K dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) { 876b75e0acaSNithin Dabilpuram flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B; 8774093c5a8SKiran Kumar K } 878b75e0acaSNithin Dabilpuram 879295968d1SFerruh Yigit if (ethdev_rss & RTE_ETH_RSS_C_VLAN) 880b75e0acaSNithin Dabilpuram flowkey_cfg |= FLOW_KEY_TYPE_VLAN; 881b75e0acaSNithin Dabilpuram 882295968d1SFerruh Yigit if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY) 883b75e0acaSNithin Dabilpuram flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC; 884b75e0acaSNithin Dabilpuram 885295968d1SFerruh Yigit if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY) 886b75e0acaSNithin Dabilpuram flowkey_cfg |= FLOW_KEY_TYPE_L3_DST; 887b75e0acaSNithin Dabilpuram 888295968d1SFerruh Yigit if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY) 889b75e0acaSNithin Dabilpuram flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC; 890b75e0acaSNithin Dabilpuram 891295968d1SFerruh Yigit if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY) 892b75e0acaSNithin Dabilpuram flowkey_cfg |= FLOW_KEY_TYPE_L4_DST; 893b75e0acaSNithin Dabilpuram 894b75e0acaSNithin Dabilpuram if (ethdev_rss & RSS_IPV4_ENABLE) 895b75e0acaSNithin Dabilpuram flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX]; 896b75e0acaSNithin Dabilpuram 897b75e0acaSNithin Dabilpuram if (ethdev_rss & RSS_IPV6_ENABLE) 898b75e0acaSNithin Dabilpuram flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX]; 899b75e0acaSNithin Dabilpuram 900295968d1SFerruh Yigit if (ethdev_rss & RTE_ETH_RSS_TCP) 901b75e0acaSNithin Dabilpuram flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX]; 902b75e0acaSNithin Dabilpuram 903295968d1SFerruh Yigit if (ethdev_rss & RTE_ETH_RSS_UDP) 904b75e0acaSNithin Dabilpuram flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX]; 
905b75e0acaSNithin Dabilpuram 906295968d1SFerruh Yigit if (ethdev_rss & RTE_ETH_RSS_SCTP) 907b75e0acaSNithin Dabilpuram flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX]; 908b75e0acaSNithin Dabilpuram 909295968d1SFerruh Yigit if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD) 910b75e0acaSNithin Dabilpuram flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX]; 911b75e0acaSNithin Dabilpuram 912b75e0acaSNithin Dabilpuram if (ethdev_rss & RSS_IPV6_EX_ENABLE) 913b75e0acaSNithin Dabilpuram flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT; 914b75e0acaSNithin Dabilpuram 915295968d1SFerruh Yigit if (ethdev_rss & RTE_ETH_RSS_PORT) 916b75e0acaSNithin Dabilpuram flowkey_cfg |= FLOW_KEY_TYPE_PORT; 917b75e0acaSNithin Dabilpuram 918295968d1SFerruh Yigit if (ethdev_rss & RTE_ETH_RSS_NVGRE) 919b75e0acaSNithin Dabilpuram flowkey_cfg |= FLOW_KEY_TYPE_NVGRE; 920b75e0acaSNithin Dabilpuram 921295968d1SFerruh Yigit if (ethdev_rss & RTE_ETH_RSS_VXLAN) 922b75e0acaSNithin Dabilpuram flowkey_cfg |= FLOW_KEY_TYPE_VXLAN; 923b75e0acaSNithin Dabilpuram 924295968d1SFerruh Yigit if (ethdev_rss & RTE_ETH_RSS_GENEVE) 925b75e0acaSNithin Dabilpuram flowkey_cfg |= FLOW_KEY_TYPE_GENEVE; 926b75e0acaSNithin Dabilpuram 927295968d1SFerruh Yigit if (ethdev_rss & RTE_ETH_RSS_GTPU) 928b75e0acaSNithin Dabilpuram flowkey_cfg |= FLOW_KEY_TYPE_GTPU; 929b75e0acaSNithin Dabilpuram 930b75e0acaSNithin Dabilpuram return flowkey_cfg; 931b75e0acaSNithin Dabilpuram } 932b75e0acaSNithin Dabilpuram 933c9b0bb00SRakesh Kudurumalla static int 934c9b0bb00SRakesh Kudurumalla nix_rxchan_cfg_disable(struct cnxk_eth_dev *dev) 935c9b0bb00SRakesh Kudurumalla { 936c9b0bb00SRakesh Kudurumalla struct roc_nix *nix = &dev->nix; 937c9b0bb00SRakesh Kudurumalla struct roc_nix_fc_cfg fc_cfg; 938c9b0bb00SRakesh Kudurumalla int rc; 939c9b0bb00SRakesh Kudurumalla 940c9b0bb00SRakesh Kudurumalla if (!roc_nix_is_lbk(nix)) 941c9b0bb00SRakesh Kudurumalla return 0; 942c9b0bb00SRakesh Kudurumalla 943c9b0bb00SRakesh Kudurumalla memset(&fc_cfg, 0, sizeof(struct 
roc_nix_fc_cfg)); 944c9b0bb00SRakesh Kudurumalla fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG; 945c9b0bb00SRakesh Kudurumalla fc_cfg.rxchan_cfg.enable = false; 946c9b0bb00SRakesh Kudurumalla rc = roc_nix_fc_config_set(nix, &fc_cfg); 947c9b0bb00SRakesh Kudurumalla if (rc) { 948c9b0bb00SRakesh Kudurumalla plt_err("Failed to setup flow control, rc=%d(%s)", rc, roc_error_msg_get(rc)); 949c9b0bb00SRakesh Kudurumalla return rc; 950c9b0bb00SRakesh Kudurumalla } 951c9b0bb00SRakesh Kudurumalla return 0; 952c9b0bb00SRakesh Kudurumalla } 953c9b0bb00SRakesh Kudurumalla 954b75e0acaSNithin Dabilpuram static void 955b75e0acaSNithin Dabilpuram nix_free_queue_mem(struct cnxk_eth_dev *dev) 956b75e0acaSNithin Dabilpuram { 957b75e0acaSNithin Dabilpuram plt_free(dev->rqs); 958b75e0acaSNithin Dabilpuram plt_free(dev->cqs); 959b75e0acaSNithin Dabilpuram plt_free(dev->sqs); 960b75e0acaSNithin Dabilpuram dev->rqs = NULL; 961b75e0acaSNithin Dabilpuram dev->cqs = NULL; 962b75e0acaSNithin Dabilpuram dev->sqs = NULL; 963b75e0acaSNithin Dabilpuram } 964b75e0acaSNithin Dabilpuram 965b75e0acaSNithin Dabilpuram static int 966a83db6b3SSunil Kumar Kori nix_ingress_policer_setup(struct cnxk_eth_dev *dev) 967a83db6b3SSunil Kumar Kori { 9685ee3457bSRakesh Kudurumalla struct rte_eth_dev *eth_dev = dev->eth_dev; 9695ee3457bSRakesh Kudurumalla int rc = 0; 9705ee3457bSRakesh Kudurumalla 971a83db6b3SSunil Kumar Kori TAILQ_INIT(&dev->mtr_profiles); 972ffee183eSSunil Kumar Kori TAILQ_INIT(&dev->mtr_policy); 973d0ea0bebSSunil Kumar Kori TAILQ_INIT(&dev->mtr); 974a83db6b3SSunil Kumar Kori 9755ee3457bSRakesh Kudurumalla if (eth_dev->dev_ops->mtr_ops_get == NULL) 9765ee3457bSRakesh Kudurumalla return rc; 9775ee3457bSRakesh Kudurumalla 9785ee3457bSRakesh Kudurumalla return nix_mtr_capabilities_init(eth_dev); 979a83db6b3SSunil Kumar Kori } 980a83db6b3SSunil Kumar Kori 981a83db6b3SSunil Kumar Kori static int 982b75e0acaSNithin Dabilpuram nix_rss_default_setup(struct cnxk_eth_dev *dev) 983b75e0acaSNithin Dabilpuram { 
984b75e0acaSNithin Dabilpuram struct rte_eth_dev *eth_dev = dev->eth_dev; 985b75e0acaSNithin Dabilpuram uint8_t rss_hash_level; 986b75e0acaSNithin Dabilpuram uint32_t flowkey_cfg; 987b75e0acaSNithin Dabilpuram uint64_t rss_hf; 988b75e0acaSNithin Dabilpuram 989b75e0acaSNithin Dabilpuram rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; 990295968d1SFerruh Yigit rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf); 991b75e0acaSNithin Dabilpuram if (rss_hash_level) 992b75e0acaSNithin Dabilpuram rss_hash_level -= 1; 993b75e0acaSNithin Dabilpuram 994b75e0acaSNithin Dabilpuram flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level); 995b75e0acaSNithin Dabilpuram return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg); 996b75e0acaSNithin Dabilpuram } 997b75e0acaSNithin Dabilpuram 998b75e0acaSNithin Dabilpuram static int 999b75e0acaSNithin Dabilpuram nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev) 1000b75e0acaSNithin Dabilpuram { 1001b75e0acaSNithin Dabilpuram struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); 1002b75e0acaSNithin Dabilpuram const struct eth_dev_ops *dev_ops = eth_dev->dev_ops; 1003b75e0acaSNithin Dabilpuram struct cnxk_eth_qconf *tx_qconf = NULL; 1004b75e0acaSNithin Dabilpuram struct cnxk_eth_qconf *rx_qconf = NULL; 1005b75e0acaSNithin Dabilpuram struct cnxk_eth_rxq_sp *rxq_sp; 1006b75e0acaSNithin Dabilpuram struct cnxk_eth_txq_sp *txq_sp; 1007b75e0acaSNithin Dabilpuram int i, nb_rxq, nb_txq; 1008b75e0acaSNithin Dabilpuram void **txq, **rxq; 1009b75e0acaSNithin Dabilpuram 1010b75e0acaSNithin Dabilpuram nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues); 1011b75e0acaSNithin Dabilpuram nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues); 1012b75e0acaSNithin Dabilpuram 1013b75e0acaSNithin Dabilpuram tx_qconf = malloc(nb_txq * sizeof(*tx_qconf)); 1014b75e0acaSNithin Dabilpuram if (tx_qconf == NULL) { 1015b75e0acaSNithin Dabilpuram plt_err("Failed to allocate memory for tx_qconf"); 1016b75e0acaSNithin Dabilpuram 
goto fail; 1017b75e0acaSNithin Dabilpuram } 1018b75e0acaSNithin Dabilpuram 1019b75e0acaSNithin Dabilpuram rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf)); 1020b75e0acaSNithin Dabilpuram if (rx_qconf == NULL) { 1021b75e0acaSNithin Dabilpuram plt_err("Failed to allocate memory for rx_qconf"); 1022b75e0acaSNithin Dabilpuram goto fail; 1023b75e0acaSNithin Dabilpuram } 1024b75e0acaSNithin Dabilpuram 1025b75e0acaSNithin Dabilpuram txq = eth_dev->data->tx_queues; 1026b75e0acaSNithin Dabilpuram for (i = 0; i < nb_txq; i++) { 1027b75e0acaSNithin Dabilpuram if (txq[i] == NULL) { 1028b75e0acaSNithin Dabilpuram tx_qconf[i].valid = false; 1029b75e0acaSNithin Dabilpuram plt_info("txq[%d] is already released", i); 1030b75e0acaSNithin Dabilpuram continue; 1031b75e0acaSNithin Dabilpuram } 1032b75e0acaSNithin Dabilpuram txq_sp = cnxk_eth_txq_to_sp(txq[i]); 1033b75e0acaSNithin Dabilpuram memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf)); 1034b75e0acaSNithin Dabilpuram tx_qconf[i].valid = true; 10357483341aSXueming Li dev_ops->tx_queue_release(eth_dev, i); 1036b75e0acaSNithin Dabilpuram eth_dev->data->tx_queues[i] = NULL; 1037b75e0acaSNithin Dabilpuram } 1038b75e0acaSNithin Dabilpuram 1039b75e0acaSNithin Dabilpuram rxq = eth_dev->data->rx_queues; 1040b75e0acaSNithin Dabilpuram for (i = 0; i < nb_rxq; i++) { 1041b75e0acaSNithin Dabilpuram if (rxq[i] == NULL) { 1042b75e0acaSNithin Dabilpuram rx_qconf[i].valid = false; 1043b75e0acaSNithin Dabilpuram plt_info("rxq[%d] is already released", i); 1044b75e0acaSNithin Dabilpuram continue; 1045b75e0acaSNithin Dabilpuram } 1046b75e0acaSNithin Dabilpuram rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]); 1047b75e0acaSNithin Dabilpuram memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf)); 1048b75e0acaSNithin Dabilpuram rx_qconf[i].valid = true; 10497483341aSXueming Li dev_ops->rx_queue_release(eth_dev, i); 1050b75e0acaSNithin Dabilpuram eth_dev->data->rx_queues[i] = NULL; 1051b75e0acaSNithin Dabilpuram } 1052b75e0acaSNithin Dabilpuram 1053b75e0acaSNithin 
Dabilpuram dev->tx_qconf = tx_qconf; 1054b75e0acaSNithin Dabilpuram dev->rx_qconf = rx_qconf; 1055b75e0acaSNithin Dabilpuram return 0; 1056b75e0acaSNithin Dabilpuram 1057b75e0acaSNithin Dabilpuram fail: 1058b75e0acaSNithin Dabilpuram free(tx_qconf); 1059b75e0acaSNithin Dabilpuram free(rx_qconf); 1060b75e0acaSNithin Dabilpuram return -ENOMEM; 1061b75e0acaSNithin Dabilpuram } 1062b75e0acaSNithin Dabilpuram 1063b75e0acaSNithin Dabilpuram static int 1064b75e0acaSNithin Dabilpuram nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) 1065b75e0acaSNithin Dabilpuram { 1066b75e0acaSNithin Dabilpuram struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); 1067b75e0acaSNithin Dabilpuram const struct eth_dev_ops *dev_ops = eth_dev->dev_ops; 1068b75e0acaSNithin Dabilpuram struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf; 1069b75e0acaSNithin Dabilpuram struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf; 1070b75e0acaSNithin Dabilpuram int rc, i, nb_rxq, nb_txq; 1071b75e0acaSNithin Dabilpuram 1072b75e0acaSNithin Dabilpuram nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues); 1073b75e0acaSNithin Dabilpuram nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues); 1074b75e0acaSNithin Dabilpuram 1075b75e0acaSNithin Dabilpuram rc = -ENOMEM; 1076b75e0acaSNithin Dabilpuram /* Setup tx & rx queues with previous configuration so 1077b75e0acaSNithin Dabilpuram * that the queues can be functional in cases like ports 1078b75e0acaSNithin Dabilpuram * are started without re configuring queues. 
1079b75e0acaSNithin Dabilpuram * 1080b75e0acaSNithin Dabilpuram * Usual re config sequence is like below: 1081b75e0acaSNithin Dabilpuram * port_configure() { 1082b75e0acaSNithin Dabilpuram * if(reconfigure) { 1083b75e0acaSNithin Dabilpuram * queue_release() 1084b75e0acaSNithin Dabilpuram * queue_setup() 1085b75e0acaSNithin Dabilpuram * } 1086b75e0acaSNithin Dabilpuram * queue_configure() { 1087b75e0acaSNithin Dabilpuram * queue_release() 1088b75e0acaSNithin Dabilpuram * queue_setup() 1089b75e0acaSNithin Dabilpuram * } 1090b75e0acaSNithin Dabilpuram * } 1091b75e0acaSNithin Dabilpuram * port_start() 1092b75e0acaSNithin Dabilpuram * 1093b75e0acaSNithin Dabilpuram * In some application's control path, queue_configure() would 1094b75e0acaSNithin Dabilpuram * NOT be invoked for TXQs/RXQs in port_configure(). 1095b75e0acaSNithin Dabilpuram * In such cases, queues can be functional after start as the 1096b75e0acaSNithin Dabilpuram * queues are already setup in port_configure(). 1097b75e0acaSNithin Dabilpuram */ 1098b75e0acaSNithin Dabilpuram for (i = 0; i < nb_txq; i++) { 1099b75e0acaSNithin Dabilpuram if (!tx_qconf[i].valid) 1100b75e0acaSNithin Dabilpuram continue; 1101b75e0acaSNithin Dabilpuram rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0, 1102b75e0acaSNithin Dabilpuram &tx_qconf[i].conf.tx); 1103b75e0acaSNithin Dabilpuram if (rc) { 1104b75e0acaSNithin Dabilpuram plt_err("Failed to setup tx queue rc=%d", rc); 1105b75e0acaSNithin Dabilpuram for (i -= 1; i >= 0; i--) 11067483341aSXueming Li dev_ops->tx_queue_release(eth_dev, i); 1107b75e0acaSNithin Dabilpuram goto fail; 1108b75e0acaSNithin Dabilpuram } 1109b75e0acaSNithin Dabilpuram } 1110b75e0acaSNithin Dabilpuram 1111b75e0acaSNithin Dabilpuram free(tx_qconf); 1112b75e0acaSNithin Dabilpuram tx_qconf = NULL; 1113b75e0acaSNithin Dabilpuram 1114b75e0acaSNithin Dabilpuram for (i = 0; i < nb_rxq; i++) { 1115b75e0acaSNithin Dabilpuram if (!rx_qconf[i].valid) 1116b75e0acaSNithin Dabilpuram continue; 
1117b75e0acaSNithin Dabilpuram rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0, 1118b75e0acaSNithin Dabilpuram &rx_qconf[i].conf.rx, 1119b75e0acaSNithin Dabilpuram rx_qconf[i].mp); 1120b75e0acaSNithin Dabilpuram if (rc) { 1121b75e0acaSNithin Dabilpuram plt_err("Failed to setup rx queue rc=%d", rc); 1122b75e0acaSNithin Dabilpuram for (i -= 1; i >= 0; i--) 11237483341aSXueming Li dev_ops->rx_queue_release(eth_dev, i); 1124b75e0acaSNithin Dabilpuram goto tx_queue_release; 1125b75e0acaSNithin Dabilpuram } 1126b75e0acaSNithin Dabilpuram } 1127b75e0acaSNithin Dabilpuram 1128b75e0acaSNithin Dabilpuram free(rx_qconf); 1129b75e0acaSNithin Dabilpuram rx_qconf = NULL; 1130b75e0acaSNithin Dabilpuram 1131b75e0acaSNithin Dabilpuram return 0; 1132b75e0acaSNithin Dabilpuram 1133b75e0acaSNithin Dabilpuram tx_queue_release: 1134b75e0acaSNithin Dabilpuram for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 11357483341aSXueming Li dev_ops->tx_queue_release(eth_dev, i); 1136b75e0acaSNithin Dabilpuram fail: 1137b75e0acaSNithin Dabilpuram free(tx_qconf); 1138b75e0acaSNithin Dabilpuram free(rx_qconf); 1139b75e0acaSNithin Dabilpuram 1140b75e0acaSNithin Dabilpuram return rc; 1141b75e0acaSNithin Dabilpuram } 1142b75e0acaSNithin Dabilpuram 1143b75e0acaSNithin Dabilpuram static void 1144b75e0acaSNithin Dabilpuram nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev) 1145b75e0acaSNithin Dabilpuram { 1146b75e0acaSNithin Dabilpuram /* These dummy functions are required for supporting 1147b75e0acaSNithin Dabilpuram * some applications which reconfigure queues without 1148f78c100bSStephen Hemminger * stopping tx burst and rx burst threads. 
1149b75e0acaSNithin Dabilpuram * When the queues context is saved, txq/rxqs are released 1150b75e0acaSNithin Dabilpuram * which caused app crash since rx/tx burst is still 1151b75e0acaSNithin Dabilpuram * on different lcores 1152b75e0acaSNithin Dabilpuram */ 1153a41f593fSFerruh Yigit eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; 1154a41f593fSFerruh Yigit eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; 1155b75e0acaSNithin Dabilpuram rte_mb(); 1156b75e0acaSNithin Dabilpuram } 1157b75e0acaSNithin Dabilpuram 1158b75e0acaSNithin Dabilpuram static int 1159b75e0acaSNithin Dabilpuram nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev) 1160b75e0acaSNithin Dabilpuram { 1161b75e0acaSNithin Dabilpuram uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX]; 1162b75e0acaSNithin Dabilpuram uint8_t tun[ROC_NIX_LSO_TUN_MAX]; 1163b75e0acaSNithin Dabilpuram struct roc_nix *nix = &dev->nix; 1164b75e0acaSNithin Dabilpuram int rc; 1165b75e0acaSNithin Dabilpuram 1166b75e0acaSNithin Dabilpuram rc = roc_nix_lso_fmt_get(nix, udp_tun, tun); 1167b75e0acaSNithin Dabilpuram if (rc) 1168b75e0acaSNithin Dabilpuram return rc; 1169b75e0acaSNithin Dabilpuram 1170b75e0acaSNithin Dabilpuram dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] | 1171b75e0acaSNithin Dabilpuram (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 | 1172b75e0acaSNithin Dabilpuram (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 | 1173b75e0acaSNithin Dabilpuram (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24); 1174b75e0acaSNithin Dabilpuram 1175b75e0acaSNithin Dabilpuram dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 | 1176b75e0acaSNithin Dabilpuram (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 | 1177b75e0acaSNithin Dabilpuram (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 | 1178b75e0acaSNithin Dabilpuram (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56); 1179b75e0acaSNithin Dabilpuram return 0; 1180b75e0acaSNithin Dabilpuram } 1181b75e0acaSNithin Dabilpuram 1182b75e0acaSNithin Dabilpuram static int 1183b75e0acaSNithin Dabilpuram 
/* Program LSO (TSO) segmentation formats in the AF if any TSO Tx offload
 * is requested, then cache the tunnel format indices in the device.
 * Returns 0 on success or a negative error code from the ROC layer.
 */
static int
nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
{
	struct roc_nix *nix = &dev->nix;
	int rc;

	/* Nothing much to do if offload is not enabled */
	if (!(dev->tx_offloads &
	      (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
	       RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
		return 0;

	/* Setup LSO formats in AF. Its a no-op if other ethdev has
	 * already set it up
	 */
	rc = roc_nix_lso_fmt_setup(nix);
	if (rc)
		return rc;

	return nix_lso_tun_fmt_update(dev);
}

/* ethdev dev_configure callback: validates the requested configuration,
 * tears down any previous configuration (saving queue configs so they can
 * be transparently restored), allocates the NIX LF and queue bookkeeping,
 * and programs RSS, TM, IRQs, loopback, inline security and flow control.
 * On failure the goto chain unwinds exactly the steps already completed.
 */
int
cnxk_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
	struct roc_nix_fc_cfg fc_cfg = {0};
	struct roc_nix *nix = &dev->nix;
	uint16_t nb_rxq, nb_txq, nb_cq;
	struct rte_ether_addr *ea;
	uint64_t rx_cfg;
	void *qs;
	int rc;

	rc = -EINVAL;

	/* Sanity checks */
	if (rte_eal_has_hugepages() == 0) {
		plt_err("Huge page is not configured");
		goto fail_configure;
	}

	if (conf->dcb_capability_en == 1) {
		plt_err("dcb enable is not supported");
		goto fail_configure;
	}

	/* Only "no multiqueue" and RSS Rx modes are supported */
	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
	    rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
		plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
		goto fail_configure;
	}

	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
		plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
		goto fail_configure;
	}

	/* Free the resources allocated from the previous configure */
	if (dev->configured == 1) {
		/* Unregister queue irq's */
		roc_nix_unregister_queue_irqs(nix);

		/* Unregister CQ irqs if present */
		if (eth_dev->data->dev_conf.intr_conf.rxq)
			roc_nix_unregister_cq_irqs(nix);

		/* Set no-op functions */
		nix_set_nop_rxtx_function(eth_dev);
		/* Store queue config for later */
		rc = nix_store_queue_cfg_and_then_release(eth_dev);
		if (rc)
			goto fail_configure;

		/* Disable and free rte_meter entries */
		rc = nix_meter_fini(dev);
		if (rc)
			goto fail_configure;

		/* Cleanup security support */
		rc = nix_security_release(dev);
		if (rc)
			goto fail_configure;

		roc_nix_tm_fini(nix);
		nix_rxchan_cfg_disable(dev);
		roc_nix_lf_free(nix);
	}

	dev->rx_offloads = rxmode->offloads;
	dev->tx_offloads = txmode->offloads;

	/* A user supplied inbound SA implies inline Rx security */
	if (nix->custom_inb_sa)
		dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;

	/* Prepare rx cfg */
	rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
	if (dev->rx_offloads &
	    (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
	}
	rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
		   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
		   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);

	rx_cfg &= (ROC_NIX_LF_RX_CFG_RX_ERROR_MASK);

	if (roc_feature_nix_has_drop_re_mask())
		rx_cfg |= (ROC_NIX_RE_CRC8_PCH | ROC_NIX_RE_MACSEC);

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
		rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
		/* Disable drop re if rx offload security is enabled and
		 * platform does not support it.
		 */
		if (dev->ipsecd_drop_re_dis)
			rx_cfg &= ~(ROC_NIX_LF_RX_CFG_DROP_RE);
	}

	/* LF alloc needs at least one queue of each kind */
	nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
	nb_txq = RTE_MAX(data->nb_tx_queues, 1);

	if (roc_nix_is_lbk(nix))
		nix->enable_loop = eth_dev->data->dev_conf.lpbk_mode;

	nix->tx_compl_ena = dev->tx_compl_ena;

	/* Alloc a nix lf */
	rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
	if (rc) {
		plt_err("Failed to init nix_lf rc=%d", rc);
		goto fail_configure;
	}

	if (!roc_nix_is_vf_or_sdp(nix)) {
		/* Sync same MAC address to CGX/RPM table */
		rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
		if (rc) {
			plt_err("Failed to set mac addr, rc=%d", rc);
			goto fail_configure;
		}
	}

	/* Check if ptp is enable in PF owning this VF*/
	if (!roc_nix_is_pf(nix) && (!roc_nix_is_sdp(nix)))
		dev->ptp_en = roc_nix_ptp_is_enable(nix);

	dev->npc.channel = roc_nix_get_base_chan(nix);

	/* Bookkeeping uses the real (possibly zero) queue counts */
	nb_rxq = data->nb_rx_queues;
	nb_txq = data->nb_tx_queues;
	/* With Tx completion enabled each SQ also needs a CQ */
	nb_cq = nb_rxq;
	if (nix->tx_compl_ena)
		nb_cq += nb_txq;
	rc = -ENOMEM;
	if (nb_rxq) {
		/* Allocate memory for roc rq's and cq's */
		qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
		if (!qs) {
			plt_err("Failed to alloc rqs");
			goto free_nix_lf;
		}
		dev->rqs = qs;
	}

	if (nb_txq) {
		/* Allocate memory for roc sq's */
		qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
		if (!qs) {
			plt_err("Failed to alloc sqs");
			goto free_nix_lf;
		}
		dev->sqs = qs;
	}

	if (nb_cq) {
		qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_cq, 0);
		if (!qs) {
			plt_err("Failed to alloc cqs");
			goto free_nix_lf;
		}
		dev->cqs = qs;
	}

	/* Re-enable NIX LF error interrupts */
	roc_nix_err_intr_ena_dis(nix, true);
	roc_nix_ras_intr_ena_dis(nix, true);

	if (nix->rx_ptp_ena &&
	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
		plt_err("Both PTP and switch header enabled");
		goto free_nix_lf;
	}

	rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type,
				    dev->npc.pre_l2_size_offset,
				    dev->npc.pre_l2_size_offset_mask,
				    dev->npc.pre_l2_size_shift_dir);
	if (rc) {
		plt_err("Failed to enable switch type nix_lf rc=%d", rc);
		goto free_nix_lf;
	}

	/* Setup LSO if needed */
	rc = nix_lso_fmt_setup(dev);
	if (rc) {
		plt_err("Failed to setup nix lso format fields, rc=%d", rc);
		goto free_nix_lf;
	}

	/* Configure RSS */
	rc = nix_rss_default_setup(dev);
	if (rc) {
		plt_err("Failed to configure rss rc=%d", rc);
		goto free_nix_lf;
	}

	/* Overwrite default RSS setup if requested by user */
	rc = cnxk_nix_rss_hash_update(eth_dev, &conf->rx_adv_conf.rss_conf);
	if (rc) {
		plt_err("Failed to configure rss rc=%d", rc);
		goto free_nix_lf;
	}

	/* Init the default TM scheduler hierarchy */
	rc = roc_nix_tm_init(nix);
	if (rc) {
		plt_err("Failed to init traffic manager, rc=%d", rc);
		goto free_nix_lf;
	}

	rc = nix_ingress_policer_setup(dev);
	if (rc) {
		plt_err("Failed to setup ingress policer rc=%d", rc);
		goto free_nix_lf;
	}

	rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
	if (rc) {
		plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
		goto tm_fini;
	}

	/* Register queue IRQs */
	rc = roc_nix_register_queue_irqs(nix);
	if (rc) {
		plt_err("Failed to register queue interrupts rc=%d", rc);
		goto tm_fini;
	}

	/* Register cq IRQs */
	if (eth_dev->data->dev_conf.intr_conf.rxq) {
		if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
			plt_err("Rx interrupt cannot be enabled, rxq > %d",
				dev->nix.cints);
			goto q_irq_fini;
		}
		/* Rx interrupt feature cannot work with vector mode because,
		 * vector mode does not process packets unless min 4 pkts are
		 * received, while cq interrupts are generated even for 1 pkt
		 * in the CQ.
		 */
		dev->scalar_ena = true;

		rc = roc_nix_register_cq_irqs(nix);
		if (rc) {
			plt_err("Failed to register CQ interrupts rc=%d", rc);
			goto q_irq_fini;
		}
	}

	/* LBK devices have no CGX/RPM MAC to configure */
	if (roc_nix_is_lbk(nix))
		goto skip_lbk_setup;

	/* Configure loop back mode */
	rc = roc_nix_mac_loopback_enable(nix,
					 eth_dev->data->dev_conf.lpbk_mode);
	if (rc) {
		plt_err("Failed to configure cgx loop back mode rc=%d", rc);
		goto cq_fini;
	}

skip_lbk_setup:
	/* Setup Inline security support */
	rc = nix_security_setup(dev);
	if (rc)
		goto cq_fini;

	/* Init flow control configuration */
	if (!roc_nix_is_esw(nix)) {
		fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG;
		fc_cfg.rxchan_cfg.enable = true;
		rc = roc_nix_fc_config_set(nix, &fc_cfg);
		if (rc) {
			plt_err("Failed to initialize flow control rc=%d", rc);
			goto cq_fini;
		}
	}

	/* Update flow control configuration to PMD */
	rc = nix_init_flow_ctrl_config(eth_dev);
	if (rc) {
		plt_err("Failed to initialize flow control rc=%d", rc);
		goto cq_fini;
	}

	/*
	 * Restore queue config when reconfigure followed by
	 * reconfigure and no queue configure invoked from application case.
	 */
	if (dev->configured == 1) {
		rc = nix_restore_queue_cfg(eth_dev);
		if (rc)
			goto sec_release;
	}

	/* Update the mac address */
	ea = eth_dev->data->mac_addrs;
	memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
	if (rte_is_zero_ether_addr(ea))
		rte_eth_random_addr((uint8_t *)ea);

	rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);

	plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
		    " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
		    eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
		    dev->rx_offloads, dev->tx_offloads);

	/* All good */
	dev->configured = 1;
	dev->nb_rxq = data->nb_rx_queues;
	dev->nb_txq = data->nb_tx_queues;
	return 0;

	/* Error unwind: each label undoes exactly the steps completed
	 * before the failure point; rc accumulates any cleanup errors.
	 */
sec_release:
	rc |= nix_security_release(dev);
cq_fini:
	roc_nix_unregister_cq_irqs(nix);
q_irq_fini:
	roc_nix_unregister_queue_irqs(nix);
tm_fini:
	roc_nix_tm_fini(nix);
free_nix_lf:
	nix_free_queue_mem(dev);
	rc |= nix_rxchan_cfg_disable(dev);
	rc |= roc_nix_lf_free(nix);
fail_configure:
	dev->configured = 0;
	return rc;
}

/* ethdev tx_queue_start callback: enable the SQ backing queue `qid` and
 * mark it started. Idempotent if the queue is already started.
 */
int
cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix_sq *sq = &dev->sqs[qid];
	int rc = -EINVAL;

	if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	rc = roc_nix_sq_ena_dis(sq, true);
	if (rc) {
		plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
		goto done;
	}

	data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
done:
	return rc;
}
(data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED) 157106d75440SNithin Dabilpuram return 0; 157206d75440SNithin Dabilpuram 1573efbf367fSRakesh Kudurumalla rc = roc_nix_sq_ena_dis(sq, false); 157406d75440SNithin Dabilpuram if (rc) { 157506d75440SNithin Dabilpuram plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid, 157606d75440SNithin Dabilpuram rc); 157706d75440SNithin Dabilpuram goto done; 157806d75440SNithin Dabilpuram } 157906d75440SNithin Dabilpuram 158006d75440SNithin Dabilpuram data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED; 158106d75440SNithin Dabilpuram done: 158206d75440SNithin Dabilpuram return rc; 158306d75440SNithin Dabilpuram } 158406d75440SNithin Dabilpuram 158506d75440SNithin Dabilpuram static int 158606d75440SNithin Dabilpuram cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid) 158706d75440SNithin Dabilpuram { 158806d75440SNithin Dabilpuram struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); 158906d75440SNithin Dabilpuram struct rte_eth_dev_data *data = eth_dev->data; 159006d75440SNithin Dabilpuram struct roc_nix_rq *rq = &dev->rqs[qid]; 159106d75440SNithin Dabilpuram int rc; 159206d75440SNithin Dabilpuram 159306d75440SNithin Dabilpuram if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED) 159406d75440SNithin Dabilpuram return 0; 159506d75440SNithin Dabilpuram 159606d75440SNithin Dabilpuram rc = roc_nix_rq_ena_dis(rq, true); 159706d75440SNithin Dabilpuram if (rc) { 159806d75440SNithin Dabilpuram plt_err("Failed to enable rxq=%u, rc=%d", qid, rc); 159906d75440SNithin Dabilpuram goto done; 160006d75440SNithin Dabilpuram } 160106d75440SNithin Dabilpuram 160206d75440SNithin Dabilpuram data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED; 160306d75440SNithin Dabilpuram done: 160406d75440SNithin Dabilpuram return rc; 160506d75440SNithin Dabilpuram } 160606d75440SNithin Dabilpuram 160706d75440SNithin Dabilpuram static int 160806d75440SNithin Dabilpuram cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, 
/* ethdev rx_queue_stop callback: disable the RQ backing queue `qid` and
 * mark it stopped. Idempotent if the queue is already stopped.
 */
static int
cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix_rq *rq = &dev->rqs[qid];
	int rc;

	if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	rc = roc_nix_rq_ena_dis(rq, false);
	if (rc) {
		plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
		goto done;
	}

	data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
done:
	return rc;
}

/* ethdev dev_stop callback: quiesce the port in a hardware-mandated
 * order — stop ingress classification (when safe), link events, inline
 * outbound polling and the inline device RQ, then stop and drain every
 * Rx queue, stop Tx queues, and finally report link down.
 */
static int
cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct rte_mbuf *rx_pkts[32];
	struct rte_eth_link link;
	int count, i, j, rc;
	void *rxq;

	/* In case of Inline IPSec, will need to avoid disabling the MCAM rules and NPC Rx
	 * in this routine to continue processing of second pass inflight packets if any.
	 * Drop of second pass packets will leak first pass buffers on some platforms
	 * due to hardware limitations.
	 */
	if (roc_feature_nix_has_second_pass_drop() ||
	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)) {
		/* Disable all the NPC entries */
		rc = roc_npc_mcam_enable_all_entries(&dev->npc, 0);
		if (rc)
			return rc;

		/* Disable Rx via NPC */
		roc_nix_npc_rx_ena_dis(&dev->nix, false);
	}

	/* Stop link change events */
	if (!roc_nix_is_vf_or_sdp(&dev->nix))
		roc_nix_mac_link_event_start_stop(&dev->nix, false);

	/* Pause outbound soft-expiry polling while the port is down */
	roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, false);

	/* Stop inline device RQ first */
	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
		roc_nix_inl_rq_ena_dis(&dev->nix, false);

	/* Stop rx queues and free up pkts pending */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rc = dev_ops->rx_queue_stop(eth_dev, i);
		if (rc)
			continue;

		/* Drain in batches of 32 using the no-offload burst fn */
		rxq = eth_dev->data->rx_queues[i];
		count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
		while (count) {
			for (j = 0; j < count; j++)
				rte_pktmbuf_free(rx_pkts[j]);
			count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
		}
	}

	/* Stop tx queues */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		dev_ops->tx_queue_stop(eth_dev, i);

	/* Bring down link status internally */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(eth_dev, &link);

	return 0;
}
/* ethdev dev_start callback: bring the port up in order — recalculate
 * MTU, start Rx queues, enable the inline device RQ, start Tx queues,
 * apply flow control, enable NPC Rx/MCAM entries, start link events and
 * PTP/timestamping, then resume inline outbound soft-expiry polling.
 * On a late failure NPC Rx is disabled again before returning.
 */
int
cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc, i;

	/* Recalculate MTU unless PTP is on (which alters frame layout) —
	 * NOTE(review): the ptp_en gating rationale is inferred from the
	 * condition only; confirm against nix_recalc_mtu.
	 */
	if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
		rc = nix_recalc_mtu(eth_dev);
		if (rc)
			return rc;
	}

	/* Start rx queues */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rc = cnxk_nix_rx_queue_start(eth_dev, i);
		if (rc)
			return rc;
	}

	/* Enable the inline device RQ after the normal RQs */
	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
		rc = roc_nix_inl_rq_ena_dis(&dev->nix, true);
		if (rc) {
			plt_err("Failed to enable Inline device RQ, rc=%d", rc);
			return rc;
		}
	}

	/* Start tx queues */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = cnxk_nix_tx_queue_start(eth_dev, i);
		if (rc)
			return rc;
	}

	/* Update Flow control configuration */
	rc = nix_update_flow_ctrl_config(eth_dev);
	if (rc) {
		plt_err("Failed to enable flow control. error code(%d)", rc);
		return rc;
	}

	/* Enable Rx in NPC */
	rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
	if (rc) {
		plt_err("Failed to enable NPC rx %d", rc);
		return rc;
	}

	rc = roc_npc_mcam_enable_all_entries(&dev->npc, 1);
	if (rc) {
		plt_err("Failed to enable NPC entries %d", rc);
		return rc;
	}

	cnxk_nix_toggle_flag_link_cfg(dev, true);

	/* Start link change events */
	if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
		rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
		if (rc) {
			plt_err("Failed to start cgx link event %d", rc);
			goto rx_disable;
		}
	}

	/* Enable PTP if it is requested by the user or already
	 * enabled on PF owning this VF
	 */
	memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
		cnxk_eth_dev_ops.timesync_enable(eth_dev);
	else
		cnxk_eth_dev_ops.timesync_disable(eth_dev);

	/* Register the mbuf dynamic field/flag used for Rx timestamps */
	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP || dev->ptp_en) {
		rc = rte_mbuf_dyn_rx_timestamp_register
			(&dev->tstamp.tstamp_dynfield_offset,
			 &dev->tstamp.rx_tstamp_dynflag);
		if (rc != 0) {
			plt_err("Failed to register Rx timestamp field/flag");
			goto rx_disable;
		}
	}

	cnxk_nix_toggle_flag_link_cfg(dev, false);

	/* Resume outbound soft-expiry polling now that the port is up */
	roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, true);

	return 0;

rx_disable:
	/* Undo NPC Rx enable and clear the link-config-in-progress flag */
	roc_nix_npc_rx_ena_dis(&dev->nix, false);
	cnxk_nix_toggle_flag_link_cfg(dev, false);
	return rc;
}

/* Forward declarations: defined later, referenced by cnxk_eth_dev_ops */
static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev);
static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev);
/* Platform-independent ethdev ops table shared by cn9k/cn10k; platform
 * specific entries (configure, rx/tx queue setup, etc.) are filled in by
 * the per-SoC drivers.
 */
struct eth_dev_ops cnxk_eth_dev_ops = {
	.mtu_set = cnxk_nix_mtu_set,
	.mac_addr_add = cnxk_nix_mac_addr_add,
	.mac_addr_remove = cnxk_nix_mac_addr_del,
	.mac_addr_set = cnxk_nix_mac_addr_set,
	.dev_infos_get = cnxk_nix_info_get,
	.link_update = cnxk_nix_link_update,
	.tx_queue_release = cnxk_nix_tx_queue_release,
	.rx_queue_release = cnxk_nix_rx_queue_release,
	.dev_stop = cnxk_nix_dev_stop,
	.dev_close = cnxk_nix_dev_close,
	.dev_reset = cnxk_nix_dev_reset,
	.tx_queue_start = cnxk_nix_tx_queue_start,
	.rx_queue_start = cnxk_nix_rx_queue_start,
	.rx_queue_stop = cnxk_nix_rx_queue_stop,
	.dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
	.promiscuous_enable = cnxk_nix_promisc_enable,
	.promiscuous_disable = cnxk_nix_promisc_disable,
	.allmulticast_enable = cnxk_nix_allmulticast_enable,
	.allmulticast_disable = cnxk_nix_allmulticast_disable,
	.rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
	.priority_flow_ctrl_queue_config =
		cnxk_nix_priority_flow_ctrl_queue_config,
	.priority_flow_ctrl_queue_info_get =
		cnxk_nix_priority_flow_ctrl_queue_info_get,
	.dev_set_link_up = cnxk_nix_set_link_up,
	.dev_set_link_down = cnxk_nix_set_link_down,
	.get_module_info = cnxk_nix_get_module_info,
	.get_module_eeprom = cnxk_nix_get_module_eeprom,
	.rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
	.rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
	.pool_ops_supported = cnxk_nix_pool_ops_supported,
	.queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
	.stats_get = cnxk_nix_stats_get,
	.stats_reset = cnxk_nix_stats_reset,
	.xstats_get = cnxk_nix_xstats_get,
	.xstats_get_names = cnxk_nix_xstats_get_names,
	.xstats_reset = cnxk_nix_xstats_reset,
	.xstats_get_by_id = cnxk_nix_xstats_get_by_id,
	.xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
	.fw_version_get = cnxk_nix_fw_version_get,
	.rxq_info_get = cnxk_nix_rxq_info_get,
	.txq_info_get = cnxk_nix_txq_info_get,
	.tx_done_cleanup = cnxk_nix_tx_done_cleanup,
	.flow_ops_get = cnxk_nix_flow_ops_get,
	.get_reg = cnxk_nix_dev_get_reg,
	.timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
	.timesync_read_time = cnxk_nix_timesync_read_time,
	.timesync_write_time = cnxk_nix_timesync_write_time,
	.timesync_adjust_time = cnxk_nix_timesync_adjust_time,
	.read_clock = cnxk_nix_read_clock,
	.reta_update = cnxk_nix_reta_update,
	.reta_query = cnxk_nix_reta_query,
	.rss_hash_update = cnxk_nix_rss_hash_update,
	.rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
	.set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
	.set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
	.tm_ops_get = cnxk_nix_tm_ops_get,
	.mtr_ops_get = cnxk_nix_mtr_ops_get,
	.eth_dev_priv_dump = cnxk_nix_eth_dev_priv_dump,
	.cman_info_get = cnxk_nix_cman_info_get,
	.cman_config_init = cnxk_nix_cman_config_init,
	.cman_config_set = cnxk_nix_cman_config_set,
	.cman_config_get = cnxk_nix_cman_config_get,
	.eth_tx_descriptor_dump = cnxk_nix_tx_descriptor_dump,
};

/* ROC queue-error callback: forward the hardware queue error to the
 * application as an RTE_ETH_EVENT_INTR_RESET event.
 */
void
cnxk_eth_dev_q_err_cb(struct roc_nix *nix, void *data)
{
	/* NOTE(review): cast assumes roc_nix is the first member of
	 * struct cnxk_eth_dev — confirm layout in cnxk_ethdev.h.
	 */
	struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix;
	struct rte_eth_dev *eth_dev = dev->eth_dev;

	/* Set the flag and execute application callbacks */
	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_RESET, data);
}

/* One-time ethdev init: allocate security context, parse devargs, bring up
 * the base ROC NIX, register ROC callbacks, and allocate MAC/DMAC tables.
 * Error paths unwind via goto labels in reverse acquisition order.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int
cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ctx *sec_ctx;
	struct roc_nix *nix = &dev->nix;
	struct rte_pci_device *pci_dev;
	int rc, max_entries;

	eth_dev->dev_ops = &cnxk_eth_dev_ops;
	eth_dev->rx_queue_count = cnxk_nix_rx_queue_count;
	eth_dev->rx_descriptor_status = cnxk_nix_rx_descriptor_status;
	eth_dev->tx_descriptor_status = cnxk_nix_tx_descriptor_status;

	/* Alloc security context (done for both primary and secondary, as
	 * the ops pointers must be valid in every process).
	 * NOTE(review): sec_ctx is not freed on the error paths below; it is
	 * released only in cnxk_eth_dev_uninit() — verify the probe-failure
	 * path does not leak it.
	 */
	sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
	if (!sec_ctx)
		return -ENOMEM;
	sec_ctx->device = eth_dev;
	sec_ctx->ops = &cnxk_eth_sec_ops;
	sec_ctx->flags = RTE_SEC_CTX_F_FAST_SET_MDATA;
	eth_dev->security_ctx = sec_ctx;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Parse devargs string */
	rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
	if (rc) {
		plt_err("Failed to parse devargs rc=%d", rc);
		goto error;
	}

	/* Initialize base roc nix */
	nix->pci_dev = pci_dev;
	nix->hw_vlan_ins = true;
	nix->port_id = eth_dev->data->port_id;
	/* For better performance set default VF root schedule weight */
	nix->root_sched_weight = NIX_TM_DFLT_RR_WT;
	if (roc_feature_nix_has_own_meta_aura())
		nix->local_meta_aura_ena = true;
	rc = roc_nix_dev_init(nix);
	if (rc) {
		plt_err("Failed to initialize roc nix rc=%d", rc);
		goto error;
	}

	/* Register link status change callback */
	roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);

	/* Register link info get callback */
	roc_nix_mac_link_info_get_cb_register(nix,
					      cnxk_eth_dev_link_status_get_cb);

	/* Register queue error callback */
	roc_nix_q_err_cb_register(nix, cnxk_eth_dev_q_err_cb);

	/* Register callback for inline meta pool create */
	roc_nix_inl_meta_pool_cb_register(cnxk_nix_inl_meta_pool_cb);

	/* Register callback for inline meta pool create 1:N pool:aura */
	roc_nix_inl_custom_meta_pool_cb_register(cnxk_nix_inl_custom_meta_pool_cb);

	dev->eth_dev = eth_dev;
	dev->configured = 0;
	dev->ptype_disable = 0;
	dev->proto = RTE_MTR_COLOR_IN_PROTO_OUTER_VLAN;

	/* Inline inbound/outbound SA lists and their locks */
	TAILQ_INIT(&dev->inb.list);
	TAILQ_INIT(&dev->outb.list);
	rte_spinlock_init(&dev->inb.lock);
	rte_spinlock_init(&dev->outb.lock);

	/* For vfs, returned max_entries will be 0. but to keep default mac
	 * address, one entry must be allocated. so setting up to 1.
	 */
	if (roc_nix_is_vf_or_sdp(nix))
		max_entries = 1;
	else
		max_entries = roc_nix_mac_max_entries_get(nix);

	if (max_entries <= 0) {
		plt_err("Failed to get max entries for mac addr");
		rc = -ENOTSUP;
		goto dev_fini;
	}

	eth_dev->data->mac_addrs =
		rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		plt_err("Failed to allocate memory for mac addr");
		rc = -ENOMEM;
		goto dev_fini;
	}

	/* Map from MAC table slot to NPC DMAC filter index */
	dev->dmac_idx_map = rte_zmalloc("dmac_idx_map", max_entries * sizeof(int), 0);
	if (dev->dmac_idx_map == NULL) {
		plt_err("Failed to allocate memory for dmac idx map");
		rc = -ENOMEM;
		goto free_mac_addrs;
	}

	dev->max_mac_entries = max_entries;
	/* Slot 0 is always taken by the default MAC address */
	dev->dmac_filter_count = 1;

	/* Get mac address */
	rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
	if (rc) {
		plt_err("Failed to get mac addr, rc=%d", rc);
		goto free_mac_addrs;
	}

	/* Update the mac address */
	memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);

	/* Union of all capabilities supported by CNXK.
	 * Platform specific capabilities will be
	 * updated later.
	 */
	dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
	dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
	dev->speed_capa = nix_get_speed_capa(dev);

	/* Initialize roc npc */
	dev->npc.roc_nix = nix;
	rc = roc_npc_init(&dev->npc);
	if (rc)
		goto free_mac_addrs;

	/* MACsec (MCS) block init, only where hardware supports it */
	if (roc_feature_nix_has_macsec()) {
		rc = cnxk_mcs_dev_init(dev, 0);
		if (rc) {
			plt_err("Failed to init MCS");
			goto free_mac_addrs;
		}
		dev->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
		dev->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;

		TAILQ_INIT(&dev->mcs_list);
	}

	/* Reserve a switch domain for eswitch device */
	if (pci_dev->id.device_id == PCI_DEVID_CNXK_RVU_ESWITCH_VF) {
		eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
		rc = rte_eth_switch_domain_alloc(&dev->switch_domain_id);
		if (rc) {
			plt_err("Failed to alloc switch domain: %d", rc);
			goto free_mac_addrs;
		}
	}

	plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64 " rxoffload_capa=0x%" PRIx64
		    " txoffload_capa=0x%" PRIx64,
		    eth_dev->data->port_id, roc_nix_get_pf(nix), roc_nix_get_vf(nix),
		    CNXK_ETH_DEV_PMD_VERSION, dev->hwcap, dev->rx_offload_capa,
		    dev->tx_offload_capa);
	return 0;

free_mac_addrs:
	rte_free(eth_dev->data->mac_addrs);
	rte_free(dev->dmac_idx_map);
dev_fini:
	roc_nix_dev_fini(nix);
error:
	plt_err("Failed to init nix eth_dev rc=%d", rc);
	return rc;
}

/* Tear down the ethdev: restore flow control, release flow/meter/security
 * resources, free queues and the base ROC NIX, roughly in reverse order of
 * cnxk_eth_dev_init()/dev_start().
 *
 * @param reset  true when called from dev_reset: keep dev_private alive so
 *               cnxk_eth_dev_init() can be re-run on the same device.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int
cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct cnxk_pfc_cfg *pfc_cfg = &dev->pfc_cfg;
	struct cnxk_fc_cfg *fc_cfg = &dev->fc_cfg;
	struct rte_eth_pfc_queue_conf pfc_conf;
	struct roc_nix *nix = &dev->nix;
	struct rte_eth_fc_conf fc_conf;
	int rc, i;

	plt_free(eth_dev->security_ctx);
	eth_dev->security_ctx = NULL;

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Disable switch hdr pkind */
	roc_nix_switch_hdr_set(&dev->nix, 0, 0, 0, 0);

	/* Clear the flag since we are closing down */
	dev->configured = 0;

	/* Disable all the NPC entries */
	rc = roc_npc_mcam_enable_all_entries(&dev->npc, 0);
	if (rc)
		return rc;

	roc_nix_npc_rx_ena_dis(nix, false);

	/* Restore 802.3 Flow control configuration */
	memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_queue_conf));
	memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	if (fc_cfg->rx_pause || fc_cfg->tx_pause) {
		fc_conf.mode = RTE_ETH_FC_NONE;
		rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
		if (rc < 0)
			plt_err("Failed to reset control flow. error code(%d)",
				rc);
	}
	/* Reset per-queue PFC on every Rx and Tx queue that had it */
	if (pfc_cfg->rx_pause_en || pfc_cfg->tx_pause_en) {
		for (i = 0; i < RTE_MAX(eth_dev->data->nb_rx_queues,
					eth_dev->data->nb_tx_queues);
		     i++) {
			pfc_conf.mode = RTE_ETH_FC_NONE;
			pfc_conf.rx_pause.tc = ROC_NIX_PFC_CLASS_INVALID;
			pfc_conf.rx_pause.tx_qid = i;
			pfc_conf.tx_pause.tc = ROC_NIX_PFC_CLASS_INVALID;
			pfc_conf.tx_pause.rx_qid = i;
			rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
								      &pfc_conf);
			if (rc && rc != -ENOTSUP)
				plt_err("Failed to reset PFC. error code(%d)", rc);
		}
	}

	/* Free switch domain ID reserved for eswitch device */
	if ((eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) &&
	    rte_eth_switch_domain_free(dev->switch_domain_id))
		plt_err("Failed to free switch domain");

	/* Disable and free rte_meter entries */
	nix_meter_fini(dev);

	/* Disable and free rte_flow entries */
	roc_npc_fini(&dev->npc);

	/* Disable link status events */
	roc_nix_mac_link_event_start_stop(nix, false);

	/* Unregister the link update op, this is required to stop VFs from
	 * receiving link status updates on exit path.
	 */
	roc_nix_mac_link_cb_unregister(nix);

	/* Free up SQs */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		dev_ops->tx_queue_release(eth_dev, i);
		eth_dev->data->tx_queues[i] = NULL;
	}
	eth_dev->data->nb_tx_queues = 0;

	/* Free up RQ's and CQ's */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		dev_ops->rx_queue_release(eth_dev, i);
		eth_dev->data->rx_queues[i] = NULL;
	}
	eth_dev->data->nb_rx_queues = 0;

	if (roc_feature_nix_has_macsec())
		cnxk_mcs_dev_fini(dev);

	/* Free security resources */
	nix_security_release(dev);

	/* Free tm resources */
	roc_nix_tm_fini(nix);

	/* Unregister queue irqs */
	roc_nix_unregister_queue_irqs(nix);

	/* Unregister cq irqs */
	if (eth_dev->data->dev_conf.intr_conf.rxq)
		roc_nix_unregister_cq_irqs(nix);

	/* Free ROC RQ's, SQ's and CQ's memory */
	nix_free_queue_mem(dev);

	/* free nix bpid */
	rc = nix_rxchan_cfg_disable(dev);
	if (rc)
		plt_err("Failed to free nix bpid, rc=%d", rc);

	/* Free nix lf resources */
	rc = roc_nix_lf_free(nix);
	if (rc)
		plt_err("Failed to free nix lf, rc=%d", rc);

	rte_free(dev->dmac_idx_map);
	dev->dmac_idx_map = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	rc = roc_nix_dev_fini(nix);
	/* Can be freed later by PMD if NPA LF is in use */
	if (rc == -EAGAIN) {
		/* On reset, keep dev_private so init can be re-run */
		if (!reset)
			eth_dev->data->dev_private = NULL;
		return 0;
	} else if (rc) {
		plt_err("Failed in nix dev fini, rc=%d", rc);
	}

	return rc;
}

/* ethdev .dev_close op: full teardown (reset = false) */
static int
cnxk_nix_dev_close(struct rte_eth_dev *eth_dev)
{
	cnxk_eth_dev_uninit(eth_dev, false);
	return 0;
}

/* ethdev .dev_reset op: teardown keeping dev_private, then re-init */
static int
cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev)
{
	int rc;

	rc = cnxk_eth_dev_uninit(eth_dev, true);
	if (rc)
		return rc;

	return cnxk_eth_dev_init(eth_dev);
}

/* PCI remove: uninit the port, then release the common NPA/NIX resource if
 * this device was the one hosting it.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int
cnxk_nix_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct roc_nix *nix;
	int rc = -EINVAL;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (eth_dev) {
		/* Cleanup eth dev */
		rc = cnxk_eth_dev_uninit(eth_dev, false);
		if (rc)
			return rc;

		rte_eth_dev_release_port(eth_dev);
	}

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Check if this device is hosting common resource */
	nix = roc_idev_npa_nix_get();
	if (!nix || nix->pci_dev != pci_dev)
		return 0;

	/* Try nix fini now */
	rc = roc_nix_dev_fini(nix);
	if (rc == -EAGAIN) {
		plt_info("%s: common resource in use by other devices",
			 pci_dev->name);
		goto exit;
	} else if (rc) {
		plt_err("Failed in nix dev fini, rc=%d", rc);
		goto exit;
	}

	/* Free device pointer as rte_ethdev does not have it anymore */
	rte_free(nix);
exit:
	return rc;
}

/* PCI probe: allocate the ethdev with cnxk private data and run init.
 *
 * Returns 0 on success (or when a secondary process races a detach),
 * negative error code otherwise.
 */
int
cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	int rc;

	RTE_SET_USED(pci_drv);

	rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
					   cnxk_eth_dev_init);

	/* On error on secondary, recheck if port exists in primary or
	 * in mid of detach state.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
		if (!rte_eth_dev_allocated(pci_dev->device.name))
			return 0;
	return rc;
}