/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2024 Marvell.
 */

#include "cn20k_ethdev.h"
#include "cn20k_tx.h"

/* Select the Tx burst function matching the device's offload flags from the
 * given template table and, if the port is already started, publish it to the
 * fast-path ops table so it takes effect immediately.
 */
static __rte_used inline void
pick_tx_func(struct rte_eth_dev *eth_dev, const eth_tx_burst_t tx_burst[NIX_TX_OFFLOAD_MAX])
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	/* [SEC] [TSP] [TSO] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */
	eth_dev->tx_pkt_burst = tx_burst[dev->tx_offload_flags & (NIX_TX_OFFLOAD_MAX - 1)];

	if (eth_dev->data->dev_started)
		rte_eth_fp_ops[eth_dev->data->port_id].tx_pkt_burst = eth_dev->tx_pkt_burst;
}

#if defined(RTE_ARCH_ARM64)
/* Return the number of packets pending on the SQ, derived from the
 * flow-control memory and the SQEs-per-SQB ratio.
 */
static int
cn20k_nix_tx_queue_count(void *tx_queue)
{
	struct cn20k_eth_txq *txq = (struct cn20k_eth_txq *)tx_queue;

	return cnxk_nix_tx_queue_count(txq->fc_mem, txq->sqes_per_sqb_log2);
}

/* As above, but also accounts for packets pending on the CPT queue when
 * Tx security offload (inline IPsec) is enabled.
 */
static int
cn20k_nix_tx_queue_sec_count(void *tx_queue)
{
	struct cn20k_eth_txq *txq = (struct cn20k_eth_txq *)tx_queue;

	return cnxk_nix_tx_queue_sec_count(txq->fc_mem, txq->sqes_per_sqb_log2, txq->cpt_fc);
}

/* Template path: NIX_TX_FASTPATH_MODES expands T() once per supported offload
 * combination, building lookup tables indexed by the offload flags. Scalar
 * tables are used when scalar mode is forced or Tx marking is enabled;
 * otherwise the vector tables are used. The mseg variants additionally handle
 * multi-segment mbufs.
 */
static void
cn20k_eth_set_tx_tmplt_func(struct rte_eth_dev *eth_dev)
{
#if !defined(CNXK_DIS_TMPLT_FUNC)
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	const eth_tx_burst_t nix_eth_tx_burst[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags) [flags] = cn20k_nix_xmit_pkts_##name,

		NIX_TX_FASTPATH_MODES
#undef T
	};

	const eth_tx_burst_t nix_eth_tx_burst_mseg[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags) [flags] = cn20k_nix_xmit_pkts_mseg_##name,

		NIX_TX_FASTPATH_MODES
#undef T
	};

	const eth_tx_burst_t nix_eth_tx_vec_burst[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags) [flags] = cn20k_nix_xmit_pkts_vec_##name,

		NIX_TX_FASTPATH_MODES
#undef T
	};

	const eth_tx_burst_t nix_eth_tx_vec_burst_mseg[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags) [flags] = cn20k_nix_xmit_pkts_vec_mseg_##name,

		NIX_TX_FASTPATH_MODES
#undef T
	};

	if (dev->scalar_ena || dev->tx_mark) {
		pick_tx_func(eth_dev, nix_eth_tx_burst);
		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
	} else {
		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
	}
#else
	RTE_SET_USED(eth_dev);
#endif
}

/* Non-template path (CNXK_DIS_TMPLT_FUNC defined): the specialized template
 * functions are compiled out and a single burst function that checks all
 * offloads at runtime is used instead.
 */
static void
cn20k_eth_set_tx_blk_func(struct rte_eth_dev *eth_dev)
{
#if defined(CNXK_DIS_TMPLT_FUNC)
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (dev->scalar_ena || dev->tx_mark)
		eth_dev->tx_pkt_burst = cn20k_nix_xmit_pkts_all_offload;
	else
		eth_dev->tx_pkt_burst = cn20k_nix_xmit_pkts_vec_all_offload;

	if (eth_dev->data->dev_started)
		rte_eth_fp_ops[eth_dev->data->port_id].tx_pkt_burst = eth_dev->tx_pkt_burst;
#else
	RTE_SET_USED(eth_dev);
#endif
}
#endif

void
cn20k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
{
#if defined(RTE_ARCH_ARM64)
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	cn20k_eth_set_tx_blk_func(eth_dev);
	cn20k_eth_set_tx_tmplt_func(eth_dev);

	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
		eth_dev->tx_queue_count = cn20k_nix_tx_queue_sec_count;
	else
		eth_dev->tx_queue_count = cn20k_nix_tx_queue_count;

	/* Ensure the function pointer stores above are visible to other cores
	 * before they are dereferenced.
	 */
	rte_atomic_thread_fence(rte_memory_order_release);
#else
	RTE_SET_USED(eth_dev);
#endif
}