/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn9k_ethdev.h"
#include "cn9k_tx.h"

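/* Pick the Tx burst function matching the device's offload flag
 * combination and, if the port is already started, publish it in the
 * fast-path ops table so active datapath threads switch over too.
 */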
static __rte_used void
pick_tx_func(struct rte_eth_dev *eth_dev,
	     const eth_tx_burst_t tx_burst[NIX_TX_OFFLOAD_MAX])
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	/* [TS] [TSO] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */
	eth_dev->tx_pkt_burst =
		tx_burst[dev->tx_offload_flags & (NIX_TX_OFFLOAD_MAX - 1)];

	if (eth_dev->data->dev_started)
		rte_eth_fp_ops[eth_dev->data->port_id].tx_pkt_burst =
			eth_dev->tx_pkt_burst;
}

#if defined(RTE_ARCH_ARM64)
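/* Report the number of SQEs pending in the Tx queue using the queue's
 * flow-control memory.
 */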
static int
cn9k_nix_tx_queue_count(void *tx_queue)
{
	struct cn9k_eth_txq *txq = (struct cn9k_eth_txq *)tx_queue;

	return cnxk_nix_tx_queue_count(txq->fc_mem, txq->sqes_per_sqb_log2);
}

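/* As above, but additionally accounts for packets still pending in the
 * CPT queue, for queues with security offload enabled.
 */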
static int
cn9k_nix_tx_queue_sec_count(void *tx_queue)
{
	struct cn9k_eth_txq *txq = (struct cn9k_eth_txq *)tx_queue;

	return cnxk_nix_tx_queue_sec_count(txq->fc_mem, txq->sqes_per_sqb_log2, txq->cpt_fc);
}

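/* Select a specialized Tx burst function from the template-generated
 * lookup tables indexed by offload flags. Compiled out when
 * CNXK_DIS_TMPLT_FUNC is defined.
 */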
static void
cn9k_eth_set_tx_tmplt_func(struct rte_eth_dev *eth_dev)
{
#if !defined(CNXK_DIS_TMPLT_FUNC)
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	const eth_tx_burst_t nix_eth_tx_burst[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags) [flags] = cn9k_nix_xmit_pkts_##name,
		NIX_TX_FASTPATH_MODES
#undef T
	};

	const eth_tx_burst_t nix_eth_tx_burst_mseg[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags) [flags] = cn9k_nix_xmit_pkts_mseg_##name,
		NIX_TX_FASTPATH_MODES
#undef T
	};

	const eth_tx_burst_t nix_eth_tx_vec_burst[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags) [flags] = cn9k_nix_xmit_pkts_vec_##name,
		NIX_TX_FASTPATH_MODES
#undef T
	};

	const eth_tx_burst_t nix_eth_tx_vec_burst_mseg[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags) [flags] = cn9k_nix_xmit_pkts_vec_mseg_##name,
		NIX_TX_FASTPATH_MODES
#undef T
	};

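	/* Scalar paths are used when vector Tx is disabled or Tx marking
	 * is enabled; multi-segment variants are picked only when the
	 * MULTI_SEGS offload is requested.
	 */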
	if (dev->scalar_ena || dev->tx_mark) {
		pick_tx_func(eth_dev, nix_eth_tx_burst);
		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
	} else {
		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
	}
#else
	RTE_SET_USED(eth_dev);
#endif
}

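/* Fallback used when the template functions are compiled out: program a
 * single all-offload burst function that consults the per-queue offload
 * flags at runtime instead.
 */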
static void
cn9k_eth_set_tx_blk_func(struct rte_eth_dev *eth_dev)
{
#if defined(CNXK_DIS_TMPLT_FUNC)
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cn9k_eth_txq *txq;
	int i;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		txq = (struct cn9k_eth_txq *)eth_dev->data->tx_queues[i];
		txq->tx_offload_flags = dev->tx_offload_flags;
	}

	if (dev->scalar_ena || dev->tx_mark)
		eth_dev->tx_pkt_burst = cn9k_nix_xmit_pkts_all_offload;
	else
		eth_dev->tx_pkt_burst = cn9k_nix_xmit_pkts_vec_all_offload;

	if (eth_dev->data->dev_started)
		rte_eth_fp_ops[eth_dev->data->port_id].tx_pkt_burst = eth_dev->tx_pkt_burst;
#else
	RTE_SET_USED(eth_dev);
#endif
}
#endif

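/* Select the Tx burst and Tx queue-count handlers for the device based
 * on its configured offloads.
 */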
void
cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
{
#if defined(RTE_ARCH_ARM64)
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	cn9k_eth_set_tx_blk_func(eth_dev);
	cn9k_eth_set_tx_tmplt_func(eth_dev);

	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
		eth_dev->tx_queue_count = cn9k_nix_tx_queue_sec_count;
	else
		eth_dev->tx_queue_count = cn9k_nix_tx_queue_count;

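	/* Ensure the function-pointer updates above are visible to other
	 * cores before they are dereferenced.
	 */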
	rte_atomic_thread_fence(rte_memory_order_release);
#else
	RTE_SET_USED(eth_dev);
#endif
}