xref: /dpdk/drivers/net/cnxk/cn10k_tx_select.c (revision eabbac98af1345157e07c431e18543296c37c355)
1c5b97e98SPavan Nikhilesh /* SPDX-License-Identifier: BSD-3-Clause
2c5b97e98SPavan Nikhilesh  * Copyright(C) 2021 Marvell.
3c5b97e98SPavan Nikhilesh  */
4c5b97e98SPavan Nikhilesh 
5c5b97e98SPavan Nikhilesh #include "cn10k_ethdev.h"
6c5b97e98SPavan Nikhilesh #include "cn10k_tx.h"
7c5b97e98SPavan Nikhilesh 
87d9b1d44SJerin Jacob static __rte_used inline void
pick_tx_func(struct rte_eth_dev * eth_dev,const eth_tx_burst_t tx_burst[NIX_TX_OFFLOAD_MAX])9c5b97e98SPavan Nikhilesh pick_tx_func(struct rte_eth_dev *eth_dev,
10c5b97e98SPavan Nikhilesh 	     const eth_tx_burst_t tx_burst[NIX_TX_OFFLOAD_MAX])
11c5b97e98SPavan Nikhilesh {
12c5b97e98SPavan Nikhilesh 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
13c5b97e98SPavan Nikhilesh 
14c5b97e98SPavan Nikhilesh 	/* [SEC] [TSP] [TSO] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */
15c5b97e98SPavan Nikhilesh 	eth_dev->tx_pkt_burst =
16c5b97e98SPavan Nikhilesh 		tx_burst[dev->tx_offload_flags & (NIX_TX_OFFLOAD_MAX - 1)];
173cebc8f3SSatheesh Paul 
183cebc8f3SSatheesh Paul 	if (eth_dev->data->dev_started)
193cebc8f3SSatheesh Paul 		rte_eth_fp_ops[eth_dev->data->port_id].tx_pkt_burst =
203cebc8f3SSatheesh Paul 			eth_dev->tx_pkt_burst;
21c5b97e98SPavan Nikhilesh }
22c5b97e98SPavan Nikhilesh 
23cb6d97a8SSatha Rao #if defined(RTE_ARCH_ARM64)
24cb6d97a8SSatha Rao static int
cn10k_nix_tx_queue_count(void * tx_queue)25cb6d97a8SSatha Rao cn10k_nix_tx_queue_count(void *tx_queue)
26cb6d97a8SSatha Rao {
27cb6d97a8SSatha Rao 	struct cn10k_eth_txq *txq = (struct cn10k_eth_txq *)tx_queue;
28cb6d97a8SSatha Rao 
29cb6d97a8SSatha Rao 	return cnxk_nix_tx_queue_count(txq->fc_mem, txq->sqes_per_sqb_log2);
30cb6d97a8SSatha Rao }
31cb6d97a8SSatha Rao 
32cb6d97a8SSatha Rao static int
cn10k_nix_tx_queue_sec_count(void * tx_queue)33cb6d97a8SSatha Rao cn10k_nix_tx_queue_sec_count(void *tx_queue)
34cb6d97a8SSatha Rao {
35cb6d97a8SSatha Rao 	struct cn10k_eth_txq *txq = (struct cn10k_eth_txq *)tx_queue;
36cb6d97a8SSatha Rao 
37cb6d97a8SSatha Rao 	return cnxk_nix_tx_queue_sec_count(txq->fc_mem, txq->sqes_per_sqb_log2, txq->cpt_fc);
38cb6d97a8SSatha Rao }
39cb6d97a8SSatha Rao 
/* Select a template-generated Tx burst function for the port.
 *
 * Each table below is populated by expanding NIX_TX_FASTPATH_MODES with a
 * local T() macro, producing one specialized cn10k_nix_xmit_pkts_* symbol
 * per offload-flag combination, placed at index [flags]. The final choice
 * is scalar vs. vector, with an mseg variant when multi-segment Tx is
 * enabled. Compiled out entirely when CNXK_DIS_TMPLT_FUNC is defined (the
 * runtime-checked function from cn10k_eth_set_tx_blk_func is used then).
 */
static void
cn10k_eth_set_tx_tmplt_func(struct rte_eth_dev *eth_dev)
{
#if !defined(CNXK_DIS_TMPLT_FUNC)
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	/* Scalar, single-segment variants. */
	const eth_tx_burst_t nix_eth_tx_burst[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn10k_nix_xmit_pkts_##name,

		NIX_TX_FASTPATH_MODES
#undef T
	};

	/* Scalar, multi-segment variants. */
	const eth_tx_burst_t nix_eth_tx_burst_mseg[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn10k_nix_xmit_pkts_mseg_##name,

		NIX_TX_FASTPATH_MODES
#undef T
	};

	/* Vector (SIMD), single-segment variants. */
	const eth_tx_burst_t nix_eth_tx_vec_burst[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn10k_nix_xmit_pkts_vec_##name,

		NIX_TX_FASTPATH_MODES
#undef T
	};

	/* Vector (SIMD), multi-segment variants. */
	const eth_tx_burst_t nix_eth_tx_vec_burst_mseg[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn10k_nix_xmit_pkts_vec_mseg_##name,

		NIX_TX_FASTPATH_MODES
#undef T
	};

	/* Scalar path is forced when requested explicitly or when Tx mark
	 * (timestamp/mark offload) is active; otherwise prefer vector.
	 * The mseg pick, when taken, overrides the single-seg pick.
	 */
	if (dev->scalar_ena || dev->tx_mark) {
		pick_tx_func(eth_dev, nix_eth_tx_burst);
		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
	} else {
		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
	}
#else
	RTE_SET_USED(eth_dev);
#endif
}
87*eabbac98SPavan Nikhilesh 
/* Install the single runtime-checked ("all offload") Tx burst function.
 * Active only when template functions are compiled out
 * (CNXK_DIS_TMPLT_FUNC); otherwise this is a no-op stub.
 */
static void
cn10k_eth_set_tx_blk_func(struct rte_eth_dev *eth_dev)
{
#if defined(CNXK_DIS_TMPLT_FUNC)
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint16_t qid;

	/* Propagate the current offload flags to every Tx queue so the
	 * runtime-checked burst function can honour them per queue.
	 */
	for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) {
		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[qid];

		txq->tx_offload_flags = dev->tx_offload_flags;
	}

	/* Scalar path when forced or when Tx mark is active, else vector. */
	eth_dev->tx_pkt_burst = (dev->scalar_ena || dev->tx_mark) ?
					cn10k_nix_xmit_pkts_all_offload :
					cn10k_nix_xmit_pkts_vec_all_offload;

	/* Running port: patch fast-path ops so datapath sees the change. */
	if (eth_dev->data->dev_started)
		rte_eth_fp_ops[eth_dev->data->port_id].tx_pkt_burst = eth_dev->tx_pkt_burst;
#else
	RTE_SET_USED(eth_dev);
#endif
}
112*eabbac98SPavan Nikhilesh #endif
113*eabbac98SPavan Nikhilesh 
/* Public entry point: select and publish all Tx fast-path callbacks for
 * the port (burst function and tx_queue_count). ARM64 only; a stub on
 * other architectures.
 */
void
cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
{
#if defined(RTE_ARCH_ARM64)
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	/* Install the runtime-checked function first, then let the template
	 * selector override it when template functions are compiled in —
	 * exactly one of the two is effective per build.
	 */
	cn10k_eth_set_tx_blk_func(eth_dev);
	cn10k_eth_set_tx_tmplt_func(eth_dev);

	/* Security offload needs the CPT-aware queue-count variant. */
	eth_dev->tx_queue_count = (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) ?
					  cn10k_nix_tx_queue_sec_count :
					  cn10k_nix_tx_queue_count;

	/* Make the pointer stores visible before datapath threads use them. */
	rte_atomic_thread_fence(rte_memory_order_release);
#else
	RTE_SET_USED(eth_dev);
#endif
}
133