/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn10k_ethdev.h"
#include "cn10k_tx.h"

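/* Install the Tx burst function for this port by indexing the given
 * lookup table with the device's Tx offload flags. If the device is
 * already started, also update the fast-path ops table so the new
 * function takes effect immediately.
 */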
static __rte_used inline void
pick_tx_func(struct rte_eth_dev *eth_dev,
	     const eth_tx_burst_t tx_burst[NIX_TX_OFFLOAD_MAX])
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	/* [SEC] [TSP] [TSO] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */
	eth_dev->tx_pkt_burst =
		tx_burst[dev->tx_offload_flags & (NIX_TX_OFFLOAD_MAX - 1)];

	if (eth_dev->data->dev_started)
		rte_eth_fp_ops[eth_dev->data->port_id].tx_pkt_burst =
			eth_dev->tx_pkt_burst;
}

#if defined(RTE_ARCH_ARM64)
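/* Report the number of used SQ entries for a Tx queue, derived from the
 * flow-control memory and the SQEs-per-SQB shift.
 */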
static int
cn10k_nix_tx_queue_count(void *tx_queue)
{
	struct cn10k_eth_txq *txq = (struct cn10k_eth_txq *)tx_queue;

	return cnxk_nix_tx_queue_count(txq->fc_mem, txq->sqes_per_sqb_log2);
}

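/* As above, but additionally accounts for CPT (crypto) flow-control state
 * when security offload is in use.
 */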
static int
cn10k_nix_tx_queue_sec_count(void *tx_queue)
{
	struct cn10k_eth_txq *txq = (struct cn10k_eth_txq *)tx_queue;

	return cnxk_nix_tx_queue_sec_count(txq->fc_mem, txq->sqes_per_sqb_log2, txq->cpt_fc);
}

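/* Template-function mode: pick a burst function specialized at compile
 * time for the exact offload-flag combination. Compiled out when
 * CNXK_DIS_TMPLT_FUNC is defined.
 */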
static void
cn10k_eth_set_tx_tmplt_func(struct rte_eth_dev *eth_dev)
{
#if !defined(CNXK_DIS_TMPLT_FUNC)
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

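	/* Each table below is generated by expanding NIX_TX_FASTPATH_MODES
	 * with a local T() macro that emits one designated initializer per
	 * mode. As a purely illustrative (hypothetical) example, an entry
	 * such as T(no_offload, sz, 0x0) would expand to:
	 *
	 *	[0x0] = cn10k_nix_xmit_pkts_no_offload,
	 */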
	const eth_tx_burst_t nix_eth_tx_burst[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)                                                     \
	[flags] = cn10k_nix_xmit_pkts_##name,

		NIX_TX_FASTPATH_MODES
#undef T
	};

	const eth_tx_burst_t nix_eth_tx_burst_mseg[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)                                                     \
	[flags] = cn10k_nix_xmit_pkts_mseg_##name,

		NIX_TX_FASTPATH_MODES
#undef T
	};

	const eth_tx_burst_t nix_eth_tx_vec_burst[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)                                                     \
	[flags] = cn10k_nix_xmit_pkts_vec_##name,

		NIX_TX_FASTPATH_MODES
#undef T
	};

	const eth_tx_burst_t nix_eth_tx_vec_burst_mseg[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)                                                     \
	[flags] = cn10k_nix_xmit_pkts_vec_mseg_##name,

		NIX_TX_FASTPATH_MODES
#undef T
	};

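	/* Scalar path when vector Tx is disabled or Tx marking is enabled;
	 * vector path otherwise. The mseg variants are picked when the
	 * multi-segment offload is requested.
	 */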
	if (dev->scalar_ena || dev->tx_mark) {
		pick_tx_func(eth_dev, nix_eth_tx_burst);
		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
	} else {
		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
			pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
	}
#else
	RTE_SET_USED(eth_dev);
#endif
}

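/* Block-function mode: a single all-offload burst function reads the
 * offload flags from each queue at runtime instead of being specialized
 * per flag combination. Compiled in only when CNXK_DIS_TMPLT_FUNC is
 * defined.
 */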
static void
cn10k_eth_set_tx_blk_func(struct rte_eth_dev *eth_dev)
{
#if defined(CNXK_DIS_TMPLT_FUNC)
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cn10k_eth_txq *txq;
	int i;

	/* Propagate the device-level offload flags to each queue so the
	 * runtime all-offload function can consult them per burst.
	 */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		txq = (struct cn10k_eth_txq *)eth_dev->data->tx_queues[i];
		txq->tx_offload_flags = dev->tx_offload_flags;
	}

	if (dev->scalar_ena || dev->tx_mark)
		eth_dev->tx_pkt_burst = cn10k_nix_xmit_pkts_all_offload;
	else
		eth_dev->tx_pkt_burst = cn10k_nix_xmit_pkts_vec_all_offload;

	if (eth_dev->data->dev_started)
		rte_eth_fp_ops[eth_dev->data->port_id].tx_pkt_burst = eth_dev->tx_pkt_burst;
#else
	RTE_SET_USED(eth_dev);
#endif
}
#endif

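/* Entry point: select the Tx burst and Tx queue-count handlers for this
 * port based on the configured offloads. Fast-path implementations are
 * only available on ARM64 builds.
 */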
void
cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
{
#if defined(RTE_ARCH_ARM64)
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	cn10k_eth_set_tx_blk_func(eth_dev);
	cn10k_eth_set_tx_tmplt_func(eth_dev);

	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
		eth_dev->tx_queue_count = cn10k_nix_tx_queue_sec_count;
	else
		eth_dev->tx_queue_count = cn10k_nix_tx_queue_count;

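	/* Release fence: make sure the handler-pointer stores above are
	 * visible to other cores before any later store that publishes the
	 * port as ready.
	 */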
	rte_atomic_thread_fence(rte_memory_order_release);
#else
	RTE_SET_USED(eth_dev);
#endif
}