xref: /dpdk/drivers/net/cnxk/cn9k_rx_select.c (revision eabbac98af1345157e07c431e18543296c37c355)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn9k_ethdev.h"
#include "cn9k_rx.h"

static __rte_used void
pick_rx_func(struct rte_eth_dev *eth_dev,
	     const eth_rx_burst_t rx_burst[NIX_RX_OFFLOAD_MAX])
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	/* Offload flag bits used as the table index:
	 * [TSP] [MARK] [VLAN] [CKSUM] [PTYPE] [RSS]
	 */
	eth_dev->rx_pkt_burst =
		rx_burst[dev->rx_offload_flags & (NIX_RX_OFFLOAD_MAX - 1)];

	/* If the port is already started, also patch the fast-path ops
	 * table so the new burst function takes effect immediately.
	 */
	if (eth_dev->data->dev_started)
		rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst =
			eth_dev->rx_pkt_burst;
}

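/*
 * Illustration (flag names as defined in cn9k_rx.h; the combination is an
 * assumed example): the lookup index is just the low offload-flag bits,
 * so a port with RSS and checksum offloads enabled would resolve to
 * rx_burst[NIX_RX_OFFLOAD_RSS_F | NIX_RX_OFFLOAD_CHECKSUM_F], i.e. the
 * burst function compiled for exactly that offload combination.
 */
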
#if defined(RTE_ARCH_ARM64)
static void
cn9k_eth_set_rx_tmplt_func(struct rte_eth_dev *eth_dev)
{
#if !defined(CNXK_DIS_TMPLT_FUNC)
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	const eth_rx_burst_t nix_eth_rx_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_nix_recv_pkts_##name,

		NIX_RX_FASTPATH_MODES
#undef R
	};

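	/*
	 * Sketch (entry names are illustrative, not taken from the source):
	 * NIX_RX_FASTPATH_MODES is an X-macro list of R(name, flags)
	 * entries, so with the R definition above each table expands to
	 * designated initializers along the lines of:
	 *
	 *   [NIX_RX_OFFLOAD_RSS_F] = cn9k_nix_recv_pkts_rss,
	 *   [NIX_RX_OFFLOAD_RSS_F | NIX_RX_OFFLOAD_PTYPE_F] = ...,
	 *
	 * one specialized burst function per offload combination. The three
	 * tables below repeat the pattern for the multi-segment (mseg),
	 * vector, and vector-mseg variants.
	 */
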
	const eth_rx_burst_t nix_eth_rx_burst_mseg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_nix_recv_pkts_mseg_##name,

		NIX_RX_FASTPATH_MODES
#undef R
	};

	const eth_rx_burst_t nix_eth_rx_vec_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_nix_recv_pkts_vec_##name,

		NIX_RX_FASTPATH_MODES
#undef R
	};

	const eth_rx_burst_t nix_eth_rx_vec_burst_mseg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_nix_recv_pkts_vec_mseg_##name,

		NIX_RX_FASTPATH_MODES
#undef R
	};

	/* Keep the multi-segment, no-offload variant for the teardown sequence */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		dev->rx_pkt_burst_no_offload = nix_eth_rx_burst_mseg[0];

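	/*
	 * Note (assumption: scalar_ena is set from the driver's scalar
	 * devarg): the scalar path is used when explicitly requested,
	 * otherwise the vector path; SCATTER additionally selects the
	 * multi-segment variants.
	 */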
	if (dev->scalar_ena) {
		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
			return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
		return pick_rx_func(eth_dev, nix_eth_rx_burst);
	}

	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
		return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
	return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
#else
	RTE_SET_USED(eth_dev);
#endif
}

static void
cn9k_eth_set_rx_blk_func(struct rte_eth_dev *eth_dev)
{
#if defined(CNXK_DIS_TMPLT_FUNC)
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	/* Keep the all-offload variant for the teardown sequence */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		dev->rx_pkt_burst_no_offload = cn9k_nix_recv_pkts_all_offload;

	/* Scalar or vector all-offload function; timestamp offload needs
	 * the dedicated _tst variant.
	 */
	if (dev->scalar_ena) {
		eth_dev->rx_pkt_burst = cn9k_nix_recv_pkts_all_offload;
		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
			eth_dev->rx_pkt_burst = cn9k_nix_recv_pkts_all_offload_tst;
	} else {
		eth_dev->rx_pkt_burst = cn9k_nix_recv_pkts_vec_all_offload;
		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
			eth_dev->rx_pkt_burst = cn9k_nix_recv_pkts_vec_all_offload_tst;
	}

	if (eth_dev->data->dev_started)
		rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst = eth_dev->rx_pkt_burst;
#else
	RTE_SET_USED(eth_dev);
#endif
}
#endif
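
/*
 * Note (assumption about the build option's intent): CNXK_DIS_TMPLT_FUNC
 * chooses between the two strategies above. With template functions
 * enabled (the default), every offload combination gets its own
 * specialized burst function, selected once at setup time; with them
 * disabled, a few "all offload" functions branch on the offload flags at
 * runtime, trading some per-packet cost for a smaller binary and faster
 * builds.
 */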

void
cn9k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
{
#if defined(RTE_ARCH_ARM64)
	cn9k_eth_set_rx_blk_func(eth_dev);
	cn9k_eth_set_rx_tmplt_func(eth_dev);

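	/* Release fence: the likely intent is to make the burst-function
	 * updates above visible to other lcores before any later store
	 * that publishes the port as ready.
	 */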
	rte_atomic_thread_fence(rte_memory_order_release);
#else
	RTE_SET_USED(eth_dev);
#endif
}