/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn10k_ethdev.h"
#include "cn10k_rx.h"

87d9b1d44SJerin Jacob static __rte_used void
pick_rx_func(struct rte_eth_dev * eth_dev,const eth_rx_burst_t rx_burst[NIX_RX_OFFLOAD_MAX])9be294749SPavan Nikhilesh pick_rx_func(struct rte_eth_dev *eth_dev,
10be294749SPavan Nikhilesh 	     const eth_rx_burst_t rx_burst[NIX_RX_OFFLOAD_MAX])
11be294749SPavan Nikhilesh {
12be294749SPavan Nikhilesh 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
13be294749SPavan Nikhilesh 
14be294749SPavan Nikhilesh 	/* [VLAN] [TSP] [MARK] [CKSUM] [PTYPE] [RSS] */
15be294749SPavan Nikhilesh 	eth_dev->rx_pkt_burst =
16be294749SPavan Nikhilesh 		rx_burst[dev->rx_offload_flags & (NIX_RX_OFFLOAD_MAX - 1)];
17be294749SPavan Nikhilesh 
183cebc8f3SSatheesh Paul 	if (eth_dev->data->dev_started)
193cebc8f3SSatheesh Paul 		rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst =
203cebc8f3SSatheesh Paul 			eth_dev->rx_pkt_burst;
213cebc8f3SSatheesh Paul 
22*eabbac98SPavan Nikhilesh 	rte_atomic_thread_fence(rte_memory_order_release);
23be294749SPavan Nikhilesh }
24be294749SPavan Nikhilesh 
257752f140SRahul Bhansali static uint16_t __rte_noinline __rte_hot __rte_unused
cn10k_nix_flush_rx(void * rx_queue,struct rte_mbuf ** rx_pkts,uint16_t pkts)267752f140SRahul Bhansali cn10k_nix_flush_rx(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)
277752f140SRahul Bhansali {
287752f140SRahul Bhansali 	const uint16_t flags = NIX_RX_MULTI_SEG_F | NIX_RX_REAS_F | NIX_RX_OFFLOAD_SECURITY_F;
297752f140SRahul Bhansali 	return cn10k_nix_flush_recv_pkts(rx_queue, rx_pkts, pkts, flags);
307752f140SRahul Bhansali }
317752f140SRahul Bhansali 
327d9b1d44SJerin Jacob #if defined(RTE_ARCH_ARM64)
33*eabbac98SPavan Nikhilesh static void
cn10k_eth_set_rx_tmplt_func(struct rte_eth_dev * eth_dev)34*eabbac98SPavan Nikhilesh cn10k_eth_set_rx_tmplt_func(struct rte_eth_dev *eth_dev)
35*eabbac98SPavan Nikhilesh {
36*eabbac98SPavan Nikhilesh #if !defined(CNXK_DIS_TMPLT_FUNC)
37be294749SPavan Nikhilesh 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
38be294749SPavan Nikhilesh 
39be294749SPavan Nikhilesh 	const eth_rx_burst_t nix_eth_rx_burst[NIX_RX_OFFLOAD_MAX] = {
40be294749SPavan Nikhilesh #define R(name, flags)[flags] = cn10k_nix_recv_pkts_##name,
41be294749SPavan Nikhilesh 
42be294749SPavan Nikhilesh 		NIX_RX_FASTPATH_MODES
43be294749SPavan Nikhilesh #undef R
44be294749SPavan Nikhilesh 	};
45be294749SPavan Nikhilesh 
46be294749SPavan Nikhilesh 	const eth_rx_burst_t nix_eth_rx_burst_mseg[NIX_RX_OFFLOAD_MAX] = {
47be294749SPavan Nikhilesh #define R(name, flags)[flags] = cn10k_nix_recv_pkts_mseg_##name,
48be294749SPavan Nikhilesh 
49be294749SPavan Nikhilesh 		NIX_RX_FASTPATH_MODES
50be294749SPavan Nikhilesh #undef R
51be294749SPavan Nikhilesh 	};
52be294749SPavan Nikhilesh 
53c062f572SVidya Sagar Velumuri 	const eth_rx_burst_t nix_eth_rx_burst_reas[NIX_RX_OFFLOAD_MAX] = {
54c062f572SVidya Sagar Velumuri #define R(name, flags)[flags] = cn10k_nix_recv_pkts_reas_##name,
55c062f572SVidya Sagar Velumuri 		NIX_RX_FASTPATH_MODES
56c062f572SVidya Sagar Velumuri #undef R
57c062f572SVidya Sagar Velumuri 	};
58c062f572SVidya Sagar Velumuri 
59c062f572SVidya Sagar Velumuri 	const eth_rx_burst_t nix_eth_rx_burst_mseg_reas[NIX_RX_OFFLOAD_MAX] = {
60c062f572SVidya Sagar Velumuri #define R(name, flags)[flags] = cn10k_nix_recv_pkts_reas_mseg_##name,
61c062f572SVidya Sagar Velumuri 		NIX_RX_FASTPATH_MODES
62c062f572SVidya Sagar Velumuri #undef R
63c062f572SVidya Sagar Velumuri 	};
64c062f572SVidya Sagar Velumuri 
65be294749SPavan Nikhilesh 	const eth_rx_burst_t nix_eth_rx_vec_burst[NIX_RX_OFFLOAD_MAX] = {
66be294749SPavan Nikhilesh #define R(name, flags)[flags] = cn10k_nix_recv_pkts_vec_##name,
67be294749SPavan Nikhilesh 
68be294749SPavan Nikhilesh 		NIX_RX_FASTPATH_MODES
69be294749SPavan Nikhilesh #undef R
70be294749SPavan Nikhilesh 	};
71be294749SPavan Nikhilesh 
72be294749SPavan Nikhilesh 	const eth_rx_burst_t nix_eth_rx_vec_burst_mseg[NIX_RX_OFFLOAD_MAX] = {
73be294749SPavan Nikhilesh #define R(name, flags)[flags] = cn10k_nix_recv_pkts_vec_mseg_##name,
74be294749SPavan Nikhilesh 
75be294749SPavan Nikhilesh 		NIX_RX_FASTPATH_MODES
76be294749SPavan Nikhilesh #undef R
77be294749SPavan Nikhilesh 	};
78be294749SPavan Nikhilesh 
79c062f572SVidya Sagar Velumuri 	const eth_rx_burst_t nix_eth_rx_vec_burst_reas[NIX_RX_OFFLOAD_MAX] = {
80c062f572SVidya Sagar Velumuri #define R(name, flags)[flags] = cn10k_nix_recv_pkts_reas_vec_##name,
81c062f572SVidya Sagar Velumuri 		NIX_RX_FASTPATH_MODES
82c062f572SVidya Sagar Velumuri #undef R
83c062f572SVidya Sagar Velumuri 	};
84c062f572SVidya Sagar Velumuri 
85c062f572SVidya Sagar Velumuri 	const eth_rx_burst_t nix_eth_rx_vec_burst_mseg_reas[NIX_RX_OFFLOAD_MAX] = {
86c062f572SVidya Sagar Velumuri #define R(name, flags)[flags] = cn10k_nix_recv_pkts_reas_vec_mseg_##name,
87c062f572SVidya Sagar Velumuri 		NIX_RX_FASTPATH_MODES
88c062f572SVidya Sagar Velumuri #undef R
89c062f572SVidya Sagar Velumuri 	};
90c062f572SVidya Sagar Velumuri 
917ea18718SRahul Bhansali 	/* Copy multi seg version with security for tear down sequence */
92be294749SPavan Nikhilesh 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
937752f140SRahul Bhansali 		dev->rx_pkt_burst_no_offload = cn10k_nix_flush_rx;
94be294749SPavan Nikhilesh 
95be294749SPavan Nikhilesh 	if (dev->scalar_ena) {
96c062f572SVidya Sagar Velumuri 		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
97c062f572SVidya Sagar Velumuri 			if (dev->rx_offload_flags & NIX_RX_REAS_F)
98c062f572SVidya Sagar Velumuri 				return pick_rx_func(eth_dev,
99c062f572SVidya Sagar Velumuri 						nix_eth_rx_burst_mseg_reas);
100c062f572SVidya Sagar Velumuri 			else
101c062f572SVidya Sagar Velumuri 				return pick_rx_func(eth_dev,
102c062f572SVidya Sagar Velumuri 						nix_eth_rx_burst_mseg);
103c062f572SVidya Sagar Velumuri 		}
104c062f572SVidya Sagar Velumuri 		if (dev->rx_offload_flags & NIX_RX_REAS_F)
105c062f572SVidya Sagar Velumuri 			return pick_rx_func(eth_dev, nix_eth_rx_burst_reas);
106c062f572SVidya Sagar Velumuri 		else
107be294749SPavan Nikhilesh 			return pick_rx_func(eth_dev, nix_eth_rx_burst);
108be294749SPavan Nikhilesh 	}
109be294749SPavan Nikhilesh 
110c062f572SVidya Sagar Velumuri 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
111c062f572SVidya Sagar Velumuri 		if (dev->rx_offload_flags & NIX_RX_REAS_F)
112c062f572SVidya Sagar Velumuri 			return pick_rx_func(eth_dev,
113c062f572SVidya Sagar Velumuri 					nix_eth_rx_vec_burst_mseg_reas);
114c062f572SVidya Sagar Velumuri 		else
115be294749SPavan Nikhilesh 			return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
116c062f572SVidya Sagar Velumuri 	}
117c062f572SVidya Sagar Velumuri 
118c062f572SVidya Sagar Velumuri 	if (dev->rx_offload_flags & NIX_RX_REAS_F)
119c062f572SVidya Sagar Velumuri 		return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_reas);
120c062f572SVidya Sagar Velumuri 	else
121be294749SPavan Nikhilesh 		return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
1227d9b1d44SJerin Jacob #else
1237d9b1d44SJerin Jacob 	RTE_SET_USED(eth_dev);
1247d9b1d44SJerin Jacob #endif
125be294749SPavan Nikhilesh }
126*eabbac98SPavan Nikhilesh 
/* Install one of the pre-built "all offload" Rx burst functions when the
 * template fast-path variants are compiled out (CNXK_DIS_TMPLT_FUNC).
 * Selection is only scalar-vs-vector and timestamp-vs-not; the chosen
 * function handles every offload combination at runtime.
 *
 * NOTE(review): no release fence here — presumably the caller
 * (cn10k_eth_set_rx_function) issues it after both selectors run; confirm
 * against callers before reusing this elsewhere.
 */
static void
cn10k_eth_set_rx_blk_func(struct rte_eth_dev *eth_dev)
{
#if defined(CNXK_DIS_TMPLT_FUNC)
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	eth_rx_burst_t burst;

	/* Copy multi seg version with security for tear down sequence */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		dev->rx_pkt_burst_no_offload = cn10k_nix_flush_rx;

	if (dev->scalar_ena)
		burst = (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ?
				cn10k_nix_recv_pkts_all_offload_tst :
				cn10k_nix_recv_pkts_all_offload;
	else
		burst = (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ?
				cn10k_nix_recv_pkts_vec_all_offload_tst :
				cn10k_nix_recv_pkts_vec_all_offload;

	eth_dev->rx_pkt_burst = burst;

	/* A started port must also see the update in rte_eth_fp_ops. */
	if (eth_dev->data->dev_started)
		rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst = burst;
#else
	RTE_SET_USED(eth_dev);
#endif
}
153*eabbac98SPavan Nikhilesh #endif
154*eabbac98SPavan Nikhilesh 
155*eabbac98SPavan Nikhilesh void
cn10k_eth_set_rx_function(struct rte_eth_dev * eth_dev)156*eabbac98SPavan Nikhilesh cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
157*eabbac98SPavan Nikhilesh {
158*eabbac98SPavan Nikhilesh #if defined(RTE_ARCH_ARM64)
159*eabbac98SPavan Nikhilesh 	cn10k_eth_set_rx_blk_func(eth_dev);
160*eabbac98SPavan Nikhilesh 	cn10k_eth_set_rx_tmplt_func(eth_dev);
161*eabbac98SPavan Nikhilesh 
162*eabbac98SPavan Nikhilesh 	rte_atomic_thread_fence(rte_memory_order_release);
163*eabbac98SPavan Nikhilesh #else
164*eabbac98SPavan Nikhilesh 	RTE_SET_USED(eth_dev);
165*eabbac98SPavan Nikhilesh #endif
166*eabbac98SPavan Nikhilesh }
167