/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn10k_ethdev.h"
#include "cn10k_rx.h"

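/* Select a Rx burst function from a template table. The low bits of
 * dev->rx_offload_flags ([VLAN] [TSP] [MARK] [CKSUM] [PTYPE] [RSS])
 * index directly into the table, so each offload combination maps to
 * a burst function specialized for exactly those offloads. If the
 * device is already started, the fast-path ops table is patched as
 * well, and a release fence publishes the update to datapath threads.
 */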
static __rte_used void
pick_rx_func(struct rte_eth_dev *eth_dev,
             const eth_rx_burst_t rx_burst[NIX_RX_OFFLOAD_MAX])
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        /* [VLAN] [TSP] [MARK] [CKSUM] [PTYPE] [RSS] */
        eth_dev->rx_pkt_burst =
                rx_burst[dev->rx_offload_flags & (NIX_RX_OFFLOAD_MAX - 1)];

        if (eth_dev->data->dev_started)
                rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst =
                        eth_dev->rx_pkt_burst;

        rte_atomic_thread_fence(rte_memory_order_release);
}

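/* Rx burst used to drain the CQ during teardown. It is built with the
 * multi-segment, reassembly and security flags so that it can free any
 * packet the hardware may still hand back, regardless of the offloads
 * the port was running with.
 */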
static uint16_t __rte_noinline __rte_hot __rte_unused
cn10k_nix_flush_rx(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)
{
        const uint16_t flags = NIX_RX_MULTI_SEG_F | NIX_RX_REAS_F | NIX_RX_OFFLOAD_SECURITY_F;
        return cn10k_nix_flush_recv_pkts(rx_queue, rx_pkts, pkts, flags);
}

#if defined(RTE_ARCH_ARM64)
static void
cn10k_eth_set_rx_tmplt_func(struct rte_eth_dev *eth_dev)
{
#if !defined(CNXK_DIS_TMPLT_FUNC)
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

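        /* Eight template tables cover every combination of scalar vs.
         * vector, single vs. multi-segment (mseg) and inline IPsec
         * reassembly (reas) Rx paths. Each table is filled by expanding
         * NIX_RX_FASTPATH_MODES through the R() macro as designated
         * initializers; illustratively, an entry R(rss, RSS_F) would
         * expand to
         *
         *         [RSS_F] = cn10k_nix_recv_pkts_rss,
         *
         * so the offload flag value itself is the table index.
         */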
        const eth_rx_burst_t nix_eth_rx_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_nix_recv_pkts_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const eth_rx_burst_t nix_eth_rx_burst_mseg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_nix_recv_pkts_mseg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const eth_rx_burst_t nix_eth_rx_burst_reas[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_nix_recv_pkts_reas_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const eth_rx_burst_t nix_eth_rx_burst_mseg_reas[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_nix_recv_pkts_reas_mseg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const eth_rx_burst_t nix_eth_rx_vec_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_nix_recv_pkts_vec_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const eth_rx_burst_t nix_eth_rx_vec_burst_mseg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_nix_recv_pkts_vec_mseg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const eth_rx_burst_t nix_eth_rx_vec_burst_reas[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_nix_recv_pkts_reas_vec_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const eth_rx_burst_t nix_eth_rx_vec_burst_mseg_reas[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_nix_recv_pkts_reas_vec_mseg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        /* Copy multi seg version with security for tear down sequence */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                dev->rx_pkt_burst_no_offload = cn10k_nix_flush_rx;

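        /* Pick the burst function for the current configuration:
         * scalar_ena forces the scalar path, otherwise the vector path
         * is used; RTE_ETH_RX_OFFLOAD_SCATTER selects the multi-segment
         * variant and NIX_RX_REAS_F the reassembly variant.
         */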
        if (dev->scalar_ena) {
                if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
                        if (dev->rx_offload_flags & NIX_RX_REAS_F)
                                return pick_rx_func(eth_dev,
                                                    nix_eth_rx_burst_mseg_reas);
                        else
                                return pick_rx_func(eth_dev,
                                                    nix_eth_rx_burst_mseg);
                }
                if (dev->rx_offload_flags & NIX_RX_REAS_F)
                        return pick_rx_func(eth_dev, nix_eth_rx_burst_reas);
                else
                        return pick_rx_func(eth_dev, nix_eth_rx_burst);
        }

        if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
                if (dev->rx_offload_flags & NIX_RX_REAS_F)
                        return pick_rx_func(eth_dev,
                                            nix_eth_rx_vec_burst_mseg_reas);
                else
                        return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
        }

        if (dev->rx_offload_flags & NIX_RX_REAS_F)
                return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_reas);
        else
                return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
#else
        RTE_SET_USED(eth_dev);
#endif
}

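/* Non-template ("block") path, active when the template fast-path
 * functions are compiled out via CNXK_DIS_TMPLT_FUNC: a single
 * all-offload burst function (scalar or vector, with a separate
 * timestamp variant) handles every offload combination at run time.
 */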
static void
cn10k_eth_set_rx_blk_func(struct rte_eth_dev *eth_dev)
{
#if defined(CNXK_DIS_TMPLT_FUNC)
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        /* Copy multi seg version with security for tear down sequence */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                dev->rx_pkt_burst_no_offload = cn10k_nix_flush_rx;

        if (dev->scalar_ena) {
                eth_dev->rx_pkt_burst = cn10k_nix_recv_pkts_all_offload;
                if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
                        eth_dev->rx_pkt_burst = cn10k_nix_recv_pkts_all_offload_tst;
        } else {
                eth_dev->rx_pkt_burst = cn10k_nix_recv_pkts_vec_all_offload;
                if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
                        eth_dev->rx_pkt_burst = cn10k_nix_recv_pkts_vec_all_offload_tst;
        }

        if (eth_dev->data->dev_started)
                rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst = eth_dev->rx_pkt_burst;
#else
        RTE_SET_USED(eth_dev);
#endif
}
#endif

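/* Entry point for Rx burst function selection. Exactly one of the two
 * helpers above is a no-op depending on CNXK_DIS_TMPLT_FUNC, so this
 * installs either the template or the all-offload burst function; the
 * release fence then publishes the choice to datapath threads.
 * Non-ARM64 builds have no fast path to select.
 */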
void
cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
{
#if defined(RTE_ARCH_ARM64)
        cn10k_eth_set_rx_blk_func(eth_dev);
        cn10k_eth_set_rx_tmplt_func(eth_dev);

        rte_atomic_thread_fence(rte_memory_order_release);
#else
        RTE_SET_USED(eth_dev);
#endif
}