xref: /dpdk/drivers/net/dpaa2/dpaa2_ethdev.h (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2022 NXP
5  *
6  */
7 
8 #ifndef _DPAA2_ETHDEV_H
9 #define _DPAA2_ETHDEV_H
10 
11 #include <rte_compat.h>
12 #include <rte_event_eth_rx_adapter.h>
13 #include <rte_pmd_dpaa2.h>
14 
15 #include <bus_fslmc_driver.h>
16 #include <dpaa2_hw_pvt.h>
17 #include "dpaa2_tm.h"
18 
19 #include <mc/fsl_dpni.h>
20 #include <mc/fsl_mc_sys.h>
21 
/* Smallest Rx buffer size (bytes) the driver will accept from a mempool */
#define DPAA2_MIN_RX_BUF_SIZE 512
/* Maximum Rx frame length supported by the WRIOP hardware block */
#define DPAA2_MAX_RX_PKT_LEN  10240 /*WRIOP support*/
#define NET_DPAA2_PMD_DRIVER_NAME net_dpaa2

/* Per-DPNI object limits used to size driver arrays */
#define MAX_TCS			DPNI_MAX_TC
#define MAX_RX_QUEUES		128
#define MAX_TX_QUEUES		16
#define MAX_DPNI		8
#define DPAA2_MAX_CHANNELS	16

/* Default number of Rx descriptors per queue */
#define DPAA2_RX_DEFAULT_NBDESC 512

/* Max frame length for the standard Ethernet MTU (header + CRC + VLAN tag) */
#define DPAA2_ETH_MAX_LEN (RTE_ETHER_MTU + \
			   RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
			   VLAN_TAG_SIZE)

/* Default TC to be used for congestion, distribution etc. configuration. */
#define DPAA2_DEF_TC		0
40 
/* Threshold for a Tx queue to *Enter* Congestion state.
 * (units: frames — TODO confirm against dpni congestion config usage)
 */
#define CONG_ENTER_TX_THRESHOLD   512

/* Threshold for a queue to *Exit* Congestion state.
 * Kept below the enter threshold to provide hysteresis.
 */
#define CONG_EXIT_TX_THRESHOLD    480

/* Number of times Tx is retried while the queue is congested
 * before giving up — NOTE(review): confirm against the Tx burst path.
 */
#define CONG_RETRY_COUNT 18000

/* RX queue tail drop threshold
 * currently considering 64 KB packets
 */
#define CONG_THRESHOLD_RX_BYTES_Q  (64 * 1024)
/* Overhead Accounting Length used with the Rx byte tail-drop threshold */
#define CONG_RX_OAL	128

/* Size of the input SMMU mapped memory required by MC */
#define DIST_PARAM_IOVA_SIZE 256
59 
/* Driver configuration flags, OR-ed into dpaa2_dev_priv::flags */

/* Disable TX Congestion control support
 * default is enabled
 */
#define DPAA2_TX_CGR_OFF	0x01

/* Disable RX tail drop, default is enable */
#define DPAA2_RX_TAILDROP_OFF	0x04
/* Tx confirmation enabled
 * NOTE(review): 0x06 overlaps DPAA2_RX_TAILDROP_OFF (0x04) if tested with
 * a bitwise AND — confirm whether this is an intentional combined value.
 */
#define DPAA2_TX_CONF_ENABLE	0x06

/* DPDMUX index for DPMAC */
#define DPAA2_DPDMUX_DPMAC_IDX 0

/* HW loopback the egress traffic to self ingress */
#define DPAA2_TX_MAC_LOOPBACK_MODE 0x20

/* Loopback at the SerDes level — TODO confirm exact loop point */
#define DPAA2_TX_SERDES_LOOPBACK_MODE 0x40

/* Loopback at the DPNI object level — TODO confirm exact loop point */
#define DPAA2_TX_DPNI_LOOPBACK_MODE 0x80

/* Mask covering all Tx loopback mode flags */
#define DPAA2_TX_LOOPBACK_MODE \
	(DPAA2_TX_MAC_LOOPBACK_MODE | \
	DPAA2_TX_SERDES_LOOPBACK_MODE | \
	DPAA2_TX_DPNI_LOOPBACK_MODE)
84 
/* All RSS hash protocol types this PMD can distribute on */
#define DPAA2_RSS_OFFLOAD_ALL ( \
	RTE_ETH_RSS_L2_PAYLOAD | \
	RTE_ETH_RSS_IP | \
	RTE_ETH_RSS_UDP | \
	RTE_ETH_RSS_TCP | \
	RTE_ETH_RSS_SCTP | \
	RTE_ETH_RSS_MPLS | \
	RTE_ETH_RSS_C_VLAN | \
	RTE_ETH_RSS_S_VLAN | \
	RTE_ETH_RSS_ESP | \
	RTE_ETH_RSS_AH | \
	RTE_ETH_RSS_PPPOE)
97 
/* LX2 FRC Parsed values (Little Endian).
 * These match the parser result carried in the frame annotation; the Rx
 * path maps them to mbuf packet types. Layer-4 codes are OR-ed onto the
 * layer-3 base value.
 */
#define DPAA2_PKT_TYPE_ETHER		0x0060
#define DPAA2_PKT_TYPE_IPV4		0x0000
#define DPAA2_PKT_TYPE_IPV6		0x0020
#define DPAA2_PKT_TYPE_IPV4_EXT \
			(0x0001 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_EXT \
			(0x0001 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_TCP \
			(0x000e | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_TCP \
			(0x000e | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_UDP \
			(0x0010 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_UDP \
			(0x0010 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_SCTP	\
			(0x000f | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_SCTP	\
			(0x000f | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_ICMP \
			(0x0003 | DPAA2_PKT_TYPE_IPV4_EXT)
#define DPAA2_PKT_TYPE_IPV6_ICMP \
			(0x0003 | DPAA2_PKT_TYPE_IPV6_EXT)
#define DPAA2_PKT_TYPE_VLAN_1		0x0160
#define DPAA2_PKT_TYPE_VLAN_2		0x0260
124 
/* Global pool used by driver for SG list TX */
extern struct rte_mempool *dpaa2_tx_sg_pool;
/* Maximum SG segments supported in one Tx frame */
#define DPAA2_MAX_SGS 128
/* SG pool size (number of elements) */
#define DPAA2_POOL_SIZE 2048
/* SG pool per-lcore cache size */
#define DPAA2_POOL_CACHE_SIZE 256
/* Structure to free external and indirect buffers.
 * Used by the Tx path to defer freeing of segments until after their
 * parent packet has been handled.
 */
struct sw_buf_free {
	/* To which packet this segment belongs */
	uint16_t pkt_id;
	/* The actual segment */
	struct rte_mbuf *seg;
};
142 
/* Per-port enable of timestamping in mbuf (indexed by port id —
 * TODO confirm index against definition site)
 */
extern bool dpaa2_enable_ts[];
/* Dynamic mbuf flag / field offset registered for Rx timestamps */
extern uint64_t dpaa2_timestamp_rx_dynflag;
extern int dpaa2_timestamp_dynfield_offset;

/* Bits flagging that the QoS / Flow-Steering classification tables need
 * to be reprogrammed in hardware.
 */
#define DPAA2_QOS_TABLE_RECONFIGURE	1
#define DPAA2_FS_TABLE_RECONFIGURE	2

/* Bits flagging that IP-address extraction is part of the table key */
#define DPAA2_QOS_TABLE_IPADDR_EXTRACT 4
#define DPAA2_FS_TABLE_IPADDR_EXTRACT 8

/* Maximum classification key size (bytes) for a single extract */
#define DPAA2_FLOW_MAX_KEY_SIZE		16

/* Externally defined */
extern const struct rte_flow_ops dpaa2_flow_ops;

extern const struct rte_tm_ops dpaa2_tm_ops;

/* When set, errored frames are delivered to a dedicated error queue
 * instead of being dropped.
 */
extern bool dpaa2_enable_err_queue;

/* Sentinel meaning "no IP address field present in the extract key" */
#define IP_ADDRESS_OFFSET_INVALID (-1)
164 
/* Byte layout of a classification key built from DPKG extracts:
 * per-extract offset and size within the key, plus special offsets
 * for IP source/destination addresses (IP_ADDRESS_OFFSET_INVALID when
 * the field is not part of the key).
 */
struct dpaa2_key_info {
	/* Offset of each extract's field within the assembled key */
	uint8_t key_offset[DPKG_MAX_NUM_OF_EXTRACTS];
	/* Size (bytes) of each extract's field */
	uint8_t key_size[DPKG_MAX_NUM_OF_EXTRACTS];
	/* Special for IP address. */
	int ipv4_src_offset;
	int ipv4_dst_offset;
	int ipv6_src_offset;
	int ipv6_dst_offset;
	/* Total assembled key size in bytes */
	uint8_t key_total_size;
};
175 
/* Pairs the MC key-generation profile with the driver's view of the
 * resulting key layout.
 */
struct dpaa2_key_extract {
	struct dpkg_profile_cfg dpkg;	/* Extract profile passed to MC */
	struct dpaa2_key_info key_info;	/* Derived key byte layout */
};
180 
/* Classification extract state: one key extract for the QoS (TC selection)
 * table and one per TC for the Flow-Steering tables, with the IOVAs of the
 * serialized extract parameters handed to MC.
 */
struct extract_s {
	struct dpaa2_key_extract qos_key_extract;
	struct dpaa2_key_extract tc_key_extract[MAX_TCS];
	/* IOVA of the serialized QoS extract param buffer given to MC */
	uint64_t qos_extract_param;
	/* IOVA of each TC's serialized extract param buffer */
	uint64_t tc_extract_param[MAX_TCS];
};
187 
/* Per-port (DPNI) private data, attached to the rte_eth_dev */
struct dpaa2_dev_priv {
	void *hw;		/* MC portal handle — presumably fsl_mc_io; confirm */
	int32_t hw_id;		/* DPNI object id */
	int32_t qdid;		/* Queuing destination id used on Tx */
	uint16_t token;		/* MC command token for this DPNI */
	uint8_t nb_tx_queues;	/* Number of configured Tx queues */
	uint8_t nb_rx_queues;	/* Number of configured Rx queues */
	uint32_t options;	/* DPNI options — TODO confirm source (MC attr) */
	void *rx_vq[MAX_RX_QUEUES];	/* Rx queue contexts */
	void *tx_vq[MAX_TX_QUEUES];	/* Tx queue contexts */
	struct dpaa2_bp_list *bp_list; /**< Attached buffer pool list */
	/* Tx confirmation queue contexts, one per Tx queue per channel */
	void *tx_conf_vq[MAX_TX_QUEUES * DPAA2_MAX_CHANNELS];
	void *rx_err_vq;	/* Queue for errored Rx frames */
	uint8_t flags;		/* dpaa2 config flags (DPAA2_TX_CGR_OFF etc.) */
	uint8_t max_mac_filters;	/* HW limit on unicast MAC filters */
	uint8_t max_vlan_filters;	/* HW limit on VLAN filters */
	uint8_t num_rx_tc;	/* Number of Rx traffic classes */
	uint8_t num_tx_tc;	/* Number of Tx traffic classes */
	uint16_t qos_entries;	/* Capacity of the QoS table */
	uint16_t fs_entries;	/* Capacity of each FS table */
	uint8_t dist_queues;	/* Queues per TC for RSS distribution */
	uint8_t num_channels;	/* Number of channels in use */
	uint8_t en_ordered;	/* Ordered Tx scheduling enabled */
	uint8_t en_loose_ordered;	/* Loose ordering mode enabled */
	uint8_t max_cgs;	/* Number of congestion groups available */
	uint8_t cgid_in_use[MAX_RX_QUEUES];	/* CGID allocation bitmap per queue */

	struct extract_s extract;	/* Classification key extract state */

	uint16_t ss_offset;	/* Soft-sequence offset — TODO confirm semantics */
	uint64_t ss_iova;	/* IOVA of soft-sequence image */
	uint64_t ss_param_iova;	/* IOVA of soft-sequence parameters */
	/*stores timestamp of last received packet on dev*/
	uint64_t rx_timestamp;
	/*stores timestamp of last received tx confirmation packet on dev*/
	uint64_t tx_timestamp;
	/* stores pointer to next tx_conf queue that should be processed,
	 * it corresponds to last packet transmitted
	 */
	struct dpaa2_queue *next_tx_conf_queue;

	struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
	rte_spinlock_t lpbk_qp_lock;	/* Guards loopback queue-pair setup */

	uint8_t channel_inuse;	/* Next channel index to allocate */
	LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
	LIST_HEAD(nodes, dpaa2_tm_node) nodes;	/* TM hierarchy nodes */
	LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles;
};
237 
/* Translate an RTE_ETH_RSS_* bit set into a DPKG extract profile.
 * Returns 0 on success, negative on error.
 */
int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set,
				      struct dpkg_profile_cfg *kg_cfg);

/* Program RSS distribution for one TC from the requested hash field set */
int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
		uint64_t req_dist_set, int tc_index);

/* Remove flow distribution (RSS) configuration from a TC */
int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
			   uint8_t tc_index);

/* Attach a buffer pool list to the DPNI for Rx buffer supply */
int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
	struct fsl_mc_io *dpni, void *blist);
249 
/* Bind an Rx queue to a DPCON channel so its frames are delivered via the
 * event device (internal API used by the event Rx adapter).
 */
__rte_internal
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		struct dpaa2_dpcon_dev *dpcon,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

/* Detach an Rx queue from its event channel (internal API) */
__rte_internal
int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id);
259 
/* Rx burst function (non-prefetch variant) */
uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);

/* Rx burst used in HW loopback mode */
uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
				uint16_t nb_pkts);

/* Default Rx burst function with descriptor prefetching */
uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
			       uint16_t nb_pkts);
/* Convert a dequeued frame into an rte_event for parallel scheduling */
void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				      const struct qbman_fd *fd,
				      const struct qbman_result *dq,
				      struct dpaa2_queue *rxq,
				      struct rte_event *ev);
/* Convert a dequeued frame into an rte_event for atomic scheduling */
void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
				    const struct qbman_fd *fd,
				    const struct qbman_result *dq,
				    struct dpaa2_queue *rxq,
				    struct rte_event *ev);
/* Convert a dequeued frame into an rte_event for ordered scheduling */
void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				     const struct qbman_fd *fd,
				     const struct qbman_result *dq,
				     struct dpaa2_queue *rxq,
				     struct rte_event *ev);
/* Default Tx burst function */
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
/* Tx burst preserving packet order (ordered event scheduling) */
uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
			      uint16_t nb_pkts);
/* Ordered Tx across multiple Tx queues (internal API) */
__rte_internal
uint16_t dpaa2_dev_tx_multi_txq_ordered(void **queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts);

/* Release the buffer referenced by an enqueue-response entry */
void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci, struct dpaa2_queue *dpaa2_q);
/* Destroy all flow rules configured on the device */
void dpaa2_flow_clean(struct rte_eth_dev *dev);
/* Drain the Tx confirmation queue; returns frames processed —
 * NOTE(review): confirm return semantics at the definition.
 */
uint16_t dpaa2_dev_tx_conf(void *queue)  __rte_unused;
/* Return non-zero if the ethdev is driven by this dpaa2 PMD */
int dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev);
293 
/* IEEE1588/PTP timesync ethdev ops; all return 0 on success or a
 * negative errno-style value.
 */
int dpaa2_timesync_enable(struct rte_eth_dev *dev);
int dpaa2_timesync_disable(struct rte_eth_dev *dev);
int dpaa2_timesync_read_time(struct rte_eth_dev *dev,
					struct timespec *timestamp);
int dpaa2_timesync_write_time(struct rte_eth_dev *dev,
					const struct timespec *timestamp);
int dpaa2_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
int dpaa2_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
						struct timespec *timestamp,
						uint32_t flags __rte_unused);
int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);
306 
/* Configure the port for buffer-recycle (loopback) operation */
int dpaa2_dev_recycle_config(struct rte_eth_dev *eth_dev);
/* Undo recycle configuration */
int dpaa2_dev_recycle_deconfig(struct rte_eth_dev *eth_dev);
/* Set up one recycle queue pair; returns the loopback Tx/Rx queue
 * contexts via txq/rxq — note tx_lpbk has the Rx burst signature and
 * rx_lpbk the Tx burst signature (directions are swapped in loopback).
 */
int dpaa2_dev_recycle_qp_setup(struct rte_dpaa2_device *dpaa2_dev,
	uint16_t qidx, uint64_t cntx,
	eth_rx_burst_t tx_lpbk, eth_tx_burst_t rx_lpbk,
	struct dpaa2_queue **txq,
	struct dpaa2_queue **rxq);
314 
315 #endif /* _DPAA2_ETHDEV_H */
316