xref: /dpdk/drivers/net/dpaa2/dpaa2_ethdev.h (revision a41f593f1bce27cd94eae0e85a8085c592b14b30)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2021 NXP
5  *
6  */
7 
8 #ifndef _DPAA2_ETHDEV_H
9 #define _DPAA2_ETHDEV_H
10 
11 #include <rte_event_eth_rx_adapter.h>
12 #include <rte_pmd_dpaa2.h>
13 
14 #include <rte_fslmc.h>
15 #include <dpaa2_hw_pvt.h>
16 #include "dpaa2_tm.h"
17 
18 #include <mc/fsl_dpni.h>
19 #include <mc/fsl_mc_sys.h>
20 
21 #define DPAA2_MIN_RX_BUF_SIZE 512
22 #define DPAA2_MAX_RX_PKT_LEN  10240 /*WRIOP support*/
23 #define NET_DPAA2_PMD_DRIVER_NAME net_dpaa2
24 
25 #define MAX_TCS			DPNI_MAX_TC
26 #define MAX_RX_QUEUES		128
27 #define MAX_TX_QUEUES		16
28 #define MAX_DPNI		8
29 #define DPAA2_MAX_CHANNELS	16
30 
31 #define DPAA2_RX_DEFAULT_NBDESC 512
32 
33 #define DPAA2_ETH_MAX_LEN (RTE_ETHER_MTU + \
34 			   RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
35 			   VLAN_TAG_SIZE)
36 
37 /* Default TC to be used for congestion, distribution etc. configuration. */
38 #define DPAA2_DEF_TC		0
39 
40 /* Threshold for a Tx queue to *Enter* Congestion state.
41  */
42 #define CONG_ENTER_TX_THRESHOLD   512
43 
44 /* Threshold for a queue to *Exit* Congestion state.
45  */
46 #define CONG_EXIT_TX_THRESHOLD    480
47 
48 #define CONG_RETRY_COUNT 18000
49 
50 /* RX queue tail drop threshold
51  * currently considering 64 KB packets
52  */
53 #define CONG_THRESHOLD_RX_BYTES_Q  (64 * 1024)
54 #define CONG_RX_OAL	128
55 
56 /* Size of the input SMMU mapped memory required by MC */
57 #define DIST_PARAM_IOVA_SIZE 256
58 
/* Single-bit option flags — presumably stored in dpaa2_dev_priv.flags
 * ("dpaa2 config flags") and tested with '&', so each must occupy a
 * distinct bit to avoid aliasing another option.
 */

/* Enable TX Congestion control support
 * default is disable
 */
#define DPAA2_TX_CGR_OFF	0x01

/* Disable RX tail drop, default is enable */
#define DPAA2_RX_TAILDROP_OFF	0x04
/* Tx confirmation enabled.
 * Fixed from 0x06 to 0x08: 0x06 is bits 1|2, which overlaps
 * DPAA2_RX_TAILDROP_OFF (0x04) — setting Tx confirmation would also make
 * the RX-taildrop-off flag test true. 0x08 is the next free bit
 * (matches the upstream DPDK fix for this flag).
 */
#define DPAA2_TX_CONF_ENABLE	0x08

/* HW loopback the egress traffic to self ingress*/
#define DPAA2_TX_MAC_LOOPBACK_MODE 0x20

#define DPAA2_TX_SERDES_LOOPBACK_MODE 0x40

#define DPAA2_TX_DPNI_LOOPBACK_MODE 0x80

#define DPAA2_TX_LOOPBACK_MODE \
	(DPAA2_TX_MAC_LOOPBACK_MODE | \
	DPAA2_TX_SERDES_LOOPBACK_MODE | \
	DPAA2_TX_DPNI_LOOPBACK_MODE)
80 
81 #define DPAA2_RSS_OFFLOAD_ALL ( \
82 	RTE_ETH_RSS_L2_PAYLOAD | \
83 	RTE_ETH_RSS_IP | \
84 	RTE_ETH_RSS_UDP | \
85 	RTE_ETH_RSS_TCP | \
86 	RTE_ETH_RSS_SCTP | \
87 	RTE_ETH_RSS_MPLS | \
88 	RTE_ETH_RSS_C_VLAN | \
89 	RTE_ETH_RSS_S_VLAN | \
90 	RTE_ETH_RSS_ESP | \
91 	RTE_ETH_RSS_AH | \
92 	RTE_ETH_RSS_PPPOE)
93 
94 /* LX2 FRC Parsed values (Little Endian) */
95 #define DPAA2_PKT_TYPE_ETHER		0x0060
96 #define DPAA2_PKT_TYPE_IPV4		0x0000
97 #define DPAA2_PKT_TYPE_IPV6		0x0020
98 #define DPAA2_PKT_TYPE_IPV4_EXT \
99 			(0x0001 | DPAA2_PKT_TYPE_IPV4)
100 #define DPAA2_PKT_TYPE_IPV6_EXT \
101 			(0x0001 | DPAA2_PKT_TYPE_IPV6)
102 #define DPAA2_PKT_TYPE_IPV4_TCP \
103 			(0x000e | DPAA2_PKT_TYPE_IPV4)
104 #define DPAA2_PKT_TYPE_IPV6_TCP \
105 			(0x000e | DPAA2_PKT_TYPE_IPV6)
106 #define DPAA2_PKT_TYPE_IPV4_UDP \
107 			(0x0010 | DPAA2_PKT_TYPE_IPV4)
108 #define DPAA2_PKT_TYPE_IPV6_UDP \
109 			(0x0010 | DPAA2_PKT_TYPE_IPV6)
110 #define DPAA2_PKT_TYPE_IPV4_SCTP	\
111 			(0x000f | DPAA2_PKT_TYPE_IPV4)
112 #define DPAA2_PKT_TYPE_IPV6_SCTP	\
113 			(0x000f | DPAA2_PKT_TYPE_IPV6)
114 #define DPAA2_PKT_TYPE_IPV4_ICMP \
115 			(0x0003 | DPAA2_PKT_TYPE_IPV4_EXT)
116 #define DPAA2_PKT_TYPE_IPV6_ICMP \
117 			(0x0003 | DPAA2_PKT_TYPE_IPV6_EXT)
118 #define DPAA2_PKT_TYPE_VLAN_1		0x0160
119 #define DPAA2_PKT_TYPE_VLAN_2		0x0260
120 
121 /* enable timestamp in mbuf*/
122 extern bool dpaa2_enable_ts[];
123 extern uint64_t dpaa2_timestamp_rx_dynflag;
124 extern int dpaa2_timestamp_dynfield_offset;
125 
126 #define DPAA2_QOS_TABLE_RECONFIGURE	1
127 #define DPAA2_FS_TABLE_RECONFIGURE	2
128 
129 #define DPAA2_QOS_TABLE_IPADDR_EXTRACT 4
130 #define DPAA2_FS_TABLE_IPADDR_EXTRACT 8
131 
132 #define DPAA2_FLOW_MAX_KEY_SIZE		16
133 
134 /* Externally defined */
135 extern const struct rte_flow_ops dpaa2_flow_ops;
136 
137 extern const struct rte_tm_ops dpaa2_tm_ops;
138 
139 extern bool dpaa2_enable_err_queue;
140 
141 #define IP_ADDRESS_OFFSET_INVALID (-1)
142 
/* Layout of a composed classification/distribution key: per-extract
 * offsets and sizes, indexed in parallel with the extract entries of the
 * associated dpkg_profile_cfg (up to DPKG_MAX_NUM_OF_EXTRACTS).
 */
struct dpaa2_key_info {
	uint8_t key_offset[DPKG_MAX_NUM_OF_EXTRACTS]; /* offset of each extract within the key */
	uint8_t key_size[DPKG_MAX_NUM_OF_EXTRACTS];   /* size of each extract */
	/* Special for IP address. Offsets are IP_ADDRESS_OFFSET_INVALID (-1)
	 * when the corresponding address field is not part of the key —
	 * presumably; confirm against dpaa2_flow.c usage.
	 */
	int ipv4_src_offset;
	int ipv4_dst_offset;
	int ipv6_src_offset;
	int ipv6_dst_offset;
	uint8_t key_total_size; /* total size of the composed key */
};
153 
/* Pairs the MC key-generation profile with the key layout derived from it. */
struct dpaa2_key_extract {
	struct dpkg_profile_cfg dpkg;   /* key-extract configuration for the MC */
	struct dpaa2_key_info key_info; /* offsets/sizes derived from dpkg */
};
158 
/* Per-device key-extract state: one extract for the QoS table plus one
 * per traffic class, each with an associated 64-bit extract parameter
 * (NOTE(review): presumably the IOVA of the serialized extract buffer
 * passed to the MC — confirm against dpni_set_rx_tc_dist() callers).
 */
struct extract_s {
	struct dpaa2_key_extract qos_key_extract;         /* QoS table key extract */
	struct dpaa2_key_extract tc_key_extract[MAX_TCS]; /* per-TC key extract */
	uint64_t qos_extract_param;
	uint64_t tc_extract_param[MAX_TCS];
};
165 
/* Per-port private data of the DPAA2 ethdev PMD (eth_dev->data->dev_private). */
struct dpaa2_dev_priv {
	void *hw;          /* underlying hardware object handle */
	int32_t hw_id;     /* DPNI object id — presumably; confirm at probe */
	int32_t qdid;      /* queuing destination id used for Tx */
	uint16_t token;    /* MC portal token for this DPNI */
	uint8_t nb_tx_queues; /* configured Tx queue count (<= MAX_TX_QUEUES) */
	uint8_t nb_rx_queues; /* configured Rx queue count (<= MAX_RX_QUEUES) */
	uint32_t options;  /* DPNI options/capabilities reported by the MC */
	void *rx_vq[MAX_RX_QUEUES]; /* Rx queue handles (struct dpaa2_queue *) */
	void *tx_vq[MAX_TX_QUEUES]; /* Tx queue handles */
	struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
	void *tx_conf_vq[MAX_TX_QUEUES * DPAA2_MAX_CHANNELS]; /* Tx confirmation queues */
	void *rx_err_vq;   /* Rx error queue; see dpaa2_enable_err_queue */
	uint8_t flags; /*dpaa2 config flags (DPAA2_TX_CGR_OFF etc.) */
	uint8_t max_mac_filters;  /* MAC filter table capacity */
	uint8_t max_vlan_filters; /* VLAN filter table capacity */
	uint8_t num_rx_tc;        /* number of Rx traffic classes */
	uint8_t num_tx_tc;        /* number of Tx traffic classes */
	uint16_t qos_entries;     /* QoS classification table size */
	uint16_t fs_entries;      /* flow-steering table size */
	uint8_t dist_queues;      /* queues available per TC for distribution */
	uint8_t num_channels;     /* channels in use (<= DPAA2_MAX_CHANNELS) */
	uint8_t en_ordered;       /* ordered event scheduling enabled */
	uint8_t en_loose_ordered; /* relaxed ordering variant enabled */
	uint8_t max_cgs;          /* number of congestion groups available */
	uint8_t cgid_in_use[MAX_RX_QUEUES]; /* congestion group id per Rx queue */

	struct extract_s extract; /* RSS / flow key-extract configuration */

	/* Soft-sequence (parser firmware) state — NOTE(review): semantics
	 * inferred from names; confirm against dpaa2_eth_load_wriop_soft_parser.
	 */
	uint16_t ss_offset;
	uint64_t ss_iova;
	uint64_t ss_param_iova;
	/*stores timestamp of last received packet on dev*/
	uint64_t rx_timestamp;
	/*stores timestamp of last received tx confirmation packet on dev*/
	uint64_t tx_timestamp;
	/* stores pointer to next tx_conf queue that should be processed,
	 * it corresponds to last packet transmitted
	 */
	struct dpaa2_queue *next_tx_conf_queue;

	struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
	rte_spinlock_t lpbk_qp_lock; /* serializes loopback queue-pair setup */

	uint8_t channel_inuse; /* next channel index to allocate */
	LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
	LIST_HEAD(nodes, dpaa2_tm_node) nodes; /* traffic-manager hierarchy nodes */
	LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles; /* TM shaper profiles */
};
215 
/* Translate an RTE_ETH_RSS_* offload bitmask into a dpkg key-generation
 * profile understood by the MC.
 */
int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set,
				      struct dpkg_profile_cfg *kg_cfg);

/* Configure RSS distribution for one traffic class. */
int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
		uint64_t req_dist_set, int tc_index);

/* Remove RSS distribution from one traffic class. */
int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
			   uint8_t tc_index);

/* Attach a buffer pool list (blist) to the DPNI for Rx buffer seeding. */
int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
	struct fsl_mc_io *dpni, void *blist);

/* Bind an Rx queue to a DPCON channel for the eventdev Rx adapter. */
__rte_internal
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		struct dpaa2_dpcon_dev *dpcon,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

/* Undo dpaa2_eth_eventq_attach() for the given Rx queue. */
__rte_internal
int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id);

/* Rx burst function (eth_rx_burst_t signature). */
uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);

/* Rx burst used when a DPAA2_TX_LOOPBACK_MODE flag is set. */
uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
				uint16_t nb_pkts);

/* Rx burst variant that prefetches ahead — presumably the default path. */
uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
			       uint16_t nb_pkts);
/* Eventdev dequeue callbacks: convert a dequeued frame descriptor into an
 * rte_event, one handler per scheduling type (parallel/atomic/ordered).
 */
void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				      const struct qbman_fd *fd,
				      const struct qbman_result *dq,
				      struct dpaa2_queue *rxq,
				      struct rte_event *ev);
void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
				    const struct qbman_fd *fd,
				    const struct qbman_result *dq,
				    struct dpaa2_queue *rxq,
				    struct rte_event *ev);
void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				     const struct qbman_fd *fd,
				     const struct qbman_result *dq,
				     struct dpaa2_queue *rxq,
				     struct rte_event *ev);
/* Tx burst functions (eth_tx_burst_t signature). */
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
			      uint16_t nb_pkts);
/* Tx burst spanning multiple queues while preserving ordering. */
__rte_internal
uint16_t dpaa2_dev_tx_multi_txq_ordered(void **queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts);

/* Release the buffer backing an enqueue response at index eqresp_ci. */
void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
/* Destroy all configured flow rules of the port. */
void dpaa2_flow_clean(struct rte_eth_dev *dev);
/* Drain the Tx confirmation queue; returns frames processed. */
uint16_t dpaa2_dev_tx_conf(void *queue)  __rte_unused;
/* Return non-zero when the ethdev is backed by this PMD. */
int dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev);

/* IEEE 1588 / timesync ops (rte_eth_dev_ops counterparts). */
int dpaa2_timesync_enable(struct rte_eth_dev *dev);
int dpaa2_timesync_disable(struct rte_eth_dev *dev);
int dpaa2_timesync_read_time(struct rte_eth_dev *dev,
					struct timespec *timestamp);
int dpaa2_timesync_write_time(struct rte_eth_dev *dev,
					const struct timespec *timestamp);
int dpaa2_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
int dpaa2_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
						struct timespec *timestamp,
						uint32_t flags __rte_unused);
int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);

/* Buffer-recycle (port-to-port loopback) configuration helpers. */
int dpaa2_dev_recycle_config(struct rte_eth_dev *eth_dev);
int dpaa2_dev_recycle_deconfig(struct rte_eth_dev *eth_dev);
/* Set up one recycle queue pair; returns the queues via txq/rxq. */
int dpaa2_dev_recycle_qp_setup(struct rte_dpaa2_device *dpaa2_dev,
	uint16_t qidx, uint64_t cntx,
	eth_rx_burst_t tx_lpbk, eth_tx_burst_t rx_lpbk,
	struct dpaa2_queue **txq,
	struct dpaa2_queue **rxq);
292 
293 #endif /* _DPAA2_ETHDEV_H */
294