xref: /dpdk/drivers/net/dpaa2/dpaa2_ethdev.h (revision fdf7471cccb8be023037c218d1402c0549eb2c8e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2019 NXP
5  *
6  */
7 
8 #ifndef _DPAA2_ETHDEV_H
9 #define _DPAA2_ETHDEV_H
10 
11 #include <rte_event_eth_rx_adapter.h>
12 #include <rte_pmd_dpaa2.h>
13 
14 #include <dpaa2_hw_pvt.h>
15 
16 #include <mc/fsl_dpni.h>
17 #include <mc/fsl_mc_sys.h>
18 
/* Smallest Rx buffer size the hardware accepts from a buffer pool */
#define DPAA2_MIN_RX_BUF_SIZE 512
/* Maximum Rx frame length supported by WRIOP */
#define DPAA2_MAX_RX_PKT_LEN  10240 /*WRIOP support*/

/* Per-DPNI resource bounds */
#define MAX_TCS			DPNI_MAX_TC
#define MAX_RX_QUEUES		128
#define MAX_TX_QUEUES		16
#define MAX_DPNI		8

/* Default TC to be used for congestion, distribution etc. configuration. */
#define DPAA2_DEF_TC		0

/* Threshold for a Tx queue to *Enter* Congestion state.
 */
#define CONG_ENTER_TX_THRESHOLD   512

/* Threshold for a queue to *Exit* Congestion state.
 * Kept below CONG_ENTER_TX_THRESHOLD to provide hysteresis.
 */
#define CONG_EXIT_TX_THRESHOLD    480

/* Retry budget while a congested Tx queue drains — presumably consumed by
 * the enqueue path in the datapath; verify against dpaa2_rxtx.c.
 */
#define CONG_RETRY_COUNT 18000

/* RX queue tail drop threshold
 * currently considering 64 KB packets
 */
#define CONG_THRESHOLD_RX_BYTES_Q  (64 * 1024)
/* NOTE(review): looks like the overhead accounting length (bytes) used with
 * the byte-based tail-drop threshold above — confirm against queue setup.
 */
#define CONG_RX_OAL	128

/* Size of the input SMMU mapped memory required by MC */
#define DIST_PARAM_IOVA_SIZE 256

/* Flags stored in dpaa2_dev_priv::flags */
/* Enable TX Congestion control support
 * default is disable
 */
#define DPAA2_TX_CGR_OFF	0x01

/* Disable RX tail drop, default is enable */
#define DPAA2_RX_TAILDROP_OFF	0x04

/* All RSS hash protocols this PMD can offload to the hardware key-gen */
#define DPAA2_RSS_OFFLOAD_ALL ( \
	ETH_RSS_L2_PAYLOAD | \
	ETH_RSS_IP | \
	ETH_RSS_UDP | \
	ETH_RSS_TCP | \
	ETH_RSS_SCTP)
63 
/* LX2 FRC Parsed values (Little Endian).
 * Values for the frame result/context (FRC) field produced by the LX2
 * parser; composite protocols are built by OR-ing a protocol bit pattern
 * into the base IPV4/IPV6 value.
 */
#define DPAA2_PKT_TYPE_ETHER		0x0060
#define DPAA2_PKT_TYPE_IPV4		0x0000
#define DPAA2_PKT_TYPE_IPV6		0x0020
#define DPAA2_PKT_TYPE_IPV4_EXT \
			(0x0001 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_EXT \
			(0x0001 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_TCP \
			(0x000e | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_TCP \
			(0x000e | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_UDP \
			(0x0010 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_UDP \
			(0x0010 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_SCTP	\
			(0x000f | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_SCTP	\
			(0x000f | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_ICMP \
			(0x0003 | DPAA2_PKT_TYPE_IPV4_EXT)
#define DPAA2_PKT_TYPE_IPV6_ICMP \
			(0x0003 | DPAA2_PKT_TYPE_IPV6_EXT)
/* Single and double VLAN tagged Ethernet frames */
#define DPAA2_PKT_TYPE_VLAN_1		0x0160
#define DPAA2_PKT_TYPE_VLAN_2		0x0260
90 
/* Enable timestamp in mbuf (defined in the PMD's .c file) */
extern enum pmd_dpaa2_ts dpaa2_enable_ts;

/* Flags requesting rebuild of the QoS / flow-steering classification
 * tables — presumably consumed by the rte_flow code; verify in
 * dpaa2_flow.c.
 */
#define DPAA2_QOS_TABLE_RECONFIGURE	1
#define DPAA2_FS_TABLE_RECONFIGURE	2

/* Externally defined */
extern const struct rte_flow_ops dpaa2_flow_ops;
extern enum rte_filter_type dpaa2_filter_type;
100 
/* Per-device private data of a dpaa2 ethdev port. */
struct dpaa2_dev_priv {
	void *hw;               /* opaque MC I/O handle — presumably struct fsl_mc_io; confirm */
	int32_t hw_id;          /* DPNI object id on the MC bus */
	int32_t qdid;           /* queuing destination id used on enqueue */
	uint16_t token;         /* MC authentication token for this DPNI */
	uint8_t nb_tx_queues;   /* number of configured Tx queues */
	uint8_t nb_rx_queues;   /* number of configured Rx queues */
	uint32_t options;       /* DPNI option flags reported by MC */
	void *rx_vq[MAX_RX_QUEUES];    /* Rx queue contexts */
	void *tx_vq[MAX_TX_QUEUES];    /* Tx queue contexts */
	struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
	void *tx_conf_vq[MAX_TX_QUEUES]; /* Tx confirmation queue contexts */
	uint8_t tx_conf_en;     /* non-zero when Tx confirmation is enabled */
	uint8_t max_mac_filters;  /* MAC filter entries supported by HW */
	uint8_t max_vlan_filters; /* VLAN filter entries supported by HW */
	uint8_t num_rx_tc;      /* number of Rx traffic classes in use */
	uint8_t flags; /*dpaa2 config flags (DPAA2_TX_CGR_OFF etc.) */
	uint8_t en_ordered;       /* ordered event scheduling enabled */
	uint8_t en_loose_ordered; /* loose ordering mode enabled */
	uint8_t max_cgs;        /* congestion groups available to this port */
	uint8_t cgid_in_use[MAX_RX_QUEUES]; /* CGID allocated per Rx queue */

	/* Per-TC record of the flow pattern items programmed into the
	 * classifier key ([MAX_TCS] is the QoS table entry — TODO confirm).
	 */
	struct pattern_s {
		uint8_t item_count; /* number of valid entries in pattern_type */
		uint8_t pattern_type[DPKG_MAX_NUM_OF_EXTRACTS];
	} pattern[MAX_TCS + 1];

	/* Key-generation (DPKG) profiles and their DMA-able parameter
	 * areas for the QoS table and the per-TC flow-steering tables.
	 */
	struct extract_s {
		struct dpkg_profile_cfg qos_key_cfg;
		struct dpkg_profile_cfg fs_key_cfg[MAX_TCS];
		uint64_t qos_extract_param;          /* IOVA of QoS key params */
		uint64_t fs_extract_param[MAX_TCS];  /* IOVA of FS key params */
	} extract;

	/* Soft-sequence (parser) firmware: offset, code and parameter IOVAs */
	uint16_t ss_offset;
	uint64_t ss_iova;
	uint64_t ss_param_iova;
#if defined(RTE_LIBRTE_IEEE1588)
	/*stores timestamp of last received packet on dev*/
	uint64_t rx_timestamp;
	/*stores timestamp of last received tx confirmation packet on dev*/
	uint64_t tx_timestamp;
	/* stores pointer to next tx_conf queue that should be processed,
	 * it corresponds to last packet transmitted
	 */
	struct dpaa2_queue *next_tx_conf_queue;
#endif

	struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */

	LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
};
153 
/* Translate an ETH_RSS_* request mask into a DPKG key-generation profile. */
int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set,
				      struct dpkg_profile_cfg *kg_cfg);

/* Program RSS/flow distribution on the device for the given hash fields. */
int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
			  uint64_t req_dist_set);

/* Remove flow distribution configuration from the given traffic class. */
int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
			   uint8_t tc_index);

/* Attach a buffer pool list to the device; pools supply Rx buffers. */
int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);

/* Attach/detach an Rx queue to/from an event device via a DPCON channel
 * (eventdev Rx adapter support).
 */
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		struct dpaa2_dpcon_dev *dpcon,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id);

/* Rx burst variants: plain, loopback, and prefetch-enabled. */
uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);

uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
				uint16_t nb_pkts);

uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
			       uint16_t nb_pkts);
/* Convert a dequeued frame descriptor into an rte_event for parallel,
 * atomic and ordered scheduling modes respectively.
 */
void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				      const struct qbman_fd *fd,
				      const struct qbman_result *dq,
				      struct dpaa2_queue *rxq,
				      struct rte_event *ev);
void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
				    const struct qbman_fd *fd,
				    const struct qbman_result *dq,
				    struct dpaa2_queue *rxq,
				    struct rte_event *ev);
void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				     const struct qbman_fd *fd,
				     const struct qbman_result *dq,
				     struct dpaa2_queue *rxq,
				     struct rte_event *ev);
/* Tx burst variants; dummy_dev_tx is the drop-all placeholder used while
 * the device is stopped.
 */
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
			      uint16_t nb_pkts);
uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
/* Release the buffer held by the enqueue-response entry at index eqresp_ci */
void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
/* Tear down all rte_flow rules installed on the device */
void dpaa2_flow_clean(struct rte_eth_dev *dev);
/* Drain the Tx confirmation queue.
 * NOTE(review): bare __attribute__((unused)) — DPDK convention is the
 * portable __rte_unused wrapper; consider switching when rte_common.h is
 * guaranteed in scope.
 */
uint16_t dpaa2_dev_tx_conf(void *queue)  __attribute__((unused));
202 
/* IEEE 1588 (PTP) timesync ethdev ops — only built when the
 * RTE_LIBRTE_IEEE1588 config option is enabled.
 */
#if defined(RTE_LIBRTE_IEEE1588)
int dpaa2_timesync_enable(struct rte_eth_dev *dev);
int dpaa2_timesync_disable(struct rte_eth_dev *dev);
int dpaa2_timesync_read_time(struct rte_eth_dev *dev,
					struct timespec *timestamp);
int dpaa2_timesync_write_time(struct rte_eth_dev *dev,
					const struct timespec *timestamp);
int dpaa2_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
int dpaa2_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
						struct timespec *timestamp,
						uint32_t flags __rte_unused);
int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);
#endif
217 #endif /* _DPAA2_ETHDEV_H */
218