/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#ifndef _DPAA2_ETHDEV_H
#define _DPAA2_ETHDEV_H

#include <rte_event_eth_rx_adapter.h>
#include <rte_pmd_dpaa2.h>

#include <dpaa2_hw_pvt.h>

#include <mc/fsl_dpni.h>
#include <mc/fsl_mc_sys.h>

#define DPAA2_MIN_RX_BUF_SIZE 512
#define DPAA2_MAX_RX_PKT_LEN  10240 /* WRIOP support */

#define MAX_TCS			DPNI_MAX_TC
#define MAX_RX_QUEUES		128
#define MAX_TX_QUEUES		16

/* Default TC to be used for congestion, distribution, etc. configuration */
#define DPAA2_DEF_TC		0

/* Threshold for a Tx queue to *Enter* Congestion state. */
#define CONG_ENTER_TX_THRESHOLD   512

/* Threshold for a queue to *Exit* Congestion state. */
#define CONG_EXIT_TX_THRESHOLD    480

#define CONG_RETRY_COUNT 18000

/* RX queue tail drop threshold
 * currently considering 64 KB packets
 */
#define CONG_THRESHOLD_RX_Q  (64 * 1024)
#define CONG_RX_OAL	128

/* Size of the input SMMU mapped memory required by MC */
#define DIST_PARAM_IOVA_SIZE 256

/* Disable TX congestion control support, default is enabled */
#define DPAA2_TX_CGR_OFF	0x01

/* Disable RX tail drop, default is enabled */
#define DPAA2_RX_TAILDROP_OFF	0x04

#define DPAA2_RSS_OFFLOAD_ALL ( \
	ETH_RSS_IP | \
	ETH_RSS_UDP | \
	ETH_RSS_TCP | \
	ETH_RSS_SCTP)

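/* Usage sketch (illustrative, not part of the original header): the PMD
 * typically validates the RSS hash types requested by the application
 * against this mask, e.g.:
 *
 *	uint64_t rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
 *
 *	if (rss_hf & ~DPAA2_RSS_OFFLOAD_ALL)
 *		return -ENOTSUP;
 */
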
/* LX2 FRC Parsed values (Little Endian) */
#define DPAA2_PKT_TYPE_ETHER		0x0060
#define DPAA2_PKT_TYPE_IPV4		0x0000
#define DPAA2_PKT_TYPE_IPV6		0x0020
#define DPAA2_PKT_TYPE_IPV4_EXT \
			(0x0001 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_EXT \
			(0x0001 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_TCP \
			(0x000e | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_TCP \
			(0x000e | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_UDP \
			(0x0010 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_UDP \
			(0x0010 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_SCTP	\
			(0x000f | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_SCTP	\
			(0x000f | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_ICMP \
			(0x0003 | DPAA2_PKT_TYPE_IPV4_EXT)
#define DPAA2_PKT_TYPE_IPV6_ICMP \
			(0x0003 | DPAA2_PKT_TYPE_IPV6_EXT)
#define DPAA2_PKT_TYPE_VLAN_1		0x0160
#define DPAA2_PKT_TYPE_VLAN_2		0x0260

/* Enable timestamp in mbuf */
extern enum pmd_dpaa2_ts dpaa2_enable_ts;

#define DPAA2_QOS_TABLE_RECONFIGURE	1
#define DPAA2_FS_TABLE_RECONFIGURE	2

/* Externally defined */
extern const struct rte_flow_ops dpaa2_flow_ops;
extern enum rte_filter_type dpaa2_filter_type;

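/* Per-device private data of the DPAA2 ethdev (one instance per DPNI object) */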
struct dpaa2_dev_priv {
	void *hw;
	int32_t hw_id;
	int32_t qdid;
	uint16_t token;
	uint8_t nb_tx_queues;
	uint8_t nb_rx_queues;
	uint32_t options;
	void *rx_vq[MAX_RX_QUEUES];
	void *tx_vq[MAX_TX_QUEUES];

	struct dpaa2_bp_list *bp_list; /**< Attached buffer pool list */
	uint8_t max_mac_filters;
	uint8_t max_vlan_filters;
	uint8_t num_rx_tc;
	uint8_t flags; /* dpaa2 config flags */
	uint8_t en_ordered;
	uint8_t en_loose_ordered;

	struct pattern_s {
		uint8_t item_count;
		uint8_t pattern_type[DPKG_MAX_NUM_OF_EXTRACTS];
	} pattern[MAX_TCS + 1];

	struct extract_s {
		struct dpkg_profile_cfg qos_key_cfg;
		struct dpkg_profile_cfg fs_key_cfg[MAX_TCS];
		uint64_t qos_extract_param;
		uint64_t fs_extract_param[MAX_TCS];
	} extract;
	LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
};

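/* Translate an rte_eth RSS offload bit mask (req_dist_set) into a DPNI
 * key-generation (dpkg) profile configuration.
 */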
int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set,
				      struct dpkg_profile_cfg *kg_cfg);

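/* Configure RSS flow distribution on the device as per req_dist_set */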
int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
			  uint64_t req_dist_set);

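/* Disable flow distribution for the given traffic class */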
int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
			   uint8_t tc_index);

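/* Attach the buffer pool list (blist) to the device's private data */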
int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);

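/* Connect an Rx queue to a DPCON object so that received packets are
 * delivered through the event device; typically reached via
 * rte_event_eth_rx_adapter_queue_add() rather than called directly.
 */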
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		uint16_t dpcon_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

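/* Detach an Rx queue from its DPCON object, disabling event delivery */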
int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id);

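/* Rx burst function used in loopback mode: received packets are transmitted
 * back on the same port instead of being returned to the application.
 */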
uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
				uint16_t nb_pkts);

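/* Default Rx burst function; prefetches the next set of frame descriptors
 * while the current ones are being processed.
 */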
uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
			       uint16_t nb_pkts);
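
/* Callbacks invoked on the event dequeue path to convert a dequeued frame
 * descriptor into an rte_event, for parallel, atomic and ordered scheduling
 * respectively.
 */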
void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				      const struct qbman_fd *fd,
				      const struct qbman_result *dq,
				      struct dpaa2_queue *rxq,
				      struct rte_event *ev);
void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
				    const struct qbman_fd *fd,
				    const struct qbman_result *dq,
				    struct dpaa2_queue *rxq,
				    struct rte_event *ev);
void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				     const struct qbman_fd *fd,
				     const struct qbman_result *dq,
				     struct dpaa2_queue *rxq,
				     struct rte_event *ev);
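
/* Tx burst functions; the _ordered variant additionally handles order
 * restoration for packets received on ordered queues.
 */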
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
			      uint16_t nb_pkts);
uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
void dpaa2_flow_clean(struct rte_eth_dev *dev);

#endif /* _DPAA2_ETHDEV_H */