/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <dev_driver.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>
#include <rte_bitmap.h>
#include <rte_os_shim.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"
#include "i40e_hash.h"

#define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
#define ETH_I40E_SUPPORT_MULTI_DRIVER	"support-multi-driver"
#define ETH_I40E_QUEUE_NUM_PER_VF_ARG	"queue-num-per-vf"
#define ETH_I40E_VF_MSG_CFG		"vf_msg_cfg"
#define ETH_I40E_MBUF_CHECK_ARG       "mbuf_check"
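/*
 * The keys above are accepted as PCI devargs, e.g. (illustrative only):
 *   -a 0000:02:00.0,support-multi-driver=1,queue-num-per-vf=8
 */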

#define I40E_CLEAR_PXE_WAIT_MS     200
#define I40E_VSI_TSR_QINQ_STRIP		0x4010
#define I40E_VSI_TSR(_i)	(0x00050800 + ((_i) * 4))

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
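/*
 * Note: 0xF2000 bytes == 968 KB, i.e. both watermarks default to the full
 * Rx packet buffer size (I40E_RXPBSIZE) expressed in kilobyte units.
 */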

/* Receive Average Packet Size in Bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_GRST_MASK | \
		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
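/*
 * The INCVAL constants above scale inversely with link speed (the 1G value
 * is ten times the 10G value, and the 40G value is half of it), which
 * suggests the PHC advances at the same wall-clock rate at every speed.
 */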

/**
 * Below are values for writing un-exposed registers suggested
 * by silicon experts
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0000FF00UL
#define I40E_INSET_IPV4_TTL_MASK        0x000000FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x0000FF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0000F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x0000FF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000000FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)

#define I40E_GLQF_PIT_IPV4_START	2
#define I40E_GLQF_PIT_IPV4_COUNT	2
#define I40E_GLQF_PIT_IPV6_START	4
#define I40E_GLQF_PIT_IPV6_COUNT	2

#define I40E_GLQF_PIT_SOURCE_OFF_GET(a)	\
				(((a) & I40E_GLQF_PIT_SOURCE_OFF_MASK) >> \
				 I40E_GLQF_PIT_SOURCE_OFF_SHIFT)

#define I40E_GLQF_PIT_DEST_OFF_GET(a) \
				(((a) & I40E_GLQF_PIT_DEST_OFF_MASK) >> \
				 I40E_GLQF_PIT_DEST_OFF_SHIFT)

#define I40E_GLQF_PIT_FSIZE_GET(a)	(((a) & I40E_GLQF_PIT_FSIZE_MASK) >> \
					 I40E_GLQF_PIT_FSIZE_SHIFT)

#define I40E_GLQF_PIT_BUILD(off, mask)	(((off) << 16) | (mask))
#define I40E_FDIR_FIELD_OFFSET(a)	((a) >> 1)
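/*
 * I40E_GLQF_PIT_BUILD() packs a field offset into bits 31:16 and a field
 * mask into bits 15:0 of a GLQF_PIT register value. I40E_FDIR_FIELD_OFFSET()
 * halves a byte offset, presumably converting it to a 16-bit-word offset.
 */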

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static int i40e_dev_stop(struct rte_eth_dev *dev);
static int i40e_dev_close(struct rte_eth_dev *dev);
static int i40e_dev_reset(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
			       struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     unsigned limit);
static int i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
				char *fw_version, size_t fw_size);
static int i40e_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id,
				int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
			      enum rte_vlan_type vlan_type,
			      uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue,
				      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr,
			    uint32_t index,
			    uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
			       uint32_t hireg,
			       uint32_t loreg,
			       bool offset_loaded,
			       uint64_t *offset,
			       uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static void i40e_dev_alarm_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
				uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
			uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
			uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
						struct i40e_vsi *vsi);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
				 const struct rte_flow_ops **ops);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
				  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp,
					   uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
				   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
				    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					  uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
			 struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *eeprom);

static int i40e_get_module_info(struct rte_eth_dev *dev,
				struct rte_eth_dev_module_info *modinfo);
static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
				  struct rte_dev_eeprom_info *info);

static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
	const struct rte_eth_ethertype_filter *input,
	struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
	struct i40e_aqc_cloud_filters_element_bb *cld_filter,
	struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
				struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
static int i40e_fec_get_capability(struct rte_eth_dev *dev,
	struct rte_eth_fec_capa *speed_fec_capa, unsigned int num);
static int i40e_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa);
static int i40e_fec_set(struct rte_eth_dev *dev, uint32_t fec_capa);

static const char *const valid_keys[] = {
	ETH_I40E_FLOATING_VEB_ARG,
	ETH_I40E_FLOATING_VEB_LIST_ARG,
	ETH_I40E_SUPPORT_MULTI_DRIVER,
	ETH_I40E_QUEUE_NUM_PER_VF_ARG,
	ETH_I40E_VF_MSG_CFG,
	ETH_I40E_MBUF_CHECK_ARG,
	NULL};

static const struct rte_pci_id pci_id_i40e_map[] = {
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_BC) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722_A) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure                = i40e_dev_configure,
	.dev_start                    = i40e_dev_start,
	.dev_stop                     = i40e_dev_stop,
	.dev_close                    = i40e_dev_close,
	.dev_reset                    = i40e_dev_reset,
	.promiscuous_enable           = i40e_dev_promiscuous_enable,
	.promiscuous_disable          = i40e_dev_promiscuous_disable,
	.allmulticast_enable          = i40e_dev_allmulticast_enable,
	.allmulticast_disable         = i40e_dev_allmulticast_disable,
	.dev_set_link_up              = i40e_dev_set_link_up,
	.dev_set_link_down            = i40e_dev_set_link_down,
	.link_update                  = i40e_dev_link_update,
	.stats_get                    = i40e_dev_stats_get,
	.xstats_get                   = i40e_dev_xstats_get,
	.xstats_get_names             = i40e_dev_xstats_get_names,
	.stats_reset                  = i40e_dev_stats_reset,
	.xstats_reset                 = i40e_dev_stats_reset,
	.fw_version_get               = i40e_fw_version_get,
	.dev_infos_get                = i40e_dev_info_get,
	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
	.vlan_filter_set              = i40e_vlan_filter_set,
	.vlan_tpid_set                = i40e_vlan_tpid_set,
	.vlan_offload_set             = i40e_vlan_offload_set,
	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
	.vlan_pvid_set                = i40e_vlan_pvid_set,
	.rx_queue_start               = i40e_dev_rx_queue_start,
	.rx_queue_stop                = i40e_dev_rx_queue_stop,
	.tx_queue_start               = i40e_dev_tx_queue_start,
	.tx_queue_stop                = i40e_dev_tx_queue_stop,
	.rx_queue_setup               = i40e_dev_rx_queue_setup,
	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
	.rx_queue_release             = i40e_dev_rx_queue_release,
	.tx_queue_setup               = i40e_dev_tx_queue_setup,
	.tx_queue_release             = i40e_dev_tx_queue_release,
	.dev_led_on                   = i40e_dev_led_on,
	.dev_led_off                  = i40e_dev_led_off,
	.flow_ctrl_get                = i40e_flow_ctrl_get,
	.flow_ctrl_set                = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
	.mac_addr_add                 = i40e_macaddr_add,
	.mac_addr_remove              = i40e_macaddr_remove,
	.reta_update                  = i40e_dev_rss_reta_update,
	.reta_query                   = i40e_dev_rss_reta_query,
	.rss_hash_update              = i40e_dev_rss_hash_update,
	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
	.flow_ops_get                 = i40e_dev_flow_ops_get,
	.rxq_info_get                 = i40e_rxq_info_get,
	.txq_info_get                 = i40e_txq_info_get,
	.recycle_rxq_info_get         = i40e_recycle_rxq_info_get,
	.rx_burst_mode_get            = i40e_rx_burst_mode_get,
	.tx_burst_mode_get            = i40e_tx_burst_mode_get,
	.timesync_enable              = i40e_timesync_enable,
	.timesync_disable             = i40e_timesync_disable,
	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
	.get_dcb_info                 = i40e_dev_get_dcb_info,
	.timesync_adjust_time         = i40e_timesync_adjust_time,
	.timesync_read_time           = i40e_timesync_read_time,
	.timesync_write_time          = i40e_timesync_write_time,
	.get_reg                      = i40e_get_regs,
	.get_eeprom_length            = i40e_get_eeprom_length,
	.get_eeprom                   = i40e_get_eeprom,
	.get_module_info              = i40e_get_module_info,
	.get_module_eeprom            = i40e_get_module_eeprom,
	.mac_addr_set                 = i40e_set_default_mac_addr,
	.mtu_set                      = i40e_dev_mtu_set,
	.tm_ops_get                   = i40e_tm_ops_get,
	.tx_done_cleanup              = i40e_tx_done_cleanup,
	.get_monitor_addr             = i40e_get_monitor_addr,
	.fec_get_capability           = i40e_fec_get_capability,
	.fec_get                      = i40e_fec_get,
	.fec_set                      = i40e_fec_set,
};

/* store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
		rx_unknown_protocol)},
	/*
	 * All other offsets are against i40e_hw_port_stats; its first member
	 * is an i40e_eth_stats struct, so the two offset bases are
	 * interchangeable.
	 */
	{"rx_size_error_packets", offsetof(struct i40e_hw_port_stats, rx_err1)},
	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
		sizeof(rte_i40e_stats_strings[0]))

static const struct rte_i40e_xstats_name_off i40e_mbuf_strings[] = {
	{"tx_mbuf_error_packets", offsetof(struct i40e_mbuf_stats, tx_pkt_errors)},
};

#define I40E_NB_MBUF_XSTATS (sizeof(i40e_mbuf_strings) / sizeof(i40e_mbuf_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
		mac_remote_faults)},
	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
		rx_length_errors)},
	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
		mac_short_packet_dropped)},
	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_big)},
	{"rx_flow_director_atr_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
	{"rx_flow_director_sb_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		tx_lpi_status)},
	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		rx_lpi_status)},
	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		tx_lpi_count)},
	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
		sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_rx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
		sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_tx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_tx)},
	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
		sizeof(rte_i40e_txq_prio_strings[0]))

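/*
 * PCI probe entry. When the devargs carry a VF representor list (e.g.
 * representor=[0-3], illustrative), one representor port is created per
 * listed VF after the PF ethdev itself has been initialized.
 */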
static int
eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	int i, retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
				&eth_da, 1);
		if (retval < 0)
			return retval;
	}

	if (eth_da.nb_representor_ports > 0 &&
	    eth_da.type != RTE_ETH_REPRESENTOR_VF) {
		PMD_DRV_LOG(ERR, "unsupported representor type: %s",
			    pci_dev->device.devargs->args);
		return -ENOTSUP;
	}

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
		sizeof(struct i40e_adapter),
		eth_dev_pci_specific_init, pci_dev,
		eth_i40e_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	/* probe VF representor ports */
	struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
		pci_dev->device.name);

	if (pf_ethdev == NULL)
		return -ENODEV;

	for (i = 0; i < eth_da.nb_representor_ports; i++) {
		struct i40e_vf_representor representor = {
			.vf_id = eth_da.representor_ports[i],
			.switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
				pf_ethdev->data->dev_private)->switch_domain_id,
			.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
				pf_ethdev->data->dev_private)
		};

		/* representor port name: net_<bdf>_representor_<vf id> */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			pci_dev->device.name, eth_da.representor_ports[i]);

		retval = rte_eth_dev_create(&pci_dev->device, name,
			sizeof(struct i40e_vf_representor), NULL, NULL,
			i40e_vf_representor_init, &representor);

		if (retval)
			PMD_DRV_LOG(ERR, "failed to create i40e vf "
				"representor %s.", name);
	}

	return 0;
}

static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return 0;

	if (rte_eth_dev_is_repr(ethdev))
		return rte_eth_dev_pci_generic_remove(pci_dev,
					i40e_vf_representor_uninit);
	else
		return rte_eth_dev_pci_generic_remove(pci_dev,
						eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
	.id_table = pci_id_i40e_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_i40e_pci_probe,
	.remove = eth_i40e_pci_remove,
};

static inline void
i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
			 uint32_t reg_val)
{
	uint32_t ori_reg_val;
	struct rte_eth_dev_data *dev_data =
		((struct i40e_adapter *)hw->back)->pf.dev_data;
	struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];

	ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
	i40e_write_rx_ctl(hw, reg_addr, reg_val);
	if (ori_reg_val != reg_val)
		PMD_DRV_LOG(WARNING,
			    "i40e device %s changed global register [0x%08x]."
			    " original: 0x%08x, new: 0x%08x",
			    dev->device->name, reg_addr, ori_reg_val, reg_val);
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
	/*
	 * Initialize registers for parsing packet type of QinQ
	 * This should be removed from code once proper
	 * configuration API is added to avoid configuration conflicts
	 * between ports of the same device.
	 */
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}

static inline void i40e_config_automask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	/* INTENA flag is not auto-cleared for interrupt */
	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
		I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;

	/* If support multi-driver, PF will use INT0. */
	if (!pf->support_multi_driver)
		val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

static inline void i40e_clear_automask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
	val &= ~(I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
		 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK);

	if (!pf->support_multi_driver)
		val &= ~I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
	int ret;

	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
				I40E_FLOW_CONTROL_ETHERTYPE, flags,
				pf->main_vsi_seid, 0,
				TRUE, NULL, NULL);
	if (ret)
		PMD_INIT_LOG(ERR,
			"Failed to add filter to drop flow control frames from VSIs.");
}

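/*
 * Parse one "floating_veb_list" value: a ';'-separated list of VF indices
 * or "min-max" ranges, e.g. floating_veb_list=0;3-5 (illustrative) marks
 * VFs 0, 3, 4 and 5 as attached to the floating VEB.
 */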
static int
floating_veb_list_handler(__rte_unused const char *key,
			  const char *floating_veb_value,
			  void *opaque)
{
	int idx = 0;
	unsigned int count = 0;
	char *end = NULL;
	int min, max;
	bool *vf_floating_veb = opaque;

	while (isblank(*floating_veb_value))
		floating_veb_value++;

	/* Reset floating VEB configuration for VFs */
	for (idx = 0; idx < I40E_MAX_VF; idx++)
		vf_floating_veb[idx] = false;

	min = I40E_MAX_VF;
	do {
		while (isblank(*floating_veb_value))
			floating_veb_value++;
		if (*floating_veb_value == '\0')
			return -1;
		errno = 0;
		idx = strtoul(floating_veb_value, &end, 10);
		if (errno || end == NULL)
			return -1;
		if (idx < 0)
			return -1;
		while (isblank(*end))
			end++;
		if (*end == '-') {
			min = idx;
		} else if ((*end == ';') || (*end == '\0')) {
			max = idx;
			if (min == I40E_MAX_VF)
				min = idx;
			if (max >= I40E_MAX_VF)
				max = I40E_MAX_VF - 1;
			for (idx = min; idx <= max; idx++) {
				vf_floating_veb[idx] = true;
				count++;
			}
			min = I40E_MAX_VF;
		} else {
			return -1;
		}
		floating_veb_value = end + 1;
	} while (*end != '\0');

	if (count == 0)
		return -1;

	return 0;
}

static void
config_vf_floating_veb(struct rte_devargs *devargs,
		       uint16_t floating_veb,
		       bool *vf_floating_veb)
{
	struct rte_kvargs *kvlist;
	int i;
	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

	if (!floating_veb)
		return;
	/* All the VFs attach to the floating VEB by default
	 * when the floating VEB is enabled.
	 */
	for (i = 0; i < I40E_MAX_VF; i++)
		vf_floating_veb[i] = true;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
	if (kvlist == NULL)
		return;

	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
		rte_kvargs_free(kvlist);
		return;
	}
	/* When the floating_veb_list parameter exists, all the VFs
	 * attach to the legacy VEB first and are then moved to the
	 * floating VEB according to the floating_veb_list.
	 */
	if (rte_kvargs_process(kvlist, floating_veb_list,
			       floating_veb_list_handler,
			       vf_floating_veb) < 0) {
		rte_kvargs_free(kvlist);
		return;
	}
	rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
			    const char *value,
			    __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* Floating VEB is enabled when there's key-value:
	 * enable_floating_veb=1
	 */
	if (rte_kvargs_process(kvlist, floating_veb_key,
			       i40e_check_floating_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}

static void
config_floating_veb(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
		pf->floating_veb =
			is_floating_veb_supported(pci_dev->device.devargs);
		config_vf_floating_veb(pci_dev->device.devargs,
				       pf->floating_veb,
				       pf->floating_veb_list);
	} else {
		pf->floating_veb = false;
	}
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	char ethertype_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters ethertype_hash_params = {
		.name = ethertype_hash_name,
		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
		.key_len = sizeof(struct i40e_ethertype_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize ethertype filter rule list and hash */
	TAILQ_INIT(&ethertype_rule->ethertype_list);
	snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
		 "ethertype_%s", dev->device->name);
	ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
	if (!ethertype_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
		return -EINVAL;
	}
	ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
				       sizeof(struct i40e_ethertype_filter *) *
				       I40E_MAX_ETHERTYPE_FILTER_NUM,
				       0);
	if (!ethertype_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for ethertype hash map!");
		ret = -ENOMEM;
		goto err_ethertype_hash_map_alloc;
	}

	return 0;

err_ethertype_hash_map_alloc:
	rte_hash_free(ethertype_rule->hash_table);

	return ret;
}

static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	char tunnel_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters tunnel_hash_params = {
		.name = tunnel_hash_name,
		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
		.key_len = sizeof(struct i40e_tunnel_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize tunnel filter rule list and hash */
	TAILQ_INIT(&tunnel_rule->tunnel_list);
	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
		 "tunnel_%s", dev->device->name);
	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
	if (!tunnel_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
		return -EINVAL;
	}
	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
				    sizeof(struct i40e_tunnel_filter *) *
				    I40E_MAX_TUNNEL_FILTER_NUM,
				    0);
	if (!tunnel_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for tunnel hash map!");
		ret = -ENOMEM;
		goto err_tunnel_hash_map_alloc;
	}

	return 0;

err_tunnel_hash_map_alloc:
	rte_hash_free(tunnel_rule->hash_table);

	return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
	uint32_t best = hw->func_caps.fd_filters_best_effort;
	enum i40e_filter_pctype pctype;
	struct rte_bitmap *bmp = NULL;
	uint32_t bmp_size;
	void *mem = NULL;
	uint32_t i = 0;
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = I40E_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct i40e_fdir_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize flow director filter rule list and hash */
	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}

	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
					  sizeof(struct i40e_fdir_filter *) *
					  I40E_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}

	fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
			sizeof(struct i40e_fdir_filter) *
			I40E_MAX_FDIR_FILTER_NUM,
			0);

	if (!fdir_info->fdir_filter_array) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir filter array!");
		ret = -ENOMEM;
		goto err_fdir_filter_array_alloc;
	}

	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
		pf->fdir.flow_count[pctype] = 0;

	fdir_info->fdir_space_size = alloc + best;
	fdir_info->fdir_actual_cnt = 0;
	fdir_info->fdir_guarantee_total_space = alloc;
	fdir_info->fdir_guarantee_free_space =
		fdir_info->fdir_guarantee_total_space;

	PMD_DRV_LOG(INFO, "FDIR guarantee space: %u, best_effort space %u.", alloc, best);

	fdir_info->fdir_flow_pool.pool =
			rte_zmalloc("i40e_fdir_entry",
				sizeof(struct i40e_fdir_entry) *
				fdir_info->fdir_space_size,
				0);

	if (!fdir_info->fdir_flow_pool.pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir flow pool!");
		ret = -ENOMEM;
		goto err_fdir_bitmap_flow_alloc;
	}

	for (i = 0; i < fdir_info->fdir_space_size; i++)
		fdir_info->fdir_flow_pool.pool[i].idx = i;

	bmp_size =
		rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
	mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
	if (mem == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir bitmap!");
		ret = -ENOMEM;
		goto err_fdir_mem_alloc;
	}
	bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
	if (bmp == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to initialize fdir bitmap!");
		ret = -ENOMEM;
		goto err_fdir_bmp_alloc;
	}
	for (i = 0; i < fdir_info->fdir_space_size; i++)
		rte_bitmap_set(bmp, i);

	fdir_info->fdir_flow_pool.bitmap = bmp;

	return 0;

err_fdir_bmp_alloc:
	rte_free(mem);
err_fdir_mem_alloc:
	rte_free(fdir_info->fdir_flow_pool.pool);
err_fdir_bitmap_flow_alloc:
	rte_free(fdir_info->fdir_filter_array);
err_fdir_filter_array_alloc:
	rte_free(fdir_info->hash_map);
err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

static void
i40e_init_customized_info(struct i40e_pf *pf)
{
	int i;

	/* Initialize customized pctype */
	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
		pf->customized_pctype[i].index = i;
		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
		pf->customized_pctype[i].valid = false;
	}

	pf->gtp_support = false;
	pf->esp_support = false;
}

static void
i40e_init_filter_invalidation(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	uint32_t glqf_ctl_reg = 0;

	glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
	if (!pf->support_multi_driver) {
		fdir_info->fdir_invalprio = 1;
		glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
		PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
		i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
	} else {
		if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
			fdir_info->fdir_invalprio = 1;
			PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
		} else {
			fdir_info->fdir_invalprio = 0;
			PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
		}
	}
}

void
i40e_init_queue_region_conf(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_queue_regions *info = &pf->queue_region;
	uint16_t i;

	for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
		i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);

	memset(info, 0, sizeof(struct i40e_queue_regions));
}

static int
i40e_parse_multi_drv_handler(__rte_unused const char *key,
			       const char *value,
			       void *opaque)
{
	struct i40e_pf *pf;
	unsigned long support_multi_driver;
	char *end;

	pf = (struct i40e_pf *)opaque;

	errno = 0;
	support_multi_driver = strtoul(value, &end, 10);
	if (errno != 0 || end == value || *end != 0) {
		PMD_DRV_LOG(WARNING, "Wrong global configuration");
		return -(EINVAL);
	}

	if (support_multi_driver == 1 || support_multi_driver == 0)
		pf->support_multi_driver = (bool)support_multi_driver;
	else
		PMD_DRV_LOG(WARNING,
			    "%s must be 1 or 0, enable global configuration by default.",
			    ETH_I40E_SUPPORT_MULTI_DRIVER);
	return 0;
}

static int
i40e_support_multi_driver(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_kvargs *kvlist;
	int kvargs_count;

	/* Enable global configuration by default */
	pf->support_multi_driver = false;

	if (!dev->device->devargs)
		return 0;

	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
	if (!kvlist)
		return -EINVAL;

	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
	if (!kvargs_count) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	if (kvargs_count > 1)
		PMD_DRV_LOG(WARNING, "More than one argument \"%s\"; only "
			    "the first invalid or last valid one is used!",
			    ETH_I40E_SUPPORT_MULTI_DRIVER);
	if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
			       i40e_parse_multi_drv_handler, pf) < 0) {
		rte_kvargs_free(kvlist);
		return -EINVAL;
	}

	rte_kvargs_free(kvlist);
	return 0;
}

static int
i40e_aq_debug_write_global_register(struct i40e_hw *hw,
				    uint32_t reg_addr, uint64_t reg_val,
				    struct i40e_asq_cmd_details *cmd_details)
{
	uint64_t ori_reg_val;
	struct rte_eth_dev_data *dev_data =
		((struct i40e_adapter *)hw->back)->pf.dev_data;
	struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
	int ret;

	ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR,
			    "Failed to debug read from 0x%08x",
			    reg_addr);
		return -EIO;
	}

	if (ori_reg_val != reg_val)
		PMD_DRV_LOG(WARNING,
			    "i40e device %s changed global register [0x%08x]."
			    " original: 0x%"PRIx64", after: 0x%"PRIx64,
			    dev->device->name, reg_addr, ori_reg_val, reg_val);

	return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
}

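/*
 * Parse one "vf_msg_cfg" value of the form max_msg@period:ignore_second
 * (e.g. vf_msg_cfg=60@120:180, as in the error hint below). If a VF sends
 * more than max_msg messages within 'period' seconds, its messages are
 * presumably ignored for 'ignore_second' seconds.
 */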
static int
read_vf_msg_config(__rte_unused const char *key,
			       const char *value,
			       void *opaque)
{
	struct i40e_vf_msg_cfg *cfg = opaque;

	if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period,
			&cfg->ignore_second) != 3) {
		memset(cfg, 0, sizeof(*cfg));
		PMD_DRV_LOG(ERR, "format error! example: "
				"%s=60@120:180", ETH_I40E_VF_MSG_CFG);
		return -EINVAL;
	}

	/*
	 * If the message validation function has been enabled, 'period'
	 * and 'ignore_second' must be greater than 0.
	 */
	if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) {
		memset(cfg, 0, sizeof(*cfg));
		PMD_DRV_LOG(ERR, "%s error! the second and third"
				" numbers must be greater than 0!",
				ETH_I40E_VF_MSG_CFG);
		return -EINVAL;
	}

	return 0;
}

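/*
 * Parse one "mbuf_check" value: either a single case such as
 * mbuf_check=mbuf, or a ','-separated list in square brackets such as
 * mbuf_check=[mbuf,size,segment,offload] (illustrative), setting the
 * corresponding Tx diagnostic flags.
 */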
static int
read_mbuf_check_config(__rte_unused const char *key, const char *value, void *args)
{
	char *cur;
	char *tmp;
	int str_len;
	int valid_len;

	int ret = 0;
	uint64_t *mc_flags = args;
	char *str2 = strdup(value);
	if (str2 == NULL)
		return -1;

	str_len = strlen(str2);
	if (str_len == 0) {
		ret = -1;
		goto err_end;
	}

	/* Try stripping the outer square brackets of the parameter string. */
1415 	if (str2[0] == '[' && str2[str_len - 1] == ']') {
1416 		if (str_len < 3) {
1417 			ret = -1;
1418 			goto err_end;
1419 		}
1420 		valid_len = str_len - 2;
1421 		memmove(str2, str2 + 1, valid_len);
1422 		memset(str2 + valid_len, '\0', 2);
1423 	}
1424 
1425 	cur = strtok_r(str2, ",", &tmp);
1426 	while (cur != NULL) {
1427 		if (!strcmp(cur, "mbuf"))
1428 			*mc_flags |= I40E_MBUF_CHECK_F_TX_MBUF;
1429 		else if (!strcmp(cur, "size"))
1430 			*mc_flags |= I40E_MBUF_CHECK_F_TX_SIZE;
1431 		else if (!strcmp(cur, "segment"))
1432 			*mc_flags |= I40E_MBUF_CHECK_F_TX_SEGMENT;
1433 		else if (!strcmp(cur, "offload"))
1434 			*mc_flags |= I40E_MBUF_CHECK_F_TX_OFFLOAD;
1435 		else
1436 			PMD_DRV_LOG(ERR, "Unsupported diagnostic type: %s", cur);
1437 		cur = strtok_r(NULL, ",", &tmp);
1438 	}
1439 
1440 err_end:
1441 	free(str2);
1442 	return ret;
1443 }
1444 
1445 static int
1446 i40e_parse_mbuf_check(struct rte_eth_dev *dev)
1447 {
1448 	struct i40e_adapter *ad =
1449 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1450 	struct rte_kvargs *kvlist;
1451 	int kvargs_count;
1452 	int ret = 0;
1453 
1454 	if (!dev->device->devargs)
1455 		return ret;
1456 
1457 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1458 	if (!kvlist)
1459 		return -EINVAL;
1460 
1461 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_MBUF_CHECK_ARG);
1462 	if (!kvargs_count)
1463 		goto free_end;
1464 
1465 	if (kvargs_count > 1) {
1466 		PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
1467 				ETH_I40E_MBUF_CHECK_ARG);
1468 		ret = -EINVAL;
1469 		goto free_end;
1470 	}
1471 
1472 	if (rte_kvargs_process(kvlist, ETH_I40E_MBUF_CHECK_ARG,
1473 			read_mbuf_check_config, &ad->mbuf_check) < 0)
1474 		ret = -EINVAL;
1475 
1476 free_end:
1477 	rte_kvargs_free(kvlist);
1478 	return ret;
1479 }
1480 
1481 static int
1482 i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
1483 		struct i40e_vf_msg_cfg *msg_cfg)
1484 {
1485 	struct rte_kvargs *kvlist;
1486 	int kvargs_count;
1487 	int ret = 0;
1488 
1489 	memset(msg_cfg, 0, sizeof(*msg_cfg));
1490 
1491 	if (!dev->device->devargs)
1492 		return ret;
1493 
1494 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1495 	if (!kvlist)
1496 		return -EINVAL;
1497 
1498 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG);
1499 	if (!kvargs_count)
1500 		goto free_end;
1501 
1502 	if (kvargs_count > 1) {
1503 		PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
1504 				ETH_I40E_VF_MSG_CFG);
1505 		ret = -EINVAL;
1506 		goto free_end;
1507 	}
1508 
1509 	if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG,
1510 			read_vf_msg_config, msg_cfg) < 0)
1511 		ret = -EINVAL;
1512 
1513 free_end:
1514 	rte_kvargs_free(kvlist);
1515 	return ret;
1516 }
1517 
1518 #define I40E_ALARM_INTERVAL 50000 /* us */
1519 
1520 static int
1521 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1522 {
1523 	struct rte_pci_device *pci_dev;
1524 	struct rte_intr_handle *intr_handle;
1525 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1526 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1527 	struct i40e_vsi *vsi;
1528 	int ret;
1529 	uint32_t len, val;
1530 	uint8_t aq_fail = 0;
1531 
1532 	PMD_INIT_FUNC_TRACE();
1533 
1534 	dev->dev_ops = &i40e_eth_dev_ops;
1535 	dev->rx_queue_count = i40e_dev_rx_queue_count;
1536 	dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
1537 	dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
1538 	dev->rx_pkt_burst = i40e_recv_pkts;
1539 	dev->tx_pkt_burst = i40e_xmit_pkts;
1540 	dev->tx_pkt_prepare = i40e_prep_pkts;
1541 
1542 	/* for secondary processes, we don't initialise any further as the
1543 	 * primary process has already done this work. Only check that we
1544 	 * don't need a different Rx or Tx function */
1545 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1546 		i40e_set_rx_function(dev);
1547 		i40e_set_tx_function(dev);
1548 		return 0;
1549 	}
1550 	i40e_set_default_ptype_table(dev);
1551 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1552 	intr_handle = pci_dev->intr_handle;
1553 
1554 	rte_eth_copy_pci_info(dev, pci_dev);
1555 
1556 	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1557 	pf->dev_data = dev->data;
1558 
1559 	hw->back = I40E_PF_TO_ADAPTER(pf);
1560 	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1561 	if (!hw->hw_addr) {
1562 		PMD_INIT_LOG(ERR,
1563 			"Hardware is not available, as address is NULL");
1564 		return -ENODEV;
1565 	}
1566 
1567 	hw->vendor_id = pci_dev->id.vendor_id;
1568 	hw->device_id = pci_dev->id.device_id;
1569 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1570 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1571 	hw->bus.device = pci_dev->addr.devid;
1572 	hw->bus.func = pci_dev->addr.function;
1573 	hw->adapter_stopped = 0;
1574 	hw->adapter_closed = 0;
1575 
1576 	/* Init switch device pointer */
1577 	hw->switch_dev = NULL;
1578 
1579 	/*
1580 	 * Switch Tag value should not be identical to either the First Tag
1581 	 * or Second Tag values. So set something other than common Ethertype
1582 	 * for internal switching.
1583 	 */
1584 	hw->switch_tag = 0xffff;
1585 
1586 	val = I40E_READ_REG(hw, I40E_GL_FWSTS);
1587 	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
1588 		PMD_INIT_LOG(ERR, "ERROR: Firmware recovery mode detected. Limiting functionality.");
1589 		return -EIO;
1590 	}
1591 
1592 	i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
1593 	i40e_parse_mbuf_check(dev);
1594 	/* Check whether multi-driver support is needed */
1595 	i40e_support_multi_driver(dev);
1596 
1597 	/* Make sure all is clean before doing PF reset */
1598 	i40e_clear_hw(hw);
1599 
1600 	/* Reset here to make sure all is clean for each PF */
1601 	ret = i40e_pf_reset(hw);
1602 	if (ret) {
1603 		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1604 		return ret;
1605 	}
1606 
1607 	/* Initialize the shared code (base driver) */
1608 	ret = i40e_init_shared_code(hw);
1609 	if (ret) {
1610 		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1611 		return ret;
1612 	}
1613 
1614 	/* Initialize the parameters for adminq */
1615 	i40e_init_adminq_parameter(hw);
1616 	ret = i40e_init_adminq(hw);
1617 	if (ret != I40E_SUCCESS) {
1618 		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1619 		return -EIO;
1620 	}
1621 	/* Firmware of SFP X722 does not support 802.1ad frames */
1622 	if (hw->device_id == I40E_DEV_ID_SFP_X722 ||
1623 		hw->device_id == I40E_DEV_ID_SFP_I_X722)
1624 		hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
1625 
1626 	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1627 		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1628 		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
1629 		     ((hw->nvm.version >> 12) & 0xf),
1630 		     ((hw->nvm.version >> 4) & 0xff),
1631 		     (hw->nvm.version & 0xf), hw->nvm.eetrack);
1632 
1633 	/* Initialize the hardware */
1634 	i40e_hw_init(dev);
1635 
1636 	i40e_config_automask(pf);
1637 
1638 	i40e_set_default_pctype_table(dev);
1639 
1640 	/*
1641 	 * To work around an NVM issue, initialize the registers
1642 	 * for the QinQ packet type in software.
1643 	 * This should be removed once the issue is fixed in the NVM.
1644 	 */
1645 	if (!pf->support_multi_driver)
1646 		i40e_GLQF_reg_init(hw);
1647 
1648 	/* Initialize the input set for filters (hash and fd) to default value */
1649 	i40e_filter_input_set_init(pf);
1650 
1651 	/* initialise the L3_MAP register */
1652 	if (!pf->support_multi_driver) {
1653 		ret = i40e_aq_debug_write_global_register(hw,
1654 						   I40E_GLQF_L3_MAP(40),
1655 						   0x00000028,	NULL);
1656 		if (ret)
1657 			PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1658 				     ret);
1659 		PMD_INIT_LOG(DEBUG,
1660 			     "Global register 0x%08x has been changed to 0x28",
1661 			     I40E_GLQF_L3_MAP(40));
1662 	}
1663 
1664 	/* Need the special FW version to support floating VEB */
1665 	config_floating_veb(dev);
1666 	/* Clear PXE mode */
1667 	i40e_clear_pxe_mode(hw);
1668 	i40e_dev_sync_phy_type(hw);
1669 
1670 	/*
1671 	 * On X710, performance numbers are far from expectations on recent
1672 	 * firmware versions, and the fix for this issue may not be integrated
1673 	 * in the following firmware version. So a workaround in the software
1674 	 * driver is needed: it modifies the initial values of 3 internal-only
1675 	 * registers. Note that the workaround can be removed once the issue
1676 	 * is fixed in firmware.
1677 	 */
1678 	i40e_configure_registers(hw);
1679 
1680 	/* Get hw capabilities */
1681 	ret = i40e_get_cap(hw);
1682 	if (ret != I40E_SUCCESS) {
1683 		PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1684 		goto err_get_capabilities;
1685 	}
1686 
1687 	/* Initialize parameters for PF */
1688 	ret = i40e_pf_parameter_init(dev);
1689 	if (ret != 0) {
1690 		PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1691 		goto err_parameter_init;
1692 	}
1693 
1694 	/* Initialize the queue management */
1695 	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1696 	if (ret < 0) {
1697 		PMD_INIT_LOG(ERR, "Failed to init queue pool");
1698 		goto err_qp_pool_init;
1699 	}
1700 	ret = i40e_res_pool_init(&pf->msix_pool, 1,
1701 				hw->func_caps.num_msix_vectors - 1);
1702 	if (ret < 0) {
1703 		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1704 		goto err_msix_pool_init;
1705 	}
1706 
1707 	/* Initialize lan hmc */
1708 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1709 				hw->func_caps.num_rx_qp, 0, 0);
1710 	if (ret != I40E_SUCCESS) {
1711 		PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1712 		goto err_init_lan_hmc;
1713 	}
1714 
1715 	/* Configure lan hmc */
1716 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1717 	if (ret != I40E_SUCCESS) {
1718 		PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1719 		goto err_configure_lan_hmc;
1720 	}
1721 
1722 	/* Get and check the mac address */
1723 	i40e_get_mac_addr(hw, hw->mac.addr);
1724 	if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1725 		PMD_INIT_LOG(ERR, "mac address is not valid");
1726 		ret = -EIO;
1727 		goto err_get_mac_addr;
1728 	}
1729 	/* Copy the permanent MAC address */
1730 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1731 			(struct rte_ether_addr *)hw->mac.perm_addr);
1732 
1733 	/* Disable flow control */
1734 	hw->fc.requested_mode = I40E_FC_NONE;
1735 	i40e_set_fc(hw, &aq_fail, TRUE);
1736 
1737 	/* Set the global registers with default ether type value */
1738 	if (!pf->support_multi_driver) {
1739 		ret = i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
1740 					 RTE_ETHER_TYPE_VLAN);
1741 		if (ret != I40E_SUCCESS) {
1742 			PMD_INIT_LOG(ERR,
1743 				     "Failed to set the default outer "
1744 				     "VLAN ether type");
1745 			goto err_setup_pf_switch;
1746 		}
1747 	}
1748 
1749 	/* PF setup, which includes VSI setup */
1750 	ret = i40e_pf_setup(pf);
1751 	if (ret) {
1752 		PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1753 		goto err_setup_pf_switch;
1754 	}
1755 
1756 	vsi = pf->main_vsi;
1757 
1758 	/* Disable double vlan by default */
1759 	i40e_vsi_config_double_vlan(vsi, FALSE);
1760 
1761 	/* Disable S-TAG identification when floating_veb is disabled */
1762 	if (!pf->floating_veb) {
1763 		ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1764 		if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1765 			ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1766 			I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1767 		}
1768 	}
1769 
1770 	if (!vsi->max_macaddrs)
1771 		len = RTE_ETHER_ADDR_LEN;
1772 	else
1773 		len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
1774 
1775 	/* Should be after VSI initialized */
1776 	dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1777 	if (!dev->data->mac_addrs) {
1778 		PMD_INIT_LOG(ERR,
1779 			"Failed to allocate memory for storing MAC addresses");
		ret = -ENOMEM;
1780 		goto err_mac_alloc;
1781 	}
1782 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1783 					&dev->data->mac_addrs[0]);
1784 
1785 	/* Init dcb to sw mode by default */
1786 	ret = i40e_dcb_init_configure(dev, TRUE);
1787 	if (ret != I40E_SUCCESS) {
1788 		PMD_INIT_LOG(INFO, "Failed to init DCB, disabling DCB support.");
1789 		pf->flags &= ~I40E_FLAG_DCB;
1790 	}
1791 	/* Update HW struct after DCB configuration */
1792 	i40e_get_cap(hw);
1793 
1794 	/* initialize pf host driver to setup SRIOV resource if applicable */
1795 	i40e_pf_host_init(dev);
1796 
1797 	/* register callback func to eal lib */
1798 	rte_intr_callback_register(intr_handle,
1799 				   i40e_dev_interrupt_handler, dev);
1800 
1801 	/* configure and enable device interrupt */
1802 	i40e_pf_config_irq0(hw, TRUE);
1803 	i40e_pf_enable_irq0(hw);
1804 
1805 	/* enable uio intr after callback register */
1806 	rte_intr_enable(intr_handle);
1807 
1808 	/* By default disable flexible payload in global configuration */
1809 	if (!pf->support_multi_driver)
1810 		i40e_flex_payload_reg_set_default(hw);
1811 
1812 	/*
1813 	 * Add an ethertype filter to drop all flow control frames transmitted
1814 	 * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1815 	 * frames to wire.
1816 	 */
1817 	i40e_add_tx_flow_control_drop_filter(pf);
1818 
1819 	/* initialize RSS rule list */
1820 	TAILQ_INIT(&pf->rss_config_list);
1821 
1822 	/* initialize Traffic Manager configuration */
1823 	i40e_tm_conf_init(dev);
1824 
1825 	/* Initialize customized information */
1826 	i40e_init_customized_info(pf);
1827 
1828 	/* Initialize the filter invalidation configuration */
1829 	i40e_init_filter_invalidation(pf);
1830 
1831 	ret = i40e_init_ethtype_filter_list(dev);
1832 	if (ret < 0)
1833 		goto err_init_ethtype_filter_list;
1834 	ret = i40e_init_tunnel_filter_list(dev);
1835 	if (ret < 0)
1836 		goto err_init_tunnel_filter_list;
1837 	ret = i40e_init_fdir_filter_list(dev);
1838 	if (ret < 0)
1839 		goto err_init_fdir_filter_list;
1840 
1841 	/* initialize queue region configuration */
1842 	i40e_init_queue_region_conf(dev);
1843 
1844 	/* reset all stats of the device, including pf and main vsi */
1845 	i40e_dev_stats_reset(dev);
1846 
1847 	return 0;
1848 
1849 err_init_fdir_filter_list:
1850 	rte_hash_free(pf->tunnel.hash_table);
1851 	rte_free(pf->tunnel.hash_map);
1852 err_init_tunnel_filter_list:
1853 	rte_hash_free(pf->ethertype.hash_table);
1854 	rte_free(pf->ethertype.hash_map);
1855 err_init_ethtype_filter_list:
1856 	rte_intr_callback_unregister(intr_handle,
1857 		i40e_dev_interrupt_handler, dev);
1858 	rte_free(dev->data->mac_addrs);
1859 	dev->data->mac_addrs = NULL;
1860 err_mac_alloc:
1861 	i40e_vsi_release(pf->main_vsi);
1862 err_setup_pf_switch:
1863 err_get_mac_addr:
1864 err_configure_lan_hmc:
1865 	(void)i40e_shutdown_lan_hmc(hw);
1866 err_init_lan_hmc:
1867 	i40e_res_pool_destroy(&pf->msix_pool);
1868 err_msix_pool_init:
1869 	i40e_res_pool_destroy(&pf->qp_pool);
1870 err_qp_pool_init:
1871 err_parameter_init:
1872 err_get_capabilities:
1873 	(void)i40e_shutdown_adminq(hw);
1874 
1875 	return ret;
1876 }
1877 
1878 static void
1879 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1880 {
1881 	struct i40e_ethertype_filter *p_ethertype;
1882 	struct i40e_ethertype_rule *ethertype_rule;
1883 
1884 	ethertype_rule = &pf->ethertype;
1885 	/* Remove all ethertype filter rules and hash */
1886 	rte_free(ethertype_rule->hash_map);
1887 	rte_hash_free(ethertype_rule->hash_table);
1888 
1889 	while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1890 		TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1891 			     p_ethertype, rules);
1892 		rte_free(p_ethertype);
1893 	}
1894 }
1895 
1896 static void
1897 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1898 {
1899 	struct i40e_tunnel_filter *p_tunnel;
1900 	struct i40e_tunnel_rule *tunnel_rule;
1901 
1902 	tunnel_rule = &pf->tunnel;
1903 	/* Remove all tunnel filter rules and hash */
1904 	rte_free(tunnel_rule->hash_map);
1905 	rte_hash_free(tunnel_rule->hash_table);
1906 
1907 	while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1908 		TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1909 		rte_free(p_tunnel);
1910 	}
1911 }
1912 
1913 static void
1914 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1915 {
1916 	struct i40e_fdir_filter *p_fdir;
1917 	struct i40e_fdir_info *fdir_info;
1918 
1919 	fdir_info = &pf->fdir;
1920 
1921 	/* Remove all flow director rules */
1922 	while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
1923 		TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1924 }
1925 
1926 static void
1927 i40e_fdir_memory_cleanup(struct i40e_pf *pf)
1928 {
1929 	struct i40e_fdir_info *fdir_info;
1930 
1931 	fdir_info = &pf->fdir;
1932 
1933 	/* flow director memory cleanup */
1934 	rte_free(fdir_info->hash_map);
1935 	rte_hash_free(fdir_info->hash_table);
1936 	rte_free(fdir_info->fdir_flow_pool.bitmap);
1937 	rte_free(fdir_info->fdir_flow_pool.pool);
1938 	rte_free(fdir_info->fdir_filter_array);
1939 }
1940 
1941 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1942 {
1943 	/*
1944 	 * By default, disable flexible payload
1945 	 * for the corresponding L2/L3/L4 layers.
1946 	 */
1947 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1948 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1949 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1950 }
1951 
1952 static int
1953 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1954 {
1955 	struct i40e_hw *hw;
1956 
1957 	PMD_INIT_FUNC_TRACE();
1958 
1959 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1960 		return 0;
1961 
1962 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1963 
1964 	if (hw->adapter_closed == 0)
1965 		i40e_dev_close(dev);
1966 
1967 	return 0;
1968 }
1969 
1970 static int
1971 i40e_dev_configure(struct rte_eth_dev *dev)
1972 {
1973 	struct i40e_adapter *ad =
1974 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1975 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1976 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1977 	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1978 	int i, ret;
1979 
1980 	ret = i40e_dev_sync_phy_type(hw);
1981 	if (ret)
1982 		return ret;
1983 
1984 	/* Initialize to TRUE. If any Rx queue doesn't meet the
1985 	 * bulk allocation or vector Rx preconditions, we will reset it.
1986 	 */
1987 	ad->rx_bulk_alloc_allowed = true;
1988 	ad->rx_vec_allowed = true;
1989 	ad->tx_simple_allowed = true;
1990 	ad->tx_vec_allowed = true;
1991 
1992 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
1993 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
1994 
1995 	ret = i40e_dev_init_vlan(dev);
1996 	if (ret < 0)
1997 		goto err;
1998 
1999 	/* VMDQ setup.
2000 	 *  The general PMD call sequence is NIC init, configure,
2001 	 *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() will try to
2002 	 *  look up the VSI that a specific queue belongs to if VMDQ is
2003 	 *  applicable, so VMDQ setup has to be done before
2004 	 *  rx/tx_queue_setup(); this function is a good place for vmdq_setup.
2005 	 *  RSS setup needs the actual configured Rx queue number, which is
2006 	 *  only available after rx_queue_setup(), so dev_start() is a good
2007 	 *  place for RSS setup.
2008 	 */
2009 	if (mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) {
2010 		ret = i40e_vmdq_setup(dev);
2011 		if (ret)
2012 			goto err;
2013 	}
2014 
2015 	if (mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
2016 		ret = i40e_dcb_setup(dev);
2017 		if (ret) {
2018 			PMD_DRV_LOG(ERR, "failed to configure DCB.");
2019 			goto err_dcb;
2020 		}
2021 	}
2022 
2023 	TAILQ_INIT(&pf->flow_list);
2024 
2025 	return 0;
2026 
2027 err_dcb:
2028 	/* need to release VMDQ resources if they exist */
2029 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2030 		i40e_vsi_release(pf->vmdq[i].vsi);
2031 		pf->vmdq[i].vsi = NULL;
2032 	}
2033 	rte_free(pf->vmdq);
2034 	pf->vmdq = NULL;
2035 err:
2036 	return ret;
2037 }
2038 
2039 void
2040 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
2041 {
2042 	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2043 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2044 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2045 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2046 	uint16_t msix_vect = vsi->msix_intr;
2047 	uint16_t i;
2048 
2049 	for (i = 0; i < vsi->nb_qps; i++) {
2050 		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2051 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2052 		rte_wmb();
2053 	}
2054 
2055 	if (vsi->type != I40E_VSI_SRIOV) {
2056 		if (!rte_intr_allow_others(intr_handle)) {
2057 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2058 				       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
2059 			I40E_WRITE_REG(hw,
2060 				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2061 				       0);
2062 		} else {
2063 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2064 				       I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
2065 			I40E_WRITE_REG(hw,
2066 				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2067 						       msix_vect - 1), 0);
2068 		}
2069 	} else {
2070 		uint32_t reg;
2071 		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2072 			vsi->user_param + (msix_vect - 1);
2073 
2074 		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2075 			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
2076 	}
2077 	I40E_WRITE_FLUSH(hw);
2078 }
2079 
2080 static void
2081 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
2082 		       int base_queue, int nb_queue,
2083 		       uint16_t itr_idx)
2084 {
2085 	int i;
2086 	uint32_t val;
2087 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2088 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2089 
2090 	/* Bind all RX queues to allocated MSIX interrupt */
2091 	for (i = 0; i < nb_queue; i++) {
2092 		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2093 			itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
2094 			((base_queue + i + 1) <<
2095 			 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2096 			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2097 			I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2098 
2099 		if (i == nb_queue - 1)
2100 			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
2101 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
2102 	}
2103 
2104 	/* Write first RX queue to Link list register as the head element */
2105 	if (vsi->type != I40E_VSI_SRIOV) {
2106 		uint16_t interval =
2107 			i40e_calc_itr_interval(1, pf->support_multi_driver);
2108 
2109 		if (msix_vect == I40E_MISC_VEC_ID) {
2110 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2111 				       (base_queue <<
2112 					I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2113 				       (0x0 <<
2114 					I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2115 			I40E_WRITE_REG(hw,
2116 				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2117 				       interval);
2118 		} else {
2119 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2120 				       (base_queue <<
2121 					I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2122 				       (0x0 <<
2123 					I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2124 			I40E_WRITE_REG(hw,
2125 				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2126 						       msix_vect - 1),
2127 				       interval);
2128 		}
2129 	} else {
2130 		uint32_t reg;
2131 
2132 		if (msix_vect == I40E_MISC_VEC_ID) {
2133 			I40E_WRITE_REG(hw,
2134 				       I40E_VPINT_LNKLST0(vsi->user_param),
2135 				       (base_queue <<
2136 					I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2137 				       (0x0 <<
2138 					I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2139 		} else {
2140 			/* num_msix_vectors_vf excludes irq0, hence the minus one */
2141 			reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2142 				vsi->user_param + (msix_vect - 1);
2143 
2144 			I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2145 				       (base_queue <<
2146 					I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2147 				       (0x0 <<
2148 					I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2149 		}
2150 	}
2151 
2152 	I40E_WRITE_FLUSH(hw);
2153 }
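/*
 * A worked example of the chaining above (values assumed): binding
 * base_queue = 8, nb_queue = 2 to msix_vect = 3 writes QINT_RQCTL(8) with
 * MSIX_INDX = 3 and NEXTQ_INDX = 9, and QINT_RQCTL(9) with MSIX_INDX = 3
 * and the NEXTQ_INDX field forced to the all-ones terminator
 * (I40E_QINT_RQCTL_NEXTQ_INDX_MASK), ending the per-vector queue list
 * whose head is written to the LNKLST register.
 */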
2154 
2155 int
2156 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
2157 {
2158 	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2159 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2160 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2161 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2162 	uint16_t msix_vect = vsi->msix_intr;
2163 	uint16_t nb_msix = RTE_MIN(vsi->nb_msix,
2164 				   rte_intr_nb_efd_get(intr_handle));
2165 	uint16_t queue_idx = 0;
2166 	int record = 0;
2167 	int i;
2168 
2169 	for (i = 0; i < vsi->nb_qps; i++) {
2170 		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2171 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2172 	}
2173 
2174 	/* VF bind interrupt */
2175 	if (vsi->type == I40E_VSI_SRIOV) {
2176 		if (vsi->nb_msix == 0) {
2177 			PMD_DRV_LOG(ERR, "No msix resource");
2178 			return -EINVAL;
2179 		}
2180 		__vsi_queues_bind_intr(vsi, msix_vect,
2181 				       vsi->base_queue, vsi->nb_qps,
2182 				       itr_idx);
2183 		return 0;
2184 	}
2185 
2186 	/* PF & VMDq bind interrupt */
2187 	if (rte_intr_dp_is_en(intr_handle)) {
2188 		if (vsi->type == I40E_VSI_MAIN) {
2189 			queue_idx = 0;
2190 			record = 1;
2191 		} else if (vsi->type == I40E_VSI_VMDQ2) {
2192 			struct i40e_vsi *main_vsi =
2193 				I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
2194 			queue_idx = vsi->base_queue - main_vsi->nb_qps;
2195 			record = 1;
2196 		}
2197 	}
2198 
2199 	for (i = 0; i < vsi->nb_used_qps; i++) {
2200 		if (vsi->nb_msix == 0) {
2201 			PMD_DRV_LOG(ERR, "No msix resource");
2202 			return -EINVAL;
2203 		} else if (nb_msix <= 1) {
2204 			if (!rte_intr_allow_others(intr_handle))
2205 				/* allow sharing MISC_VEC_ID */
2206 				msix_vect = I40E_MISC_VEC_ID;
2207 
2208 			/* not enough MSI-X vectors, map all remaining queues to one */
2209 			__vsi_queues_bind_intr(vsi, msix_vect,
2210 					       vsi->base_queue + i,
2211 					       vsi->nb_used_qps - i,
2212 					       itr_idx);
2213 			for (; !!record && i < vsi->nb_used_qps; i++)
2214 				rte_intr_vec_list_index_set(intr_handle,
2215 						queue_idx + i, msix_vect);
2216 			break;
2217 		}
2218 		/* 1:1 queue/msix_vect mapping */
2219 		__vsi_queues_bind_intr(vsi, msix_vect,
2220 				       vsi->base_queue + i, 1,
2221 				       itr_idx);
2222 		if (!!record)
2223 			if (rte_intr_vec_list_index_set(intr_handle,
2224 						queue_idx + i, msix_vect))
2225 				return -rte_errno;
2226 
2227 		msix_vect++;
2228 		nb_msix--;
2229 	}
2230 
2231 	return 0;
2232 }
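/*
 * A minimal mapping sketch (queue and vector counts assumed): with
 * nb_used_qps = 4 and nb_msix = 2, the loop above binds queue 0 to the
 * first vector 1:1, then, since only one vector remains, maps queues 1-3
 * together to the next vector and stops. With nb_msix >= 4, every queue
 * would instead get its own vector.
 */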
2233 
2234 void
2235 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
2236 {
2237 	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2238 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2239 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2240 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2241 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2242 	uint16_t msix_intr, i;
2243 
2244 	if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2245 		for (i = 0; i < vsi->nb_msix; i++) {
2246 			msix_intr = vsi->msix_intr + i;
2247 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2248 				I40E_PFINT_DYN_CTLN_INTENA_MASK |
2249 				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2250 				I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2251 		}
2252 	else
2253 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2254 			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
2255 			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2256 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2257 
2258 	I40E_WRITE_FLUSH(hw);
2259 }
2260 
2261 void
2262 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
2263 {
2264 	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2265 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2266 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2267 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2268 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2269 	uint16_t msix_intr, i;
2270 
2271 	if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2272 		for (i = 0; i < vsi->nb_msix; i++) {
2273 			msix_intr = vsi->msix_intr + i;
2274 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2275 				       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2276 		}
2277 	else
2278 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2279 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2280 
2281 	I40E_WRITE_FLUSH(hw);
2282 }
2283 
2284 static inline uint8_t
2285 i40e_parse_link_speeds(uint16_t link_speeds)
2286 {
2287 	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2288 
2289 	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
2290 		link_speed |= I40E_LINK_SPEED_40GB;
2291 	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
2292 		link_speed |= I40E_LINK_SPEED_25GB;
2293 	if (link_speeds & RTE_ETH_LINK_SPEED_20G)
2294 		link_speed |= I40E_LINK_SPEED_20GB;
2295 	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
2296 		link_speed |= I40E_LINK_SPEED_10GB;
2297 	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
2298 		link_speed |= I40E_LINK_SPEED_1GB;
2299 	if (link_speeds & RTE_ETH_LINK_SPEED_100M)
2300 		link_speed |= I40E_LINK_SPEED_100MB;
2301 
2302 	return link_speed;
2303 }
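/*
 * A minimal usage sketch (input value assumed): passing
 * RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_1G returns
 * I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_1GB, while an empty speed set
 * yields I40E_LINK_SPEED_UNKNOWN.
 */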
2304 
2305 static int
2306 i40e_phy_conf_link(struct i40e_hw *hw,
2307 		   uint8_t abilities,
2308 		   uint8_t force_speed,
2309 		   bool is_up)
2310 {
2311 	enum i40e_status_code status;
2312 	struct i40e_aq_get_phy_abilities_resp phy_ab;
2313 	struct i40e_aq_set_phy_config phy_conf;
2314 	enum i40e_aq_phy_type cnt;
2315 	uint8_t avail_speed;
2316 	uint32_t phy_type_mask = 0;
2317 
2318 	const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2319 			I40E_AQ_PHY_FLAG_PAUSE_RX |
2321 			I40E_AQ_PHY_FLAG_LOW_POWER;
2322 	int ret = -ENOTSUP;
2323 
2324 	/* To get phy capabilities of available speeds. */
2325 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2326 					      NULL);
2327 	if (status) {
2328 		PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d",
2329 				status);
2330 		return ret;
2331 	}
2332 	avail_speed = phy_ab.link_speed;
2333 
2334 	/* To get the current phy config. */
2335 	status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2336 					      NULL);
2337 	if (status) {
2338 		PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d",
2339 				status);
2340 		return ret;
2341 	}
2342 
2343 	/* If the link needs to go up, autoneg is enabled and a speed is
2344 	 * already set, there is no need to set it up again.
2345 	 */
2346 	if (is_up && phy_ab.phy_type != 0 &&
2347 		     abilities & I40E_AQ_PHY_AN_ENABLED &&
2348 		     phy_ab.link_speed != 0)
2349 		return I40E_SUCCESS;
2350 
2351 	memset(&phy_conf, 0, sizeof(phy_conf));
2352 
2353 	/* bits 0-2 use the values from get_phy_abilities_resp */
2354 	abilities &= ~mask;
2355 	abilities |= phy_ab.abilities & mask;
2356 
2357 	phy_conf.abilities = abilities;
2358 
2359 	/* If the link needs to go up but the forced speed is not supported,
2360 	 * warn the user and configure the default available speeds.
2361 	 */
2362 	if (is_up && !(force_speed & avail_speed)) {
2363 		PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!");
2364 		phy_conf.link_speed = avail_speed;
2365 	} else {
2366 		phy_conf.link_speed = is_up ? force_speed : avail_speed;
2367 	}
2368 
2369 	/* PHY type mask needs to include each type except PHY type extension */
2370 	for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
2371 		phy_type_mask |= 1 << cnt;
2372 
2373 	/* use get_phy_abilities_resp value for the rest */
2374 	phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2375 	phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2376 		I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2377 		I40E_AQ_PHY_TYPE_EXT_25G_LR | I40E_AQ_PHY_TYPE_EXT_25G_AOC |
2378 		I40E_AQ_PHY_TYPE_EXT_25G_ACC) : 0;
2379 	phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2380 	phy_conf.eee_capability = phy_ab.eee_capability;
2381 	phy_conf.eeer = phy_ab.eeer_val;
2382 	phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2383 
2384 	PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2385 		    phy_ab.abilities, phy_ab.link_speed);
2386 	PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2387 		    phy_conf.abilities, phy_conf.link_speed);
2388 
2389 	status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2390 	if (status)
2391 		return ret;
2392 
2393 	return I40E_SUCCESS;
2394 }
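/*
 * A small worked example of the mask merge above (bit values assumed for
 * illustration only): with mask = 0x0b, requested abilities = 0x30 and
 * phy_ab.abilities = 0x0a, the result is (0x30 & ~0x0b) | (0x0a & 0x0b)
 * = 0x30 | 0x0a = 0x3a, i.e. the pause and low-power bits are taken from
 * the current PHY state while the remaining bits come from the caller.
 */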
2395 
2396 static int
2397 i40e_apply_link_speed(struct rte_eth_dev *dev)
2398 {
2399 	uint8_t speed;
2400 	uint8_t abilities = 0;
2401 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2402 	struct rte_eth_conf *conf = &dev->data->dev_conf;
2403 
2404 	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
2405 		     I40E_AQ_PHY_LINK_ENABLED;
2406 
2407 	if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
2408 		conf->link_speeds = RTE_ETH_LINK_SPEED_40G |
2409 				    RTE_ETH_LINK_SPEED_25G |
2410 				    RTE_ETH_LINK_SPEED_20G |
2411 				    RTE_ETH_LINK_SPEED_10G |
2412 				    RTE_ETH_LINK_SPEED_1G |
2413 				    RTE_ETH_LINK_SPEED_100M;
2414 
2415 		abilities |= I40E_AQ_PHY_AN_ENABLED;
2416 	} else {
2417 		abilities &= ~I40E_AQ_PHY_AN_ENABLED;
2418 	}
2419 	speed = i40e_parse_link_speeds(conf->link_speeds);
2420 
2421 	return i40e_phy_conf_link(hw, abilities, speed, true);
2422 }
2423 
2424 static int
2425 i40e_dev_start(struct rte_eth_dev *dev)
2426 {
2427 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2428 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2429 	struct i40e_vsi *main_vsi = pf->main_vsi;
2430 	struct i40e_adapter *ad =
2431 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2432 	int ret, i;
2433 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2434 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2435 	uint32_t intr_vector = 0;
2436 	struct i40e_vsi *vsi;
2437 	uint16_t nb_rxq, nb_txq;
2438 	uint16_t max_frame_size;
2439 
2440 	hw->adapter_stopped = 0;
2441 
2442 	rte_intr_disable(intr_handle);
2443 
2444 	if ((rte_intr_cap_multiple(intr_handle) ||
2445 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
2446 	    dev->data->dev_conf.intr_conf.rxq != 0) {
2447 		intr_vector = dev->data->nb_rx_queues;
2448 		ret = rte_intr_efd_enable(intr_handle, intr_vector);
2449 		if (ret)
2450 			return ret;
2451 	}
2452 
2453 	if (rte_intr_dp_is_en(intr_handle)) {
2454 		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
2455 						   dev->data->nb_rx_queues)) {
2456 			PMD_INIT_LOG(ERR,
2457 				"Failed to allocate %d rx_queues intr_vec",
2458 				dev->data->nb_rx_queues);
2459 			return -ENOMEM;
2460 		}
2461 	}
2462 
2463 	/* Initialize VSI */
2464 	ret = i40e_dev_rxtx_init(pf);
2465 	if (ret != I40E_SUCCESS) {
2466 		PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2467 		return ret;
2468 	}
2469 
2470 	/* Map queues with MSIX interrupt */
2471 	main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2472 		pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2473 	ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2474 	if (ret < 0)
2475 		return ret;
2476 	i40e_vsi_enable_queues_intr(main_vsi);
2477 
2478 	/* Map VMDQ VSI queues with MSIX interrupt */
2479 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2480 		pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2481 		ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2482 						I40E_ITR_INDEX_DEFAULT);
2483 		if (ret < 0)
2484 			return ret;
2485 		i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2486 	}
2487 
2488 	/* Enable all queues which have been configured */
2489 	for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
2490 		ret = i40e_dev_rx_queue_start(dev, nb_rxq);
2491 		if (ret)
2492 			goto rx_err;
2493 	}
2494 
2495 	for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
2496 		ret = i40e_dev_tx_queue_start(dev, nb_txq);
2497 		if (ret)
2498 			goto tx_err;
2499 	}
2500 
2501 	/* Enable receiving broadcast packets */
2502 	ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2503 	if (ret != I40E_SUCCESS)
2504 		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2505 
2506 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2507 		ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2508 						true, NULL);
2509 		if (ret != I40E_SUCCESS)
2510 			PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2511 	}
2512 
2513 	/* Enable the VLAN promiscuous mode. */
2514 	if (pf->vfs) {
2515 		for (i = 0; i < pf->vf_num; i++) {
2516 			vsi = pf->vfs[i].vsi;
2517 			i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2518 						     true, NULL);
2519 		}
2520 	}
2521 
2522 	/* Disable mac loopback mode */
2523 	if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE) {
2524 		ret = i40e_aq_set_lb_modes(hw, I40E_AQ_LB_MODE_NONE, NULL);
2525 		if (ret != I40E_SUCCESS) {
2526 			PMD_DRV_LOG(ERR, "fail to set loopback link");
2527 			goto tx_err;
2528 		}
2529 	}
2530 
2531 	/* Enable mac loopback mode */
2532 	if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_EN) {
2533 		if (hw->mac.type == I40E_MAC_X722)
2534 			ret = i40e_aq_set_lb_modes(hw, I40E_AQ_LB_MAC_LOCAL_X722, NULL);
2535 		else
2536 			ret = i40e_aq_set_lb_modes(hw, I40E_AQ_LB_MAC, NULL);
2537 		if (ret != I40E_SUCCESS) {
2538 			PMD_DRV_LOG(ERR, "fail to set loopback link");
2539 			goto tx_err;
2540 		}
2541 	}
2542 
2543 	/* Apply link configure */
2544 	ret = i40e_apply_link_speed(dev);
2545 	if (I40E_SUCCESS != ret) {
2546 		PMD_DRV_LOG(ERR, "Fail to apply link setting");
2547 		goto tx_err;
2548 	}
2549 
2550 	if (!rte_intr_allow_others(intr_handle)) {
2551 		rte_intr_callback_unregister(intr_handle,
2552 					     i40e_dev_interrupt_handler,
2553 					     (void *)dev);
2554 		/* configure and enable device interrupt */
2555 		i40e_pf_config_irq0(hw, FALSE);
2556 		i40e_pf_enable_irq0(hw);
2557 
2558 		if (dev->data->dev_conf.intr_conf.lsc != 0)
2559 			PMD_INIT_LOG(INFO,
2560 				"lsc won't be enabled since intr multiplexing is unavailable");
2561 	} else {
2562 		ret = i40e_aq_set_phy_int_mask(hw,
2563 					       ~(I40E_AQ_EVENT_LINK_UPDOWN |
2564 					       I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2565 					       I40E_AQ_EVENT_MEDIA_NA), NULL);
2566 		if (ret != I40E_SUCCESS)
2567 			PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2568 
2569 		/* Call get_link_info aq command to enable/disable LSE */
2570 		i40e_dev_link_update(dev, 1);
2571 	}
2572 
2573 	if (dev->data->dev_conf.intr_conf.rxq == 0) {
2574 		rte_eal_alarm_set(I40E_ALARM_INTERVAL,
2575 				  i40e_dev_alarm_handler, dev);
2576 	} else {
2577 		/* enable uio intr after callback register */
2578 		rte_intr_enable(intr_handle);
2579 	}
2580 
2581 	i40e_filter_restore(pf);
2582 
2583 	if (pf->tm_conf.root && !pf->tm_conf.committed)
2584 		PMD_DRV_LOG(WARNING,
2585 			    "please call hierarchy_commit() "
2586 			    "before starting the port");
2587 
2588 	max_frame_size = dev->data->mtu ?
2589 		dev->data->mtu + I40E_ETH_OVERHEAD :
2590 		I40E_FRAME_SIZE_MAX;
2591 	ad->max_pkt_len = max_frame_size;
2592 
2593 	/* Set the max frame size to HW */
2594 	i40e_aq_set_mac_config(hw, max_frame_size, TRUE, false, 0, NULL);
2595 
2596 	return I40E_SUCCESS;
2597 
2598 tx_err:
2599 	for (i = 0; i < nb_txq; i++)
2600 		i40e_dev_tx_queue_stop(dev, i);
2601 rx_err:
2602 	for (i = 0; i < nb_rxq; i++)
2603 		i40e_dev_rx_queue_stop(dev, i);
2604 
2605 	return ret;
2606 }
2607 
2608 static int
2609 i40e_dev_stop(struct rte_eth_dev *dev)
2610 {
2611 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2612 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2613 	struct i40e_vsi *main_vsi = pf->main_vsi;
2614 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2615 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2616 	int i;
2617 
2618 	if (hw->adapter_stopped == 1)
2619 		return 0;
2620 
2621 	if (dev->data->dev_conf.intr_conf.rxq == 0) {
2622 		rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
2623 		rte_intr_enable(intr_handle);
2624 	}
2625 
2626 	/* Disable all queues */
2627 	for (i = 0; i < dev->data->nb_tx_queues; i++)
2628 		i40e_dev_tx_queue_stop(dev, i);
2629 
2630 	for (i = 0; i < dev->data->nb_rx_queues; i++)
2631 		i40e_dev_rx_queue_stop(dev, i);
2632 
2633 	/* un-map queues with interrupt registers */
2634 	i40e_vsi_disable_queues_intr(main_vsi);
2635 	i40e_vsi_queues_unbind_intr(main_vsi);
2636 
2637 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2638 		i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2639 		i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2640 	}
2641 
2642 	/* Clear all queues and release memory */
2643 	i40e_dev_clear_queues(dev);
2644 
2645 	/* Set link down */
2646 	i40e_dev_set_link_down(dev);
2647 
2648 	if (!rte_intr_allow_others(intr_handle))
2649 		/* resume to the default handler */
2650 		rte_intr_callback_register(intr_handle,
2651 					   i40e_dev_interrupt_handler,
2652 					   (void *)dev);
2653 
2654 	/* Clean datapath event and queue/vec mapping */
2655 	rte_intr_efd_disable(intr_handle);
2656 
2657 	/* Cleanup vector list */
2658 	rte_intr_vec_list_free(intr_handle);
2659 
2660 	/* reset hierarchy commit */
2661 	pf->tm_conf.committed = false;
2662 
2663 	hw->adapter_stopped = 1;
2664 	dev->data->dev_started = 0;
2665 
2666 	pf->adapter->rss_reta_updated = 0;
2667 
2668 	return 0;
2669 }
2670 
2671 static int
2672 i40e_dev_close(struct rte_eth_dev *dev)
2673 {
2674 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2675 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2676 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2677 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2678 	struct i40e_filter_control_settings settings;
2679 	struct rte_flow *p_flow;
2680 	uint32_t reg;
2681 	int i;
2682 	int ret;
2683 	uint8_t aq_fail = 0;
2684 	int retries = 0;
2685 
2686 	PMD_INIT_FUNC_TRACE();
2687 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2688 		return 0;
2689 
2690 	ret = rte_eth_switch_domain_free(pf->switch_domain_id);
2691 	if (ret)
2692 		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
2693 
2695 	ret = i40e_dev_stop(dev);
2696 
2697 	i40e_dev_free_queues(dev);
2698 
2699 	/* Disable interrupt */
2700 	i40e_pf_disable_irq0(hw);
2701 	rte_intr_disable(intr_handle);
2702 
2703 	/* shutdown and destroy the HMC */
2704 	i40e_shutdown_lan_hmc(hw);
2705 
2706 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2707 		i40e_vsi_release(pf->vmdq[i].vsi);
2708 		pf->vmdq[i].vsi = NULL;
2709 	}
2710 	rte_free(pf->vmdq);
2711 	pf->vmdq = NULL;
2712 
2713 	/* release all the existing VSIs and VEBs */
2714 	i40e_vsi_release(pf->main_vsi);
2715 
2716 	/* shutdown the adminq */
2717 	i40e_aq_queue_shutdown(hw, true);
2718 	i40e_shutdown_adminq(hw);
2719 
2720 	i40e_res_pool_destroy(&pf->qp_pool);
2721 	i40e_res_pool_destroy(&pf->msix_pool);
2722 
2723 	/* Disable flexible payload in global configuration */
2724 	if (!pf->support_multi_driver)
2725 		i40e_flex_payload_reg_set_default(hw);
2726 
2727 	/* force a PF reset to clean anything leftover */
2728 	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2729 	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2730 			(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2731 	I40E_WRITE_FLUSH(hw);
2732 
2733 	/* Clear PXE mode */
2734 	i40e_clear_pxe_mode(hw);
2735 
2736 	/* Unconfigure filter control */
2737 	memset(&settings, 0, sizeof(settings));
2738 	ret = i40e_set_filter_control(hw, &settings);
2739 	if (ret)
2740 		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2741 					ret);
2742 
2743 	/* Disable flow control */
2744 	hw->fc.requested_mode = I40E_FC_NONE;
2745 	i40e_set_fc(hw, &aq_fail, TRUE);
2746 
2747 	/* uninitialize pf host driver */
2748 	i40e_pf_host_uninit(dev);
2749 
2750 	do {
2751 		ret = rte_intr_callback_unregister(intr_handle,
2752 				i40e_dev_interrupt_handler, dev);
2753 		if (ret >= 0 || ret == -ENOENT) {
2754 			break;
2755 		} else if (ret != -EAGAIN) {
2756 			PMD_INIT_LOG(ERR,
2757 				 "intr callback unregister failed: %d",
2758 				 ret);
2759 		}
2760 		i40e_msec_delay(500);
2761 	} while (retries++ < 5);
2762 
2763 	i40e_rm_ethtype_filter_list(pf);
2764 	i40e_rm_tunnel_filter_list(pf);
2765 	i40e_rm_fdir_filter_list(pf);
2766 
2767 	/* Remove all flows */
2768 	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
2769 		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
2770 		/* Do not free FDIR flows since they are statically allocated */
2771 		if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
2772 			rte_free(p_flow);
2773 	}
2774 
2775 	/* release the fdir statically allocated memory */
2776 	i40e_fdir_memory_cleanup(pf);
2777 
2778 	/* Remove all Traffic Manager configuration */
2779 	i40e_tm_conf_uninit(dev);
2780 
2781 	i40e_clear_automask(pf);
2782 
2783 	hw->adapter_closed = 1;
2784 	return ret;
2785 }
2786 
2787 /*
2788  * Reset PF device only to re-initialize resources in PMD layer
2789  */
2790 static int
2791 i40e_dev_reset(struct rte_eth_dev *dev)
2792 {
2793 	int ret;
2794 
2795 	/* When a DPDK PMD PF begins to reset a PF port, it should notify all
2796 	 * of its VFs so that they stay aligned with it. The detailed
2797 	 * notification mechanism is PMD-specific and, for the i40e PF, rather
2798 	 * complex. To avoid unexpected VF behavior, resetting a PF with
2799 	 * SR-IOV active is currently not supported. It might be supported later.
2800 	 */
2801 	if (dev->data->sriov.active)
2802 		return -ENOTSUP;
2803 
2804 	ret = eth_i40e_dev_uninit(dev);
2805 	if (ret)
2806 		return ret;
2807 
2808 	ret = eth_i40e_dev_init(dev, NULL);
2809 
2810 	return ret;
2811 }
2812 
2813 static int
2814 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2815 {
2816 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2817 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2818 	struct i40e_vsi *vsi = pf->main_vsi;
2819 	int status;
2820 
2821 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2822 						     true, NULL, true);
2823 	if (status != I40E_SUCCESS) {
2824 		PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2825 		return -EAGAIN;
2826 	}
2827 
2828 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2829 							TRUE, NULL);
2830 	if (status != I40E_SUCCESS) {
2831 		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2832 		/* Rollback unicast promiscuous mode */
2833 		i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2834 						    false, NULL, true);
2835 		return -EAGAIN;
2836 	}
2837 
2838 	return 0;
2839 }
2840 
2841 static int
2842 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2843 {
2844 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2845 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2846 	struct i40e_vsi *vsi = pf->main_vsi;
2847 	int status;
2848 
2849 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2850 						     false, NULL, true);
2851 	if (status != I40E_SUCCESS) {
2852 		PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2853 		return -EAGAIN;
2854 	}
2855 
2856 	/* must remain in all_multicast mode */
2857 	if (dev->data->all_multicast == 1)
2858 		return 0;
2859 
2860 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2861 							false, NULL);
2862 	if (status != I40E_SUCCESS) {
2863 		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2864 		/* Rollback unicast promiscuous mode */
2865 		i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2866 						    true, NULL, true);
2867 		return -EAGAIN;
2868 	}
2869 
2870 	return 0;
2871 }
2872 
2873 static int
2874 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2875 {
2876 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2877 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2878 	struct i40e_vsi *vsi = pf->main_vsi;
2879 	int ret;
2880 
2881 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2882 	if (ret != I40E_SUCCESS) {
2883 		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2884 		return -EAGAIN;
2885 	}
2886 
2887 	return 0;
2888 }
2889 
2890 static int
2891 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2892 {
2893 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2894 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2895 	struct i40e_vsi *vsi = pf->main_vsi;
2896 	int ret;
2897 
2898 	if (dev->data->promiscuous == 1)
2899 		return 0; /* must remain in all_multicast mode */
2900 
2901 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2902 				vsi->seid, FALSE, NULL);
2903 	if (ret != I40E_SUCCESS) {
2904 		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2905 		return -EAGAIN;
2906 	}
2907 
2908 	return 0;
2909 }
2910 
2911 /*
2912  * Set device link up.
2913  */
2914 static int
2915 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2916 {
2917 	/* re-apply link speed setting */
2918 	return i40e_apply_link_speed(dev);
2919 }
2920 
2921 /*
2922  * Set device link down.
2923  */
2924 static int
2925 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2926 {
2927 	uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2928 	uint8_t abilities = 0;
2929 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2930 
2931 	abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2932 	return i40e_phy_conf_link(hw, abilities, speed, false);
2933 }
2934 
2935 static __rte_always_inline void
2936 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
2937 {
2938 /* Link status registers and values */
2939 #define I40E_REG_LINK_UP		0x40000080
2940 #define I40E_PRTMAC_MACC		0x001E24E0
2941 #define I40E_REG_MACC_25GB		0x00020000
2942 #define I40E_REG_SPEED_MASK		0x38000000
2943 #define I40E_REG_SPEED_0		0x00000000
2944 #define I40E_REG_SPEED_1		0x08000000
2945 #define I40E_REG_SPEED_2		0x10000000
2946 #define I40E_REG_SPEED_3		0x18000000
2947 #define I40E_REG_SPEED_4		0x20000000
2948 	uint32_t link_speed;
2949 	uint32_t reg_val;
2950 
2951 	reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA(0));
2952 	link_speed = reg_val & I40E_REG_SPEED_MASK;
2953 	reg_val &= I40E_REG_LINK_UP;
2954 	link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2955 
2956 	if (unlikely(link->link_status == 0))
2957 		return;
2958 
2959 	/* Parse the link status */
2960 	switch (link_speed) {
2961 	case I40E_REG_SPEED_0:
2962 		link->link_speed = RTE_ETH_SPEED_NUM_100M;
2963 		break;
2964 	case I40E_REG_SPEED_1:
2965 		link->link_speed = RTE_ETH_SPEED_NUM_1G;
2966 		break;
2967 	case I40E_REG_SPEED_2:
2968 		if (hw->mac.type == I40E_MAC_X722)
2969 			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
2970 		else
2971 			link->link_speed = RTE_ETH_SPEED_NUM_10G;
2972 		break;
2973 	case I40E_REG_SPEED_3:
2974 		if (hw->mac.type == I40E_MAC_X722) {
2975 			link->link_speed = RTE_ETH_SPEED_NUM_5G;
2976 		} else {
2977 			reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2978 
2979 			if (reg_val & I40E_REG_MACC_25GB)
2980 				link->link_speed = RTE_ETH_SPEED_NUM_25G;
2981 			else
2982 				link->link_speed = RTE_ETH_SPEED_NUM_40G;
2983 		}
2984 		break;
2985 	case I40E_REG_SPEED_4:
2986 		if (hw->mac.type == I40E_MAC_X722)
2987 			link->link_speed = RTE_ETH_SPEED_NUM_10G;
2988 		else
2989 			link->link_speed = RTE_ETH_SPEED_NUM_20G;
2990 		break;
2991 	default:
2992 		PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2993 		break;
2994 	}
2995 }
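/*
 * A decode sketch (register value assumed): a LINKSTA read of 0x48000080
 * has the link-up bits set (0x48000080 & I40E_REG_LINK_UP == 0x40000080)
 * and speed field 0x08000000 (I40E_REG_SPEED_1), so the function above
 * reports the link up at RTE_ETH_SPEED_NUM_1G.
 */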
2996 
2997 static __rte_always_inline void
2998 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2999 	bool enable_lse, int wait_to_complete)
3000 {
3001 #define CHECK_INTERVAL             100  /* 100ms */
3002 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
3003 	uint32_t rep_cnt = MAX_REPEAT_TIME;
3004 	struct i40e_link_status link_status;
3005 	int status;
3006 
3009 	do {
3010 		memset(&link_status, 0, sizeof(link_status));
3011 
3012 		/* Get link status information from hardware */
3013 		status = i40e_aq_get_link_info(hw, enable_lse,
3014 						&link_status, NULL);
3015 		if (unlikely(status != I40E_SUCCESS)) {
3016 			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
3017 			link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3018 			PMD_DRV_LOG(ERR, "Failed to get link info");
3019 			return;
3020 		}
3021 
3022 		link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
3023 		if (!wait_to_complete || link->link_status)
3024 			break;
3025 
3026 		rte_delay_ms(CHECK_INTERVAL);
3027 	} while (--rep_cnt);
3028 
3029 	/* Parse the link status */
3030 	switch (link_status.link_speed) {
3031 	case I40E_LINK_SPEED_100MB:
3032 		link->link_speed = RTE_ETH_SPEED_NUM_100M;
3033 		break;
3034 	case I40E_LINK_SPEED_1GB:
3035 		link->link_speed = RTE_ETH_SPEED_NUM_1G;
3036 		break;
3037 	case I40E_LINK_SPEED_10GB:
3038 		link->link_speed = RTE_ETH_SPEED_NUM_10G;
3039 		break;
3040 	case I40E_LINK_SPEED_20GB:
3041 		link->link_speed = RTE_ETH_SPEED_NUM_20G;
3042 		break;
3043 	case I40E_LINK_SPEED_25GB:
3044 		link->link_speed = RTE_ETH_SPEED_NUM_25G;
3045 		break;
3046 	case I40E_LINK_SPEED_40GB:
3047 		link->link_speed = RTE_ETH_SPEED_NUM_40G;
3048 		break;
3049 	default:
3050 		if (link->link_status)
3051 			link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
3052 		else
3053 			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
3054 		break;
3055 	}
3056 }
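/*
 * Timing note derived from the constants above: with wait_to_complete set,
 * the loop polls i40e_aq_get_link_info() up to MAX_REPEAT_TIME (10) times,
 * sleeping CHECK_INTERVAL (100 ms) between attempts, so a link that never
 * comes up delays the caller by roughly one second.
 */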
3057 
3058 int
3059 i40e_dev_link_update(struct rte_eth_dev *dev,
3060 		     int wait_to_complete)
3061 {
3062 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3063 	struct rte_eth_link link;
3064 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3065 	int ret;
3066 
3067 	memset(&link, 0, sizeof(link));
3068 
3069 	/* i40e uses full duplex only */
3070 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3071 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3072 			RTE_ETH_LINK_SPEED_FIXED);
3073 
3074 	if (!wait_to_complete && !enable_lse)
3075 		update_link_reg(hw, &link);
3076 	else
3077 		update_link_aq(hw, &link, enable_lse, wait_to_complete);
3078 
3079 	if (hw->switch_dev)
3080 		rte_eth_linkstatus_get(hw->switch_dev, &link);
3081 
3082 	ret = rte_eth_linkstatus_set(dev, &link);
3083 	i40e_notify_all_vfs_link_status(dev);
3084 
3085 	return ret;
3086 }
3087 
3088 static void
3089 i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
3090 			  uint32_t loreg, bool offset_loaded, uint64_t *offset,
3091 			  uint64_t *stat, uint64_t *prev_stat)
3092 {
3093 	i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
3094 	/* extend the 48-bit hardware counter to 64 bits, handling overflow */
3095 	if (offset_loaded) {
3096 		if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
3097 			*stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
3098 		*stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
3099 	}
3100 	*prev_stat = *stat;
3101 }
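/*
 * A worked rollover example (counter values assumed): if *prev_stat was
 * 0x0000FFFFFFFFFFF0 and the new 48-bit reading is 0x10, the lower 48 bits
 * of the previous value (0xFFFFFFFFFFF0) exceed the new reading, so 2^48
 * is added, giving *stat = 0x0001000000000010; the upper 16 bits of
 * *prev_stat (here 0) carry over unchanged.
 */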
3102 
3103 /* Get all the statistics of a VSI */
3104 void
3105 i40e_update_vsi_stats(struct i40e_vsi *vsi)
3106 {
3107 	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
3108 	struct i40e_eth_stats *nes = &vsi->eth_stats;
3109 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3110 	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
3111 
3112 	i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
3113 				  vsi->offset_loaded, &oes->rx_bytes,
3114 				  &nes->rx_bytes, &vsi->prev_rx_bytes);
3115 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
3116 			    vsi->offset_loaded, &oes->rx_unicast,
3117 			    &nes->rx_unicast);
3118 	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
3119 			    vsi->offset_loaded, &oes->rx_multicast,
3120 			    &nes->rx_multicast);
3121 	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
3122 			    vsi->offset_loaded, &oes->rx_broadcast,
3123 			    &nes->rx_broadcast);
3124 	/* exclude CRC bytes */
3125 	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
3126 		nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
3127 
3128 	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
3129 			    &oes->rx_discards, &nes->rx_discards);
3130 	/* GLV_REPC not supported */
3131 	/* GLV_RMPC not supported */
3132 	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
3133 			    &oes->rx_unknown_protocol,
3134 			    &nes->rx_unknown_protocol);
3135 	i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
3136 				  vsi->offset_loaded, &oes->tx_bytes,
3137 				  &nes->tx_bytes, &vsi->prev_tx_bytes);
3138 	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
3139 			    vsi->offset_loaded, &oes->tx_unicast,
3140 			    &nes->tx_unicast);
3141 	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
3142 			    vsi->offset_loaded, &oes->tx_multicast,
3143 			    &nes->tx_multicast);
3144 	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
3145 			    vsi->offset_loaded,  &oes->tx_broadcast,
3146 			    &nes->tx_broadcast);
3147 	/* GLV_TDPC not supported */
3148 	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
3149 			    &oes->tx_errors, &nes->tx_errors);
3150 	vsi->offset_loaded = true;
3151 
3152 	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
3153 		    vsi->vsi_id);
3154 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
3155 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
3156 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
3157 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
3158 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
3159 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3160 		    nes->rx_unknown_protocol);
3161 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
3162 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
3163 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
3164 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
3165 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
3166 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
3167 	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
3168 		    vsi->vsi_id);
3169 }
3170 
3171 static void
3172 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
3173 {
3174 	unsigned int i;
3175 	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3176 	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
3177 
3178 	/* Get rx/tx bytes of internal transfer packets */
3179 	i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
3180 				  I40E_GLV_GORCL(hw->port),
3181 				  pf->offset_loaded,
3182 				  &pf->internal_stats_offset.rx_bytes,
3183 				  &pf->internal_stats.rx_bytes,
3184 				  &pf->internal_prev_rx_bytes);
3185 	i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
3186 				  I40E_GLV_GOTCL(hw->port),
3187 				  pf->offset_loaded,
3188 				  &pf->internal_stats_offset.tx_bytes,
3189 				  &pf->internal_stats.tx_bytes,
3190 				  &pf->internal_prev_tx_bytes);
3191 	/* Get total internal rx packet count */
3192 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
3193 			    I40E_GLV_UPRCL(hw->port),
3194 			    pf->offset_loaded,
3195 			    &pf->internal_stats_offset.rx_unicast,
3196 			    &pf->internal_stats.rx_unicast);
3197 	i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
3198 			    I40E_GLV_MPRCL(hw->port),
3199 			    pf->offset_loaded,
3200 			    &pf->internal_stats_offset.rx_multicast,
3201 			    &pf->internal_stats.rx_multicast);
3202 	i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
3203 			    I40E_GLV_BPRCL(hw->port),
3204 			    pf->offset_loaded,
3205 			    &pf->internal_stats_offset.rx_broadcast,
3206 			    &pf->internal_stats.rx_broadcast);
3207 	/* Get total internal tx packet count */
3208 	i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
3209 			    I40E_GLV_UPTCL(hw->port),
3210 			    pf->offset_loaded,
3211 			    &pf->internal_stats_offset.tx_unicast,
3212 			    &pf->internal_stats.tx_unicast);
3213 	i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
3214 			    I40E_GLV_MPTCL(hw->port),
3215 			    pf->offset_loaded,
3216 			    &pf->internal_stats_offset.tx_multicast,
3217 			    &pf->internal_stats.tx_multicast);
3218 	i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
3219 			    I40E_GLV_BPTCL(hw->port),
3220 			    pf->offset_loaded,
3221 			    &pf->internal_stats_offset.tx_broadcast,
3222 			    &pf->internal_stats.tx_broadcast);
3223 
3224 	/* exclude CRC size */
3225 	pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
3226 		pf->internal_stats.rx_multicast +
3227 		pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
3228 
3229 	/* Get statistics of struct i40e_eth_stats */
3230 	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
3231 				  I40E_GLPRT_GORCL(hw->port),
3232 				  pf->offset_loaded, &os->eth.rx_bytes,
3233 				  &ns->eth.rx_bytes, &pf->prev_rx_bytes);
3234 	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
3235 			    I40E_GLPRT_UPRCL(hw->port),
3236 			    pf->offset_loaded, &os->eth.rx_unicast,
3237 			    &ns->eth.rx_unicast);
3238 	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
3239 			    I40E_GLPRT_MPRCL(hw->port),
3240 			    pf->offset_loaded, &os->eth.rx_multicast,
3241 			    &ns->eth.rx_multicast);
3242 	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
3243 			    I40E_GLPRT_BPRCL(hw->port),
3244 			    pf->offset_loaded, &os->eth.rx_broadcast,
3245 			    &ns->eth.rx_broadcast);
3246 	/* Workaround: CRC size should not be included in byte statistics,
3247 	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
3248 	 * packet.
3249 	 */
3250 	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3251 		ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
3252 
	/* Exclude internal rx bytes.
	 * Workaround: it is possible that I40E_GLV_GORC[H/L] is updated before
	 * I40E_GLPRT_GORC[H/L], so there is a small window that causes a
	 * negative value.
	 * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L] and
	 * I40E_GLV_BPRC[H/L].
	 */
3259 	if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
3260 		ns->eth.rx_bytes = 0;
3261 	else
3262 		ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
3263 
3264 	if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
3265 		ns->eth.rx_unicast = 0;
3266 	else
3267 		ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
3268 
3269 	if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
3270 		ns->eth.rx_multicast = 0;
3271 	else
3272 		ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
3273 
3274 	if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
3275 		ns->eth.rx_broadcast = 0;
3276 	else
3277 		ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
3278 
3279 	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
3280 			    pf->offset_loaded, &os->eth.rx_discards,
3281 			    &ns->eth.rx_discards);
3282 	/* GLPRT_REPC not supported */
3283 	/* GLPRT_RMPC not supported */
3284 	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
3285 			    pf->offset_loaded,
3286 			    &os->eth.rx_unknown_protocol,
3287 			    &ns->eth.rx_unknown_protocol);
3288 	i40e_stat_update_48(hw, I40E_GL_RXERR1H(hw->pf_id + I40E_MAX_VF),
3289 			    I40E_GL_RXERR1L(hw->pf_id + I40E_MAX_VF),
3290 			    pf->offset_loaded, &os->rx_err1,
3291 			    &ns->rx_err1);
3292 	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
3293 				  I40E_GLPRT_GOTCL(hw->port),
3294 				  pf->offset_loaded, &os->eth.tx_bytes,
3295 				  &ns->eth.tx_bytes, &pf->prev_tx_bytes);
3296 	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
3297 			    I40E_GLPRT_UPTCL(hw->port),
3298 			    pf->offset_loaded, &os->eth.tx_unicast,
3299 			    &ns->eth.tx_unicast);
3300 	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
3301 			    I40E_GLPRT_MPTCL(hw->port),
3302 			    pf->offset_loaded, &os->eth.tx_multicast,
3303 			    &ns->eth.tx_multicast);
3304 	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
3305 			    I40E_GLPRT_BPTCL(hw->port),
3306 			    pf->offset_loaded, &os->eth.tx_broadcast,
3307 			    &ns->eth.tx_broadcast);
3308 	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3309 		ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
3310 
	/* Exclude internal tx bytes.
	 * Workaround: it is possible that I40E_GLV_GOTC[H/L] is updated before
	 * I40E_GLPRT_GOTC[H/L], so there is a small window that causes a
	 * negative value.
	 * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L] and
	 * I40E_GLV_BPTC[H/L].
	 */
3317 	if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
3318 		ns->eth.tx_bytes = 0;
3319 	else
3320 		ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
3321 
3322 	if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
3323 		ns->eth.tx_unicast = 0;
3324 	else
3325 		ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
3326 
3327 	if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
3328 		ns->eth.tx_multicast = 0;
3329 	else
3330 		ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
3331 
3332 	if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
3333 		ns->eth.tx_broadcast = 0;
3334 	else
3335 		ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
3336 
3337 	/* GLPRT_TEPC not supported */
3338 
3339 	/* additional port specific stats */
3340 	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
3341 			    pf->offset_loaded, &os->tx_dropped_link_down,
3342 			    &ns->tx_dropped_link_down);
3343 	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
3344 			    pf->offset_loaded, &os->crc_errors,
3345 			    &ns->crc_errors);
3346 	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
3347 			    pf->offset_loaded, &os->illegal_bytes,
3348 			    &ns->illegal_bytes);
3349 	/* GLPRT_ERRBC not supported */
3350 	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
3351 			    pf->offset_loaded, &os->mac_local_faults,
3352 			    &ns->mac_local_faults);
3353 	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
3354 			    pf->offset_loaded, &os->mac_remote_faults,
3355 			    &ns->mac_remote_faults);
3356 	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
3357 			    pf->offset_loaded, &os->rx_length_errors,
3358 			    &ns->rx_length_errors);
3359 	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
3360 			    pf->offset_loaded, &os->link_xon_rx,
3361 			    &ns->link_xon_rx);
3362 	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3363 			    pf->offset_loaded, &os->link_xoff_rx,
3364 			    &ns->link_xoff_rx);
3365 	for (i = 0; i < 8; i++) {
3366 		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3367 				    pf->offset_loaded,
3368 				    &os->priority_xon_rx[i],
3369 				    &ns->priority_xon_rx[i]);
3370 		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
3371 				    pf->offset_loaded,
3372 				    &os->priority_xoff_rx[i],
3373 				    &ns->priority_xoff_rx[i]);
3374 	}
3375 	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
3376 			    pf->offset_loaded, &os->link_xon_tx,
3377 			    &ns->link_xon_tx);
3378 	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3379 			    pf->offset_loaded, &os->link_xoff_tx,
3380 			    &ns->link_xoff_tx);
3381 	for (i = 0; i < 8; i++) {
3382 		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3383 				    pf->offset_loaded,
3384 				    &os->priority_xon_tx[i],
3385 				    &ns->priority_xon_tx[i]);
3386 		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3387 				    pf->offset_loaded,
3388 				    &os->priority_xoff_tx[i],
3389 				    &ns->priority_xoff_tx[i]);
3390 		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3391 				    pf->offset_loaded,
3392 				    &os->priority_xon_2_xoff[i],
3393 				    &ns->priority_xon_2_xoff[i]);
3394 	}
3395 	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
3396 			    I40E_GLPRT_PRC64L(hw->port),
3397 			    pf->offset_loaded, &os->rx_size_64,
3398 			    &ns->rx_size_64);
3399 	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
3400 			    I40E_GLPRT_PRC127L(hw->port),
3401 			    pf->offset_loaded, &os->rx_size_127,
3402 			    &ns->rx_size_127);
3403 	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
3404 			    I40E_GLPRT_PRC255L(hw->port),
3405 			    pf->offset_loaded, &os->rx_size_255,
3406 			    &ns->rx_size_255);
3407 	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
3408 			    I40E_GLPRT_PRC511L(hw->port),
3409 			    pf->offset_loaded, &os->rx_size_511,
3410 			    &ns->rx_size_511);
3411 	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
3412 			    I40E_GLPRT_PRC1023L(hw->port),
3413 			    pf->offset_loaded, &os->rx_size_1023,
3414 			    &ns->rx_size_1023);
3415 	i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
3416 			    I40E_GLPRT_PRC1522L(hw->port),
3417 			    pf->offset_loaded, &os->rx_size_1522,
3418 			    &ns->rx_size_1522);
3419 	i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
3420 			    I40E_GLPRT_PRC9522L(hw->port),
3421 			    pf->offset_loaded, &os->rx_size_big,
3422 			    &ns->rx_size_big);
3423 	i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
3424 			    pf->offset_loaded, &os->rx_undersize,
3425 			    &ns->rx_undersize);
3426 	i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
3427 			    pf->offset_loaded, &os->rx_fragments,
3428 			    &ns->rx_fragments);
3429 	i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
3430 			    pf->offset_loaded, &os->rx_oversize,
3431 			    &ns->rx_oversize);
3432 	i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
3433 			    pf->offset_loaded, &os->rx_jabber,
3434 			    &ns->rx_jabber);
3435 	i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
3436 			    I40E_GLPRT_PTC64L(hw->port),
3437 			    pf->offset_loaded, &os->tx_size_64,
3438 			    &ns->tx_size_64);
3439 	i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
3440 			    I40E_GLPRT_PTC127L(hw->port),
3441 			    pf->offset_loaded, &os->tx_size_127,
3442 			    &ns->tx_size_127);
3443 	i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
3444 			    I40E_GLPRT_PTC255L(hw->port),
3445 			    pf->offset_loaded, &os->tx_size_255,
3446 			    &ns->tx_size_255);
3447 	i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
3448 			    I40E_GLPRT_PTC511L(hw->port),
3449 			    pf->offset_loaded, &os->tx_size_511,
3450 			    &ns->tx_size_511);
3451 	i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
3452 			    I40E_GLPRT_PTC1023L(hw->port),
3453 			    pf->offset_loaded, &os->tx_size_1023,
3454 			    &ns->tx_size_1023);
3455 	i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
3456 			    I40E_GLPRT_PTC1522L(hw->port),
3457 			    pf->offset_loaded, &os->tx_size_1522,
3458 			    &ns->tx_size_1522);
3459 	i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
3460 			    I40E_GLPRT_PTC9522L(hw->port),
3461 			    pf->offset_loaded, &os->tx_size_big,
3462 			    &ns->tx_size_big);
3463 	i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
3464 			   pf->offset_loaded,
3465 			   &os->fd_sb_match, &ns->fd_sb_match);
3466 	/* GLPRT_MSPDC not supported */
3467 	/* GLPRT_XEC not supported */
3468 
3469 	pf->offset_loaded = true;
3470 
3471 	if (pf->main_vsi)
3472 		i40e_update_vsi_stats(pf->main_vsi);
3473 }
3474 
3475 /* Get all statistics of a port */
3476 static int
3477 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3478 {
3479 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3480 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3481 	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3482 	struct i40e_vsi *vsi;
3483 	unsigned i;
3484 
	/* Read the stats registers to update the values, then fill the output struct */
3486 	i40e_read_stats_registers(pf, hw);
3487 
3488 	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
3489 			pf->main_vsi->eth_stats.rx_multicast +
3490 			pf->main_vsi->eth_stats.rx_broadcast -
3491 			pf->main_vsi->eth_stats.rx_discards -
3492 			ns->rx_err1;
3493 	stats->opackets = ns->eth.tx_unicast +
3494 			ns->eth.tx_multicast +
3495 			ns->eth.tx_broadcast;
3496 	stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
3497 	stats->obytes   = ns->eth.tx_bytes;
3498 	stats->oerrors  = ns->eth.tx_errors +
3499 			pf->main_vsi->eth_stats.tx_errors;
3500 
3501 	/* Rx Errors */
3502 	stats->imissed  = ns->eth.rx_discards +
3503 			pf->main_vsi->eth_stats.rx_discards;
3504 	stats->ierrors  = ns->crc_errors +
3505 			ns->rx_length_errors + ns->rx_undersize +
3506 			ns->rx_oversize + ns->rx_fragments + ns->rx_jabber +
3507 			ns->rx_err1;
3508 
3509 	if (pf->vfs) {
3510 		for (i = 0; i < pf->vf_num; i++) {
3511 			vsi = pf->vfs[i].vsi;
3512 			i40e_update_vsi_stats(vsi);
3513 
3514 			stats->ipackets += (vsi->eth_stats.rx_unicast +
3515 					vsi->eth_stats.rx_multicast +
3516 					vsi->eth_stats.rx_broadcast -
3517 					vsi->eth_stats.rx_discards);
3518 			stats->ibytes   += vsi->eth_stats.rx_bytes;
3519 			stats->oerrors  += vsi->eth_stats.tx_errors;
3520 			stats->imissed  += vsi->eth_stats.rx_discards;
3521 		}
3522 	}
3523 
3524 	PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3525 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3526 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3527 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3528 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3529 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3530 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3531 		    ns->eth.rx_unknown_protocol);
3532 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3533 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3534 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3535 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3536 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3537 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3538 
3539 	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3540 		    ns->tx_dropped_link_down);
3541 	PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3542 	PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3543 		    ns->illegal_bytes);
3544 	PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3545 	PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3546 		    ns->mac_local_faults);
3547 	PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3548 		    ns->mac_remote_faults);
3549 	PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3550 		    ns->rx_length_errors);
3551 	PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3552 	PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3553 	for (i = 0; i < 8; i++) {
3554 		PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3555 				i, ns->priority_xon_rx[i]);
3556 		PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3557 				i, ns->priority_xoff_rx[i]);
3558 	}
3559 	PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3560 	PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3561 	for (i = 0; i < 8; i++) {
3562 		PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3563 				i, ns->priority_xon_tx[i]);
3564 		PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3565 				i, ns->priority_xoff_tx[i]);
3566 		PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3567 				i, ns->priority_xon_2_xoff[i]);
3568 	}
3569 	PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3570 	PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3571 	PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3572 	PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3573 	PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3574 	PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3575 	PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3576 	PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3577 	PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3578 	PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3579 	PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3580 	PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3581 	PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3582 	PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3583 	PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3584 	PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3585 	PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3586 	PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3587 	PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3588 			ns->mac_short_packet_dropped);
3589 	PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3590 		    ns->checksum_error);
3591 	PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3592 	PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3593 	return 0;
3594 }
3595 
3596 /* Reset the statistics */
3597 static int
3598 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3599 {
3600 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3601 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3602 
3603 	/* Mark PF and VSI stats to update the offset, aka "reset" */
3604 	pf->offset_loaded = false;
3605 	if (pf->main_vsi)
3606 		pf->main_vsi->offset_loaded = false;
3607 
	/* Read the stats; the current register values become the new offsets */
3609 	i40e_read_stats_registers(pf, hw);
3610 
3611 	memset(&pf->mbuf_stats, 0, sizeof(struct i40e_mbuf_stats));
3612 
3613 	return 0;
3614 }
3615 
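/* Total number of xstats: eth stats, mbuf stats, HW port stats, plus
 * per-priority Rx/Tx stats for each of the 8 traffic classes.
 */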
3616 static uint32_t
3617 i40e_xstats_calc_num(void)
3618 {
3619 	return I40E_NB_ETH_XSTATS + I40E_NB_MBUF_XSTATS +
3620 		I40E_NB_HW_PORT_XSTATS +
3621 		(I40E_NB_RXQ_PRIO_XSTATS * 8) +
3622 		(I40E_NB_TXQ_PRIO_XSTATS * 8);
3623 }
3624 
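/* Fill in the name of each extended statistic; when xstats_names is NULL,
 * return the number of available xstats instead.
 */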
3625 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3626 				     struct rte_eth_xstat_name *xstats_names,
3627 				     __rte_unused unsigned limit)
3628 {
3629 	unsigned count = 0;
3630 	unsigned i, prio;
3631 
3632 	if (xstats_names == NULL)
3633 		return i40e_xstats_calc_num();
3634 
	/* Note: limit is checked in rte_eth_xstats_get_names() */
3636 
3637 	/* Get stats from i40e_eth_stats struct */
3638 	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3639 		strlcpy(xstats_names[count].name,
3640 			rte_i40e_stats_strings[i].name,
3641 			sizeof(xstats_names[count].name));
3642 		count++;
3643 	}
3644 
3645 	/* Get stats from i40e_mbuf_stats struct */
3646 	for (i = 0; i < I40E_NB_MBUF_XSTATS; i++) {
3647 		strlcpy(xstats_names[count].name,
3648 			i40e_mbuf_strings[i].name,
3649 			sizeof(xstats_names[count].name));
3650 		count++;
3651 	}
3652 
3653 	/* Get individual stats from i40e_hw_port struct */
3654 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3655 		strlcpy(xstats_names[count].name,
3656 			rte_i40e_hw_port_strings[i].name,
3657 			sizeof(xstats_names[count].name));
3658 		count++;
3659 	}
3660 
3661 	for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3662 		for (prio = 0; prio < 8; prio++) {
3663 			snprintf(xstats_names[count].name,
3664 				 sizeof(xstats_names[count].name),
3665 				 "rx_priority%u_%s", prio,
3666 				 rte_i40e_rxq_prio_strings[i].name);
3667 			count++;
3668 		}
3669 	}
3670 
3671 	for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3672 		for (prio = 0; prio < 8; prio++) {
3673 			snprintf(xstats_names[count].name,
3674 				 sizeof(xstats_names[count].name),
3675 				 "tx_priority%u_%s", prio,
3676 				 rte_i40e_txq_prio_strings[i].name);
3677 			count++;
3678 		}
3679 	}
3680 	return count;
3681 }
3682 
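/* Accumulate the per-queue mbuf error counters into the device-level
 * mbuf statistics.
 */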
3683 static void
3684 i40e_dev_update_mbuf_stats(struct rte_eth_dev *ethdev,
3685 		struct i40e_mbuf_stats *mbuf_stats)
3686 {
3687 	uint16_t idx;
3688 	struct ci_tx_queue *txq;
3689 
3690 	for (idx = 0; idx < ethdev->data->nb_tx_queues; idx++) {
3691 		txq = ethdev->data->tx_queues[idx];
3692 		mbuf_stats->tx_pkt_errors += txq->mbuf_errors;
3693 	}
3694 }
3695 
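/* Get the extended statistics of a port; if the supplied array is too
 * small, return the required number of entries.
 */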
3696 static int
3697 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3698 		    unsigned n)
3699 {
3700 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3701 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3702 	struct i40e_adapter *adapter =
3703 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3704 	struct i40e_mbuf_stats mbuf_stats = {0};
3705 	unsigned i, count, prio;
3706 	struct i40e_hw_port_stats *hw_stats = &pf->stats;
3707 
3708 	count = i40e_xstats_calc_num();
3709 	if (n < count)
3710 		return count;
3711 
3712 	i40e_read_stats_registers(pf, hw);
3713 
3714 	if (xstats == NULL)
3715 		return 0;
3716 
3717 	count = 0;
3718 
3719 	if (adapter->mbuf_check)
3720 		i40e_dev_update_mbuf_stats(dev, &mbuf_stats);
3721 
3722 	/* Get stats from i40e_eth_stats struct */
3723 	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3724 		xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3725 			rte_i40e_stats_strings[i].offset);
3726 		xstats[count].id = count;
3727 		count++;
3728 	}
3729 
3730 	/* Get stats from i40e_mbuf_stats struct */
3731 	for (i = 0; i < I40E_NB_MBUF_XSTATS; i++) {
3732 		xstats[count].value = *(uint64_t *)((char *)&mbuf_stats +
3733 			i40e_mbuf_strings[i].offset);
3734 		xstats[count].id = count;
3735 		count++;
3736 	}
3737 
3738 	/* Get individual stats from i40e_hw_port struct */
3739 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3740 		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3741 			rte_i40e_hw_port_strings[i].offset);
3742 		xstats[count].id = count;
3743 		count++;
3744 	}
3745 
3746 	for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3747 		for (prio = 0; prio < 8; prio++) {
3748 			xstats[count].value =
3749 				*(uint64_t *)(((char *)hw_stats) +
3750 				rte_i40e_rxq_prio_strings[i].offset +
3751 				(sizeof(uint64_t) * prio));
3752 			xstats[count].id = count;
3753 			count++;
3754 		}
3755 	}
3756 
3757 	for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3758 		for (prio = 0; prio < 8; prio++) {
3759 			xstats[count].value =
3760 				*(uint64_t *)(((char *)hw_stats) +
3761 				rte_i40e_txq_prio_strings[i].offset +
3762 				(sizeof(uint64_t) * prio));
3763 			xstats[count].id = count;
3764 			count++;
3765 		}
3766 	}
3767 
3768 	return count;
3769 }
3770 
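/* Format the NVM version, eetrack ID and OEM version into fw_version */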
3771 static int
3772 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3773 {
3774 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3775 	u32 full_ver;
3776 	u8 ver, patch;
3777 	u16 build;
3778 	int ret;
3779 
3780 	full_ver = hw->nvm.oem_ver;
3781 	ver = (u8)(full_ver >> 24);
3782 	build = (u16)((full_ver >> 8) & 0xffff);
3783 	patch = (u8)(full_ver & 0xff);
3784 
3785 	ret = snprintf(fw_version, fw_size,
3786 		 "%d.%d%d 0x%08x %d.%d.%d",
3787 		 ((hw->nvm.version >> 12) & 0xf),
3788 		 ((hw->nvm.version >> 4) & 0xff),
3789 		 (hw->nvm.version & 0xf), hw->nvm.eetrack,
3790 		 ver, build, patch);
3791 	if (ret < 0)
3792 		return -EINVAL;
3793 
	ret += 1; /* account for the terminating '\0' */
3795 	if (fw_size < (size_t)ret)
3796 		return ret;
3797 	else
3798 		return 0;
3799 }
3800 
/*
 * When using NVM 6.01 (for X710/XL710/XXV710) or 3.33 (for X722) or later,
 * the Rx data path does not hang if the FW LLDP agent is stopped.
 * Return true if LLDP needs to be stopped,
 * false if LLDP cannot be disabled without blocking the Rx data path.
 */
3807 static bool
3808 i40e_need_stop_lldp(struct rte_eth_dev *dev)
3809 {
3810 	double nvm_ver;
3811 	char ver_str[64] = {0};
3812 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3813 
3814 	i40e_fw_version_get(dev, ver_str, 64);
3815 	nvm_ver = atof(ver_str);
3816 	if ((hw->mac.type == I40E_MAC_X722 ||
3817 	     hw->mac.type == I40E_MAC_X722_VF) &&
3818 	     ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
3819 		return true;
3820 	else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
3821 		return true;
3822 
3823 	return false;
3824 }
3825 
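/* Report device information: queue limits, offload capabilities,
 * descriptor limits and preferred default configuration.
 */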
3826 static int
3827 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3828 {
3829 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3830 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3831 	struct i40e_vsi *vsi = pf->main_vsi;
3832 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3833 
3834 	dev_info->max_rx_queues = vsi->nb_qps;
3835 	dev_info->max_tx_queues = vsi->nb_qps;
3836 	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3837 	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3838 	dev_info->max_mac_addrs = vsi->max_macaddrs;
3839 	dev_info->max_vfs = pci_dev->max_vfs;
3840 	dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
3841 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3842 	dev_info->rx_queue_offload_capa = 0;
3843 	dev_info->rx_offload_capa =
3844 		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
3845 		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
3846 		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
3847 		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
3848 		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
3849 		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3850 		RTE_ETH_RX_OFFLOAD_KEEP_CRC |
3851 		RTE_ETH_RX_OFFLOAD_SCATTER |
3852 		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
3853 		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
3854 		RTE_ETH_RX_OFFLOAD_RSS_HASH;
3855 
3856 	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
3857 	dev_info->tx_offload_capa =
3858 		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
3859 		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
3860 		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
3861 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
3862 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
3863 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
3864 		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3865 		RTE_ETH_TX_OFFLOAD_TCP_TSO |
3866 		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
3867 		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
3868 		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
3869 		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
3870 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
3871 		dev_info->tx_queue_offload_capa;
3872 	if (hw->mac.type == I40E_MAC_X722) {
3873 		dev_info->tx_offload_capa |=
3874 			RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
3875 	}
3876 
3877 	dev_info->dev_capa =
3878 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3879 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3880 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
3881 
3882 	dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3883 						sizeof(uint32_t);
3884 	dev_info->reta_size = pf->hash_lut_size;
3885 	dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3886 
3887 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
3888 		.rx_thresh = {
3889 			.pthresh = I40E_DEFAULT_RX_PTHRESH,
3890 			.hthresh = I40E_DEFAULT_RX_HTHRESH,
3891 			.wthresh = I40E_DEFAULT_RX_WTHRESH,
3892 		},
3893 		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3894 		.rx_drop_en = 0,
3895 		.offloads = 0,
3896 	};
3897 
3898 	dev_info->default_txconf = (struct rte_eth_txconf) {
3899 		.tx_thresh = {
3900 			.pthresh = I40E_DEFAULT_TX_PTHRESH,
3901 			.hthresh = I40E_DEFAULT_TX_HTHRESH,
3902 			.wthresh = I40E_DEFAULT_TX_WTHRESH,
3903 		},
3904 		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3905 		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3906 		.offloads = 0,
3907 	};
3908 
3909 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3910 		.nb_max = I40E_MAX_RING_DESC,
3911 		.nb_min = I40E_MIN_RING_DESC,
3912 		.nb_align = I40E_ALIGN_RING_DESC,
3913 	};
3914 
3915 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3916 		.nb_max = I40E_MAX_RING_DESC,
3917 		.nb_min = I40E_MIN_RING_DESC,
3918 		.nb_align = I40E_ALIGN_RING_DESC,
3919 		.nb_seg_max = I40E_TX_MAX_SEG,
3920 		.nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3921 	};
3922 
3923 	if (pf->flags & I40E_FLAG_VMDQ) {
3924 		dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3925 		dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3926 		dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3927 						pf->max_nb_vmdq_vsi;
3928 		dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3929 		dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3930 		dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3931 	}
3932 
3933 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3934 		/* For XL710 */
3935 		dev_info->speed_capa = RTE_ETH_LINK_SPEED_40G;
3936 		dev_info->default_rxportconf.nb_queues = 2;
3937 		dev_info->default_txportconf.nb_queues = 2;
3938 		if (dev->data->nb_rx_queues == 1)
3939 			dev_info->default_rxportconf.ring_size = 2048;
3940 		else
3941 			dev_info->default_rxportconf.ring_size = 1024;
3942 		if (dev->data->nb_tx_queues == 1)
3943 			dev_info->default_txportconf.ring_size = 1024;
3944 		else
3945 			dev_info->default_txportconf.ring_size = 512;
3946 
3947 	} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3948 		/* For XXV710 */
3949 		dev_info->speed_capa = RTE_ETH_LINK_SPEED_25G;
3950 		dev_info->default_rxportconf.nb_queues = 1;
3951 		dev_info->default_txportconf.nb_queues = 1;
3952 		dev_info->default_rxportconf.ring_size = 256;
3953 		dev_info->default_txportconf.ring_size = 256;
3954 	} else {
3955 		/* For X710 */
3956 		dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
3957 		dev_info->default_rxportconf.nb_queues = 1;
3958 		dev_info->default_txportconf.nb_queues = 1;
3959 		if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_10G) {
3960 			dev_info->default_rxportconf.ring_size = 512;
3961 			dev_info->default_txportconf.ring_size = 256;
3962 		} else {
3963 			dev_info->default_rxportconf.ring_size = 256;
3964 			dev_info->default_txportconf.ring_size = 256;
3965 		}
3966 	}
3967 	dev_info->default_rxportconf.burst_size = 32;
3968 	dev_info->default_txportconf.burst_size = 32;
3969 
3970 	return 0;
3971 }
3972 
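/* Add or remove a VLAN filter on the main VSI */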
3973 static int
3974 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3975 {
3976 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3977 	struct i40e_vsi *vsi = pf->main_vsi;
3978 	PMD_INIT_FUNC_TRACE();
3979 
3980 	if (on)
3981 		return i40e_vsi_add_vlan(vsi, vlan_id);
3982 	else
3983 		return i40e_vsi_delete_vlan(vsi, vlan_id);
3984 }
3985 
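/* Set the VLAN TPID by writing the L2 tag control register directly;
 * in QinQ mode the outer tag uses L2TAGCTRL[2], otherwise L2TAGCTRL[3].
 */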
3986 static int
3987 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3988 				enum rte_vlan_type vlan_type,
3989 				uint16_t tpid, int qinq)
3990 {
3991 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3992 	uint64_t reg_r = 0;
3993 	uint64_t reg_w = 0;
3994 	uint16_t reg_id = 3;
3995 	int ret;
3996 
3997 	if (qinq) {
3998 		if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
3999 			reg_id = 2;
4000 	}
4001 
4002 	ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
4003 					  &reg_r, NULL);
4004 	if (ret != I40E_SUCCESS) {
4005 		PMD_DRV_LOG(ERR,
			   "Failed to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
4007 			   reg_id);
4008 		return -EIO;
4009 	}
4010 	PMD_DRV_LOG(DEBUG,
4011 		    "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
4012 		    reg_id, reg_r);
4013 
4014 	reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
4015 	reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
4016 	if (reg_r == reg_w) {
4017 		PMD_DRV_LOG(DEBUG, "No need to write");
4018 		return 0;
4019 	}
4020 
4021 	ret = i40e_aq_debug_write_global_register(hw,
4022 					   I40E_GL_SWT_L2TAGCTRL(reg_id),
4023 					   reg_w, NULL);
4024 	if (ret != I40E_SUCCESS) {
4025 		PMD_DRV_LOG(ERR,
			    "Failed to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
4027 			    reg_id);
4028 		return -EIO;
4029 	}
4030 	PMD_DRV_LOG(DEBUG,
		    "Global register 0x%08x was changed to value 0x%08x",
4032 		    I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
4033 
4034 	return 0;
4035 }
4036 
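/* Set the VLAN TPID, via the AQ switch configuration when the FW is
 * 802.1ad capable, otherwise via direct register writes.
 */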
4037 static int
4038 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
4039 		   enum rte_vlan_type vlan_type,
4040 		   uint16_t tpid)
4041 {
4042 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4043 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4044 	int qinq = dev->data->dev_conf.rxmode.offloads &
4045 		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
4046 	u16 sw_flags = 0, valid_flags = 0;
4047 	int ret = 0;
4048 
4049 	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
4050 	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER) ||
4051 	    (!qinq && vlan_type == RTE_ETH_VLAN_TYPE_INNER)) {
		PMD_DRV_LOG(ERR, "Unsupported VLAN type.");
4054 		return -EINVAL;
4055 	}
4056 
4057 	if (pf->support_multi_driver) {
4058 		PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
4059 		return -ENOTSUP;
4060 	}
4061 
	/* Support for 802.1ad frames was added in NVM API 1.7 */
4063 	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
4064 		if (qinq) {
4065 			if (pf->fw8_3gt) {
4066 				sw_flags = I40E_AQ_SET_SWITCH_CFG_OUTER_VLAN;
4067 				valid_flags = I40E_AQ_SET_SWITCH_CFG_OUTER_VLAN;
4068 			}
4069 			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
4070 				hw->first_tag = rte_cpu_to_le_16(tpid);
4071 			else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER)
4072 				hw->second_tag = rte_cpu_to_le_16(tpid);
4073 		} else {
4074 			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
4075 				hw->second_tag = rte_cpu_to_le_16(tpid);
4076 		}
4077 		ret = i40e_aq_set_switch_config(hw, sw_flags,
4078 						valid_flags, 0, NULL);
4079 		if (ret != I40E_SUCCESS) {
4080 			PMD_DRV_LOG(ERR,
4081 				    "Set switch config failed aq_err: %d",
4082 				    hw->aq.asq_last_status);
4083 			ret = -EIO;
4084 		}
4085 	} else
4086 		/* If NVM API < 1.7, keep the register setting */
4087 		ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
4088 						      tpid, qinq);
4089 
4090 	return ret;
4091 }
4092 
4093 /* Configure outer vlan stripping on or off in QinQ mode */
4094 static int
4095 i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
4096 {
4097 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4098 	int ret = I40E_SUCCESS;
4099 	uint32_t reg;
4100 
4101 	if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
4102 		PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
4103 		return -EINVAL;
4104 	}
4105 
4106 	/* Configure for outer VLAN RX stripping */
4107 	reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
4108 
4109 	if (on)
4110 		reg |= I40E_VSI_TSR_QINQ_STRIP;
4111 	else
4112 		reg &= ~I40E_VSI_TSR_QINQ_STRIP;
4113 
4114 	ret = i40e_aq_debug_write_register(hw,
4115 						   I40E_VSI_TSR(vsi->vsi_id),
4116 						   reg, NULL);
4117 	if (ret < 0) {
4118 		PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
4119 				    vsi->vsi_id);
4120 		return I40E_ERR_CONFIG;
4121 	}
4122 
4123 	return ret;
4124 }
4125 
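/* Apply the VLAN offload configuration (filtering, stripping, extend/QinQ)
 * taken from the current rxmode offload flags.
 */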
4126 static int
4127 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4128 {
4129 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4130 	struct i40e_mac_filter_info *mac_filter;
4131 	struct i40e_vsi *vsi = pf->main_vsi;
4132 	struct rte_eth_rxmode *rxmode;
4133 	struct i40e_mac_filter *f;
4134 	int i, num;
4135 	void *temp;
4136 	int ret;
4137 
4138 	rxmode = &dev->data->dev_conf.rxmode;
4139 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
4140 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
4141 			i40e_vsi_config_vlan_filter(vsi, TRUE);
4142 		else
4143 			i40e_vsi_config_vlan_filter(vsi, FALSE);
4144 	}
4145 
4146 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
4147 		/* Enable or disable VLAN stripping */
4148 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
4149 			i40e_vsi_config_vlan_stripping(vsi, TRUE);
4150 		else
4151 			i40e_vsi_config_vlan_stripping(vsi, FALSE);
4152 	}
4153 
4154 	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
4155 		i = 0;
4156 		num = vsi->mac_num;
4157 		mac_filter = rte_zmalloc("mac_filter_info_data",
4158 				 num * sizeof(*mac_filter), 0);
4159 		if (mac_filter == NULL) {
4160 			PMD_DRV_LOG(ERR, "failed to allocate memory");
4161 			return I40E_ERR_NO_MEMORY;
4162 		}
4163 
		/*
		 * Outer VLAN processing is supported after firmware v8.4; the
		 * kernel driver also changed its default behavior to support
		 * this feature. To align with the kernel driver, the switch
		 * config is set in 'i40e_vlan_tpid_set' to support outer VLAN
		 * processing. But the firmware does not allow the inner/outer
		 * VLAN configuration to change while there are MAC/VLAN filters
		 * in the switch table. Therefore, clear the MAC table before
		 * setting the config and restore it afterwards. This feature
		 * is recommended for use with firmware v8.6 or later.
		 */
4174 		/* Remove all existing mac */
4175 		RTE_TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
4176 			mac_filter[i] = f->mac_info;
4177 			ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
4178 			if (ret)
4179 				PMD_DRV_LOG(ERR, "i40e vsi delete mac fail.");
4180 			i++;
4181 		}
4182 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
4183 			i40e_vsi_config_double_vlan(vsi, TRUE);
4184 			/* Set global registers with default ethertype. */
4185 			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
4186 					   RTE_ETHER_TYPE_VLAN);
4187 			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
4188 					   RTE_ETHER_TYPE_VLAN);
4189 		} else {
4190 			i40e_vsi_config_double_vlan(vsi, FALSE);
4191 		}
4192 		/* Restore all mac */
4193 		for (i = 0; i < num; i++) {
4194 			ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
4195 			if (ret)
4196 				PMD_DRV_LOG(ERR, "i40e vsi add mac fail.");
4197 		}
4198 		rte_free(mac_filter);
4199 	}
4200 
4201 	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
4202 		/* Enable or disable outer VLAN stripping */
4203 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
4204 			i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
4205 		else
4206 			i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
4207 	}
4208 
4209 	return 0;
4210 }
4211 
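/* Per-queue VLAN strip configuration is not supported; intentionally a stub */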
4212 static void
4213 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
4214 			  __rte_unused uint16_t queue,
4215 			  __rte_unused int on)
4216 {
4217 	PMD_INIT_FUNC_TRACE();
4218 }
4219 
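/* Set or clear the port VLAN ID (PVID) on the main VSI */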
4220 static int
4221 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4222 {
4223 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4224 	struct i40e_vsi *vsi = pf->main_vsi;
4225 	struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
4226 	struct i40e_vsi_vlan_pvid_info info;
4227 
4228 	memset(&info, 0, sizeof(info));
4229 	info.on = on;
4230 	if (info.on)
4231 		info.config.pvid = pvid;
4232 	else {
4233 		info.config.reject.tagged =
4234 				data->dev_conf.txmode.hw_vlan_reject_tagged;
4235 		info.config.reject.untagged =
4236 				data->dev_conf.txmode.hw_vlan_reject_untagged;
4237 	}
4238 
4239 	return i40e_vsi_vlan_pvid_set(vsi, &info);
4240 }
4241 
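/* Turn on the device LED if it is currently off */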
4242 static int
4243 i40e_dev_led_on(struct rte_eth_dev *dev)
4244 {
4245 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4246 	uint32_t mode = i40e_led_get(hw);
4247 
4248 	if (mode == 0)
		i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
4250 
4251 	return 0;
4252 }
4253 
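/* Turn off the device LED if it is currently on */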
4254 static int
4255 i40e_dev_led_off(struct rte_eth_dev *dev)
4256 {
4257 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4258 	uint32_t mode = i40e_led_get(hw);
4259 
4260 	if (mode != 0)
4261 		i40e_led_set(hw, 0, false);
4262 
4263 	return 0;
4264 }
4265 
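/* Get the current link flow control configuration */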
4266 static int
4267 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4268 {
4269 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4270 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4271 
4272 	fc_conf->pause_time = pf->fc_conf.pause_time;
4273 
	/* Read out from the registers, in case they were modified by another port */
4275 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
4276 		I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
4277 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
4278 		I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
4279 
4280 	fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
4281 	fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
4282 
	/* Return current mode according to actual setting */
4284 	switch (hw->fc.current_mode) {
4285 	case I40E_FC_FULL:
4286 		fc_conf->mode = RTE_ETH_FC_FULL;
4287 		break;
4288 	case I40E_FC_TX_PAUSE:
4289 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
4290 		break;
4291 	case I40E_FC_RX_PAUSE:
4292 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
4293 		break;
4294 	case I40E_FC_NONE:
4295 	default:
4296 		fc_conf->mode = RTE_ETH_FC_NONE;
	}
4298 
4299 	return 0;
4300 }
4301 
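/* Configure link flow control: mode, pause time and watermarks */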
4302 static int
4303 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4304 {
4305 	uint32_t mflcn_reg, fctrl_reg, reg;
4306 	uint32_t max_high_water;
4307 	uint8_t i, aq_failure;
4308 	int err;
4309 	struct i40e_hw *hw;
4310 	struct i40e_pf *pf;
4311 	enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
4312 		[RTE_ETH_FC_NONE] = I40E_FC_NONE,
4313 		[RTE_ETH_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
4314 		[RTE_ETH_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
4315 		[RTE_ETH_FC_FULL] = I40E_FC_FULL
4316 	};
4317 
	/* The high_water field in rte_eth_fc_conf is in kilobyte units */
4319 
4320 	max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
4321 	if ((fc_conf->high_water > max_high_water) ||
4322 			(fc_conf->high_water < fc_conf->low_water)) {
4323 		PMD_INIT_LOG(ERR,
			"Invalid high/low water setup value in KB; high_water must be <= %d.",
4325 			max_high_water);
4326 		return -EINVAL;
4327 	}
4328 
4329 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4330 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4331 	hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
4332 
4333 	pf->fc_conf.pause_time = fc_conf->pause_time;
4334 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
4335 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
4336 
4337 	PMD_INIT_FUNC_TRACE();
4338 
	/* All the link flow control related enable/disable register
	 * configuration is handled by the FW
	 */
4342 	err = i40e_set_fc(hw, &aq_failure, true);
4343 	if (err < 0)
4344 		return -ENOSYS;
4345 
4346 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
4347 		/* Configure flow control refresh threshold,
4348 		 * the value for stat_tx_pause_refresh_timer[8]
4349 		 * is used for global pause operation.
4350 		 */
4351 
4352 		I40E_WRITE_REG(hw,
4353 			       I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
4354 			       pf->fc_conf.pause_time);
4355 
		/* Configure the timer value included in transmitted pause
		 * frames; the value for stat_tx_pause_quanta[8] is used for
		 * global pause operation
		 */
4361 		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
4362 			       pf->fc_conf.pause_time);
4363 
4364 		fctrl_reg = I40E_READ_REG(hw,
4365 					  I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
4366 
4367 		if (fc_conf->mac_ctrl_frame_fwd != 0)
4368 			fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
4369 		else
4370 			fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
4371 
4372 		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
4373 			       fctrl_reg);
4374 	} else {
4375 		/* Configure pause time (2 TCs per register) */
4376 		reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
4377 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
4378 			I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
4379 
4380 		/* Configure flow control refresh threshold value */
4381 		I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
4382 			       pf->fc_conf.pause_time / 2);
4383 
4384 		mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4385 
		/* Set or clear the MFLCN.PMCF & MFLCN.DPF bits
		 * depending on the configuration
		 */
4389 		if (fc_conf->mac_ctrl_frame_fwd != 0) {
4390 			mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
4391 			mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
4392 		} else {
4393 			mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
4394 			mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
4395 		}
4396 
4397 		I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
4398 	}
4399 
4400 	if (!pf->support_multi_driver) {
		/* Configure watermarks based on both packets and bytes */
4402 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
4403 				 (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4404 				 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4405 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
4406 				  (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4407 				 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4408 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
4409 				  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4410 				  << I40E_KILOSHIFT);
4411 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
4412 				   pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4413 				   << I40E_KILOSHIFT);
4414 	} else {
4415 		PMD_DRV_LOG(ERR,
			    "Watermark configuration is not supported.");
4417 	}
4418 
4419 	I40E_WRITE_FLUSH(hw);
4420 
4421 	return 0;
4422 }
4423 
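/* Priority (per-TC) flow control is not supported */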
4424 static int
4425 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
4426 			    __rte_unused struct rte_eth_pfc_conf *pfc_conf)
4427 {
4428 	PMD_INIT_FUNC_TRACE();
4429 
4430 	return -ENOSYS;
4431 }
4432 
4433 /* Add a MAC address, and update filters */
4434 static int
4435 i40e_macaddr_add(struct rte_eth_dev *dev,
4436 		 struct rte_ether_addr *mac_addr,
4437 		 __rte_unused uint32_t index,
4438 		 uint32_t pool)
4439 {
4440 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4441 	struct i40e_mac_filter_info mac_filter;
4442 	struct i40e_vsi *vsi;
4443 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4444 	int ret;
4445 
4446 	/* If VMDQ not enabled or configured, return */
4447 	if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
4448 			  !pf->nb_cfg_vmdq_vsi)) {
4449 		PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
4450 			pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
4451 			pool);
4452 		return -ENOTSUP;
4453 	}
4454 
4455 	if (pool > pf->nb_cfg_vmdq_vsi) {
4456 		PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
4457 				pool, pf->nb_cfg_vmdq_vsi);
4458 		return -EINVAL;
4459 	}
4460 
4461 	rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
4462 	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
4463 		mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
4464 	else
4465 		mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
4466 
4467 	if (pool == 0)
4468 		vsi = pf->main_vsi;
4469 	else
4470 		vsi = pf->vmdq[pool - 1].vsi;
4471 
4472 	ret = i40e_vsi_add_mac(vsi, &mac_filter);
4473 	if (ret != I40E_SUCCESS) {
4474 		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4475 		return -ENODEV;
4476 	}
4477 	return 0;
4478 }
4479 
4480 /* Remove a MAC address, and update filters */
4481 static void
4482 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4483 {
4484 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4485 	struct i40e_vsi *vsi;
4486 	struct rte_eth_dev_data *data = dev->data;
4487 	struct rte_ether_addr *macaddr;
4488 	int ret;
4489 	uint32_t i;
4490 	uint64_t pool_sel;
4491 
4492 	macaddr = &(data->mac_addrs[index]);
4493 
4494 	pool_sel = dev->data->mac_pool_sel[index];
4495 
4496 	for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
4497 		if (pool_sel & (1ULL << i)) {
4498 			if (i == 0)
4499 				vsi = pf->main_vsi;
4500 			else {
4501 				/* No VMDQ pool enabled or configured */
4502 				if (!(pf->flags & I40E_FLAG_VMDQ) ||
4503 					(i > pf->nb_cfg_vmdq_vsi)) {
4504 					PMD_DRV_LOG(ERR,
4505 						"No VMDQ pool enabled/configured");
4506 					return;
4507 				}
4508 				vsi = pf->vmdq[i - 1].vsi;
4509 			}
4510 			ret = i40e_vsi_delete_mac(vsi, macaddr);
4511 
4512 			if (ret) {
4513 				PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
4514 				return;
4515 			}
4516 		}
4517 	}
4518 }
4519 
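/* Read the RSS lookup table, through the admin queue when supported,
 * otherwise directly from the VF/PF HLUT registers.
 */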
4520 static int
4521 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4522 {
4523 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4524 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4525 	uint32_t reg;
4526 	int ret;
4527 
4528 	if (!lut)
4529 		return -EINVAL;
4530 
4531 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4532 		ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
4533 					  vsi->type != I40E_VSI_SRIOV,
4534 					  lut, lut_size);
4535 		if (ret) {
4536 			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4537 			return ret;
4538 		}
4539 	} else {
4540 		uint32_t *lut_dw = (uint32_t *)lut;
4541 		uint16_t i, lut_size_dw = lut_size / 4;
4542 
4543 		if (vsi->type == I40E_VSI_SRIOV) {
			/* '<', not '<=': lut_dw has only lut_size_dw entries */
			for (i = 0; i < lut_size_dw; i++) {
4545 				reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4546 				lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4547 			}
4548 		} else {
4549 			for (i = 0; i < lut_size_dw; i++)
4550 				lut_dw[i] = I40E_READ_REG(hw,
4551 							  I40E_PFQF_HLUT(i));
4552 		}
4553 	}
4554 
4555 	return 0;
4556 }
4557 
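/* Write the RSS lookup table, through the admin queue when supported,
 * otherwise directly to the VF/PF HLUT registers.
 */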
4558 int
4559 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4560 {
4561 	struct i40e_pf *pf;
4562 	struct i40e_hw *hw;
4563 
4564 	if (!vsi || !lut)
4565 		return -EINVAL;
4566 
4567 	pf = I40E_VSI_TO_PF(vsi);
4568 	hw = I40E_VSI_TO_HW(vsi);
4569 
4570 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4571 		enum i40e_status_code status;
4572 
4573 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
4574 					     vsi->type != I40E_VSI_SRIOV,
4575 					     lut, lut_size);
4576 		if (status) {
4577 			PMD_DRV_LOG(ERR,
4578 				    "Failed to update RSS lookup table, error status: %d",
4579 				    status);
4580 			return -EIO;
4581 		}
4582 	} else {
4583 		uint32_t *lut_dw = (uint32_t *)lut;
4584 		uint16_t i, lut_size_dw = lut_size / 4;
4585 
4586 		if (vsi->type == I40E_VSI_SRIOV) {
4587 			for (i = 0; i < lut_size_dw; i++)
4588 				I40E_WRITE_REG(
4589 					hw,
4590 					I40E_VFQF_HLUT1(i, vsi->user_param),
4591 					lut_dw[i]);
4592 		} else {
4593 			for (i = 0; i < lut_size_dw; i++)
4594 				I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4595 					       lut_dw[i]);
4596 		}
4597 		I40E_WRITE_FLUSH(hw);
4598 	}
4599 
4600 	return 0;
4601 }
4602 
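/* Update the RSS redirection table entries selected in reta_conf */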
4603 static int
4604 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4605 			 struct rte_eth_rss_reta_entry64 *reta_conf,
4606 			 uint16_t reta_size)
4607 {
4608 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4609 	uint16_t i, lut_size = pf->hash_lut_size;
4610 	uint16_t idx, shift;
4611 	uint8_t *lut;
4612 	int ret;
4613 
4614 	if (reta_size != lut_size ||
4615 		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
4616 		PMD_DRV_LOG(ERR,
			"The size of the configured hash lookup table (%d) doesn't match the number the hardware can support (%d)",
4618 			reta_size, lut_size);
4619 		return -EINVAL;
4620 	}
4621 
4622 	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4623 	if (!lut) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
4625 		return -ENOMEM;
4626 	}
4627 	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4628 	if (ret)
4629 		goto out;
4630 	for (i = 0; i < reta_size; i++) {
4631 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
4632 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
4633 		if (reta_conf[idx].mask & (1ULL << shift))
4634 			lut[i] = reta_conf[idx].reta[shift];
4635 	}
4636 	ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4637 
4638 	pf->adapter->rss_reta_updated = 1;
4639 
4640 out:
4641 	rte_free(lut);
4642 
4643 	return ret;
4644 }
4645 
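/* Query the RSS redirection table entries selected in reta_conf */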
4646 static int
4647 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4648 			struct rte_eth_rss_reta_entry64 *reta_conf,
4649 			uint16_t reta_size)
4650 {
4651 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4652 	uint16_t i, lut_size = pf->hash_lut_size;
4653 	uint16_t idx, shift;
4654 	uint8_t *lut;
4655 	int ret;
4656 
4657 	if (reta_size != lut_size ||
4658 		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
4659 		PMD_DRV_LOG(ERR,
			"The size of the configured hash lookup table (%d) doesn't match the number the hardware can support (%d)",
4661 			reta_size, lut_size);
4662 		return -EINVAL;
4663 	}
4664 
4665 	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4666 	if (!lut) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
4668 		return -ENOMEM;
4669 	}
4670 
4671 	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4672 	if (ret)
4673 		goto out;
4674 	for (i = 0; i < reta_size; i++) {
4675 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
4676 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
4677 		if (reta_conf[idx].mask & (1ULL << shift))
4678 			reta_conf[idx].reta[shift] = lut[i];
4679 	}
4680 
4681 out:
4682 	rte_free(lut);
4683 
4684 	return ret;
4685 }
4686 
4687 /**
4688  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4689  * @hw:   pointer to the HW structure
4690  * @mem:  pointer to mem struct to fill out
4691  * @size: size of memory requested
4692  * @alignment: what to align the allocation to
4693  **/
4694 enum i40e_status_code
4695 i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
4696 			struct i40e_dma_mem *mem,
4697 			u64 size,
4698 			u32 alignment)
4699 {
4700 	static RTE_ATOMIC(uint64_t) i40e_dma_memzone_id;
4701 	const struct rte_memzone *mz = NULL;
4702 	char z_name[RTE_MEMZONE_NAMESIZE];
4703 
4704 	if (!mem)
4705 		return I40E_ERR_PARAM;
4706 
4707 	snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
4708 		rte_atomic_fetch_add_explicit(&i40e_dma_memzone_id, 1, rte_memory_order_relaxed));
4709 	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4710 			RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4711 	if (!mz)
4712 		return I40E_ERR_NO_MEMORY;
4713 
4714 	mem->size = size;
4715 	mem->va = mz->addr;
4716 	mem->pa = mz->iova;
4717 	mem->zone = (const void *)mz;
4718 	PMD_DRV_LOG(DEBUG,
4719 		"memzone %s allocated with physical address: %"PRIu64,
4720 		mz->name, mem->pa);
4721 
4722 	return I40E_SUCCESS;
4723 }
4724 
4725 /**
4726  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4727  * @hw:   pointer to the HW structure
4728  * @mem:  ptr to mem struct to free
4729  **/
4730 enum i40e_status_code
4731 i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
4732 		    struct i40e_dma_mem *mem)
4733 {
4734 	if (!mem)
4735 		return I40E_ERR_PARAM;
4736 
4737 	PMD_DRV_LOG(DEBUG,
4738 		"memzone %s to be freed with physical address: %"PRIu64,
4739 		((const struct rte_memzone *)mem->zone)->name, mem->pa);
4740 	rte_memzone_free((const struct rte_memzone *)mem->zone);
4741 	mem->zone = NULL;
4742 	mem->va = NULL;
4743 	mem->pa = (u64)0;
4744 
4745 	return I40E_SUCCESS;
4746 }
4747 
4748 /**
4749  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4750  * @hw:   pointer to the HW structure
4751  * @mem:  pointer to mem struct to fill out
4752  * @size: size of memory requested
4753  **/
4754 enum i40e_status_code
4755 i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
4756 			 struct i40e_virt_mem *mem,
4757 			 u32 size)
4758 {
4759 	if (!mem)
4760 		return I40E_ERR_PARAM;
4761 
4762 	mem->size = size;
4763 	mem->va = rte_zmalloc("i40e", size, 0);
4764 
4765 	if (mem->va)
4766 		return I40E_SUCCESS;
4767 	else
4768 		return I40E_ERR_NO_MEMORY;
4769 }
4770 
4771 /**
4772  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4773  * @hw:   pointer to the HW structure
4774  * @mem:  pointer to mem struct to free
4775  **/
4776 enum i40e_status_code
4777 i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
4778 		     struct i40e_virt_mem *mem)
4779 {
4780 	if (!mem)
4781 		return I40E_ERR_PARAM;
4782 
4783 	rte_free(mem->va);
4784 	mem->va = NULL;
4785 
4786 	return I40E_SUCCESS;
4787 }
4788 
4789 /**
4790  * Get the hardware capabilities, which will be parsed
4791  * and saved into struct i40e_hw.
4792  */
4793 static int
4794 i40e_get_cap(struct i40e_hw *hw)
4795 {
4796 	struct i40e_aqc_list_capabilities_element_resp *buf;
4797 	uint16_t len, size = 0;
4798 	int ret;
4799 
	/* Allocate a buffer large enough to hold the response data temporarily */
4801 	len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4802 						I40E_MAX_CAP_ELE_NUM;
4803 	buf = rte_zmalloc("i40e", len, 0);
4804 	if (!buf) {
4805 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
4806 		return I40E_ERR_NO_MEMORY;
4807 	}
4808 
	/* Get and parse the capabilities, then save them to hw */
4810 	ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4811 			i40e_aqc_opc_list_func_capabilities, NULL);
4812 	if (ret != I40E_SUCCESS)
4813 		PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4814 
	/* Free the temporary buffer after use */
4816 	rte_free(buf);
4817 
4818 	return ret;
4819 }
4820 
4821 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF	4
4822 
4823 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4824 		const char *value,
4825 		void *opaque)
4826 {
4827 	struct i40e_pf *pf;
4828 	unsigned long num;
4829 	char *end;
4830 
4831 	pf = (struct i40e_pf *)opaque;
4832 	RTE_SET_USED(key);
4833 
4834 	errno = 0;
4835 	num = strtoul(value, &end, 0);
4836 	if (errno != 0 || end == value || *end != 0) {
		PMD_DRV_LOG(WARNING, "Invalid VF queue number = %s, keeping "
			    "the current value = %hu", value, pf->vf_nb_qp_max);
		return -EINVAL;
4840 	}
4841 
4842 	if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4843 		pf->vf_nb_qp_max = (uint16_t)num;
4844 	else
		/* Return 0 so that a later valid instance of the same argument still works */
		PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu, it must be "
			    "a power of 2 and no greater than 16! Keeping "
			    "the current value = %hu", num, pf->vf_nb_qp_max);
4849 
4850 	return 0;
4851 }
4852 
4853 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4854 {
4855 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4856 	struct rte_kvargs *kvlist;
4857 	int kvargs_count;
4858 
	/* Set the default queue number per VF to 4 */
4860 	pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4861 
4862 	if (dev->device->devargs == NULL)
4863 		return 0;
4864 
4865 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4866 	if (kvlist == NULL)
		return -EINVAL;
4868 
4869 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4870 	if (!kvargs_count) {
4871 		rte_kvargs_free(kvlist);
4872 		return 0;
4873 	}
4874 
4875 	if (kvargs_count > 1)
		PMD_DRV_LOG(WARNING, "More than one \"%s\" argument given; only "
			    "the first invalid or the last valid one takes effect!",
4878 			    ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4879 
4880 	rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4881 			   i40e_pf_parse_vf_queue_number_handler, pf);
4882 
4883 	rte_kvargs_free(kvlist);
4884 
4885 	return 0;
4886 }
4887 
4888 static int
4889 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4890 {
4891 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4892 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4893 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4894 	uint16_t qp_count = 0, vsi_count = 0;
4895 
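	/*
	 * qp_count and vsi_count accumulate the queues and VSIs claimed by
	 * the FDIR, LAN, VF and VMDq blocks below; both totals are checked
	 * against the function capabilities at the end.
	 */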
4896 	if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4897 		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4898 		return -EINVAL;
4899 	}
4900 
4901 	i40e_pf_config_vf_rxq_number(dev);
4902 
	/* Initialize link flow control (LFC) parameters */
4904 	pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4905 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4906 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4907 
4908 	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4909 	pf->max_num_vsi = hw->func_caps.num_vsis;
4910 	pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4911 	pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4912 
4913 	/* FDir queue/VSI allocation */
4914 	pf->fdir_qp_offset = 0;
4915 	if (hw->func_caps.fd) {
4916 		pf->flags |= I40E_FLAG_FDIR;
4917 		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4918 	} else {
4919 		pf->fdir_nb_qps = 0;
4920 	}
4921 	qp_count += pf->fdir_nb_qps;
4922 	vsi_count += 1;
4923 
4924 	/* LAN queue/VSI allocation */
4925 	pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4926 	if (!hw->func_caps.rss) {
4927 		pf->lan_nb_qps = 1;
4928 	} else {
4929 		pf->flags |= I40E_FLAG_RSS;
4930 		if (hw->mac.type == I40E_MAC_X722)
4931 			pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4932 		pf->lan_nb_qps = pf->lan_nb_qp_max;
4933 	}
4934 	qp_count += pf->lan_nb_qps;
4935 	vsi_count += 1;
4936 
4937 	/* VF queue/VSI allocation */
4938 	pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4939 	if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4940 		pf->flags |= I40E_FLAG_SRIOV;
4941 		pf->vf_nb_qps = pf->vf_nb_qp_max;
4942 		pf->vf_num = pci_dev->max_vfs;
4943 		PMD_DRV_LOG(DEBUG,
4944 			"%u VF VSIs, %u queues per VF VSI, in total %u queues",
4945 			pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4946 	} else {
4947 		pf->vf_nb_qps = 0;
4948 		pf->vf_num = 0;
4949 	}
4950 	qp_count += pf->vf_nb_qps * pf->vf_num;
4951 	vsi_count += pf->vf_num;
4952 
4953 	/* VMDq queue/VSI allocation */
4954 	pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4955 	pf->vmdq_nb_qps = 0;
4956 	pf->max_nb_vmdq_vsi = 0;
4957 	if (hw->func_caps.vmdq) {
4958 		if (qp_count < hw->func_caps.num_tx_qp &&
4959 			vsi_count < hw->func_caps.num_vsis) {
4960 			pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4961 				qp_count) / pf->vmdq_nb_qp_max;
4962 
			/* Limit the number of VMDq VSIs to the maximum that
			 * ethdev can support
			 */
4966 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4967 				hw->func_caps.num_vsis - vsi_count);
4968 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4969 				RTE_ETH_64_POOLS);
4970 			if (pf->max_nb_vmdq_vsi) {
4971 				pf->flags |= I40E_FLAG_VMDQ;
4972 				pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4973 				PMD_DRV_LOG(DEBUG,
4974 					"%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4975 					pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4976 					pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4977 			} else {
4978 				PMD_DRV_LOG(INFO,
					"Not enough queues left for VMDq");
4980 			}
4981 		} else {
4982 			PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4983 		}
4984 	}
4985 	qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4986 	vsi_count += pf->max_nb_vmdq_vsi;
4987 
4988 	if (hw->func_caps.dcb)
4989 		pf->flags |= I40E_FLAG_DCB;
4990 
4991 	if (qp_count > hw->func_caps.num_tx_qp) {
4992 		PMD_DRV_LOG(ERR,
4993 			"Failed to allocate %u queues, which exceeds the hardware maximum %u",
4994 			qp_count, hw->func_caps.num_tx_qp);
4995 		return -EINVAL;
4996 	}
4997 	if (vsi_count > hw->func_caps.num_vsis) {
4998 		PMD_DRV_LOG(ERR,
4999 			"Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
5000 			vsi_count, hw->func_caps.num_vsis);
5001 		return -EINVAL;
5002 	}
5003 
5004 	/**
5005 	 * Enable outer VLAN processing if firmware version is greater
5006 	 * than v8.3
5007 	 */
5008 	if (hw->aq.fw_maj_ver > 8 ||
5009 	    (hw->aq.fw_maj_ver == 8 && hw->aq.fw_min_ver > 3)) {
5010 		pf->fw8_3gt = true;
5011 	} else {
5012 		pf->fw8_3gt = false;
5013 	}
5014 
5015 	return 0;
5016 }
5017 
5018 static int
5019 i40e_pf_get_switch_config(struct i40e_pf *pf)
5020 {
5021 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5022 	struct i40e_aqc_get_switch_config_resp *switch_config;
5023 	struct i40e_aqc_switch_config_element_resp *element;
5024 	uint16_t start_seid = 0, num_reported;
5025 	int ret;
5026 
	switch_config = (struct i40e_aqc_get_switch_config_resp *)
5028 			rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
5029 	if (!switch_config) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
5031 		return -ENOMEM;
5032 	}
5033 
5034 	/* Get the switch configurations */
5035 	ret = i40e_aq_get_switch_config(hw, switch_config,
5036 		I40E_AQ_LARGE_BUF, &start_seid, NULL);
5037 	if (ret != I40E_SUCCESS) {
5038 		PMD_DRV_LOG(ERR, "Failed to get switch configurations");
5039 		goto fail;
5040 	}
5041 	num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
	if (num_reported != 1) { /* The number should be 1 */
		PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
		/* ret still holds success here, so return an error explicitly */
		ret = -EINVAL;
		goto fail;
5045 	}
5046 
5047 	/* Parse the switch configuration elements */
5048 	element = &(switch_config->element[0]);
5049 	if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
5050 		pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
5051 		pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
5052 	} else
5053 		PMD_DRV_LOG(INFO, "Unknown element type");
5054 
5055 fail:
5056 	rte_free(switch_config);
5057 
5058 	return ret;
5059 }
5060 
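/**
 * i40e_res_pool_init - set up a [base, base + num) resource range
 *
 * The pool starts as a single free entry covering the whole range;
 * i40e_res_pool_alloc() carves best-fit chunks out of it and
 * i40e_res_pool_free() returns them, merging adjacent free entries.
 */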
5061 static int
i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
5063 			uint32_t num)
5064 {
5065 	struct pool_entry *entry;
5066 
5067 	if (pool == NULL || num == 0)
5068 		return -EINVAL;
5069 
5070 	entry = rte_zmalloc("i40e", sizeof(*entry), 0);
5071 	if (entry == NULL) {
5072 		PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
5073 		return -ENOMEM;
5074 	}
5075 
	/* Initialize the queue heap */
5077 	pool->num_free = num;
5078 	pool->num_alloc = 0;
5079 	pool->base = base;
5080 	LIST_INIT(&pool->alloc_list);
5081 	LIST_INIT(&pool->free_list);
5082 
	/* Initialize the element */
5084 	entry->base = 0;
5085 	entry->len = num;
5086 
5087 	LIST_INSERT_HEAD(&pool->free_list, entry, next);
5088 	return 0;
5089 }
5090 
5091 static void
5092 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
5093 {
5094 	struct pool_entry *entry, *next_entry;
5095 
5096 	if (pool == NULL)
5097 		return;
5098 
5099 	for (entry = LIST_FIRST(&pool->alloc_list);
5100 			entry && (next_entry = LIST_NEXT(entry, next), 1);
5101 			entry = next_entry) {
5102 		LIST_REMOVE(entry, next);
5103 		rte_free(entry);
5104 	}
5105 
5106 	for (entry = LIST_FIRST(&pool->free_list);
5107 			entry && (next_entry = LIST_NEXT(entry, next), 1);
5108 			entry = next_entry) {
5109 		LIST_REMOVE(entry, next);
5110 		rte_free(entry);
5111 	}
5112 
5113 	pool->num_free = 0;
5114 	pool->num_alloc = 0;
5115 	pool->base = 0;
5116 	LIST_INIT(&pool->alloc_list);
5117 	LIST_INIT(&pool->free_list);
5118 }
5119 
5120 static int
5121 i40e_res_pool_free(struct i40e_res_pool_info *pool,
5122 		       uint32_t base)
5123 {
5124 	struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
5125 	uint32_t pool_offset;
5126 	uint16_t len;
5127 	int insert;
5128 
5129 	if (pool == NULL) {
5130 		PMD_DRV_LOG(ERR, "Invalid parameter");
5131 		return -EINVAL;
5132 	}
5133 
5134 	pool_offset = base - pool->base;
5135 	/* Lookup in alloc list */
5136 	LIST_FOREACH(entry, &pool->alloc_list, next) {
5137 		if (entry->base == pool_offset) {
5138 			valid_entry = entry;
5139 			LIST_REMOVE(entry, next);
5140 			break;
5141 		}
5142 	}
5143 
	/* Not found, return */
5145 	if (valid_entry == NULL) {
5146 		PMD_DRV_LOG(ERR, "Failed to find entry");
5147 		return -EINVAL;
5148 	}
5149 
	/**
	 * Found it; move it to the free list and try to merge.
	 * To make merging easier, the free list is kept sorted by base.
	 * Find the adjacent prev and next entries.
	 */
5155 	prev = next = NULL;
5156 	LIST_FOREACH(entry, &pool->free_list, next) {
5157 		if (entry->base > valid_entry->base) {
5158 			next = entry;
5159 			break;
5160 		}
5161 		prev = entry;
5162 	}
5163 
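	/*
	 * Example: with free entries {base 0, len 4} and {base 8, len 4},
	 * freeing {base 4, len 4} merges with both neighbours into
	 * {base 0, len 12}; freeing {base 4, len 2} merges with the
	 * previous entry only, and {base 6, len 2} with the next only.
	 */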
5164 	insert = 0;
5165 	len = valid_entry->len;
	/* Try to merge with the next one */
5167 	if (next != NULL) {
5168 		/* Merge with next one */
5169 		if (valid_entry->base + len == next->base) {
5170 			next->base = valid_entry->base;
5171 			next->len += len;
5172 			rte_free(valid_entry);
5173 			valid_entry = next;
5174 			insert = 1;
5175 		}
5176 	}
5177 
5178 	if (prev != NULL) {
		/* Merge with the previous one */
		if (prev->base + prev->len == valid_entry->base) {
			/*
			 * If valid_entry was already merged with the next
			 * node, its length also covers that node, so add
			 * valid_entry->len rather than the original len.
			 */
			prev->len += valid_entry->len;
			/* If it merged with the next node, unlink it before freeing */
5183 			if (insert == 1) {
5184 				LIST_REMOVE(valid_entry, next);
5185 				rte_free(valid_entry);
5186 				valid_entry = NULL;
5187 			} else {
5188 				rte_free(valid_entry);
5189 				valid_entry = NULL;
5190 				insert = 1;
5191 			}
5192 		}
5193 	}
5194 
	/* Nothing merged; insert the entry */
5196 	if (insert == 0) {
5197 		if (prev != NULL)
5198 			LIST_INSERT_AFTER(prev, valid_entry, next);
5199 		else if (next != NULL)
5200 			LIST_INSERT_BEFORE(next, valid_entry, next);
5201 		else /* It's empty list, insert to head */
5202 			LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
5203 	}
5204 
5205 	pool->num_free += len;
5206 	pool->num_alloc -= len;
5207 
5208 	return 0;
5209 }
5210 
5211 static int
5212 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
5213 		       uint16_t num)
5214 {
5215 	struct pool_entry *entry, *valid_entry;
5216 
5217 	if (pool == NULL || num == 0) {
5218 		PMD_DRV_LOG(ERR, "Invalid parameter");
5219 		return -EINVAL;
5220 	}
5221 
5222 	if (pool->num_free < num) {
		PMD_DRV_LOG(ERR, "No resource. requested:%u, available:%u",
5224 			    num, pool->num_free);
5225 		return -ENOMEM;
5226 	}
5227 
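	/*
	 * Best-fit scan: prefer an exact-length entry, otherwise take the
	 * smallest entry that is still large enough, keeping large
	 * contiguous ranges intact for later requests.
	 */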
5228 	valid_entry = NULL;
	/* Walk the free list */
5230 	LIST_FOREACH(entry, &pool->free_list, next) {
5231 		if (entry->len >= num) {
5232 			/* Find best one */
5233 			if (entry->len == num) {
5234 				valid_entry = entry;
5235 				break;
5236 			}
5237 			if (valid_entry == NULL || valid_entry->len > entry->len)
5238 				valid_entry = entry;
5239 		}
5240 	}
5241 
	/* No entry satisfies the request, return */
5243 	if (valid_entry == NULL) {
5244 		PMD_DRV_LOG(ERR, "No valid entry found");
5245 		return -ENOMEM;
5246 	}
	/**
	 * The entry has exactly the requested number of queues;
	 * remove it from the free list.
	 */
5251 	if (valid_entry->len == num) {
5252 		LIST_REMOVE(valid_entry, next);
5253 	} else {
		/**
		 * The entry has more queues than requested; create a new
		 * entry for the alloc list, then advance the base and
		 * shrink the length of the remainder left in the free list.
		 */
5259 		entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
5260 		if (entry == NULL) {
5261 			PMD_DRV_LOG(ERR,
5262 				"Failed to allocate memory for resource pool");
5263 			return -ENOMEM;
5264 		}
5265 		entry->base = valid_entry->base;
5266 		entry->len = num;
5267 		valid_entry->base += num;
5268 		valid_entry->len -= num;
5269 		valid_entry = entry;
5270 	}
5271 
5272 	/* Insert it into alloc list, not sorted */
5273 	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
5274 
5275 	pool->num_free -= valid_entry->len;
5276 	pool->num_alloc += valid_entry->len;
5277 
5278 	return valid_entry->base + pool->base;
5279 }
5280 
/**
 * bitmap_is_subset - Check whether src2 is a subset of src1,
 * e.g. src1 = 0x7 contains src2 = 0x5, but not src2 = 0x9.
 **/
5284 static inline int
5285 bitmap_is_subset(uint8_t src1, uint8_t src2)
5286 {
5287 	return !((src1 ^ src2) & src2);
5288 }
5289 
5290 static enum i40e_status_code
5291 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5292 {
5293 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5294 
5295 	/* If DCB is not supported, only default TC is supported */
5296 	if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
5297 		PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
5298 		return I40E_NOT_SUPPORTED;
5299 	}
5300 
5301 	if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
5302 		PMD_DRV_LOG(ERR,
5303 			"Enabled TC map 0x%x not applicable to HW support 0x%x",
5304 			hw->func_caps.enabled_tcmap, enabled_tcmap);
5305 		return I40E_NOT_SUPPORTED;
5306 	}
5307 	return I40E_SUCCESS;
5308 }
5309 
5310 int
5311 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
5312 				struct i40e_vsi_vlan_pvid_info *info)
5313 {
5314 	struct i40e_hw *hw;
5315 	struct i40e_vsi_context ctxt;
5316 	uint8_t vlan_flags = 0;
5317 	int ret;
5318 
5319 	if (vsi == NULL || info == NULL) {
5320 		PMD_DRV_LOG(ERR, "invalid parameters");
5321 		return I40E_ERR_PARAM;
5322 	}
5323 
5324 	if (info->on) {
5325 		vsi->info.pvid = info->config.pvid;
		/**
		 * If PVID insertion is enabled, only tagged packets are
		 * allowed to be sent out.
		 */
5330 		vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
5331 				I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5332 	} else {
5333 		vsi->info.pvid = 0;
5334 		if (info->config.reject.tagged == 0)
5335 			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5336 
5337 		if (info->config.reject.untagged == 0)
5338 			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
5339 	}
5340 	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
5341 					I40E_AQ_VSI_PVLAN_MODE_MASK);
5342 	vsi->info.port_vlan_flags |= vlan_flags;
5343 	vsi->info.valid_sections =
5344 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5345 	memset(&ctxt, 0, sizeof(ctxt));
5346 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5347 	ctxt.seid = vsi->seid;
5348 
5349 	hw = I40E_VSI_TO_HW(vsi);
5350 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5351 	if (ret != I40E_SUCCESS)
5352 		PMD_DRV_LOG(ERR, "Failed to update VSI params");
5353 
5354 	return ret;
5355 }
5356 
5357 static int
5358 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5359 {
5360 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5361 	int i, ret;
5362 	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
5363 
5364 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5365 	if (ret != I40E_SUCCESS)
5366 		return ret;
5367 
5368 	if (!vsi->seid) {
5369 		PMD_DRV_LOG(ERR, "seid not valid");
5370 		return -EINVAL;
5371 	}
5372 
5373 	memset(&tc_bw_data, 0, sizeof(tc_bw_data));
5374 	tc_bw_data.tc_valid_bits = enabled_tcmap;
5375 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5376 		tc_bw_data.tc_bw_credits[i] =
5377 			(enabled_tcmap & (1 << i)) ? 1 : 0;
5378 
5379 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
5380 	if (ret != I40E_SUCCESS) {
5381 		PMD_DRV_LOG(ERR, "Failed to configure TC BW");
5382 		return ret;
5383 	}
5384 
5385 	rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
5386 					sizeof(vsi->info.qs_handle));
5387 	return I40E_SUCCESS;
5388 }
5389 
5390 static enum i40e_status_code
5391 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
5392 				 struct i40e_aqc_vsi_properties_data *info,
5393 				 uint8_t enabled_tcmap)
5394 {
5395 	enum i40e_status_code ret;
5396 	int i, total_tc = 0;
5397 	uint16_t qpnum_per_tc, bsf, qp_idx;
5398 
5399 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5400 	if (ret != I40E_SUCCESS)
5401 		return ret;
5402 
5403 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5404 		if (enabled_tcmap & (1 << i))
5405 			total_tc++;
5406 	if (total_tc == 0)
5407 		total_tc = 1;
5408 	vsi->enabled_tc = enabled_tcmap;
5409 
5410 	/* Number of queues per enabled TC */
5411 	qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
5412 	qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
5413 	bsf = rte_bsf32(qpnum_per_tc);
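	/*
	 * qpnum_per_tc was aligned down to a power of two above, so bsf is
	 * its log2; the hardware encodes the per-TC queue count as this
	 * exponent in the QUE_NUMBER field of tc_mapping.
	 */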
5414 
	/* Adjust nb_qps to the number of queues that can actually be applied */
5416 	if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
5417 		vsi->nb_qps = qpnum_per_tc * total_tc;
5418 
	/**
	 * Configure TC and queue mapping parameters: each enabled TC is
	 * allocated qpnum_per_tc queues, while traffic of a disabled TC
	 * is served by the default queue.
	 */
5424 	qp_idx = 0;
5425 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5426 		if (vsi->enabled_tc & (1 << i)) {
5427 			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
5428 					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5429 				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5430 			qp_idx += qpnum_per_tc;
5431 		} else
5432 			info->tc_mapping[i] = 0;
5433 	}
5434 
5435 	/* Associate queue number with VSI */
5436 	if (vsi->type == I40E_VSI_SRIOV) {
5437 		info->mapping_flags |=
5438 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5439 		for (i = 0; i < vsi->nb_qps; i++)
5440 			info->queue_mapping[i] =
5441 				rte_cpu_to_le_16(vsi->base_queue + i);
5442 	} else {
5443 		info->mapping_flags |=
5444 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5445 		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
5446 	}
5447 	info->valid_sections |=
5448 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5449 
5450 	return I40E_SUCCESS;
5451 }
5452 
5453 static int
5454 i40e_veb_release(struct i40e_veb *veb)
5455 {
5456 	struct i40e_vsi *vsi;
5457 	struct i40e_hw *hw;
5458 
5459 	if (veb == NULL)
5460 		return -EINVAL;
5461 
5462 	if (!TAILQ_EMPTY(&veb->head)) {
5463 		PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
5464 		return -EACCES;
5465 	}
5466 	/* associate_vsi field is NULL for floating VEB */
5467 	if (veb->associate_vsi != NULL) {
5468 		vsi = veb->associate_vsi;
5469 		hw = I40E_VSI_TO_HW(vsi);
5470 
5471 		vsi->uplink_seid = veb->uplink_seid;
5472 		vsi->veb = NULL;
5473 	} else {
5474 		veb->associate_pf->main_vsi->floating_veb = NULL;
5475 		hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5476 	}
5477 
5478 	i40e_aq_delete_element(hw, veb->seid, NULL);
5479 	rte_free(veb);
5480 	return I40E_SUCCESS;
5481 }
5482 
5483 /* Setup a veb */
5484 static struct i40e_veb *
5485 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5486 {
5487 	struct i40e_veb *veb;
5488 	int ret;
5489 	struct i40e_hw *hw;
5490 
5491 	if (pf == NULL) {
5492 		PMD_DRV_LOG(ERR,
			    "veb setup failed, associated PF shouldn't be NULL");
5494 		return NULL;
5495 	}
5496 	hw = I40E_PF_TO_HW(pf);
5497 
5498 	veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5499 	if (!veb) {
5500 		PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5501 		goto fail;
5502 	}
5503 
5504 	veb->associate_vsi = vsi;
5505 	veb->associate_pf = pf;
5506 	TAILQ_INIT(&veb->head);
5507 	veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5508 
	/* Create a floating VEB if vsi is NULL */
5510 	if (vsi != NULL) {
5511 		ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5512 				      I40E_DEFAULT_TCMAP, false,
5513 				      &veb->seid, false, NULL);
5514 	} else {
5515 		ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5516 				      true, &veb->seid, false, NULL);
5517 	}
5518 
5519 	if (ret != I40E_SUCCESS) {
5520 		PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5521 			    hw->aq.asq_last_status);
5522 		goto fail;
5523 	}
5524 	veb->enabled_tc = I40E_DEFAULT_TCMAP;
5525 
5526 	/* get statistics index */
5527 	ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5528 				&veb->stats_idx, NULL, NULL, NULL);
5529 	if (ret != I40E_SUCCESS) {
5530 		PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5531 			    hw->aq.asq_last_status);
5532 		goto fail;
5533 	}
5534 	/* Get VEB bandwidth, to be implemented */
	/* The associated VSI now binds to the VEB; set its uplink to this VEB */
5536 	if (vsi)
5537 		vsi->uplink_seid = veb->seid;
5538 
5539 	return veb;
5540 fail:
5541 	rte_free(veb);
5542 	return NULL;
5543 }
5544 
5545 int
5546 i40e_vsi_release(struct i40e_vsi *vsi)
5547 {
5548 	struct i40e_pf *pf;
5549 	struct i40e_hw *hw;
5550 	struct i40e_vsi_list *vsi_list;
5551 	void *temp;
5552 	int ret;
5553 	struct i40e_mac_filter *f;
5554 	uint16_t user_param;
5555 
5556 	if (!vsi)
5557 		return I40E_SUCCESS;
5558 
5559 	if (!vsi->adapter)
5560 		return -EFAULT;
5561 
5562 	user_param = vsi->user_param;
5563 
5564 	pf = I40E_VSI_TO_PF(vsi);
5565 	hw = I40E_VSI_TO_HW(vsi);
5566 
	/* VSI has children attached; release them first */
5568 	if (vsi->veb) {
5569 		RTE_TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5570 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5571 				return -1;
5572 		}
5573 		i40e_veb_release(vsi->veb);
5574 	}
5575 
5576 	if (vsi->floating_veb) {
5577 		RTE_TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head,
5578 			list, temp) {
5579 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5580 				return -1;
5581 		}
5582 	}
5583 
5584 	/* Remove all macvlan filters of the VSI */
5585 	i40e_vsi_remove_all_macvlan_filter(vsi);
5586 	RTE_TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5587 		rte_free(f);
5588 
5589 	if (vsi->type != I40E_VSI_MAIN &&
5590 	    ((vsi->type != I40E_VSI_SRIOV) ||
5591 	    !pf->floating_veb_list[user_param])) {
5592 		/* Remove vsi from parent's sibling list */
5593 		if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5594 			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5595 			return I40E_ERR_PARAM;
5596 		}
5597 		TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5598 				&vsi->sib_vsi_list, list);
5599 
		/* Remove all switch elements of the VSI */
5601 		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5602 		if (ret != I40E_SUCCESS)
5603 			PMD_DRV_LOG(ERR, "Failed to delete element");
5604 	}
5605 
5606 	if ((vsi->type == I40E_VSI_SRIOV) &&
5607 	    pf->floating_veb_list[user_param]) {
5608 		/* Remove vsi from parent's sibling list */
5609 		if (vsi->parent_vsi == NULL ||
5610 		    vsi->parent_vsi->floating_veb == NULL) {
5611 			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5612 			return I40E_ERR_PARAM;
5613 		}
5614 		TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5615 			     &vsi->sib_vsi_list, list);
5616 
		/* Remove all switch elements of the VSI */
5618 		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5619 		if (ret != I40E_SUCCESS)
5620 			PMD_DRV_LOG(ERR, "Failed to delete element");
5621 	}
5622 
5623 	i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5624 
5625 	if (vsi->type != I40E_VSI_SRIOV)
5626 		i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5627 	rte_free(vsi);
5628 
5629 	return I40E_SUCCESS;
5630 }
5631 
5632 static int
5633 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5634 {
5635 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5636 	struct i40e_aqc_remove_macvlan_element_data def_filter;
5637 	struct i40e_mac_filter_info filter;
5638 	int ret;
5639 
5640 	if (vsi->type != I40E_VSI_MAIN)
5641 		return I40E_ERR_CONFIG;
5642 	memset(&def_filter, 0, sizeof(def_filter));
5643 	rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5644 					ETH_ADDR_LEN);
5645 	def_filter.vlan_tag = 0;
5646 	def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5647 				I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5648 	ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5649 	if (ret != I40E_SUCCESS) {
5650 		struct i40e_mac_filter *f;
5651 		struct rte_ether_addr *mac;
5652 
5653 		PMD_DRV_LOG(DEBUG,
5654 			    "Cannot remove the default macvlan filter");
		/* The permanent MAC needs to be added to the MAC list */
5656 		f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5657 		if (f == NULL) {
5658 			PMD_DRV_LOG(ERR, "failed to allocate memory");
5659 			return I40E_ERR_NO_MEMORY;
5660 		}
5661 		mac = &f->mac_info.mac_addr;
5662 		rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5663 				ETH_ADDR_LEN);
5664 		f->mac_info.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5665 		TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5666 		vsi->mac_num++;
5667 
5668 		return ret;
5669 	}
5670 	rte_memcpy(&filter.mac_addr,
5671 		(struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5672 	filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5673 	return i40e_vsi_add_mac(vsi, &filter);
5674 }
5675 
5676 /*
5677  * i40e_vsi_get_bw_config - Query VSI BW Information
5678  * @vsi: the VSI to be queried
5679  *
5680  * Returns 0 on success, negative value on failure
5681  */
5682 static enum i40e_status_code
5683 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5684 {
5685 	struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5686 	struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5687 	struct i40e_hw *hw = &vsi->adapter->hw;
5688 	i40e_status ret;
5689 	int i;
5690 	uint32_t bw_max;
5691 
5692 	memset(&bw_config, 0, sizeof(bw_config));
5693 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5694 	if (ret != I40E_SUCCESS) {
5695 		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5696 			    hw->aq.asq_last_status);
5697 		return ret;
5698 	}
5699 
5700 	memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5701 	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5702 					&ets_sla_config, NULL);
5703 	if (ret != I40E_SUCCESS) {
5704 		PMD_DRV_LOG(ERR,
5705 			"VSI failed to get TC bandwidth configuration %u",
5706 			hw->aq.asq_last_status);
5707 		return ret;
5708 	}
5709 
5710 	/* store and print out BW info */
5711 	vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5712 	vsi->bw_info.bw_max = bw_config.max_bw;
5713 	PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5714 	PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
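	/*
	 * tc_bw_max is reported as two little-endian 16-bit words; combine
	 * them into one 32-bit value holding a 4-bit max-credit field per TC.
	 */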
5715 	bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5716 		    (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5717 		     I40E_16_BIT_WIDTH);
5718 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5719 		vsi->bw_info.bw_ets_share_credits[i] =
5720 				ets_sla_config.share_credits[i];
5721 		vsi->bw_info.bw_ets_credits[i] =
5722 				rte_le_to_cpu_16(ets_sla_config.credits[i]);
5723 		/* 4 bits per TC, 4th bit is reserved */
5724 		vsi->bw_info.bw_ets_max[i] =
5725 			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5726 				  RTE_LEN2MASK(3, uint8_t));
5727 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5728 			    vsi->bw_info.bw_ets_share_credits[i]);
5729 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5730 			    vsi->bw_info.bw_ets_credits[i]);
5731 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5732 			    vsi->bw_info.bw_ets_max[i]);
5733 	}
5734 
5735 	return I40E_SUCCESS;
5736 }
5737 
5738 /* i40e_enable_pf_lb
5739  * @pf: pointer to the pf structure
5740  *
5741  * allow loopback on pf
5742  */
5743 static inline void
5744 i40e_enable_pf_lb(struct i40e_pf *pf)
5745 {
5746 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5747 	struct i40e_vsi_context ctxt;
5748 	int ret;
5749 
5750 	/* Use the FW API if FW >= v5.0 */
5751 	if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
5752 		PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5753 		return;
5754 	}
5755 
5756 	memset(&ctxt, 0, sizeof(ctxt));
5757 	ctxt.seid = pf->main_vsi_seid;
5758 	ctxt.pf_num = hw->pf_id;
5759 	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5760 	if (ret) {
5761 		PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5762 			    ret, hw->aq.asq_last_status);
5763 		return;
5764 	}
5765 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5766 	ctxt.info.valid_sections =
5767 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5768 	ctxt.info.switch_id |=
5769 		rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5770 
5771 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5772 	if (ret)
5773 		PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5774 			    hw->aq.asq_last_status);
5775 }
5776 
5777 /* i40e_pf_set_source_prune
5778  * @pf: pointer to the pf structure
5779  * @on: Enable/disable source prune
5780  *
5781  * set source prune on pf
5782  */
5783 int
5784 i40e_pf_set_source_prune(struct i40e_pf *pf, int on)
5785 {
5786 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5787 	struct i40e_vsi_context ctxt;
5788 	int ret;
5789 
5790 	memset(&ctxt, 0, sizeof(ctxt));
5791 	ctxt.seid = pf->main_vsi_seid;
5792 	ctxt.pf_num = hw->pf_id;
5793 	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5794 	if (ret) {
5795 		PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5796 			    ret, hw->aq.asq_last_status);
5797 		return ret;
5798 	}
5799 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5800 	ctxt.info.valid_sections =
5801 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5802 	if (on)
5803 		ctxt.info.switch_id &=
5804 			~rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5805 	else
5806 		ctxt.info.switch_id |=
5807 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5808 
5809 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5810 	if (ret)
5811 		PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5812 			    hw->aq.asq_last_status);
5813 
5814 	return ret;
5815 }
5816 
5817 /* Setup a VSI */
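/*
 * uplink_vsi may be NULL only for the MAIN VSI, or for an SRIOV VSI that
 * attaches to the floating VEB; user_param carries the VF index for
 * SRIOV VSIs and the loopback-enable flag for VMDq VSIs.
 */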
5818 struct i40e_vsi *
5819 i40e_vsi_setup(struct i40e_pf *pf,
5820 	       enum i40e_vsi_type type,
5821 	       struct i40e_vsi *uplink_vsi,
5822 	       uint16_t user_param)
5823 {
5824 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5825 	struct i40e_vsi *vsi;
5826 	struct i40e_mac_filter_info filter;
5827 	int ret;
5828 	struct i40e_vsi_context ctxt;
5829 	struct rte_ether_addr broadcast =
5830 		{.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5831 
5832 	if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5833 	    uplink_vsi == NULL) {
5834 		PMD_DRV_LOG(ERR,
5835 			"VSI setup failed, VSI link shouldn't be NULL");
5836 		return NULL;
5837 	}
5838 
5839 	if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5840 		PMD_DRV_LOG(ERR,
5841 			"VSI setup failed, MAIN VSI uplink VSI should be NULL");
5842 		return NULL;
5843 	}
5844 
	/* Two situations:
	 * 1. type is not MAIN and the uplink VSI is not NULL:
	 *    if the uplink VSI has not set up a VEB yet, create one first
	 *    in its veb field.
	 * 2. type is SRIOV and the uplink is NULL:
	 *    if there is no floating VEB yet, create one in the
	 *    floating_veb field.
	 */
5851 
5852 	if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5853 	    uplink_vsi->veb == NULL) {
5854 		uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5855 
5856 		if (uplink_vsi->veb == NULL) {
5857 			PMD_DRV_LOG(ERR, "VEB setup failed");
5858 			return NULL;
5859 		}
		/* Set ALLOWLOOPBACK on the PF when the VEB is created */
5861 		i40e_enable_pf_lb(pf);
5862 	}
5863 
5864 	if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5865 	    pf->main_vsi->floating_veb == NULL) {
5866 		pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5867 
5868 		if (pf->main_vsi->floating_veb == NULL) {
5869 			PMD_DRV_LOG(ERR, "VEB setup failed");
5870 			return NULL;
5871 		}
5872 	}
5873 
	/* Source pruning is disabled by default to support VRRP */
5875 	i40e_pf_set_source_prune(pf, 0);
5876 
5877 	vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5878 	if (!vsi) {
5879 		PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5880 		return NULL;
5881 	}
5882 	TAILQ_INIT(&vsi->mac_list);
5883 	vsi->type = type;
5884 	vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5885 	vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5886 	vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5887 	vsi->user_param = user_param;
5888 	vsi->vlan_anti_spoof_on = 0;
5889 	vsi->vlan_filter_on = 0;
5890 	/* Allocate queues */
5891 	switch (vsi->type) {
5892 	case I40E_VSI_MAIN  :
5893 		vsi->nb_qps = pf->lan_nb_qps;
5894 		break;
5895 	case I40E_VSI_SRIOV :
5896 		vsi->nb_qps = pf->vf_nb_qps;
5897 		break;
5898 	case I40E_VSI_VMDQ2:
5899 		vsi->nb_qps = pf->vmdq_nb_qps;
5900 		break;
5901 	case I40E_VSI_FDIR:
5902 		vsi->nb_qps = pf->fdir_nb_qps;
5903 		break;
5904 	default:
5905 		goto fail_mem;
5906 	}
	/*
	 * The filter status descriptor is reported on RX queue 0, while
	 * the TX queue used for FDIR filter programming has no such
	 * constraint and may be any queue. To keep things simple, the
	 * FDIR VSI uses queue pair 0; to guarantee that, its queue
	 * allocation must be done before this function is called.
	 */
	if (type != I40E_VSI_FDIR) {
		ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
				    vsi->seid, ret);
			goto fail_mem;
		}
		vsi->base_queue = ret;
	} else {
		vsi->base_queue = I40E_FDIR_QUEUE_ID;
	}
5925 
5926 	/* VF has MSIX interrupt in VF range, don't allocate here */
5927 	if (type == I40E_VSI_MAIN) {
5928 		if (pf->support_multi_driver) {
			/* With multi-driver support, INT0 must be used instead
			 * of allocating from the MSI-X pool. The MSI-X pool is
			 * initialized from INT1, so it is safe to simply set
			 * msix_intr to 0 and nb_msix to 1 without calling
			 * i40e_res_pool_alloc.
			 */
5934 			vsi->msix_intr = 0;
5935 			vsi->nb_msix = 1;
5936 		} else {
5937 			ret = i40e_res_pool_alloc(&pf->msix_pool,
5938 						  RTE_MIN(vsi->nb_qps,
5939 						     RTE_MAX_RXTX_INTR_VEC_ID));
5940 			if (ret < 0) {
5941 				PMD_DRV_LOG(ERR,
5942 					    "VSI MAIN %d get heap failed %d",
5943 					    vsi->seid, ret);
5944 				goto fail_queue_alloc;
5945 			}
5946 			vsi->msix_intr = ret;
5947 			vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5948 					       RTE_MAX_RXTX_INTR_VEC_ID);
5949 		}
5950 	} else if (type != I40E_VSI_SRIOV) {
5951 		ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5952 		if (ret < 0) {
5953 			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5954 			if (type != I40E_VSI_FDIR)
5955 				goto fail_queue_alloc;
5956 			vsi->msix_intr = 0;
5957 			vsi->nb_msix = 0;
5958 		} else {
5959 			vsi->msix_intr = ret;
5960 			vsi->nb_msix = 1;
5961 		}
5962 	} else {
5963 		vsi->msix_intr = 0;
5964 		vsi->nb_msix = 0;
5965 	}
5966 
5967 	/* Add VSI */
5968 	if (type == I40E_VSI_MAIN) {
		/* For the main VSI no add is needed since it's the default one */
5970 		vsi->uplink_seid = pf->mac_seid;
5971 		vsi->seid = pf->main_vsi_seid;
5972 		/* Bind queues with specific MSIX interrupt */
		/**
		 * At least two interrupts are needed: one for miscellaneous
		 * causes, enabled from the OS side, and another for binding
		 * queues to interrupts, driven from the device side only.
		 */
5978 
5979 		/* Get default VSI parameters from hardware */
5980 		memset(&ctxt, 0, sizeof(ctxt));
5981 		ctxt.seid = vsi->seid;
5982 		ctxt.pf_num = hw->pf_id;
5983 		ctxt.uplink_seid = vsi->uplink_seid;
5984 		ctxt.vf_num = 0;
5985 		ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5986 		if (ret != I40E_SUCCESS) {
5987 			PMD_DRV_LOG(ERR, "Failed to get VSI params");
5988 			goto fail_msix_alloc;
5989 		}
5990 		rte_memcpy(&vsi->info, &ctxt.info,
5991 			sizeof(struct i40e_aqc_vsi_properties_data));
5992 		vsi->vsi_id = ctxt.vsi_number;
5993 		vsi->info.valid_sections = 0;
5994 
		/* Configure TCs, with only TC0 enabled */
5996 		if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5997 			I40E_SUCCESS) {
5998 			PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5999 			goto fail_msix_alloc;
6000 		}
6001 
6002 		/* TC, queue mapping */
6003 		memset(&ctxt, 0, sizeof(ctxt));
6004 		vsi->info.valid_sections |=
6005 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6006 		vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6007 					I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6008 		rte_memcpy(&ctxt.info, &vsi->info,
6009 			sizeof(struct i40e_aqc_vsi_properties_data));
6010 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
6011 						I40E_DEFAULT_TCMAP);
6012 		if (ret != I40E_SUCCESS) {
6013 			PMD_DRV_LOG(ERR,
6014 				"Failed to configure TC queue mapping");
6015 			goto fail_msix_alloc;
6016 		}
6017 		ctxt.seid = vsi->seid;
6018 		ctxt.pf_num = hw->pf_id;
6019 		ctxt.uplink_seid = vsi->uplink_seid;
6020 		ctxt.vf_num = 0;
6021 
6022 		/* Update VSI parameters */
6023 		ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6024 		if (ret != I40E_SUCCESS) {
6025 			PMD_DRV_LOG(ERR, "Failed to update VSI params");
6026 			goto fail_msix_alloc;
6027 		}
6028 
6029 		rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
6030 						sizeof(vsi->info.tc_mapping));
6031 		rte_memcpy(&vsi->info.queue_mapping,
6032 				&ctxt.info.queue_mapping,
6033 			sizeof(vsi->info.queue_mapping));
6034 		vsi->info.mapping_flags = ctxt.info.mapping_flags;
6035 		vsi->info.valid_sections = 0;
6036 
6037 		rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
6038 				ETH_ADDR_LEN);
6039 
		/**
		 * Updating the default filter settings is necessary to
		 * prevent reception of tagged packets.
		 * Some old firmware configurations load a default macvlan
		 * filter which accepts both tagged and untagged packets;
		 * the update replaces it with a normal filter if needed.
		 * For NVM 4.2.2 or later the update is no longer needed:
		 * firmware with a correct configuration loads the expected
		 * default macvlan filter, which cannot be removed.
		 */
6050 		i40e_update_default_filter_setting(vsi);
6051 		i40e_config_qinq(hw, vsi);
6052 	} else if (type == I40E_VSI_SRIOV) {
6053 		memset(&ctxt, 0, sizeof(ctxt));
		/**
		 * For other VSIs, the uplink_seid equals the uplink VSI's
		 * uplink_seid since they share the same VEB.
		 */
6058 		if (uplink_vsi == NULL)
6059 			vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
6060 		else
6061 			vsi->uplink_seid = uplink_vsi->uplink_seid;
6062 		ctxt.pf_num = hw->pf_id;
6063 		ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
6064 		ctxt.uplink_seid = vsi->uplink_seid;
6065 		ctxt.connection_type = 0x1;
6066 		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
6067 
6068 		/* Use the VEB configuration if FW >= v5.0 */
6069 		if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
6070 			/* Configure switch ID */
6071 			ctxt.info.valid_sections |=
6072 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6073 			ctxt.info.switch_id =
6074 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6075 		}
6076 
6077 		/* Configure port/vlan */
6078 		ctxt.info.valid_sections |=
6079 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6080 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
6081 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
6082 						hw->func_caps.enabled_tcmap);
6083 		if (ret != I40E_SUCCESS) {
6084 			PMD_DRV_LOG(ERR,
6085 				"Failed to configure TC queue mapping");
6086 			goto fail_msix_alloc;
6087 		}
6088 
6089 		ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
6090 		ctxt.info.valid_sections |=
6091 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
		/**
		 * Since the VSI is not created yet, only configure its
		 * parameters here; the VSI itself is added below.
		 */
6096 
6097 		i40e_config_qinq(hw, vsi);
6098 	} else if (type == I40E_VSI_VMDQ2) {
6099 		memset(&ctxt, 0, sizeof(ctxt));
		/*
		 * For other VSIs, the uplink_seid equals the uplink VSI's
		 * uplink_seid since they share the same VEB.
		 */
6104 		vsi->uplink_seid = uplink_vsi->uplink_seid;
6105 		ctxt.pf_num = hw->pf_id;
6106 		ctxt.vf_num = 0;
6107 		ctxt.uplink_seid = vsi->uplink_seid;
6108 		ctxt.connection_type = 0x1;
6109 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6110 
6111 		ctxt.info.valid_sections |=
6112 				rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
		/* user_param carries the flag to enable loopback */
6114 		if (user_param) {
6115 			ctxt.info.switch_id =
6116 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
6117 			ctxt.info.switch_id |=
6118 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6119 		}
6120 
6121 		/* Configure port/vlan */
6122 		ctxt.info.valid_sections |=
6123 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6124 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
6125 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
6126 						I40E_DEFAULT_TCMAP);
6127 		if (ret != I40E_SUCCESS) {
6128 			PMD_DRV_LOG(ERR,
6129 				"Failed to configure TC queue mapping");
6130 			goto fail_msix_alloc;
6131 		}
6132 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
6133 		ctxt.info.valid_sections |=
6134 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
6135 	} else if (type == I40E_VSI_FDIR) {
6136 		memset(&ctxt, 0, sizeof(ctxt));
6137 		vsi->uplink_seid = uplink_vsi->uplink_seid;
6138 		ctxt.pf_num = hw->pf_id;
6139 		ctxt.vf_num = 0;
6140 		ctxt.uplink_seid = vsi->uplink_seid;
6141 		ctxt.connection_type = 0x1;     /* regular data port */
6142 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6143 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
6144 						I40E_DEFAULT_TCMAP);
6145 		if (ret != I40E_SUCCESS) {
6146 			PMD_DRV_LOG(ERR,
6147 				"Failed to configure TC queue mapping.");
6148 			goto fail_msix_alloc;
6149 		}
6150 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
6151 		ctxt.info.valid_sections |=
6152 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
6153 	} else {
		PMD_DRV_LOG(ERR, "VSI: type not supported yet");
6155 		goto fail_msix_alloc;
6156 	}
6157 
6158 	if (vsi->type != I40E_VSI_MAIN) {
6159 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6160 		if (ret != I40E_SUCCESS) {
6161 			PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
6162 				    hw->aq.asq_last_status);
6163 			goto fail_msix_alloc;
6164 		}
6165 		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
6166 		vsi->info.valid_sections = 0;
6167 		vsi->seid = ctxt.seid;
6168 		vsi->vsi_id = ctxt.vsi_number;
6169 		vsi->sib_vsi_list.vsi = vsi;
6170 		if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
6171 			TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
6172 					  &vsi->sib_vsi_list, list);
6173 		} else {
6174 			TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
6175 					  &vsi->sib_vsi_list, list);
6176 		}
6177 	}
6178 
6179 	if (vsi->type != I40E_VSI_FDIR) {
		/* MAC/VLAN configuration for non-FDIR VSI */
6181 		rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
6182 		filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
6183 
6184 		ret = i40e_vsi_add_mac(vsi, &filter);
6185 		if (ret != I40E_SUCCESS) {
6186 			PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
6187 			goto fail_msix_alloc;
6188 		}
6189 	}
6190 
6191 	/* Get VSI BW information */
6192 	i40e_vsi_get_bw_config(vsi);
6193 	return vsi;
6194 fail_msix_alloc:
	i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
fail_queue_alloc:
	i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
6198 fail_mem:
6199 	rte_free(vsi);
6200 	return NULL;
6201 }
6202 
6203 /* Configure vlan filter on or off */
6204 int
6205 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
6206 {
6207 	int i, num;
6208 	struct i40e_mac_filter *f;
6209 	void *temp;
6210 	struct i40e_mac_filter_info *mac_filter;
6211 	enum i40e_mac_filter_type desired_filter;
6212 	int ret = I40E_SUCCESS;
6213 
6214 	if (on) {
6215 		/* Filter to match MAC and VLAN */
6216 		desired_filter = I40E_MACVLAN_PERFECT_MATCH;
6217 	} else {
6218 		/* Filter to match only MAC */
6219 		desired_filter = I40E_MAC_PERFECT_MATCH;
6220 	}
6221 
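	/*
	 * Filters are not retyped in place: the loop below snapshots every
	 * MAC filter, deletes it, and re-adds it with the desired match type.
	 */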
6222 	num = vsi->mac_num;
6223 
6224 	mac_filter = rte_zmalloc("mac_filter_info_data",
6225 				 num * sizeof(*mac_filter), 0);
6226 	if (mac_filter == NULL) {
6227 		PMD_DRV_LOG(ERR, "failed to allocate memory");
6228 		return I40E_ERR_NO_MEMORY;
6229 	}
6230 
6231 	i = 0;
6232 
	/* Remove all existing MAC filters */
6234 	RTE_TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
6235 		mac_filter[i] = f->mac_info;
6236 		ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
6237 		if (ret) {
6238 			PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6239 				    on ? "enable" : "disable");
6240 			goto DONE;
6241 		}
6242 		i++;
6243 	}
6244 
	/* Re-add each filter with the desired type */
6246 	for (i = 0; i < num; i++) {
6247 		mac_filter[i].filter_type = desired_filter;
6248 		ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
6249 		if (ret) {
6250 			PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6251 				    on ? "enable" : "disable");
6252 			goto DONE;
6253 		}
6254 	}
6255 
6256 DONE:
6257 	rte_free(mac_filter);
6258 	return ret;
6259 }
6260 
6261 /* Configure vlan stripping on or off */
6262 int
6263 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
6264 {
6265 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6266 	struct i40e_vsi_context ctxt;
6267 	uint8_t vlan_flags;
6268 	int ret = I40E_SUCCESS;
6269 
	/* Check whether stripping is already on or off */
6271 	if (vsi->info.valid_sections &
6272 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
6273 		if (on) {
6274 			if ((vsi->info.port_vlan_flags &
6275 				I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
6276 				return 0; /* already on */
6277 		} else {
6278 			if ((vsi->info.port_vlan_flags &
6279 				I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
6280 				I40E_AQ_VSI_PVLAN_EMOD_MASK)
6281 				return 0; /* already off */
6282 		}
6283 	}
6284 
6285 	if (on)
6286 		vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6287 	else
6288 		vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
6289 	vsi->info.valid_sections =
6290 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6291 	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
6292 	vsi->info.port_vlan_flags |= vlan_flags;
6293 	ctxt.seid = vsi->seid;
6294 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
6295 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6296 	if (ret)
6297 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
6298 			    on ? "enable" : "disable");
6299 
6300 	return ret;
6301 }
6302 
6303 static int
6304 i40e_dev_init_vlan(struct rte_eth_dev *dev)
6305 {
6306 	struct rte_eth_dev_data *data = dev->data;
6307 	int ret;
6308 	int mask = 0;
6309 
6310 	/* Apply vlan offload setting */
6311 	mask = RTE_ETH_VLAN_STRIP_MASK |
6312 	       RTE_ETH_QINQ_STRIP_MASK |
6313 	       RTE_ETH_VLAN_FILTER_MASK |
6314 	       RTE_ETH_VLAN_EXTEND_MASK;
6315 
6316 	ret = i40e_vlan_offload_set(dev, mask);
6317 	if (ret) {
6318 		PMD_DRV_LOG(INFO, "Failed to update vlan offload");
6319 		return ret;
6320 	}
6321 
6322 	/* Apply pvid setting */
6323 	ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
6324 				data->dev_conf.txmode.hw_vlan_insert_pvid);
6325 	if (ret)
6326 		PMD_DRV_LOG(INFO, "Failed to update VSI params");
6327 
6328 	return ret;
6329 }
6330 
6331 static int
6332 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
6333 {
6334 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6335 
6336 	return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
6337 }
6338 
6339 static int
6340 i40e_update_flow_control(struct i40e_hw *hw)
6341 {
6342 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
6343 	struct i40e_link_status link_status;
6344 	uint32_t rxfc = 0, txfc = 0, reg;
6345 	uint8_t an_info;
6346 	int ret;
6347 
6348 	memset(&link_status, 0, sizeof(link_status));
6349 	ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
6350 	if (ret != I40E_SUCCESS) {
6351 		PMD_DRV_LOG(ERR, "Failed to get link status information");
6352 		goto write_reg; /* Disable flow control */
6353 	}
6354 
6355 	an_info = hw->phy.link_info.an_info;
6356 	if (!(an_info & I40E_AQ_AN_COMPLETED)) {
6357 		PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
6358 		ret = I40E_ERR_NOT_READY;
6359 		goto write_reg; /* Disable flow control */
6360 	}
6361 	/**
6362 	 * If link auto negotiation is enabled, flow control needs to
6363 	 * be configured according to it
6364 	 */
6365 	switch (an_info & I40E_LINK_PAUSE_RXTX) {
6366 	case I40E_LINK_PAUSE_RXTX:
6367 		rxfc = 1;
6368 		txfc = 1;
6369 		hw->fc.current_mode = I40E_FC_FULL;
6370 		break;
6371 	case I40E_AQ_LINK_PAUSE_RX:
6372 		rxfc = 1;
6373 		hw->fc.current_mode = I40E_FC_RX_PAUSE;
6374 		break;
6375 	case I40E_AQ_LINK_PAUSE_TX:
6376 		txfc = 1;
6377 		hw->fc.current_mode = I40E_FC_TX_PAUSE;
6378 		break;
6379 	default:
6380 		hw->fc.current_mode = I40E_FC_NONE;
6381 		break;
6382 	}
6383 
6384 write_reg:
6385 	I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
6386 		txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
6387 	reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
6388 	reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
6389 	reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
6390 	I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
6391 
6392 	return ret;
6393 }
6394 
6395 /* PF setup */
6396 static int
6397 i40e_pf_setup(struct i40e_pf *pf)
6398 {
6399 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6400 	struct i40e_filter_control_settings settings;
6401 	struct i40e_vsi *vsi;
6402 	int ret;
6403 
6404 	/* Clear all stats counters */
6405 	pf->offset_loaded = FALSE;
6406 	memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
6407 	memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
6408 	memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
6409 	memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
6410 
6411 	ret = i40e_pf_get_switch_config(pf);
6412 	if (ret != I40E_SUCCESS) {
6413 		PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
6414 		return ret;
6415 	}
6416 
6417 	ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
6418 	if (ret)
6419 		PMD_INIT_LOG(WARNING,
6420 			"failed to allocate switch domain for device %d", ret);
6421 
6422 	if (pf->flags & I40E_FLAG_FDIR) {
		/* Allocate the queue first so that FDIR uses queue pair 0 */
6424 		ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
6425 		if (ret != I40E_FDIR_QUEUE_ID) {
6426 			PMD_DRV_LOG(ERR,
6427 				"queue allocation fails for FDIR: ret =%d",
6428 				ret);
6429 			pf->flags &= ~I40E_FLAG_FDIR;
6430 		}
6431 	}
	/* Main VSI setup */
6433 	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
6434 	if (!vsi) {
6435 		PMD_DRV_LOG(ERR, "Setup of main vsi failed");
6436 		return I40E_ERR_NOT_READY;
6437 	}
6438 	pf->main_vsi = vsi;
6439 
6440 	/* Configure filter control */
6441 	memset(&settings, 0, sizeof(settings));
6442 	if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_128)
6443 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
6444 	else if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_512)
6445 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
6446 	else {
6447 		PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
6448 			hw->func_caps.rss_table_size);
6449 		return I40E_ERR_PARAM;
6450 	}
6451 	PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
6452 		hw->func_caps.rss_table_size);
6453 	pf->hash_lut_size = hw->func_caps.rss_table_size;
6454 
6455 	/* Enable ethtype and macvlan filters */
6456 	settings.enable_ethtype = TRUE;
6457 	settings.enable_macvlan = TRUE;
6458 	ret = i40e_set_filter_control(hw, &settings);
6459 	if (ret)
6460 		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
6461 								ret);
6462 
6463 	/* Update flow control according to the auto negotiation */
6464 	i40e_update_flow_control(hw);
6465 
6466 	return I40E_SUCCESS;
6467 }
6468 
6469 int
6470 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6471 {
6472 	uint32_t reg;
6473 	uint16_t j;
6474 
	/**
	 * Set or clear the TX Queue Disable flags,
	 * as required by the hardware.
	 */
6479 	i40e_pre_tx_queue_cfg(hw, q_idx, on);
6480 	rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
6481 
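	/*
	 * QENA_REQ reflects the state last requested by the driver and
	 * QENA_STAT the state the hardware has actually reached; the queue
	 * has settled once the two bits agree, so poll until they match
	 * before flipping REQ.
	 */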
6482 	/* Wait until the request is finished */
6483 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6484 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6485 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6486 		if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6487 			((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
6488 							& 0x1))) {
6489 			break;
6490 		}
6491 	}
6492 	if (on) {
6493 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
6494 			return I40E_SUCCESS; /* already on, skip next steps */
6495 
6496 		I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
6497 		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
6498 	} else {
6499 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6500 			return I40E_SUCCESS; /* already off, skip next steps */
6501 		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
6502 	}
6503 	/* Write the register */
6504 	I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
6505 	/* Check the result */
6506 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6507 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6508 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6509 		if (on) {
6510 			if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6511 				(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6512 				break;
6513 		} else {
6514 			if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6515 				!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6516 				break;
6517 		}
6518 	}
	/* Check for timeout */
6520 	if (j >= I40E_CHK_Q_ENA_COUNT) {
6521 		PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6522 			    (on ? "enable" : "disable"), q_idx);
6523 		return I40E_ERR_TIMEOUT;
6524 	}
6525 
6526 	return I40E_SUCCESS;
6527 }
6528 
6529 int
6530 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6531 {
6532 	uint32_t reg;
6533 	uint16_t j;
6534 
6535 	/* Wait until the request is finished */
6536 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6537 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6538 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6539 		if (!(((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6540 			((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1)))
6541 			break;
6542 	}
6543 
6544 	if (on) {
6545 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6546 			return I40E_SUCCESS; /* Already on, skip next steps */
6547 		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6548 	} else {
6549 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6550 			return I40E_SUCCESS; /* Already off, skip next steps */
6551 		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6552 	}
6553 
6554 	/* Write the register */
6555 	I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6556 	/* Check the result */
6557 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6558 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6559 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6560 		if (on) {
6561 			if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6562 				(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6563 				break;
6564 		} else {
6565 			if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6566 				!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6567 				break;
6568 		}
6569 	}
6570 
6571 	/* Check if it is timeout */
6572 	if (j >= I40E_CHK_Q_ENA_COUNT) {
6573 		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6574 			    (on ? "enable" : "disable"), q_idx);
6575 		return I40E_ERR_TIMEOUT;
6576 	}
6577 
6578 	return I40E_SUCCESS;
6579 }
6580 
6581 /* Initialize VSI for TX */
6582 static int
6583 i40e_dev_tx_init(struct i40e_pf *pf)
6584 {
6585 	struct rte_eth_dev_data *data = pf->dev_data;
6586 	uint16_t i;
6587 	uint32_t ret = I40E_SUCCESS;
6588 	int ret = I40E_SUCCESS;
6589 
6590 	for (i = 0; i < data->nb_tx_queues; i++) {
6591 		txq = data->tx_queues[i];
6592 		if (!txq || !txq->q_set)
6593 			continue;
6594 		ret = i40e_tx_queue_init(txq);
6595 		if (ret != I40E_SUCCESS)
6596 			break;
6597 	}
6598 	if (ret == I40E_SUCCESS)
6599 		i40e_set_tx_function(&rte_eth_devices[pf->dev_data->port_id]);
6600 
6601 	return ret;
6602 }
6603 
6604 /* Initialize VSI for RX */
6605 static int
6606 i40e_dev_rx_init(struct i40e_pf *pf)
6607 {
6608 	struct rte_eth_dev_data *data = pf->dev_data;
6609 	int ret = I40E_SUCCESS;
6610 	uint16_t i;
6611 	struct i40e_rx_queue *rxq;
6612 
6613 	i40e_pf_config_rss(pf);
6614 	for (i = 0; i < data->nb_rx_queues; i++) {
6615 		rxq = data->rx_queues[i];
6616 		if (!rxq || !rxq->q_set)
6617 			continue;
6618 
6619 		ret = i40e_rx_queue_init(rxq);
6620 		if (ret != I40E_SUCCESS) {
6621 			PMD_DRV_LOG(ERR,
6622 				"Failed to do RX queue initialization");
6623 			break;
6624 		}
6625 	}
6626 	if (ret == I40E_SUCCESS)
6627 		i40e_set_rx_function(&rte_eth_devices[pf->dev_data->port_id]);
6628 
6629 	return ret;
6630 }
6631 
6632 static int
6633 i40e_dev_rxtx_init(struct i40e_pf *pf)
6634 {
6635 	int err;
6636 
6637 	err = i40e_dev_tx_init(pf);
6638 	if (err) {
6639 		PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6640 		return err;
6641 	}
6642 	err = i40e_dev_rx_init(pf);
6643 	if (err) {
6644 		PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6645 		return err;
6646 	}
6647 
6648 	return err;
6649 }
6650 
6651 static int
6652 i40e_vmdq_setup(struct rte_eth_dev *dev)
6653 {
6654 	struct rte_eth_conf *conf = &dev->data->dev_conf;
6655 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6656 	int i, err, conf_vsis, j, loop;
6657 	struct i40e_vsi *vsi;
6658 	struct i40e_vmdq_info *vmdq_info;
6659 	struct rte_eth_vmdq_rx_conf *vmdq_conf;
6660 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6661 
6662 	/*
6663 	 * Disable interrupts to avoid messages from VFs. This also avoids
6664 	 * race conditions during VSI creation/destruction.
6665 	 */
6666 	i40e_pf_disable_irq0(hw);
6667 
6668 	if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6669 		PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6670 		return -ENOTSUP;
6671 	}
6672 
6673 	conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6674 	if (conf_vsis > pf->max_nb_vmdq_vsi) {
6675 		PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
6676 			conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6677 			pf->max_nb_vmdq_vsi);
6678 		return -ENOTSUP;
6679 	}
6680 
6681 	if (pf->vmdq != NULL) {
6682 		PMD_INIT_LOG(INFO, "VMDQ already configured");
6683 		return 0;
6684 	}
6685 
6686 	pf->vmdq = rte_zmalloc("vmdq_info_struct",
6687 				sizeof(*vmdq_info) * conf_vsis, 0);
6688 
6689 	if (pf->vmdq == NULL) {
6690 		PMD_INIT_LOG(ERR, "Failed to allocate memory");
6691 		return -ENOMEM;
6692 	}
6693 
6694 	vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6695 
6696 	/* Create VMDQ VSI */
6697 	for (i = 0; i < conf_vsis; i++) {
6698 		vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6699 				vmdq_conf->enable_loop_back);
6700 		if (vsi == NULL) {
6701 			PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6702 			err = -1;
6703 			goto err_vsi_setup;
6704 		}
6705 		vmdq_info = &pf->vmdq[i];
6706 		vmdq_info->pf = pf;
6707 		vmdq_info->vsi = vsi;
6708 	}
6709 	pf->nb_cfg_vmdq_vsi = conf_vsis;
6710 
6711 	/* Configure VLANs */
6712 	loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6713 	for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6714 		for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6715 			if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6716 				PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6717 					vmdq_conf->pool_map[i].vlan_id, j);
6718 
6719 				err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6720 						vmdq_conf->pool_map[i].vlan_id);
6721 				if (err) {
6722 					PMD_INIT_LOG(ERR, "Failed to add vlan");
6723 					err = -1;
6724 					goto err_vsi_setup;
6725 				}
6726 			}
6727 		}
6728 	}
6729 
6730 	i40e_pf_enable_irq0(hw);
6731 
6732 	return 0;
6733 
6734 err_vsi_setup:
6735 	for (i = 0; i < conf_vsis; i++)
6736 		if (pf->vmdq[i].vsi == NULL)
6737 			break;
6738 		else
6739 			i40e_vsi_release(pf->vmdq[i].vsi);
6740 
6741 	rte_free(pf->vmdq);
6742 	pf->vmdq = NULL;
6743 	i40e_pf_enable_irq0(hw);
6744 	return err;
6745 }
6746 
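/**
 * Read a 32-bit statistics register and report it relative to the
 * snapshot taken when the offset was first loaded, handling one counter
 * wrap. Illustrative example: with *offset == 0xFFFFFFF0 and a new
 * reading of 0x10, the reported stat is (0x10 + 2^32) - 0xFFFFFFF0 = 0x20.
 */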
6747 static void
6748 i40e_stat_update_32(struct i40e_hw *hw,
6749 		   uint32_t reg,
6750 		   bool offset_loaded,
6751 		   uint64_t *offset,
6752 		   uint64_t *stat)
6753 {
6754 	uint64_t new_data;
6755 
6756 	new_data = (uint64_t)I40E_READ_REG(hw, reg);
6757 	if (!offset_loaded)
6758 		*offset = new_data;
6759 
6760 	if (new_data >= *offset)
6761 		*stat = (uint64_t)(new_data - *offset);
6762 	else
6763 		*stat = (uint64_t)((new_data +
6764 			((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6765 }
6766 
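/**
 * 48-bit variant of i40e_stat_update_32(). On the QEMU device model the
 * high and low halves are read as two 32-bit registers instead of one
 * 64-bit access; after wrap handling the result is masked to 48 bits.
 */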
6767 static void
6768 i40e_stat_update_48(struct i40e_hw *hw,
6769 		   uint32_t hireg,
6770 		   uint32_t loreg,
6771 		   bool offset_loaded,
6772 		   uint64_t *offset,
6773 		   uint64_t *stat)
6774 {
6775 	uint64_t new_data;
6776 
6777 	if (hw->device_id == I40E_DEV_ID_QEMU) {
6778 		new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6779 		new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6780 				I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6781 	} else {
6782 		new_data = I40E_READ_REG64(hw, loreg);
6783 	}
6784 
6785 	if (!offset_loaded)
6786 		*offset = new_data;
6787 
6788 	if (new_data >= *offset)
6789 		*stat = new_data - *offset;
6790 	else
6791 		*stat = (uint64_t)((new_data +
6792 			((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6793 
6794 	*stat &= I40E_48_BIT_MASK;
6795 }
6796 
6797 /* Disable IRQ0 */
6798 void
6799 i40e_pf_disable_irq0(struct i40e_hw *hw)
6800 {
6801 	/* Disable all interrupt types */
6802 	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6803 		       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6804 	I40E_WRITE_FLUSH(hw);
6805 }
6806 
6807 /* Enable IRQ0 */
6808 void
6809 i40e_pf_enable_irq0(struct i40e_hw *hw)
6810 {
6811 	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6812 		I40E_PFINT_DYN_CTL0_INTENA_MASK |
6813 		I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6814 		I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6815 	I40E_WRITE_FLUSH(hw);
6816 }
6817 
6818 static void
6819 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6820 {
6821 	/* read pending request and disable first */
6822 	i40e_pf_disable_irq0(hw);
6823 	I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6824 	I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6825 		I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6826 
6827 	if (no_queue)
6828 		/* Do not link any queue to irq0 */
6829 		I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6830 			       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6831 }
6832 
6833 static void
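/**
 * Handle VF reset (VFLR) events: scan the global GLGEN_VFLRSTAT bitmap
 * for this PF's VFs, clear each pending bit, and notify the PF host
 * code without triggering another SW reset.
 */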
6834 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6835 {
6836 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6837 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6838 	int i;
6839 	uint16_t abs_vf_id;
6840 	uint32_t index, offset, val;
6841 
6842 	if (!pf->vfs)
6843 		return;
6844 	/**
6845 	 * Try to find which VF triggered a reset; use the absolute VF id to
6846 	 * access it, since the register is a global one.
6847 	 */
6848 	for (i = 0; i < pf->vf_num; i++) {
6849 		abs_vf_id = hw->func_caps.vf_base_id + i;
6850 		index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6851 		offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6852 		val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6853 		/* VFR event occurred */
6854 		if (val & (0x1 << offset)) {
6855 			int ret;
6856 
6857 			/* Clear the event first */
6858 			I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6859 							(0x1 << offset));
6860 			PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6861 			/**
6862 			 * Only notify that a VF reset event occurred;
6863 			 * don't trigger another SW reset.
6864 			 */
6865 			ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6866 			if (ret != I40E_SUCCESS)
6867 				PMD_DRV_LOG(ERR, "Failed to do VF reset");
6868 		}
6869 	}
6870 }
6871 
6872 static void
6873 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6874 {
6875 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6876 	int i;
6877 
6878 	for (i = 0; i < pf->vf_num; i++)
6879 		i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6880 }
6881 
6882 static void
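/**
 * Drain the admin receive queue. Each cleaned element is dispatched on
 * its opcode: VF mailbox messages go to the PF host handler, and link
 * status changes trigger an RTE_ETH_EVENT_INTR_LSC callback.
 */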
6883 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6884 {
6885 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6886 	struct i40e_arq_event_info info;
6887 	uint16_t pending, opcode;
6888 	int ret;
6889 
6890 	info.buf_len = I40E_AQ_BUF_SZ;
6891 	info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6892 	if (!info.msg_buf) {
6893 		PMD_DRV_LOG(ERR, "Failed to allocate mem");
6894 		return;
6895 	}
6896 
6897 	pending = 1;
6898 	while (pending) {
6899 		ret = i40e_clean_arq_element(hw, &info, &pending);
6900 
6901 		if (ret != I40E_SUCCESS) {
6902 			PMD_DRV_LOG(INFO,
6903 				"Failed to read msg from AdminQ, aq_err: %u",
6904 				hw->aq.asq_last_status);
6905 			break;
6906 		}
6907 		opcode = rte_le_to_cpu_16(info.desc.opcode);
6908 
6909 		switch (opcode) {
6910 		case i40e_aqc_opc_send_msg_to_pf:
6911 			/* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6912 			i40e_pf_host_handle_vf_msg(dev,
6913 					rte_le_to_cpu_16(info.desc.retval),
6914 					rte_le_to_cpu_32(info.desc.cookie_high),
6915 					rte_le_to_cpu_32(info.desc.cookie_low),
6916 					info.msg_buf,
6917 					info.msg_len);
6918 			break;
6919 		case i40e_aqc_opc_get_link_status:
6920 			ret = i40e_dev_link_update(dev, 0);
6921 			if (!ret)
6922 				rte_eth_dev_callback_process(dev,
6923 					RTE_ETH_EVENT_INTR_LSC, NULL);
6924 			break;
6925 		default:
6926 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6927 				    opcode);
6928 			break;
6929 		}
6930 	}
6931 	rte_free(info.msg_buf);
6932 }
6933 
6934 static void
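/**
 * Handle a Malicious Driver Detection event: the GL_MDET_TX/RX registers
 * identify the offending function and queue; the global, per-PF, and
 * per-VF MDET registers are then cleared and per-VF events are counted.
 */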
6935 i40e_handle_mdd_event(struct rte_eth_dev *dev)
6936 {
6937 #define I40E_MDD_CLEAR32 0xFFFFFFFF
6938 #define I40E_MDD_CLEAR16 0xFFFF
6939 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6940 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6941 	bool mdd_detected = false;
6942 	struct i40e_pf_vf *vf;
6943 	uint32_t reg;
6944 	int i;
6945 
6946 	/* find what triggered the MDD event */
6947 	reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
6948 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6949 		uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6950 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
6951 		uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6952 				I40E_GL_MDET_TX_VF_NUM_SHIFT;
6953 		uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6954 				I40E_GL_MDET_TX_EVENT_SHIFT;
6955 		uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6956 				I40E_GL_MDET_TX_QUEUE_SHIFT) -
6957 					hw->func_caps.base_queue;
6958 		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
6959 			"queue %d PF number 0x%02x VF number 0x%02x device %s",
6960 				event, queue, pf_num, vf_num, dev->data->name);
6961 		I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
6962 		mdd_detected = true;
6963 	}
6964 	reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
6965 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6966 		uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6967 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
6968 		uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6969 				I40E_GL_MDET_RX_EVENT_SHIFT;
6970 		uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6971 				I40E_GL_MDET_RX_QUEUE_SHIFT) -
6972 					hw->func_caps.base_queue;
6973 
6974 		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
6975 				"queue %d of function 0x%02x device %s",
6976 					event, queue, func, dev->data->name);
6977 		I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
6978 		mdd_detected = true;
6979 	}
6980 
6981 	if (mdd_detected) {
6982 		reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
6983 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6984 			I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
6985 			PMD_DRV_LOG(WARNING, "TX driver issue detected on PF");
6986 		}
6987 		reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
6988 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6989 			I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
6990 					I40E_MDD_CLEAR16);
6991 			PMD_DRV_LOG(WARNING, "RX driver issue detected on PF");
6992 		}
6993 	}
6994 
6995 	/* see if one of the VFs needs its hand slapped */
6996 	for (i = 0; i < pf->vf_num && mdd_detected; i++) {
6997 		vf = &pf->vfs[i];
6998 		reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
6999 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
7000 			I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
7001 					I40E_MDD_CLEAR16);
7002 			vf->num_mdd_events++;
7003 			PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %"
7004 					PRIu64 " times",
7005 					i, vf->num_mdd_events);
7006 		}
7007 
7008 		reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
7009 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
7010 			I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
7011 					I40E_MDD_CLEAR16);
7012 			vf->num_mdd_events++;
7013 			PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %"
7014 					PRIu64 " times",
7015 					i, vf->num_mdd_events);
7016 		}
7017 	}
7018 }
7019 
7020 /**
7021  * Interrupt handler triggered by the NIC for handling
7022  * a specific interrupt.
7023  *
7024  * @param param
7025  *  The address of the parameter (struct rte_eth_dev *) registered before.
7026  */
7032 static void
7033 i40e_dev_interrupt_handler(void *param)
7034 {
7035 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
7036 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7037 	uint32_t icr0;
7038 
7039 	/* Disable interrupt */
7040 	i40e_pf_disable_irq0(hw);
7041 
7042 	/* read out interrupt causes */
7043 	icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
7044 
7045 	/* No interrupt event indicated */
7046 	if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
7047 		PMD_DRV_LOG(INFO, "No interrupt event");
7048 		goto done;
7049 	}
7050 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
7051 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
7052 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
7053 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
7054 		i40e_handle_mdd_event(dev);
7055 	}
7056 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
7057 		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
7058 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
7059 		PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
7060 	if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
7061 		PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
7062 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
7063 		PMD_DRV_LOG(ERR, "ICR0: HMC error");
7064 	if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
7065 		PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
7066 
7067 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
7068 		PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
7069 		i40e_dev_handle_vfr_event(dev);
7070 	}
7071 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
7072 		PMD_DRV_LOG(INFO, "ICR0: adminq event");
7073 		i40e_dev_handle_aq_msg(dev);
7074 	}
7075 
7076 done:
7077 	/* Enable interrupt */
7078 	i40e_pf_enable_irq0(hw);
7079 }
7080 
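/**
 * Timer-driven counterpart of i40e_dev_interrupt_handler(): it performs
 * the same ICR0 dispatch, then re-arms itself via rte_eal_alarm_set()
 * instead of being driven by the interrupt line.
 */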
7081 static void
7082 i40e_dev_alarm_handler(void *param)
7083 {
7084 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
7085 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7086 	uint32_t icr0;
7087 
7088 	/* Disable interrupt */
7089 	i40e_pf_disable_irq0(hw);
7090 
7091 	/* read out interrupt causes */
7092 	icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
7093 
7094 	/* No interrupt event indicated */
7095 	if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
7096 		goto done;
7097 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
7098 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
7099 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
7100 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
7101 		i40e_handle_mdd_event(dev);
7102 	}
7103 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
7104 		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
7105 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
7106 		PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
7107 	if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
7108 		PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
7109 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
7110 		PMD_DRV_LOG(ERR, "ICR0: HMC error");
7111 	if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
7112 		PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
7113 
7114 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
7115 		PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
7116 		i40e_dev_handle_vfr_event(dev);
7117 	}
7118 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
7119 		PMD_DRV_LOG(INFO, "ICR0: adminq event");
7120 		i40e_dev_handle_aq_msg(dev);
7121 	}
7122 
7123 done:
7124 	/* Enable interrupt */
7125 	i40e_pf_enable_irq0(hw);
7126 	rte_eal_alarm_set(I40E_ALARM_INTERVAL,
7127 			  i40e_dev_alarm_handler, dev);
7128 }
7129 
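/**
 * Push MAC/VLAN filters to firmware in chunks sized to the AdminQ send
 * buffer (ele_num = asq_buf_size / element size). Illustrative figure
 * only, assuming a 4096-byte ASQ buffer and 16-byte elements: up to 256
 * filters per AdminQ command.
 */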
7130 int
7131 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
7132 			 struct i40e_macvlan_filter *filter,
7133 			 int total)
7134 {
7135 	int ele_num, ele_buff_size;
7136 	int num, actual_num, i;
7137 	uint16_t flags;
7138 	int ret = I40E_SUCCESS;
7139 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7140 	struct i40e_aqc_add_macvlan_element_data *req_list;
7141 
7142 	if (filter == NULL || total == 0)
7143 		return I40E_ERR_PARAM;
7144 	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7145 	ele_buff_size = hw->aq.asq_buf_size;
7146 
7147 	req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
7148 	if (req_list == NULL) {
7149 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
7150 		return I40E_ERR_NO_MEMORY;
7151 	}
7152 
7153 	num = 0;
7154 	do {
7155 		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7156 		memset(req_list, 0, ele_buff_size);
7157 
7158 		for (i = 0; i < actual_num; i++) {
7159 			rte_memcpy(req_list[i].mac_addr,
7160 				&filter[num + i].macaddr, ETH_ADDR_LEN);
7161 			req_list[i].vlan_tag =
7162 				rte_cpu_to_le_16(filter[num + i].vlan_id);
7163 
7164 			switch (filter[num + i].filter_type) {
7165 			case I40E_MAC_PERFECT_MATCH:
7166 				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
7167 					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
7168 				break;
7169 			case I40E_MACVLAN_PERFECT_MATCH:
7170 				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7171 				break;
7172 			case I40E_MAC_HASH_MATCH:
7173 				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
7174 					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
7175 				break;
7176 			case I40E_MACVLAN_HASH_MATCH:
7177 				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
7178 				break;
7179 			default:
7180 				PMD_DRV_LOG(ERR, "Invalid MAC match type");
7181 				ret = I40E_ERR_PARAM;
7182 				goto DONE;
7183 			}
7184 
7185 			req_list[i].queue_number = 0;
7186 
7187 			req_list[i].flags = rte_cpu_to_le_16(flags);
7188 		}
7189 
7190 		ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
7191 						actual_num, NULL);
7192 		if (ret != I40E_SUCCESS) {
7193 			PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
7194 			goto DONE;
7195 		}
7196 		num += actual_num;
7197 	} while (num < total);
7198 
7199 DONE:
7200 	rte_free(req_list);
7201 	return ret;
7202 }
7203 
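/**
 * Remove MAC/VLAN filters with the same chunking scheme as
 * i40e_add_macvlan_filters(). The _v2 AdminQ variant exposes the
 * per-command status, so firmware ENOENT (filter already gone) can be
 * tolerated instead of reported as an error.
 */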
7204 int
7205 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
7206 			    struct i40e_macvlan_filter *filter,
7207 			    int total)
7208 {
7209 	int ele_num, ele_buff_size;
7210 	int num, actual_num, i;
7211 	uint16_t flags;
7212 	int ret = I40E_SUCCESS;
7213 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7214 	struct i40e_aqc_remove_macvlan_element_data *req_list;
7215 	enum i40e_admin_queue_err aq_status;
7216 
7217 	if (filter == NULL || total == 0)
7218 		return I40E_ERR_PARAM;
7219 
7220 	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7221 	ele_buff_size = hw->aq.asq_buf_size;
7222 
7223 	req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
7224 	if (req_list == NULL) {
7225 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
7226 		return I40E_ERR_NO_MEMORY;
7227 	}
7228 
7229 	num = 0;
7230 	do {
7231 		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7232 		memset(req_list, 0, ele_buff_size);
7233 
7234 		for (i = 0; i < actual_num; i++) {
7235 			rte_memcpy(req_list[i].mac_addr,
7236 				&filter[num + i].macaddr, ETH_ADDR_LEN);
7237 			req_list[i].vlan_tag =
7238 				rte_cpu_to_le_16(filter[num + i].vlan_id);
7239 
7240 			switch (filter[num + i].filter_type) {
7241 			case I40E_MAC_PERFECT_MATCH:
7242 				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
7243 					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7244 				break;
7245 			case I40E_MACVLAN_PERFECT_MATCH:
7246 				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7247 				break;
7248 			case I40E_MAC_HASH_MATCH:
7249 				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
7250 					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7251 				break;
7252 			case I40E_MACVLAN_HASH_MATCH:
7253 				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
7254 				break;
7255 			default:
7256 				PMD_DRV_LOG(ERR, "Invalid MAC filter type");
7257 				ret = I40E_ERR_PARAM;
7258 				goto DONE;
7259 			}
7260 			req_list[i].flags = rte_cpu_to_le_16(flags);
7261 		}
7262 
7263 		ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, req_list,
7264 						actual_num, NULL, &aq_status);
7265 
7266 		if (ret != I40E_SUCCESS) {
7267 			/* Do not report as an error when firmware returns ENOENT */
7268 			if (aq_status == I40E_AQ_RC_ENOENT) {
7269 				ret = I40E_SUCCESS;
7270 			} else {
7271 				PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
7272 				goto DONE;
7273 			}
7274 		}
7275 		num += actual_num;
7276 	} while (num < total);
7277 
7278 DONE:
7279 	rte_free(req_list);
7280 	return ret;
7281 }
7282 
7283 /* Find out specific MAC filter */
7284 static struct i40e_mac_filter *
7285 i40e_find_mac_filter(struct i40e_vsi *vsi,
7286 			 struct rte_ether_addr *macaddr)
7287 {
7288 	struct i40e_mac_filter *f;
7289 
7290 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
7291 		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
7292 			return f;
7293 	}
7294 
7295 	return NULL;
7296 }
7297 
7298 static bool
7299 i40e_find_vlan_filter(struct i40e_vsi *vsi,
7300 			 uint16_t vlan_id)
7301 {
7302 	uint32_t vid_idx, vid_bit;
7303 
7304 	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
7305 		return 0;
7306 
7307 	vid_idx = I40E_VFTA_IDX(vlan_id);
7308 	vid_bit = I40E_VFTA_BIT(vlan_id);
7309 
7310 	if (vsi->vfta[vid_idx] & vid_bit)
7311 		return 1;
7312 	else
7313 		return 0;
7314 }
7315 
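/*
 * The shadow VFTA is a bitmap of 32-bit words. Assuming the usual
 * I40E_VFTA_IDX(v) == ((v) >> 5) and I40E_VFTA_BIT(v) == (1 << ((v) & 0x1F))
 * definitions, vlan_id 100 maps to vfta[3], bit 4 (illustrative example).
 */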
7316 static void
7317 i40e_store_vlan_filter(struct i40e_vsi *vsi,
7318 		       uint16_t vlan_id, bool on)
7319 {
7320 	uint32_t vid_idx, vid_bit;
7321 
7322 	vid_idx = I40E_VFTA_IDX(vlan_id);
7323 	vid_bit = I40E_VFTA_BIT(vlan_id);
7324 
7325 	if (on)
7326 		vsi->vfta[vid_idx] |= vid_bit;
7327 	else
7328 		vsi->vfta[vid_idx] &= ~vid_bit;
7329 }
7330 
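/**
 * Set or clear a VLAN filter. The shadow VFTA is always updated; the
 * AdminQ add/remove VLAN command is only issued when VLAN anti-spoof or
 * VLAN filtering is enabled on the VSI and vlan_id is non-zero.
 */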
7331 void
7332 i40e_set_vlan_filter(struct i40e_vsi *vsi,
7333 		     uint16_t vlan_id, bool on)
7334 {
7335 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7336 	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
7337 	int ret;
7338 
7339 	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
7340 		return;
7341 
7342 	i40e_store_vlan_filter(vsi, vlan_id, on);
7343 
7344 	if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
7345 		return;
7346 
7347 	vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
7348 
7349 	if (on) {
7350 		ret = i40e_aq_add_vlan(hw, vsi->seid,
7351 				       &vlan_data, 1, NULL);
7352 		if (ret != I40E_SUCCESS)
7353 			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
7354 	} else {
7355 		ret = i40e_aq_remove_vlan(hw, vsi->seid,
7356 					  &vlan_data, 1, NULL);
7357 		if (ret != I40E_SUCCESS)
7358 			PMD_DRV_LOG(ERR,
7359 				    "Failed to remove vlan filter");
7360 	}
7361 }
7362 
7363 /**
7364  * Find all VLAN options for a specific MAC addr,
7365  * returning the actual VLANs found.
7366  */
7367 int
7368 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
7369 			   struct i40e_macvlan_filter *mv_f,
7370 			   int num, struct rte_ether_addr *addr)
7371 {
7372 	int i;
7373 	uint32_t j, k;
7374 
7375 	/**
7376 	 * i40e_find_vlan_filter() is deliberately not used here to keep the
7377 	 * loop time down, even though the code looks more complex.
7378 	 */
7379 	if (num < vsi->vlan_num)
7380 		return I40E_ERR_PARAM;
7381 
7382 	i = 0;
7383 	for (j = 0; j < I40E_VFTA_SIZE; j++) {
7384 		if (vsi->vfta[j]) {
7385 			for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
7386 				if (vsi->vfta[j] & (1 << k)) {
7387 					if (i > num - 1) {
7388 						PMD_DRV_LOG(ERR,
7389 							"vlan number doesn't match");
7390 						return I40E_ERR_PARAM;
7391 					}
7392 					rte_memcpy(&mv_f[i].macaddr,
7393 							addr, ETH_ADDR_LEN);
7394 					mv_f[i].vlan_id =
7395 						j * I40E_UINT32_BIT_SIZE + k;
7396 					i++;
7397 				}
7398 			}
7399 		}
7400 	}
7401 	return I40E_SUCCESS;
7402 }
7403 
7404 static inline int
7405 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
7406 			   struct i40e_macvlan_filter *mv_f,
7407 			   int num,
7408 			   uint16_t vlan)
7409 {
7410 	int i = 0;
7411 	struct i40e_mac_filter *f;
7412 
7413 	if (num < vsi->mac_num)
7414 		return I40E_ERR_PARAM;
7415 
7416 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
7417 		if (i > num - 1) {
7418 			PMD_DRV_LOG(ERR, "buffer number doesn't match");
7419 			return I40E_ERR_PARAM;
7420 		}
7421 		rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7422 				ETH_ADDR_LEN);
7423 		mv_f[i].vlan_id = vlan;
7424 		mv_f[i].filter_type = f->mac_info.filter_type;
7425 		i++;
7426 	}
7427 
7428 	return I40E_SUCCESS;
7429 }
7430 
7431 static int
7432 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
7433 {
7434 	int i, j, num;
7435 	struct i40e_mac_filter *f;
7436 	struct i40e_macvlan_filter *mv_f;
7437 	int ret = I40E_SUCCESS;
7438 
7439 	if (vsi == NULL || vsi->mac_num == 0)
7440 		return I40E_ERR_PARAM;
7441 
7442 	/* Case where no VLAN is set */
7443 	if (vsi->vlan_num == 0)
7444 		num = vsi->mac_num;
7445 	else
7446 		num = vsi->mac_num * vsi->vlan_num;
7447 
7448 	mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
7449 	if (mv_f == NULL) {
7450 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7451 		return I40E_ERR_NO_MEMORY;
7452 	}
7453 
7454 	i = 0;
7455 	if (vsi->vlan_num == 0) {
7456 		TAILQ_FOREACH(f, &vsi->mac_list, next) {
7457 			rte_memcpy(&mv_f[i].macaddr,
7458 				&f->mac_info.mac_addr, ETH_ADDR_LEN);
7459 			mv_f[i].filter_type = f->mac_info.filter_type;
7460 			mv_f[i].vlan_id = 0;
7461 			i++;
7462 		}
7463 	} else {
7464 		TAILQ_FOREACH(f, &vsi->mac_list, next) {
7465 			ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
7466 					vsi->vlan_num, &f->mac_info.mac_addr);
7467 			if (ret != I40E_SUCCESS)
7468 				goto DONE;
7469 			for (j = i; j < i + vsi->vlan_num; j++)
7470 				mv_f[j].filter_type = f->mac_info.filter_type;
7471 			i += vsi->vlan_num;
7472 		}
7473 	}
7474 
7475 	ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
7476 DONE:
7477 	rte_free(mv_f);
7478 
7479 	return ret;
7480 }
7481 
7482 int
7483 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7484 {
7485 	struct i40e_macvlan_filter *mv_f;
7486 	int mac_num;
7487 	int ret = I40E_SUCCESS;
7488 
7489 	if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
7490 		return I40E_ERR_PARAM;
7491 
7492 	/* If it's already set, just return */
7493 	if (i40e_find_vlan_filter(vsi, vlan))
7494 		return I40E_SUCCESS;
7495 
7496 	mac_num = vsi->mac_num;
7497 
7498 	if (mac_num == 0) {
7499 		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7500 		return I40E_ERR_PARAM;
7501 	}
7502 
7503 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7504 
7505 	if (mv_f == NULL) {
7506 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7507 		return I40E_ERR_NO_MEMORY;
7508 	}
7509 
7510 	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7511 
7512 	if (ret != I40E_SUCCESS)
7513 		goto DONE;
7514 
7515 	ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7516 
7517 	if (ret != I40E_SUCCESS)
7518 		goto DONE;
7519 
7520 	i40e_set_vlan_filter(vsi, vlan, 1);
7521 
7522 	vsi->vlan_num++;
7523 	ret = I40E_SUCCESS;
7524 DONE:
7525 	rte_free(mv_f);
7526 	return ret;
7527 }
7528 
7529 int
7530 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7531 {
7532 	struct i40e_macvlan_filter *mv_f;
7533 	int mac_num;
7534 	int ret = I40E_SUCCESS;
7535 
7536 	/**
7537 	 * VLAN 0 is the generic filter for untagged packets
7538 	 * and can't be removed.
7539 	 */
7540 	if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
7541 		return I40E_ERR_PARAM;
7542 
7543 	/* If we can't find it, just return */
7544 	if (!i40e_find_vlan_filter(vsi, vlan))
7545 		return I40E_ERR_PARAM;
7546 
7547 	mac_num = vsi->mac_num;
7548 
7549 	if (mac_num == 0) {
7550 		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7551 		return I40E_ERR_PARAM;
7552 	}
7553 
7554 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7555 
7556 	if (mv_f == NULL) {
7557 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7558 		return I40E_ERR_NO_MEMORY;
7559 	}
7560 
7561 	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7562 
7563 	if (ret != I40E_SUCCESS)
7564 		goto DONE;
7565 
7566 	ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
7567 
7568 	if (ret != I40E_SUCCESS)
7569 		goto DONE;
7570 
7571 	/* This is the last VLAN to remove; replace all MAC filters with VLAN 0 */
7572 	if (vsi->vlan_num == 1) {
7573 		ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
7574 		if (ret != I40E_SUCCESS)
7575 			goto DONE;
7576 
7577 		ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7578 		if (ret != I40E_SUCCESS)
7579 			goto DONE;
7580 	}
7581 
7582 	i40e_set_vlan_filter(vsi, vlan, 0);
7583 
7584 	vsi->vlan_num--;
7585 	ret = I40E_SUCCESS;
7586 DONE:
7587 	rte_free(mv_f);
7588 	return ret;
7589 }
7590 
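/**
 * Add one MAC filter to the VSI. For MACVLAN match types the filter is
 * expanded across every VLAN currently on the VSI (VLAN 0 is seeded on
 * first use); for MAC-only match types a single entry suffices.
 */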
7591 int
7592 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
7593 {
7594 	struct i40e_mac_filter *f;
7595 	struct i40e_macvlan_filter *mv_f;
7596 	int i, vlan_num = 0;
7597 	int ret = I40E_SUCCESS;
7598 
7599 	/* If the filter has already been configured, just return */
7600 	f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
7601 	if (f != NULL)
7602 		return I40E_SUCCESS;
7603 	if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7604 		mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7605 
7606 		/**
7607 		 * If vlan_num is 0, this is the first time to add a MAC;
7608 		 * set the mask for vlan_id 0.
7609 		 */
7610 		if (vsi->vlan_num == 0) {
7611 			i40e_set_vlan_filter(vsi, 0, 1);
7612 			vsi->vlan_num = 1;
7613 		}
7614 		vlan_num = vsi->vlan_num;
7615 	} else if (mac_filter->filter_type == I40E_MAC_PERFECT_MATCH ||
7616 			mac_filter->filter_type == I40E_MAC_HASH_MATCH)
7617 		vlan_num = 1;
7618 
7619 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7620 	if (mv_f == NULL) {
7621 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7622 		return I40E_ERR_NO_MEMORY;
7623 	}
7624 
7625 	for (i = 0; i < vlan_num; i++) {
7626 		mv_f[i].filter_type = mac_filter->filter_type;
7627 		rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
7628 				ETH_ADDR_LEN);
7629 	}
7630 
7631 	if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7632 		mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7633 		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
7634 					&mac_filter->mac_addr);
7635 		if (ret != I40E_SUCCESS)
7636 			goto DONE;
7637 	}
7638 
7639 	ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
7640 	if (ret != I40E_SUCCESS)
7641 		goto DONE;
7642 
7643 	/* Add the mac addr into mac list */
7644 	f = rte_zmalloc("macv_filter", sizeof(*f), 0);
7645 	if (f == NULL) {
7646 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7647 		ret = I40E_ERR_NO_MEMORY;
7648 		goto DONE;
7649 	}
7650 	rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
7651 			ETH_ADDR_LEN);
7652 	f->mac_info.filter_type = mac_filter->filter_type;
7653 	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7654 	vsi->mac_num++;
7655 
7656 	ret = I40E_SUCCESS;
7657 DONE:
7658 	rte_free(mv_f);
7659 
7660 	return ret;
7661 }
7662 
7663 int
7664 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr)
7665 {
7666 	struct i40e_mac_filter *f;
7667 	struct i40e_macvlan_filter *mv_f;
7668 	int i, vlan_num;
7669 	enum i40e_mac_filter_type filter_type;
7670 	int ret = I40E_SUCCESS;
7671 
7672 	/* If we can't find it, return an error */
7673 	f = i40e_find_mac_filter(vsi, addr);
7674 	if (f == NULL)
7675 		return I40E_ERR_PARAM;
7676 
7677 	vlan_num = vsi->vlan_num;
7678 	filter_type = f->mac_info.filter_type;
7679 	if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7680 		filter_type == I40E_MACVLAN_HASH_MATCH) {
7681 		if (vlan_num == 0) {
7682 			PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7683 			return I40E_ERR_PARAM;
7684 		}
7685 	} else if (filter_type == I40E_MAC_PERFECT_MATCH ||
7686 			filter_type == I40E_MAC_HASH_MATCH)
7687 		vlan_num = 1;
7688 
7689 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7690 	if (mv_f == NULL) {
7691 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7692 		return I40E_ERR_NO_MEMORY;
7693 	}
7694 
7695 	for (i = 0; i < vlan_num; i++) {
7696 		mv_f[i].filter_type = filter_type;
7697 		rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7698 				ETH_ADDR_LEN);
7699 	}
7700 	if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7701 			filter_type == I40E_MACVLAN_HASH_MATCH) {
7702 		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7703 		if (ret != I40E_SUCCESS)
7704 			goto DONE;
7705 	}
7706 
7707 	ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7708 	if (ret != I40E_SUCCESS)
7709 		goto DONE;
7710 
7711 	/* Remove the MAC addr from the MAC list */
7712 	TAILQ_REMOVE(&vsi->mac_list, f, next);
7713 	rte_free(f);
7714 	vsi->mac_num--;
7715 
7716 	ret = I40E_SUCCESS;
7717 DONE:
7718 	rte_free(mv_f);
7719 	return ret;
7720 }
7721 
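/*
 * i40e_config_hena()/i40e_parse_hena() translate between the ethdev
 * RTE_ETH_FLOW_* bitmap and the hardware packet classifier type (PCTYPE)
 * bitmap through adapter->pctypes_tbl[]; one flow type may map to
 * several PCTYPEs.
 */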
7722 /* Configure hash enable flags for RSS */
7723 uint64_t
7724 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7725 {
7726 	uint64_t hena = 0;
7727 	int i;
7728 
7729 	if (!flags)
7730 		return hena;
7731 
7732 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7733 		if (flags & (1ULL << i))
7734 			hena |= adapter->pctypes_tbl[i];
7735 	}
7736 
7737 	return hena;
7738 }
7739 
7740 /* Parse the hash enable flags */
7741 uint64_t
7742 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7743 {
7744 	uint64_t rss_hf = 0;
7745 	int i;
7746 
7747 	if (!flags)
7748 		return rss_hf;
7749 
7750 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7751 		if (flags & adapter->pctypes_tbl[i])
7752 			rss_hf |= (1ULL << i);
7753 	}
7754 	return rss_hf;
7755 }
7756 
7757 /* Disable RSS */
7758 void
7759 i40e_pf_disable_rss(struct i40e_pf *pf)
7760 {
7761 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7762 
7763 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7764 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7765 	I40E_WRITE_FLUSH(hw);
7766 }
7767 
7768 int
7769 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7770 {
7771 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7772 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7773 	uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7774 			   I40E_VFQF_HKEY_MAX_INDEX :
7775 			   I40E_PFQF_HKEY_MAX_INDEX;
7776 
7777 	if (!key || key_len == 0) {
7778 		PMD_DRV_LOG(DEBUG, "No key to be configured");
7779 		return 0;
7780 	} else if (key_len != (key_idx + 1) *
7781 		sizeof(uint32_t)) {
7782 		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7783 		return -EINVAL;
7784 	}
7785 
7786 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7787 		struct i40e_aqc_get_set_rss_key_data *key_dw =
7788 				(struct i40e_aqc_get_set_rss_key_data *)key;
7789 		enum i40e_status_code status =
7790 				i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7791 
7792 		if (status) {
7793 			PMD_DRV_LOG(ERR,
7794 				    "Failed to configure RSS key via AQ, error status: %d",
7795 				    status);
7796 			return -EIO;
7797 		}
7798 	} else {
7799 		uint32_t *hash_key = (uint32_t *)key;
7800 		uint16_t i;
7801 
7802 		if (vsi->type == I40E_VSI_SRIOV) {
7803 			for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7804 				I40E_WRITE_REG(
7805 					hw,
7806 					I40E_VFQF_HKEY1(i, vsi->user_param),
7807 					hash_key[i]);
7808 
7809 		} else {
7810 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7811 				I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7812 					       hash_key[i]);
7813 		}
7814 		I40E_WRITE_FLUSH(hw);
7815 	}
7816 
7817 	return 0;
7818 }
7819 
7820 static int
7821 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7822 {
7823 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7824 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7825 	uint32_t reg;
7826 	int ret;
7827 
7828 	if (!key || !key_len)
7829 		return 0;
7830 
7831 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7832 		ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7833 			(struct i40e_aqc_get_set_rss_key_data *)key);
7834 		if (ret) {
7835 			PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7836 			return ret;
7837 		}
7838 	} else {
7839 		uint32_t *key_dw = (uint32_t *)key;
7840 		uint16_t i;
7841 
7842 		if (vsi->type == I40E_VSI_SRIOV) {
7843 			for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7844 				reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7845 				key_dw[i] = i40e_read_rx_ctl(hw, reg);
7846 			}
7847 			*key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7848 				   sizeof(uint32_t);
7849 		} else {
7850 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7851 				reg = I40E_PFQF_HKEY(i);
7852 				key_dw[i] = i40e_read_rx_ctl(hw, reg);
7853 			}
7854 			*key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7855 				   sizeof(uint32_t);
7856 		}
7857 	}
7858 	return 0;
7859 }
7860 
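/*
 * Program the RSS key, then split the 64-bit hash-enable word across the
 * two 32-bit PFQF_HENA registers: low word to HENA(0), high word to
 * HENA(1).
 */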
7861 static int
7862 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7863 {
7864 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7865 	uint64_t hena;
7866 	int ret;
7867 
7868 	ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7869 			       rss_conf->rss_key_len);
7870 	if (ret)
7871 		return ret;
7872 
7873 	hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7874 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7875 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7876 	I40E_WRITE_FLUSH(hw);
7877 
7878 	return 0;
7879 }
7880 
7881 static int
7882 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7883 			 struct rte_eth_rss_conf *rss_conf)
7884 {
7885 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7886 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7887 	uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7888 	uint64_t hena;
7889 
7890 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7891 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7892 
7893 	if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7894 		if (rss_hf != 0) /* Enable RSS */
7895 			return -EINVAL;
7896 		return 0; /* Nothing to do */
7897 	}
7898 	/* RSS enabled */
7899 	if (rss_hf == 0) /* Disable RSS */
7900 		return -EINVAL;
7901 
7902 	return i40e_hw_rss_hash_set(pf, rss_conf);
7903 }
7904 
7905 static int
7906 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7907 			   struct rte_eth_rss_conf *rss_conf)
7908 {
7909 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7910 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7911 	uint64_t hena;
7912 	int ret;
7913 
7914 	if (!rss_conf)
7915 		return -EINVAL;
7916 
7917 	ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7918 			 &rss_conf->rss_key_len);
7919 	if (ret)
7920 		return ret;
7921 
7922 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7923 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7924 	rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7925 
7926 	return 0;
7927 }
7928 
7929 static int
7930 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7931 {
7932 	switch (filter_type) {
7933 	case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN:
7934 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7935 		break;
7936 	case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7937 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7938 		break;
7939 	case RTE_ETH_TUNNEL_FILTER_IMAC_TENID:
7940 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7941 		break;
7942 	case RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC:
7943 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7944 		break;
7945 	case RTE_ETH_TUNNEL_FILTER_IMAC:
7946 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7947 		break;
7948 	case RTE_ETH_TUNNEL_FILTER_OIP:
7949 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7950 		break;
7951 	case RTE_ETH_TUNNEL_FILTER_IIP:
7952 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7953 		break;
7954 	default:
7955 		PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7956 		return -EINVAL;
7957 	}
7958 
7959 	return 0;
7960 }
7961 
7962 /* Convert tunnel filter structure */
7963 static int
7964 i40e_tunnel_filter_convert(
7965 	struct i40e_aqc_cloud_filters_element_bb *cld_filter,
7966 	struct i40e_tunnel_filter *tunnel_filter)
7967 {
7968 	rte_ether_addr_copy((struct rte_ether_addr *)
7969 			&cld_filter->element.outer_mac,
7970 		(struct rte_ether_addr *)&tunnel_filter->input.outer_mac);
7971 	rte_ether_addr_copy((struct rte_ether_addr *)
7972 			&cld_filter->element.inner_mac,
7973 		(struct rte_ether_addr *)&tunnel_filter->input.inner_mac);
7974 	tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7975 	if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7976 	     I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7977 	    I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7978 		tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7979 	else
7980 		tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7981 	tunnel_filter->input.flags = cld_filter->element.flags;
7982 	tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7983 	tunnel_filter->queue = cld_filter->element.queue_number;
7984 	rte_memcpy(tunnel_filter->input.general_fields,
7985 		   cld_filter->general_fields,
7986 		   sizeof(cld_filter->general_fields));
7987 
7988 	return 0;
7989 }
7990 
7991 /* Check if the tunnel filter already exists */
7992 struct i40e_tunnel_filter *
7993 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7994 			     const struct i40e_tunnel_filter_input *input)
7995 {
7996 	int ret;
7997 
7998 	ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7999 	if (ret < 0)
8000 		return NULL;
8001 
8002 	return tunnel_rule->hash_map[ret];
8003 }
8004 
8005 /* Add a tunnel filter into the SW list */
8006 static int
8007 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
8008 			     struct i40e_tunnel_filter *tunnel_filter)
8009 {
8010 	struct i40e_tunnel_rule *rule = &pf->tunnel;
8011 	int ret;
8012 
8013 	ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
8014 	if (ret < 0) {
8015 		PMD_DRV_LOG(ERR,
8016 			    "Failed to insert tunnel filter into hash table %d!",
8017 			    ret);
8018 		return ret;
8019 	}
8020 	rule->hash_map[ret] = tunnel_filter;
8021 
8022 	TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
8023 
8024 	return 0;
8025 }
8026 
8027 /* Delete a tunnel filter from the SW list */
8028 int
8029 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
8030 			  struct i40e_tunnel_filter_input *input)
8031 {
8032 	struct i40e_tunnel_rule *rule = &pf->tunnel;
8033 	struct i40e_tunnel_filter *tunnel_filter;
8034 	int ret;
8035 
8036 	ret = rte_hash_del_key(rule->hash_table, input);
8037 	if (ret < 0) {
8038 		PMD_DRV_LOG(ERR,
8039 			    "Failed to delete tunnel filter from hash table %d!",
8040 			    ret);
8041 		return ret;
8042 	}
8043 	tunnel_filter = rule->hash_map[ret];
8044 	rule->hash_map[ret] = NULL;
8045 
8046 	TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
8047 	rte_free(tunnel_filter);
8048 
8049 	return 0;
8050 }
8051 
8052 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
8053 #define I40E_TR_VXLAN_GRE_KEY_MASK		0x4
8054 #define I40E_TR_GENEVE_KEY_MASK			0x8
8055 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK		0x40
8056 #define I40E_TR_GRE_KEY_MASK			0x400
8057 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK		0x800
8058 #define I40E_TR_GRE_NO_KEY_MASK			0x8000
8059 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
8060 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
8061 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
8062 #define I40E_DIRECTION_INGRESS_KEY		0x8000
8063 #define I40E_TR_L4_TYPE_TCP			0x2
8064 #define I40E_TR_L4_TYPE_UDP			0x4
8065 #define I40E_TR_L4_TYPE_SCTP			0x8
8066 
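/*
 * The "replace cloud filters" AdminQ command repurposes an unused
 * hardware filter type: old_filter_type names the slot being replaced,
 * new_filter_type the custom type, and the buffer entries describe the
 * field vectors to match. It rewrites a global resource, so it is
 * refused below when multiple drivers share the NIC.
 */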
8067 static enum i40e_status_code
8068 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
8069 {
8070 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8071 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8072 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8073 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8074 	enum i40e_status_code status = I40E_SUCCESS;
8075 
8076 	if (pf->support_multi_driver) {
8077 		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8078 		return I40E_NOT_SUPPORTED;
8079 	}
8080 
8081 	memset(&filter_replace, 0,
8082 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8083 	memset(&filter_replace_buf, 0,
8084 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8085 
8086 	/* create L1 filter */
8087 	filter_replace.old_filter_type =
8088 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
8089 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
8090 	filter_replace.tr_bit = 0;
8091 
8092 	/* Prepare the buffer, 3 entries */
8093 	filter_replace_buf.data[0] =
8094 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8095 	filter_replace_buf.data[0] |=
8096 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8097 	filter_replace_buf.data[2] = 0xFF;
8098 	filter_replace_buf.data[3] = 0xFF;
8099 	filter_replace_buf.data[4] =
8100 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8101 	filter_replace_buf.data[4] |=
8102 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8103 	filter_replace_buf.data[7] = 0xF0;
8104 	filter_replace_buf.data[8]
8105 		= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
8106 	filter_replace_buf.data[8] |=
8107 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8108 	filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
8109 		I40E_TR_GENEVE_KEY_MASK |
8110 		I40E_TR_GENERIC_UDP_TUNNEL_MASK;
8111 	filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
8112 		I40E_TR_GRE_KEY_WITH_XSUM_MASK |
8113 		I40E_TR_GRE_NO_KEY_MASK) >> 8;
8114 
8115 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8116 					       &filter_replace_buf);
8117 	if (!status && (filter_replace.old_filter_type !=
8118 			filter_replace.new_filter_type))
8119 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8120 			    " original: 0x%x, new: 0x%x",
8121 			    dev->device->name,
8122 			    filter_replace.old_filter_type,
8123 			    filter_replace.new_filter_type);
8124 
8125 	return status;
8126 }
8127 
8128 static enum i40e_status_code
8129 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
8130 {
8131 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8132 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8133 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8134 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8135 	enum i40e_status_code status = I40E_SUCCESS;
8136 
8137 	if (pf->support_multi_driver) {
8138 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8139 		return I40E_NOT_SUPPORTED;
8140 	}
8141 
8142 	/* For MPLSoUDP */
8143 	memset(&filter_replace, 0,
8144 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8145 	memset(&filter_replace_buf, 0,
8146 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8147 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
8148 		I40E_AQC_MIRROR_CLOUD_FILTER;
8149 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8150 	filter_replace.new_filter_type =
8151 		I40E_AQC_ADD_CLOUD_FILTER_0X11;
8152 	/* Prepare the buffer, 2 entries */
8153 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8154 	filter_replace_buf.data[0] |=
8155 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8156 	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
8157 	filter_replace_buf.data[4] |=
8158 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8159 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8160 					       &filter_replace_buf);
8161 	if (status < 0)
8162 		return status;
8163 	if (filter_replace.old_filter_type !=
8164 	    filter_replace.new_filter_type)
8165 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8166 			    " original: 0x%x, new: 0x%x",
8167 			    dev->device->name,
8168 			    filter_replace.old_filter_type,
8169 			    filter_replace.new_filter_type);
8170 
8171 	/* For MPLSoGRE */
8172 	memset(&filter_replace, 0,
8173 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8174 	memset(&filter_replace_buf, 0,
8175 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8176 
8177 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
8178 		I40E_AQC_MIRROR_CLOUD_FILTER;
8179 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
8180 	filter_replace.new_filter_type =
8181 		I40E_AQC_ADD_CLOUD_FILTER_0X12;
8182 	/* Prepare the buffer, 2 entries */
8183 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8184 	filter_replace_buf.data[0] |=
8185 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8186 	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
8187 	filter_replace_buf.data[4] |=
8188 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8189 
8190 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8191 					       &filter_replace_buf);
8192 	if (!status && (filter_replace.old_filter_type !=
8193 			filter_replace.new_filter_type))
8194 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8195 			    " original: 0x%x, new: 0x%x",
8196 			    dev->device->name,
8197 			    filter_replace.old_filter_type,
8198 			    filter_replace.new_filter_type);
8199 
8200 	return status;
8201 }
8202 
8203 static enum i40e_status_code
8204 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
8205 {
8206 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8207 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8208 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8209 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8210 	enum i40e_status_code status = I40E_SUCCESS;
8211 
8212 	if (pf->support_multi_driver) {
8213 		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8214 		return I40E_NOT_SUPPORTED;
8215 	}
8216 
8217 	/* For GTP-C */
8218 	memset(&filter_replace, 0,
8219 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8220 	memset(&filter_replace_buf, 0,
8221 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8222 	/* create L1 filter */
8223 	filter_replace.old_filter_type =
8224 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
8225 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
8226 	filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
8227 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8228 	/* Prepare the buffer, 2 entries */
8229 	filter_replace_buf.data[0] =
8230 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8231 	filter_replace_buf.data[0] |=
8232 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8233 	filter_replace_buf.data[2] = 0xFF;
8234 	filter_replace_buf.data[3] = 0xFF;
8235 	filter_replace_buf.data[4] =
8236 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8237 	filter_replace_buf.data[4] |=
8238 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8239 	filter_replace_buf.data[6] = 0xFF;
8240 	filter_replace_buf.data[7] = 0xFF;
8241 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8242 					       &filter_replace_buf);
8243 	if (status < 0)
8244 		return status;
8245 	if (filter_replace.old_filter_type !=
8246 	    filter_replace.new_filter_type)
8247 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8248 			    " original: 0x%x, new: 0x%x",
8249 			    dev->device->name,
8250 			    filter_replace.old_filter_type,
8251 			    filter_replace.new_filter_type);
8252 
8253 	/* for GTP-U */
8254 	memset(&filter_replace, 0,
8255 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8256 	memset(&filter_replace_buf, 0,
8257 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8258 	/* create L1 filter */
8259 	filter_replace.old_filter_type =
8260 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8261 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
8262 	filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
8263 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8264 	/* Prepare the buffer, 2 entries */
8265 	filter_replace_buf.data[0] =
8266 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8267 	filter_replace_buf.data[0] |=
8268 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8269 	filter_replace_buf.data[2] = 0xFF;
8270 	filter_replace_buf.data[3] = 0xFF;
8271 	filter_replace_buf.data[4] =
8272 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8273 	filter_replace_buf.data[4] |=
8274 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8275 	filter_replace_buf.data[6] = 0xFF;
8276 	filter_replace_buf.data[7] = 0xFF;
8277 
8278 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8279 					       &filter_replace_buf);
8280 	if (!status && (filter_replace.old_filter_type !=
8281 			filter_replace.new_filter_type))
8282 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8283 			    " original: 0x%x, new: 0x%x",
8284 			    dev->device->name,
8285 			    filter_replace.old_filter_type,
8286 			    filter_replace.new_filter_type);
8287 
8288 	return status;
8289 }
8290 
8291 static enum i40e_status_code
8292 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
8293 {
8294 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8295 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8296 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8297 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8298 	enum i40e_status_code status = I40E_SUCCESS;
8299 
8300 	if (pf->support_multi_driver) {
8301 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8302 		return I40E_NOT_SUPPORTED;
8303 	}
8304 
8305 	/* for GTP-C */
8306 	memset(&filter_replace, 0,
8307 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8308 	memset(&filter_replace_buf, 0,
8309 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8310 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8311 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
8312 	filter_replace.new_filter_type =
8313 		I40E_AQC_ADD_CLOUD_FILTER_0X11;
8314 	/* Prepare the buffer, 2 entries */
8315 	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
8316 	filter_replace_buf.data[0] |=
8317 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8318 	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8319 	filter_replace_buf.data[4] |=
8320 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8321 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8322 					       &filter_replace_buf);
8323 	if (status < 0)
8324 		return status;
8325 	if (filter_replace.old_filter_type !=
8326 	    filter_replace.new_filter_type)
8327 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8328 			    " original: 0x%x, new: 0x%x",
8329 			    dev->device->name,
8330 			    filter_replace.old_filter_type,
8331 			    filter_replace.new_filter_type);
8332 
8333 	/* for GTP-U */
8334 	memset(&filter_replace, 0,
8335 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8336 	memset(&filter_replace_buf, 0,
8337 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8338 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8339 	filter_replace.old_filter_type =
8340 		I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
8341 	filter_replace.new_filter_type =
8342 		I40E_AQC_ADD_CLOUD_FILTER_0X12;
8343 	/* Prepare the buffer, 2 entries */
8344 	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
8345 	filter_replace_buf.data[0] |=
8346 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8347 	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8348 	filter_replace_buf.data[4] |=
8349 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8350 
8351 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8352 					       &filter_replace_buf);
8353 	if (!status && (filter_replace.old_filter_type !=
8354 			filter_replace.new_filter_type))
8355 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8356 			    " original: 0x%x, new: 0x%x",
8357 			    dev->device->name,
8358 			    filter_replace.old_filter_type,
8359 			    filter_replace.new_filter_type);
8360 
8361 	return status;
8362 }
8363 
8364 static enum i40e_status_code
8365 i40e_replace_port_l1_filter(struct i40e_pf *pf,
8366 			    enum i40e_l4_port_type l4_port_type)
8367 {
8368 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8369 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8370 	enum i40e_status_code status = I40E_SUCCESS;
8371 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8372 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8373 
8374 	if (pf->support_multi_driver) {
8375 		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8376 		return I40E_NOT_SUPPORTED;
8377 	}
8378 
8379 	memset(&filter_replace, 0,
8380 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8381 	memset(&filter_replace_buf, 0,
8382 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8383 
8384 	/* create L1 filter */
8385 	if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8386 		filter_replace.old_filter_type =
8387 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8388 		filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
8389 		filter_replace_buf.data[8] =
8390 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
8391 	} else {
8392 		filter_replace.old_filter_type =
8393 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
8394 		filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10;
8395 		filter_replace_buf.data[8] =
8396 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
8397 	}
8398 
8399 	filter_replace.tr_bit = 0;
8400 	/* Prepare the buffer, 3 entries */
8401 	filter_replace_buf.data[0] =
8402 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
8403 	filter_replace_buf.data[0] |=
8404 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8405 	filter_replace_buf.data[2] = 0x00;
8406 	filter_replace_buf.data[3] =
8407 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
8408 	filter_replace_buf.data[4] =
8409 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
8410 	filter_replace_buf.data[4] |=
8411 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8412 	filter_replace_buf.data[5] = 0x00;
8413 	filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
8414 		I40E_TR_L4_TYPE_TCP |
8415 		I40E_TR_L4_TYPE_SCTP;
8416 	filter_replace_buf.data[7] = 0x00;
8417 	filter_replace_buf.data[8] |=
8418 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8419 	filter_replace_buf.data[9] = 0x00;
8420 	filter_replace_buf.data[10] = 0xFF;
8421 	filter_replace_buf.data[11] = 0xFF;
8422 
8423 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8424 					       &filter_replace_buf);
8425 	if (!status && filter_replace.old_filter_type !=
8426 	    filter_replace.new_filter_type)
8427 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8428 			    " original: 0x%x, new: 0x%x",
8429 			    dev->device->name,
8430 			    filter_replace.old_filter_type,
8431 			    filter_replace.new_filter_type);
8432 
8433 	return status;
8434 }
8435 
8436 static enum i40e_status_code
8437 i40e_replace_port_cloud_filter(struct i40e_pf *pf,
8438 			       enum i40e_l4_port_type l4_port_type)
8439 {
8440 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8441 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8442 	enum i40e_status_code status = I40E_SUCCESS;
8443 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8444 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8445 
8446 	if (pf->support_multi_driver) {
8447 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8448 		return I40E_NOT_SUPPORTED;
8449 	}
8450 
8451 	memset(&filter_replace, 0,
8452 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8453 	memset(&filter_replace_buf, 0,
8454 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8455 
8456 	if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8457 		filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8458 		filter_replace.new_filter_type =
8459 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
8460 		filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
8461 	} else {
8462 		filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
8463 		filter_replace.new_filter_type =
8464 			I40E_AQC_ADD_CLOUD_FILTER_0X10;
8465 		filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
8466 	}
8467 
8468 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8469 	filter_replace.tr_bit = 0;
8470 	/* Prepare the buffer, 2 entries */
8471 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8472 	filter_replace_buf.data[0] |=
8473 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8474 	filter_replace_buf.data[4] |=
8475 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8476 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8477 					       &filter_replace_buf);
8478 
8479 	if (!status && filter_replace.old_filter_type !=
8480 	    filter_replace.new_filter_type)
8481 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8482 			    " original: 0x%x, new: 0x%x",
8483 			    dev->device->name,
8484 			    filter_replace.old_filter_type,
8485 			    filter_replace.new_filter_type);
8486 
8487 	return status;
8488 }
8489 
8490 int
8491 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
8492 		      struct i40e_tunnel_filter_conf *tunnel_filter,
8493 		      uint8_t add)
8494 {
8495 	uint16_t ip_type;
8496 	uint32_t ipv4_addr, ipv4_addr_le;
8497 	uint8_t i, tun_type = 0;
8498 	/* Internal variable used to convert the IPv6 byte order */
8499 	uint32_t convert_ipv6[4];
8500 	int val, ret = 0;
8501 	struct i40e_pf_vf *vf = NULL;
8502 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8503 	struct i40e_vsi *vsi;
8504 	struct i40e_aqc_cloud_filters_element_bb *cld_filter;
8505 	struct i40e_aqc_cloud_filters_element_bb *pfilter;
8506 	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
8507 	struct i40e_tunnel_filter *tunnel, *node;
8508 	struct i40e_tunnel_filter check_filter; /* Check if filter exists */
8509 	uint32_t teid_le;
8510 	bool big_buffer = 0;
8511 
8512 	cld_filter = rte_zmalloc("tunnel_filter",
8513 			 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
8514 			 0);
8515 
8516 	if (cld_filter == NULL) {
8517 		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8518 		return -ENOMEM;
8519 	}
8520 	pfilter = cld_filter;
8521 
8522 	rte_ether_addr_copy(&tunnel_filter->outer_mac,
8523 			(struct rte_ether_addr *)&pfilter->element.outer_mac);
8524 	rte_ether_addr_copy(&tunnel_filter->inner_mac,
8525 			(struct rte_ether_addr *)&pfilter->element.inner_mac);
8526 
8527 	pfilter->element.inner_vlan =
8528 		rte_cpu_to_le_16(tunnel_filter->inner_vlan);
8529 	if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
8530 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
8531 		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
8532 		ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
8533 		rte_memcpy(&pfilter->element.ipaddr.v4.data,
8534 				&ipv4_addr_le,
8535 				sizeof(pfilter->element.ipaddr.v4.data));
8536 	} else {
8537 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
8538 		for (i = 0; i < 4; i++) {
8539 			convert_ipv6[i] =
8540 			rte_cpu_to_le_32(rte_be_to_cpu_32(
8541 					 tunnel_filter->ip_addr.ipv6_addr[i]));
8542 		}
8543 		rte_memcpy(&pfilter->element.ipaddr.v6.data,
8544 			   &convert_ipv6,
8545 			   sizeof(pfilter->element.ipaddr.v6.data));
8546 	}
8547 
8548 	/* Check the tunnel type */
8549 	switch (tunnel_filter->tunnel_type) {
8550 	case I40E_TUNNEL_TYPE_VXLAN:
8551 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
8552 		break;
8553 	case I40E_TUNNEL_TYPE_NVGRE:
8554 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
8555 		break;
8556 	case I40E_TUNNEL_TYPE_IP_IN_GRE:
8557 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
8558 		break;
8559 	case I40E_TUNNEL_TYPE_MPLSoUDP:
8560 		if (!pf->mpls_replace_flag) {
8561 			i40e_replace_mpls_l1_filter(pf);
8562 			i40e_replace_mpls_cloud_filter(pf);
8563 			pf->mpls_replace_flag = 1;
8564 		}
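		/* The 20-bit MPLS label is split across two 16-bit field
		 * vector words: e.g. label 0xABCDE gives WORD0 = 0xABCD
		 * and WORD1 = 0xE000.
		 */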
8565 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8566 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8567 			teid_le >> 4;
8568 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8569 			(teid_le & 0xF) << 12;
8570 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8571 			0x40;
8572 		big_buffer = 1;
8573 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
8574 		break;
8575 	case I40E_TUNNEL_TYPE_MPLSoGRE:
8576 		if (!pf->mpls_replace_flag) {
8577 			i40e_replace_mpls_l1_filter(pf);
8578 			i40e_replace_mpls_cloud_filter(pf);
8579 			pf->mpls_replace_flag = 1;
8580 		}
8581 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8582 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8583 			teid_le >> 4;
8584 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8585 			(teid_le & 0xF) << 12;
8586 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8587 			0x0;
8588 		big_buffer = 1;
8589 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
8590 		break;
8591 	case I40E_TUNNEL_TYPE_GTPC:
8592 		if (!pf->gtp_replace_flag) {
8593 			i40e_replace_gtp_l1_filter(pf);
8594 			i40e_replace_gtp_cloud_filter(pf);
8595 			pf->gtp_replace_flag = 1;
8596 		}
8597 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8598 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
8599 			(teid_le >> 16) & 0xFFFF;
8600 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
8601 			teid_le & 0xFFFF;
8602 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
8603 			0x0;
8604 		big_buffer = 1;
8605 		break;
8606 	case I40E_TUNNEL_TYPE_GTPU:
8607 		if (!pf->gtp_replace_flag) {
8608 			i40e_replace_gtp_l1_filter(pf);
8609 			i40e_replace_gtp_cloud_filter(pf);
8610 			pf->gtp_replace_flag = 1;
8611 		}
8612 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8613 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
8614 			(teid_le >> 16) & 0xFFFF;
8615 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
8616 			teid_le & 0xFFFF;
8617 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
8618 			0x0;
8619 		big_buffer = 1;
8620 		break;
8621 	case I40E_TUNNEL_TYPE_QINQ:
8622 		if (!pf->qinq_replace_flag) {
8623 			ret = i40e_cloud_filter_qinq_create(pf);
8624 			if (ret < 0)
8625 				PMD_DRV_LOG(DEBUG,
8626 					    "QinQ tunnel filter already created.");
8627 			pf->qinq_replace_flag = 1;
8628 		}
8629 		/* Put the values of the outer and inner VLAN
8630 		 * into the general fields.
8631 		 * The big buffer must be used here; see
8632 		 * i40e_aq_add_cloud_filters_bb().
8633 		 */
8634 		pfilter->general_fields[0] = tunnel_filter->inner_vlan;
8635 		pfilter->general_fields[1] = tunnel_filter->outer_vlan;
8636 		big_buffer = 1;
8637 		break;
8638 	case I40E_CLOUD_TYPE_UDP:
8639 	case I40E_CLOUD_TYPE_TCP:
8640 	case I40E_CLOUD_TYPE_SCTP:
8641 		if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8642 			if (!pf->sport_replace_flag) {
8643 				i40e_replace_port_l1_filter(pf,
8644 						tunnel_filter->l4_port_type);
8645 				i40e_replace_port_cloud_filter(pf,
8646 						tunnel_filter->l4_port_type);
8647 				pf->sport_replace_flag = 1;
8648 			}
8649 			teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8650 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8651 				I40E_DIRECTION_INGRESS_KEY;
8652 
8653 			if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8654 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8655 					I40E_TR_L4_TYPE_UDP;
8656 			else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8657 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8658 					I40E_TR_L4_TYPE_TCP;
8659 			else
8660 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8661 					I40E_TR_L4_TYPE_SCTP;
8662 
8663 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8664 				(teid_le >> 16) & 0xFFFF;
8665 			big_buffer = 1;
8666 		} else {
8667 			if (!pf->dport_replace_flag) {
8668 				i40e_replace_port_l1_filter(pf,
8669 						tunnel_filter->l4_port_type);
8670 				i40e_replace_port_cloud_filter(pf,
8671 						tunnel_filter->l4_port_type);
8672 				pf->dport_replace_flag = 1;
8673 			}
8674 			teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8675 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
8676 				I40E_DIRECTION_INGRESS_KEY;
8677 
8678 			if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8679 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8680 					I40E_TR_L4_TYPE_UDP;
8681 			else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8682 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8683 					I40E_TR_L4_TYPE_TCP;
8684 			else
8685 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8686 					I40E_TR_L4_TYPE_SCTP;
8687 
8688 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
8689 				(teid_le >> 16) & 0xFFFF;
8690 			big_buffer = 1;
8691 		}
8692 
8693 		break;
8694 	default:
8695 		/* Other tunnel types are not supported. */
8696 		PMD_DRV_LOG(ERR, "Tunnel type is not supported.");
8697 		rte_free(cld_filter);
8698 		return -EINVAL;
8699 	}
8700 
8701 	if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
8702 		pfilter->element.flags =
8703 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
8704 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
8705 		pfilter->element.flags =
8706 			I40E_AQC_ADD_CLOUD_FILTER_0X12;
8707 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
8708 		pfilter->element.flags =
8709 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
8710 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
8711 		pfilter->element.flags =
8712 			I40E_AQC_ADD_CLOUD_FILTER_0X12;
8713 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
8714 		pfilter->element.flags |=
8715 			I40E_AQC_ADD_CLOUD_FILTER_0X10;
8716 	else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP ||
8717 		 tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP ||
8718 		 tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) {
8719 		if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC)
8720 			pfilter->element.flags |=
8721 				I40E_AQC_ADD_CLOUD_FILTER_0X11;
8722 		else
8723 			pfilter->element.flags |=
8724 				I40E_AQC_ADD_CLOUD_FILTER_0X10;
8725 	} else {
8726 		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8727 						&pfilter->element.flags);
8728 		if (val < 0) {
8729 			rte_free(cld_filter);
8730 			return -EINVAL;
8731 		}
8732 	}
8733 
8734 	pfilter->element.flags |= rte_cpu_to_le_16(
8735 		I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8736 		ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8737 	pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8738 	pfilter->element.queue_number =
8739 		rte_cpu_to_le_16(tunnel_filter->queue_id);
8740 
8741 	if (!tunnel_filter->is_to_vf)
8742 		vsi = pf->main_vsi;
8743 	else {
8744 		if (tunnel_filter->vf_id >= pf->vf_num) {
8745 			PMD_DRV_LOG(ERR, "Invalid argument.");
8746 			rte_free(cld_filter);
8747 			return -EINVAL;
8748 		}
8749 		vf = &pf->vfs[tunnel_filter->vf_id];
8750 		vsi = vf->vsi;
8751 	}
8752 
8753 	/* Check if the filter exists in the SW list */
8754 	memset(&check_filter, 0, sizeof(check_filter));
8755 	i40e_tunnel_filter_convert(cld_filter, &check_filter);
8756 	check_filter.is_to_vf = tunnel_filter->is_to_vf;
8757 	check_filter.vf_id = tunnel_filter->vf_id;
8758 	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8759 	if (add && node) {
8760 		PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8761 		rte_free(cld_filter);
8762 		return -EINVAL;
8763 	}
8764 
8765 	if (!add && !node) {
8766 		PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8767 		rte_free(cld_filter);
8768 		return -EINVAL;
8769 	}
8770 
8771 	if (add) {
8772 		if (big_buffer)
8773 			ret = i40e_aq_add_cloud_filters_bb(hw,
8774 						   vsi->seid, cld_filter, 1);
8775 		else
8776 			ret = i40e_aq_add_cloud_filters(hw,
8777 					vsi->seid, &cld_filter->element, 1);
8778 		if (ret < 0) {
8779 			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8780 			rte_free(cld_filter);
8781 			return -ENOTSUP;
8782 		}
8783 		tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8784 		if (tunnel == NULL) {
8785 			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8786 			rte_free(cld_filter);
8787 			return -ENOMEM;
8788 		}
8789 
8790 		rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8791 		ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8792 		if (ret < 0)
8793 			rte_free(tunnel);
8794 	} else {
8795 		if (big_buffer)
8796 			ret = i40e_aq_rem_cloud_filters_bb(
8797 				hw, vsi->seid, cld_filter, 1);
8798 		else
8799 			ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
8800 						&cld_filter->element, 1);
8801 		if (ret < 0) {
8802 			PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8803 			rte_free(cld_filter);
8804 			return -ENOTSUP;
8805 		}
8806 		ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8807 	}
8808 
8809 	rte_free(cld_filter);
8810 	return ret;
8811 }
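
/*
 * Illustrative usage sketch only (not part of the driver build): add a
 * QinQ tunnel filter steering outer VLAN 20 / inner VLAN 10 to queue 0
 * of the main VSI. The VLAN and queue values are made-up examples.
 */
#if 0
static int
example_add_qinq_filter(struct i40e_pf *pf)
{
	struct i40e_tunnel_filter_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.tunnel_type = I40E_TUNNEL_TYPE_QINQ;
	conf.inner_vlan = 10;
	conf.outer_vlan = 20;
	conf.queue_id = 0;
	conf.is_to_vf = 0;	/* steer to the main VSI, not a VF */

	return i40e_dev_consistent_tunnel_filter_set(pf, &conf, 1);
}
#endif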
8812 
8813 static int
8814 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8815 {
8816 	uint8_t i;
8817 
8818 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8819 		if (pf->vxlan_ports[i] == port)
8820 			return i;
8821 	}
8822 
8823 	return -1;
8824 }
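
/*
 * Note: the vxlan_ports array is zero-initialized and deleted entries
 * are reset to 0, so calling the helper above with port == 0 doubles
 * as a search for a free slot.
 */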
8825 
8826 static int
8827 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
8828 {
8829 	int  idx, ret;
8830 	uint8_t filter_idx = 0;
8831 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8832 
8833 	idx = i40e_get_vxlan_port_idx(pf, port);
8834 
8835 	/* Check if port already exists */
8836 	if (idx >= 0) {
8837 		PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8838 		return -EINVAL;
8839 	}
8840 
8841 	/* Now check if there is space to add the new port */
8842 	idx = i40e_get_vxlan_port_idx(pf, 0);
8843 	if (idx < 0) {
8844 		PMD_DRV_LOG(ERR,
8845 			"Maximum number of UDP ports reached, not adding port %d",
8846 			port);
8847 		return -ENOSPC;
8848 	}
8849 
8850 	ret = i40e_aq_add_udp_tunnel(hw, port, udp_type,
8851 					&filter_idx, NULL);
8852 	if (ret < 0) {
8853 		PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8854 		return -1;
8855 	}
8856 
8857 	PMD_DRV_LOG(INFO, "Added UDP port %d via AQ command, filter index %d",
8858 			 port, filter_idx);
8859 
8860 	/* New port: add it and mark its index in the bitmap */
8861 	pf->vxlan_ports[idx] = port;
8862 	pf->vxlan_bitmap |= (1 << idx);
8863 
8864 	if (!(pf->flags & I40E_FLAG_VXLAN))
8865 		pf->flags |= I40E_FLAG_VXLAN;
8866 
8867 	return 0;
8868 }
8869 
8870 static int
8871 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8872 {
8873 	int idx;
8874 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8875 
8876 	if (!(pf->flags & I40E_FLAG_VXLAN)) {
8877 		PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8878 		return -EINVAL;
8879 	}
8880 
8881 	idx = i40e_get_vxlan_port_idx(pf, port);
8882 
8883 	if (idx < 0) {
8884 		PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8885 		return -EINVAL;
8886 	}
8887 
8888 	if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8889 		PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8890 		return -1;
8891 	}
8892 
8893 	PMD_DRV_LOG(INFO, "Deleted UDP port %d via AQ command, filter index %d",
8894 			port, idx);
8895 
8896 	pf->vxlan_ports[idx] = 0;
8897 	pf->vxlan_bitmap &= ~(1 << idx);
8898 
8899 	if (!pf->vxlan_bitmap)
8900 		pf->flags &= ~I40E_FLAG_VXLAN;
8901 
8902 	return 0;
8903 }
8904 
8905 /* Add UDP tunneling port */
8906 static int
8907 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8908 			     struct rte_eth_udp_tunnel *udp_tunnel)
8909 {
8910 	int ret = 0;
8911 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8912 
8913 	if (udp_tunnel == NULL)
8914 		return -EINVAL;
8915 
8916 	switch (udp_tunnel->prot_type) {
8917 	case RTE_ETH_TUNNEL_TYPE_VXLAN:
8918 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8919 					  I40E_AQC_TUNNEL_TYPE_VXLAN);
8920 		break;
8921 	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
8922 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8923 					  I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
8924 		break;
8925 	case RTE_ETH_TUNNEL_TYPE_GENEVE:
8926 	case RTE_ETH_TUNNEL_TYPE_TEREDO:
8927 		PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8928 		ret = -1;
8929 		break;
8930 
8931 	default:
8932 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8933 		ret = -1;
8934 		break;
8935 	}
8936 
8937 	return ret;
8938 }
8939 
8940 /* Remove UDP tunneling port */
8941 static int
8942 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8943 			     struct rte_eth_udp_tunnel *udp_tunnel)
8944 {
8945 	int ret = 0;
8946 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8947 
8948 	if (udp_tunnel == NULL)
8949 		return -EINVAL;
8950 
8951 	switch (udp_tunnel->prot_type) {
8952 	case RTE_ETH_TUNNEL_TYPE_VXLAN:
8953 	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
8954 		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8955 		break;
8956 	case RTE_ETH_TUNNEL_TYPE_GENEVE:
8957 	case RTE_ETH_TUNNEL_TYPE_TEREDO:
8958 		PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8959 		ret = -1;
8960 		break;
8961 	default:
8962 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8963 		ret = -1;
8964 		break;
8965 	}
8966 
8967 	return ret;
8968 }
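
/*
 * Illustrative sketch only: the two ops above are reached through the
 * generic ethdev API, e.g. registering the standard VXLAN UDP port
 * 4789. "port_id" is assumed to be the application's ethdev port id.
 */
#if 0
struct rte_eth_udp_tunnel tunnel_udp = {
	.udp_port = 4789,
	.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
};
int rc = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
#endif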
8969 
8970 /* Calculate the maximum number of contiguous PF queues that are configured */
8971 int
8972 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8973 {
8974 	struct rte_eth_dev_data *data = pf->dev_data;
8975 	int i, num;
8976 	struct i40e_rx_queue *rxq;
8977 
8978 	num = 0;
8979 	for (i = 0; i < pf->lan_nb_qps; i++) {
8980 		rxq = data->rx_queues[i];
8981 		if (rxq && rxq->q_set)
8982 			num++;
8983 		else
8984 			break;
8985 	}
8986 
8987 	return num;
8988 }
8989 
8990 /* Reset the global configure of hash function and input sets */
8991 static void
8992 i40e_pf_global_rss_reset(struct i40e_pf *pf)
8993 {
8994 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8995 	uint32_t reg, reg_val;
8996 	int i;
8997 
8998 	/* Reset global RSS function sets */
8999 	reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
9000 	if (!(reg_val & I40E_GLQF_CTL_HTOEP_MASK)) {
9001 		reg_val |= I40E_GLQF_CTL_HTOEP_MASK;
9002 		i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg_val);
9003 	}
9004 
9005 	for (i = 0; i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; i++) {
9006 		uint64_t inset;
9007 		int j, pctype;
9008 
9009 		if (hw->mac.type == I40E_MAC_X722)
9010 			pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(i));
9011 		else
9012 			pctype = i;
9013 
9014 		/* Reset pctype insets */
9015 		inset = i40e_get_default_input_set(i);
9016 		if (inset) {
9017 			pf->hash_input_set[pctype] = inset;
9018 			inset = i40e_translate_input_set_reg(hw->mac.type,
9019 							     inset);
9020 
9021 			reg = I40E_GLQF_HASH_INSET(0, pctype);
9022 			i40e_check_write_global_reg(hw, reg, (uint32_t)inset);
9023 			reg = I40E_GLQF_HASH_INSET(1, pctype);
9024 			i40e_check_write_global_reg(hw, reg,
9025 						    (uint32_t)(inset >> 32));
9026 
9027 			/* Clear unused mask registers of the pctype */
9028 			for (j = 0; j < I40E_INSET_MASK_NUM_REG; j++) {
9029 				reg = I40E_GLQF_HASH_MSK(j, pctype);
9030 				i40e_check_write_global_reg(hw, reg, 0);
9031 			}
9032 		}
9033 
9034 		/* Reset pctype symmetric sets */
9035 		reg = I40E_GLQF_HSYM(pctype);
9036 		reg_val = i40e_read_rx_ctl(hw, reg);
9037 		if (reg_val & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
9038 			reg_val &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK;
9039 			i40e_write_global_rx_ctl(hw, reg, reg_val);
9040 		}
9041 	}
9042 	I40E_WRITE_FLUSH(hw);
9043 }
9044 
9045 int
9046 i40e_pf_reset_rss_reta(struct i40e_pf *pf)
9047 {
9048 	struct i40e_hw *hw = &pf->adapter->hw;
9049 	uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
9050 	uint32_t i;
9051 	int num;
9052 
9053 	/* If both VMDQ and RSS are enabled, not all PF queues are
9054 	 * configured. It's necessary to calculate the actual number
9055 	 * of PF queues that are configured.
9056 	 */
9057 	if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
9058 		num = i40e_pf_calc_configured_queues_num(pf);
9059 	else
9060 		num = pf->dev_data->nb_rx_queues;
9061 
9062 	num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
9063 	if (num <= 0)
9064 		return 0;
9065 
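	/* Fill the LUT round-robin, e.g. num == 4 gives 0,1,2,3,0,1,... */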
9066 	for (i = 0; i < hw->func_caps.rss_table_size; i++)
9067 		lut[i] = (uint8_t)(i % (uint32_t)num);
9068 
9069 	return i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i);
9070 }
9071 
9072 int
9073 i40e_pf_reset_rss_key(struct i40e_pf *pf)
9074 {
9075 	const uint8_t key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
9076 			sizeof(uint32_t);
9077 	uint8_t *rss_key;
9078 
9079 	/* Reset key */
9080 	rss_key = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key;
9081 	if (!rss_key ||
9082 	    pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key_len < key_len) {
9083 		static uint32_t rss_key_default[] = {0x6b793944,
9084 			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
9085 			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
9086 			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
9087 
9088 		rss_key = (uint8_t *)rss_key_default;
9089 	}
9090 
9091 	return i40e_set_rss_key(pf->main_vsi, rss_key, key_len);
9092 }
9093 
9094 static int
9095 i40e_pf_rss_reset(struct i40e_pf *pf)
9096 {
9097 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9098 
9099 	int ret;
9100 
9101 	pf->hash_filter_enabled = 0;
9102 	i40e_pf_disable_rss(pf);
9103 	i40e_set_symmetric_hash_enable_per_port(hw, 0);
9104 
9105 	if (!pf->support_multi_driver)
9106 		i40e_pf_global_rss_reset(pf);
9107 
9108 	/* Reset RETA table */
9109 	if (pf->adapter->rss_reta_updated == 0) {
9110 		ret = i40e_pf_reset_rss_reta(pf);
9111 		if (ret)
9112 			return ret;
9113 	}
9114 
9115 	return i40e_pf_reset_rss_key(pf);
9116 }
9117 
9118 /* Configure RSS */
9119 int
9120 i40e_pf_config_rss(struct i40e_pf *pf)
9121 {
9122 	struct i40e_hw *hw;
9123 	enum rte_eth_rx_mq_mode mq_mode;
9124 	uint64_t rss_hf, hena;
9125 	int ret;
9126 
9127 	ret = i40e_pf_rss_reset(pf);
9128 	if (ret) {
9129 		PMD_DRV_LOG(ERR, "Reset RSS failed, RSS has been disabled");
9130 		return ret;
9131 	}
9132 
9133 	rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
9134 	mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
9135 	if (!(rss_hf & pf->adapter->flow_types_mask) ||
9136 	    !(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
9137 		return 0;
9138 
9139 	hw = I40E_PF_TO_HW(pf);
9140 	hena = i40e_config_hena(pf->adapter, rss_hf);
9141 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
9142 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
9143 	I40E_WRITE_FLUSH(hw);
9144 
9145 	return 0;
9146 }
9147 
9148 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
9149 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
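/* Configure the GRE key length the flow parser extracts: len == 3 sets
 * the FVBM mask-enable bit for a 3-byte key, len == 4 clears it for a
 * 4-byte key. Other lengths are rejected.
 */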
9150 int
9151 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
9152 {
9153 	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9154 	uint32_t val, reg;
9155 	int ret = -EINVAL;
9156 
9157 	if (pf->support_multi_driver) {
9158 		PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
9159 		return -ENOTSUP;
9160 	}
9161 
9162 	val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
9163 	PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
9164 
9165 	if (len == 3) {
9166 		reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
9167 	} else if (len == 4) {
9168 		reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
9169 	} else {
9170 		PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
9171 		return ret;
9172 	}
9173 
9174 	if (reg != val) {
9175 		ret = i40e_aq_debug_write_global_register(hw,
9176 						   I40E_GL_PRS_FVBM(2),
9177 						   reg, NULL);
9178 		if (ret != 0)
9179 			return ret;
9180 		PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
9181 			    "with value 0x%08x",
9182 			    I40E_GL_PRS_FVBM(2), reg);
9183 	} else {
9184 		ret = 0;
9185 	}
9186 	PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
9187 		    I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
9188 
9189 	return ret;
9190 }
9191 
9192 /* Set the symmetric hash enable configuration per port */
9193 void
9194 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
9195 {
9196 	uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
9197 
9198 	if (enable > 0) {
9199 		if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)
9200 			return;
9201 
9202 		reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9203 	} else {
9204 		if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK))
9205 			return;
9206 
9207 		reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9208 	}
9209 	i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
9210 	I40E_WRITE_FLUSH(hw);
9211 }
9212 
9213 /**
9214  * Valid input sets for hash and flow director filters per PCTYPE
9215  */
9216 static uint64_t
9217 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
9218 		enum rte_filter_type filter)
9219 {
9220 	uint64_t valid;
9221 
9222 	static const uint64_t valid_hash_inset_table[] = {
9223 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9224 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9225 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9226 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
9227 			I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
9228 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9229 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9230 			I40E_INSET_FLEX_PAYLOAD,
9231 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9232 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9233 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9234 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9235 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9236 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9237 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9238 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9239 			I40E_INSET_FLEX_PAYLOAD,
9240 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9241 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9242 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9243 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9244 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9245 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9246 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9247 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9248 			I40E_INSET_FLEX_PAYLOAD,
9249 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9250 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9251 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9252 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9253 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9254 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9255 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9256 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9257 			I40E_INSET_FLEX_PAYLOAD,
9258 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9259 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9260 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9261 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9262 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9263 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9264 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9265 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9266 			I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9267 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9268 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9269 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9270 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9271 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9272 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9273 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9274 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9275 			I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9276 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9277 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9278 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9279 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9280 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9281 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9282 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9283 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9284 			I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
9285 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9286 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9287 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9288 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9289 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9290 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9291 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9292 			I40E_INSET_FLEX_PAYLOAD,
9293 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9294 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9295 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9296 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9297 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9298 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
9299 			I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
9300 			I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
9301 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9302 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9303 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9304 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9305 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9306 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9307 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9308 			I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
9309 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9310 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9311 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9312 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9313 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9314 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9315 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9316 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9317 			I40E_INSET_FLEX_PAYLOAD,
9318 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9319 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9320 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9321 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9322 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9323 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9324 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9325 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9326 			I40E_INSET_FLEX_PAYLOAD,
9327 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9328 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9329 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9330 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9331 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9332 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9333 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9334 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9335 			I40E_INSET_FLEX_PAYLOAD,
9336 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9337 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9338 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9339 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9340 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9341 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9342 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9343 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9344 			I40E_INSET_FLEX_PAYLOAD,
9345 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9346 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9347 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9348 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9349 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9350 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9351 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9352 			I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
9353 			I40E_INSET_FLEX_PAYLOAD,
9354 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9355 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9356 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9357 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9358 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9359 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9360 			I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
9361 			I40E_INSET_FLEX_PAYLOAD,
9362 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9363 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9364 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9365 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
9366 			I40E_INSET_FLEX_PAYLOAD,
9367 	};
9368 
9369 	/**
9370 	 * Flow director supports only fields defined in
9371 	 * union rte_eth_fdir_flow.
9372 	 */
9373 	static const uint64_t valid_fdir_inset_table[] = {
9374 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9375 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9376 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9377 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9378 		I40E_INSET_IPV4_TTL,
9379 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9380 		I40E_INSET_DMAC | I40E_INSET_SMAC |
9381 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9382 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9383 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9384 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9385 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9386 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9387 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9388 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9389 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9390 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9391 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9392 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9393 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9394 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9395 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9396 		I40E_INSET_DMAC | I40E_INSET_SMAC |
9397 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9398 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9399 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9400 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9401 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9402 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9403 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9404 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9405 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9406 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9407 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9408 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9409 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9410 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9411 		I40E_INSET_SCTP_VT,
9412 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9413 		I40E_INSET_DMAC | I40E_INSET_SMAC |
9414 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9415 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9416 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9417 		I40E_INSET_IPV4_TTL,
9418 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9419 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9420 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9421 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9422 		I40E_INSET_IPV6_HOP_LIMIT,
9423 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9424 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9425 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9426 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9427 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9428 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9429 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9430 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9431 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9432 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9433 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9434 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9435 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9436 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9437 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9438 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9439 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9440 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9441 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9442 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9443 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9444 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9445 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9446 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9447 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9448 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9449 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9450 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9451 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9452 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9453 		I40E_INSET_SCTP_VT,
9454 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9455 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9456 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9457 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9458 		I40E_INSET_IPV6_HOP_LIMIT,
9459 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9460 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9461 		I40E_INSET_LAST_ETHER_TYPE,
9462 	};
9463 
9464 	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9465 		return 0;
9466 	if (filter == RTE_ETH_FILTER_HASH)
9467 		valid = valid_hash_inset_table[pctype];
9468 	else
9469 		valid = valid_fdir_inset_table[pctype];
9470 
9471 	return valid;
9472 }
9473 
9474 /**
9475  * Validate if the input set is allowed for a specific PCTYPE
9476  */
9477 int
9478 i40e_validate_input_set(enum i40e_filter_pctype pctype,
9479 		enum rte_filter_type filter, uint64_t inset)
9480 {
9481 	uint64_t valid;
9482 
9483 	valid = i40e_get_valid_input_set(pctype, filter);
9484 	if (inset & (~valid))
9485 		return -EINVAL;
9486 
9487 	return 0;
9488 }
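
/*
 * Illustrative sketch only: a field that is not in the valid table for
 * the pctype is rejected, e.g. an IPv6 TC bit on an IPv4 UDP flow.
 * "rc" is a hypothetical local used for the result.
 */
#if 0
uint64_t inset = I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_TC;
int rc = i40e_validate_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
				 RTE_ETH_FILTER_HASH, inset);
/* rc == -EINVAL: I40E_INSET_IPV6_TC is not valid for an IPv4 pctype */
#endif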
9489 
9490 /* Default input set field combination per pctype */
9491 uint64_t
9492 i40e_get_default_input_set(uint16_t pctype)
9493 {
9494 	static const uint64_t default_inset_table[] = {
9495 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9496 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9497 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9498 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9499 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9500 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9501 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9502 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9503 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9504 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9505 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9506 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9507 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9508 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9509 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9510 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9511 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9512 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9513 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9514 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9515 			I40E_INSET_SCTP_VT,
9516 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9517 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9518 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9519 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9520 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9521 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9522 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9523 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9524 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9525 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9526 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9527 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9528 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9529 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9530 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9531 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9532 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9533 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9534 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9535 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9536 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9537 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9538 			I40E_INSET_SCTP_VT,
9539 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9540 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9541 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9542 			I40E_INSET_LAST_ETHER_TYPE,
9543 	};
9544 
9545 	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9546 		return 0;
9547 
9548 	return default_inset_table[pctype];
9549 }
9550 
9551 /**
9552  * Translate the input set from generic bit masks to register-aware
9553  * bit masks
9554  */
9555 uint64_t
9556 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9557 {
9558 	uint64_t val = 0;
9559 	uint16_t i;
9560 
9561 	struct inset_map {
9562 		uint64_t inset;
9563 		uint64_t inset_reg;
9564 	};
9565 
9566 	static const struct inset_map inset_map_common[] = {
9567 		{I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9568 		{I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9569 		{I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9570 		{I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9571 		{I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9572 		{I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9573 		{I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9574 		{I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9575 		{I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9576 		{I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9577 		{I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9578 		{I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9579 		{I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9580 		{I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9581 		{I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9582 		{I40E_INSET_TUNNEL_DMAC,
9583 			I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9584 		{I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9585 		{I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9586 		{I40E_INSET_TUNNEL_SRC_PORT,
9587 			I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9588 		{I40E_INSET_TUNNEL_DST_PORT,
9589 			I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9590 		{I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9591 		{I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9592 		{I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9593 		{I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9594 		{I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9595 		{I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9596 		{I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9597 		{I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9598 		{I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9599 	};
9600 
9601 	/* Some registers map differently on the X722 */
9602 	static const struct inset_map inset_map_diff_x722[] = {
9603 		{I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9604 		{I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9605 		{I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9606 		{I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9607 	};
9608 
9609 	static const struct inset_map inset_map_diff_not_x722[] = {
9610 		{I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9611 		{I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9612 		{I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9613 		{I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9614 	};
9615 
9616 	if (input == 0)
9617 		return val;
9618 
9619 	/* Translate input set to register aware inset */
9620 	if (type == I40E_MAC_X722) {
9621 		for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9622 			if (input & inset_map_diff_x722[i].inset)
9623 				val |= inset_map_diff_x722[i].inset_reg;
9624 		}
9625 	} else {
9626 		for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9627 			if (input & inset_map_diff_not_x722[i].inset)
9628 				val |= inset_map_diff_not_x722[i].inset_reg;
9629 		}
9630 	}
9631 
9632 	for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9633 		if (input & inset_map_common[i].inset)
9634 			val |= inset_map_common[i].inset_reg;
9635 	}
9636 
9637 	return val;
9638 }
9639 
9640 static int
9641 i40e_get_inset_field_offset(struct i40e_hw *hw, uint32_t pit_reg_start,
9642 			    uint32_t pit_reg_count, uint32_t hdr_off)
9643 {
9644 	const uint32_t pit_reg_end = pit_reg_start + pit_reg_count;
9645 	uint32_t field_off = I40E_FDIR_FIELD_OFFSET(hdr_off);
9646 	uint32_t i, reg_val, src_off, count;
9647 
9648 	for (i = pit_reg_start; i < pit_reg_end; i++) {
9649 		reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_PIT(i));
9650 
9651 		src_off = I40E_GLQF_PIT_SOURCE_OFF_GET(reg_val);
9652 		count = I40E_GLQF_PIT_FSIZE_GET(reg_val);
9653 
9654 		if (src_off <= field_off && (src_off + count) > field_off)
9655 			break;
9656 	}
9657 
9658 	if (i >= pit_reg_end) {
9659 		PMD_DRV_LOG(ERR,
9660 			    "Hardware GLQF_PIT configuration does not support this field mask");
9661 		return -1;
9662 	}
9663 
9664 	return I40E_GLQF_PIT_DEST_OFF_GET(reg_val) + field_off - src_off;
9665 }
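
/*
 * Each GLQF_PIT register describes one field-relocation entry: a source
 * offset and field size in the parsed header plus a destination offset
 * in the field vector. The helper above scans the IPv4 or IPv6 range of
 * entries for the one covering the requested header offset and returns
 * the corresponding field-vector offset.
 */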
9666 
9667 int
9668 i40e_generate_inset_mask_reg(struct i40e_hw *hw, uint64_t inset,
9669 			     uint32_t *mask, uint8_t nb_elem)
9670 {
9671 	static const uint64_t mask_inset[] = {
9672 		I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL,
9673 		I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT };
9674 
9675 	static const struct {
9676 		uint64_t inset;
9677 		uint32_t mask;
9678 		uint32_t offset;
9679 	} inset_mask_offset_map[] = {
9680 		{ I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK,
9681 		  offsetof(struct rte_ipv4_hdr, type_of_service) },
9682 
9683 		{ I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK,
9684 		  offsetof(struct rte_ipv4_hdr, next_proto_id) },
9685 
9686 		{ I40E_INSET_IPV4_TTL, I40E_INSET_IPV4_TTL_MASK,
9687 		  offsetof(struct rte_ipv4_hdr, time_to_live) },
9688 
9689 		{ I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK,
9690 		  offsetof(struct rte_ipv6_hdr, vtc_flow) },
9691 
9692 		{ I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK,
9693 		  offsetof(struct rte_ipv6_hdr, proto) },
9694 
9695 		{ I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK,
9696 		  offsetof(struct rte_ipv6_hdr, hop_limits) },
9697 	};
9698 
9699 	uint32_t i;
9700 	int idx = 0;
9701 
9702 	assert(mask);
9703 	if (!inset)
9704 		return 0;
9705 
9706 	for (i = 0; i < RTE_DIM(mask_inset); i++) {
9707 		/* Clear the inset bit if no mask register is required,
9708 		 * for example when both proto and ttl are requested
9709 		 */
9710 		if ((mask_inset[i] & inset) == mask_inset[i]) {
9711 			inset &= ~mask_inset[i];
9712 			if (!inset)
9713 				return 0;
9714 		}
9715 	}
9716 
9717 	for (i = 0; i < RTE_DIM(inset_mask_offset_map); i++) {
9718 		uint32_t pit_start, pit_count;
9719 		int offset;
9720 
9721 		if (!(inset_mask_offset_map[i].inset & inset))
9722 			continue;
9723 
9724 		if (inset_mask_offset_map[i].inset &
9725 		    (I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9726 		     I40E_INSET_IPV4_TTL)) {
9727 			pit_start = I40E_GLQF_PIT_IPV4_START;
9728 			pit_count = I40E_GLQF_PIT_IPV4_COUNT;
9729 		} else {
9730 			pit_start = I40E_GLQF_PIT_IPV6_START;
9731 			pit_count = I40E_GLQF_PIT_IPV6_COUNT;
9732 		}
9733 
9734 		offset = i40e_get_inset_field_offset(hw, pit_start, pit_count,
9735 				inset_mask_offset_map[i].offset);
9736 
9737 		if (offset < 0)
9738 			return -EINVAL;
9739 
9740 		if (idx >= nb_elem) {
9741 			PMD_DRV_LOG(ERR,
9742 				    "Configuration of inset mask out of range %u",
9743 				    nb_elem);
9744 			return -ERANGE;
9745 		}
9746 
9747 		mask[idx] = I40E_GLQF_PIT_BUILD((uint32_t)offset,
9748 						inset_mask_offset_map[i].mask);
9749 		idx++;
9750 	}
9751 
9752 	return idx;
9753 }
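
/*
 * Illustrative sketch only: a TOS-only input set needs a single mask
 * entry; on success n == 1 and mask[0] packs the field-vector offset
 * together with I40E_INSET_IPV4_TOS_MASK. "hw" is assumed to be a valid
 * struct i40e_hw pointer.
 */
#if 0
uint32_t mask[I40E_INSET_MASK_NUM_REG] = {0};
int n = i40e_generate_inset_mask_reg(hw, I40E_INSET_IPV4_TOS,
				     mask, I40E_INSET_MASK_NUM_REG);
#endif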
9754 
9755 void
9756 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9757 {
9758 	uint32_t reg = i40e_read_rx_ctl(hw, addr);
9759 
9760 	PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9761 	if (reg != val)
9762 		i40e_write_rx_ctl(hw, addr, val);
9763 	PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9764 		    (uint32_t)i40e_read_rx_ctl(hw, addr));
9765 }
9766 
9767 void
9768 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9769 {
9770 	uint32_t reg = i40e_read_rx_ctl(hw, addr);
9771 	struct rte_eth_dev_data *dev_data =
9772 		((struct i40e_adapter *)hw->back)->pf.dev_data;
9773 	struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
9774 
9775 	if (reg != val) {
9776 		i40e_write_rx_ctl(hw, addr, val);
9777 		PMD_DRV_LOG(WARNING,
9778 			    "i40e device %s changed global register [0x%08x]."
9779 			    " original: 0x%08x, new: 0x%08x",
9780 			    dev->device->name, addr, reg,
9781 			    (uint32_t)i40e_read_rx_ctl(hw, addr));
9782 	}
9783 }
9784 
9785 static void
9786 i40e_filter_input_set_init(struct i40e_pf *pf)
9787 {
9788 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9789 	enum i40e_filter_pctype pctype;
9790 	uint64_t input_set, inset_reg;
9791 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9792 	int num, i;
9793 	uint16_t flow_type;
9794 
9795 	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9796 	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9797 		flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9798 
9799 		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9800 			continue;
9801 
9802 		input_set = i40e_get_default_input_set(pctype);
9803 
9804 		num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
9805 						   I40E_INSET_MASK_NUM_REG);
9806 		if (num < 0)
9807 			return;
9808 		if (pf->support_multi_driver && num > 0) {
9809 			PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9810 			return;
9811 		}
9812 		inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9813 					input_set);
9814 
9815 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9816 				      (uint32_t)(inset_reg & UINT32_MAX));
9817 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9818 				     (uint32_t)((inset_reg >>
9819 				     I40E_32_BIT_WIDTH) & UINT32_MAX));
9820 		if (!pf->support_multi_driver) {
9821 			i40e_check_write_global_reg(hw,
9822 					    I40E_GLQF_HASH_INSET(0, pctype),
9823 					    (uint32_t)(inset_reg & UINT32_MAX));
9824 			i40e_check_write_global_reg(hw,
9825 					     I40E_GLQF_HASH_INSET(1, pctype),
9826 					     (uint32_t)((inset_reg >>
9827 					      I40E_32_BIT_WIDTH) & UINT32_MAX));
9828 
9829 			for (i = 0; i < num; i++) {
9830 				i40e_check_write_global_reg(hw,
9831 						    I40E_GLQF_FD_MSK(i, pctype),
9832 						    mask_reg[i]);
9833 				i40e_check_write_global_reg(hw,
9834 						  I40E_GLQF_HASH_MSK(i, pctype),
9835 						  mask_reg[i]);
9836 			}
9837 			/* Clear unused mask registers of the pctype */
9838 			for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9839 				i40e_check_write_global_reg(hw,
9840 						    I40E_GLQF_FD_MSK(i, pctype),
9841 						    0);
9842 				i40e_check_write_global_reg(hw,
9843 						  I40E_GLQF_HASH_MSK(i, pctype),
9844 						  0);
9845 			}
9846 		} else {
9847 			PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9848 		}
9849 		I40E_WRITE_FLUSH(hw);
9850 
9851 		/* store the default input set */
9852 		if (!pf->support_multi_driver)
9853 			pf->hash_input_set[pctype] = input_set;
9854 		pf->fdir.input_set[pctype] = input_set;
9855 	}
9856 }
9857 
9858 int
9859 i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set,
9860 		    uint32_t pctype, bool add)
9861 {
9862 	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9863 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9864 	uint64_t inset_reg = 0;
9865 	int num, i;
9866 
9867 	if (pf->support_multi_driver) {
9868 		PMD_DRV_LOG(ERR,
9869 			    "Modify input set is not permitted when multi-driver enabled.");
9870 		return -EPERM;
9871 	}
9872 
9873 	/* For X722, get the translated pctype from the FD pctype register */
9874 	if (hw->mac.type == I40E_MAC_X722)
9875 		pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));
9876 
9877 	if (add) {
9878 		/* get inset value in register */
9879 		inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9880 		inset_reg <<= I40E_32_BIT_WIDTH;
9881 		inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9882 		input_set |= pf->hash_input_set[pctype];
9883 	}
9884 	num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
9885 					   I40E_INSET_MASK_NUM_REG);
9886 	if (num < 0)
9887 		return -EINVAL;
9888 
9889 	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9890 
9891 	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9892 				    (uint32_t)(inset_reg & UINT32_MAX));
9893 	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9894 				    (uint32_t)((inset_reg >>
9895 				    I40E_32_BIT_WIDTH) & UINT32_MAX));
9896 
9897 	for (i = 0; i < num; i++)
9898 		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9899 					    mask_reg[i]);
9900 	/* clear unused mask registers of the pctype */
9901 	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9902 		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9903 					    0);
9904 	I40E_WRITE_FLUSH(hw);
9905 
9906 	pf->hash_input_set[pctype] = input_set;
9907 	return 0;
9908 }
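
/*
 * Editor's note: an illustrative sketch, not driver code. It shows how the
 * 64-bit input-set value computed above is split across the paired 32-bit
 * INSET registers (index 0 holds bits 31:0, index 1 holds bits 63:32). The
 * helper name "split_inset64" is hypothetical.
 */
#if 0
static void
split_inset64(uint64_t inset_reg, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(inset_reg & UINT32_MAX);
	*hi = (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) & UINT32_MAX);
}
#endif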
9909 
9910 /* Convert ethertype filter structure */
9911 static int
9912 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9913 			      struct i40e_ethertype_filter *filter)
9914 {
9915 	rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
9916 		RTE_ETHER_ADDR_LEN);
9917 	filter->input.ether_type = input->ether_type;
9918 	filter->flags = input->flags;
9919 	filter->queue = input->queue;
9920 
9921 	return 0;
9922 }
9923 
9924 /* Check if the ethertype filter exists */
9925 struct i40e_ethertype_filter *
9926 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9927 				const struct i40e_ethertype_filter_input *input)
9928 {
9929 	int ret;
9930 
9931 	ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9932 	if (ret < 0)
9933 		return NULL;
9934 
9935 	return ethertype_rule->hash_map[ret];
9936 }
9937 
9938 /* Add ethertype filter in SW list */
9939 static int
9940 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9941 				struct i40e_ethertype_filter *filter)
9942 {
9943 	struct i40e_ethertype_rule *rule = &pf->ethertype;
9944 	int ret;
9945 
9946 	ret = rte_hash_add_key(rule->hash_table, &filter->input);
9947 	if (ret < 0) {
9948 		PMD_DRV_LOG(ERR,
9949 			    "Failed to insert ethertype filter"
9950 			    " into hash table, error %d!",
9951 			    ret);
9952 		return ret;
9953 	}
9954 	rule->hash_map[ret] = filter;
9955 
9956 	TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9957 
9958 	return 0;
9959 }
9960 
9961 /* Delete ethertype filter in SW list */
9962 int
9963 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9964 			     struct i40e_ethertype_filter_input *input)
9965 {
9966 	struct i40e_ethertype_rule *rule = &pf->ethertype;
9967 	struct i40e_ethertype_filter *filter;
9968 	int ret;
9969 
9970 	ret = rte_hash_del_key(rule->hash_table, input);
9971 	if (ret < 0) {
9972 		PMD_DRV_LOG(ERR,
9973 			    "Failed to delete ethertype filter"
9974 			    " from hash table, error %d!",
9975 			    ret);
9976 		return ret;
9977 	}
9978 	filter = rule->hash_map[ret];
9979 	rule->hash_map[ret] = NULL;
9980 
9981 	TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9982 	rte_free(filter);
9983 
9984 	return 0;
9985 }
9986 
9987 /*
9988  * Configure an ethertype filter, which can direct packets by filtering
9989  * on MAC address and ether_type, or on ether_type only
9990  */
9991 int
9992 i40e_ethertype_filter_set(struct i40e_pf *pf,
9993 			struct rte_eth_ethertype_filter *filter,
9994 			bool add)
9995 {
9996 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9997 	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9998 	struct i40e_ethertype_filter *ethertype_filter, *node;
9999 	struct i40e_ethertype_filter check_filter;
10000 	struct i40e_control_filter_stats stats;
10001 	uint16_t flags = 0;
10002 	int ret;
10003 
10004 	if (filter->queue >= pf->dev_data->nb_rx_queues) {
10005 		PMD_DRV_LOG(ERR, "Invalid queue ID");
10006 		return -EINVAL;
10007 	}
10008 	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
10009 		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
10010 		PMD_DRV_LOG(ERR,
10011 			"unsupported ether_type (0x%04x) in control packet filter.",
10012 			filter->ether_type);
10013 		return -EINVAL;
10014 	}
10015 	if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
10016 		PMD_DRV_LOG(WARNING,
10017 			"filtering VLAN ether_type in the first tag is not supported.");
10018 
10019 	/* Check if the filter already exists in the SW list */
10020 	memset(&check_filter, 0, sizeof(check_filter));
10021 	i40e_ethertype_filter_convert(filter, &check_filter);
10022 	node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
10023 					       &check_filter.input);
10024 	if (add && node) {
10025 		PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
10026 		return -EINVAL;
10027 	}
10028 
10029 	if (!add && !node) {
10030 		PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
10031 		return -EINVAL;
10032 	}
10033 
10034 	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
10035 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
10036 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
10037 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
10038 	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
10039 
10040 	memset(&stats, 0, sizeof(stats));
10041 	ret = i40e_aq_add_rem_control_packet_filter(hw,
10042 			filter->mac_addr.addr_bytes,
10043 			filter->ether_type, flags,
10044 			pf->main_vsi->seid,
10045 			filter->queue, add, &stats, NULL);
10046 
10047 	PMD_DRV_LOG(INFO,
10048 		"add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
10049 		ret, stats.mac_etype_used, stats.etype_used,
10050 		stats.mac_etype_free, stats.etype_free);
10051 	if (ret < 0)
10052 		return -ENOSYS;
10053 
10054 	/* Add or delete a filter in SW list */
10055 	if (add) {
10056 		ethertype_filter = rte_zmalloc("ethertype_filter",
10057 				       sizeof(*ethertype_filter), 0);
10058 		if (ethertype_filter == NULL) {
10059 			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
10060 			return -ENOMEM;
10061 		}
10062 
10063 		rte_memcpy(ethertype_filter, &check_filter,
10064 			   sizeof(check_filter));
10065 		ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
10066 		if (ret < 0)
10067 			rte_free(ethertype_filter);
10068 	} else {
10069 		ret = i40e_sw_ethertype_filter_del(pf, &node->input);
10070 	}
10071 
10072 	return ret;
10073 }
10074 
10075 static int
10076 i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
10077 		      const struct rte_flow_ops **ops)
10078 {
10079 	if (dev == NULL)
10080 		return -EINVAL;
10081 
10082 	*ops = &i40e_flow_ops;
10083 	return 0;
10084 }
10085 
10086 /*
10087  * Check and enable Extended Tag.
10088  * Enabling Extended Tag is important for 40G performance.
10089  */
10090 static void
10091 i40e_enable_extended_tag(struct rte_eth_dev *dev)
10092 {
10093 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
10094 	uint32_t buf = 0;
10095 	int ret;
10096 
10097 	ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
10098 				      PCI_DEV_CAP_REG);
10099 	if (ret < 0) {
10100 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
10101 			    PCI_DEV_CAP_REG);
10102 		return;
10103 	}
10104 	if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
10105 		PMD_DRV_LOG(ERR, "Device does not support Extended Tag");
10106 		return;
10107 	}
10108 
10109 	buf = 0;
10110 	ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
10111 				      PCI_DEV_CTRL_REG);
10112 	if (ret < 0) {
10113 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
10114 			    PCI_DEV_CTRL_REG);
10115 		return;
10116 	}
10117 	if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
10118 		PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
10119 		return;
10120 	}
10121 	buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
10122 	ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
10123 				       PCI_DEV_CTRL_REG);
10124 	if (ret < 0) {
10125 		PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
10126 			    PCI_DEV_CTRL_REG);
10127 		return;
10128 	}
10129 }
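
/*
 * Editor's note: the function above is a standard PCI config-space
 * read-modify-write. A condensed sketch (error handling omitted; offsets
 * and masks as used above):
 */
#if 0
	rte_pci_read_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CTRL_REG);
	buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
	rte_pci_write_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CTRL_REG);
#endif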
10130 
10131 /*
10132  * As some registers are not reset unless a global hardware reset occurs,
10133  * hardware initialization is needed to put those registers into an
10134  * expected initial state.
10135  */
10136 static void
10137 i40e_hw_init(struct rte_eth_dev *dev)
10138 {
10139 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10140 
10141 	i40e_enable_extended_tag(dev);
10142 
10143 	/* clear the PF Queue Filter control register */
10144 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
10145 
10146 	/* Disable symmetric hash per port */
10147 	i40e_set_symmetric_hash_enable_per_port(hw, 0);
10148 }
10149 
10150 /*
10151  * For X722 it is possible to have multiple pctypes mapped to the same
10152  * flowtype; however, this function returns only the highest pctype index,
10153  * which is not quite correct. This is a known problem of the i40e driver
10154  * and needs to be fixed later.
10155  */
10156 enum i40e_filter_pctype
10157 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
10158 {
10159 	int i;
10160 	uint64_t pctype_mask;
10161 
10162 	if (flow_type < I40E_FLOW_TYPE_MAX) {
10163 		pctype_mask = adapter->pctypes_tbl[flow_type];
10164 		for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
10165 			if (pctype_mask & (1ULL << i))
10166 				return (enum i40e_filter_pctype)i;
10167 		}
10168 	}
10169 	return I40E_FILTER_PCTYPE_INVALID;
10170 }
10171 
10172 uint16_t
10173 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
10174 			enum i40e_filter_pctype pctype)
10175 {
10176 	uint16_t flowtype;
10177 	uint64_t pctype_mask = 1ULL << pctype;
10178 
10179 	for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
10180 	     flowtype++) {
10181 		if (adapter->pctypes_tbl[flowtype] & pctype_mask)
10182 			return flowtype;
10183 	}
10184 
10185 	return RTE_ETH_FLOW_UNKNOWN;
10186 }
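
/*
 * Editor's note: an illustrative sketch of the pctypes_tbl contract used by
 * the two lookup helpers above. Each flow type indexes a 64-bit mask with one
 * bit per hardware pctype; the entry below is an example, not the full table.
 */
#if 0
	uint64_t pctypes_tbl[I40E_FLOW_TYPE_MAX] = {0};

	/* a flow type with a single pctype bit set (non-X722 case) */
	pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
		1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	/* then pctype -> flowtype -> pctype round-trips to the same value */
#endif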
10187 
10188 /*
10189  * On X710, performance numbers fall far short of expectations on recent
10190  * firmware versions. On XL710, the same is true if promiscuous mode is
10191  * disabled, or if promiscuous mode is enabled and the port MAC address
10192  * equals the packet destination MAC address. The fix for this issue may
10193  * not be integrated in upcoming firmware versions, so a workaround in the
10194  * software driver is needed. It modifies the initial values of 3
10195  * internal-only registers for both X710 and XL710. Note that the values
10196  * for X710 and XL710 could differ, and the workaround can be removed once
10197  * the issue is fixed in firmware.
10198  */
10199 
10200 /* For both X710 and XL710 */
10201 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1	0x10000200
10202 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2	0x203F0200
10203 #define I40E_GL_SWR_PRI_JOIN_MAP_0		0x26CE00
10204 
10205 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
10206 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
10207 
10208 /* For X722 */
10209 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
10210 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
10211 
10212 /* For X710 */
10213 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
10214 /* For XL710 */
10215 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
10216 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
10217 
10218 /*
10219  * GL_SWR_PM_UP_THR:
10220  * The value is not affected by the link speed; it is set according to
10221  * the total number of ports for a better pipe-monitor configuration.
10222  */
10223 static bool
10224 i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10225 {
10226 #define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10227 		.device_id = (dev),   \
10228 		.val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10229 
10230 #define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10231 		.device_id = (dev),   \
10232 		.val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10233 
10234 	static const struct {
10235 		uint16_t device_id;
10236 		uint32_t val;
10237 	} swr_pm_table[] = {
10238 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10239 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10240 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10241 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
10242 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
10243 
10244 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10245 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10246 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10247 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10248 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10249 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10250 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10251 	};
10252 	uint32_t i;
10253 
10254 	if (value == NULL) {
10255 		PMD_DRV_LOG(ERR, "value is NULL");
10256 		return false;
10257 	}
10258 
10259 	for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10260 		if (hw->device_id == swr_pm_table[i].device_id) {
10261 			*value = swr_pm_table[i].val;
10262 
10263 			PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10264 				    "value - 0x%08x",
10265 				    hw->device_id, *value);
10266 			return true;
10267 		}
10268 	}
10269 
10270 	return false;
10271 }
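
/*
 * Editor's note: a minimal usage sketch for the table lookup above, mirroring
 * how i40e_configure_registers() consumes it below; illustrative only.
 */
#if 0
	uint32_t thr_val;

	if (i40e_get_swr_pm_cfg(hw, &thr_val))
		/* thr_val is the EF or SF GL_SWR_PM_UP_THR value */
		i40e_aq_debug_write_register(hw, I40E_GL_SWR_PM_UP_THR,
					     thr_val, NULL);
#endif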
10272 
10273 static int
10274 i40e_dev_sync_phy_type(struct i40e_hw *hw)
10275 {
10276 	enum i40e_status_code status;
10277 	struct i40e_aq_get_phy_abilities_resp phy_ab;
10278 	int ret = -ENOTSUP;
10279 	int retries = 0;
10280 
10281 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
10282 					      NULL);
10283 
10284 	while (status) {
10285 		PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
10286 			status);
10287 		retries++;
10288 		rte_delay_us(100000);
10289 		if (retries < 5)
10290 			status = i40e_aq_get_phy_capabilities(hw, false,
10291 					true, &phy_ab, NULL);
10292 		else
10293 			return ret;
10294 	}
10295 	return 0;
10296 }
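
/*
 * Editor's note: the loop above makes one initial query plus up to four
 * retries at 100 ms intervals (five attempts, roughly 500 ms of delay in
 * the worst case) before giving up with -ENOTSUP. A compact retry sketch in
 * the same spirit, reusing the function's locals (not line-for-line
 * identical to the code above):
 */
#if 0
	while (status && retries++ < 4) {
		rte_delay_us(100000); /* 100 ms between attempts */
		status = i40e_aq_get_phy_capabilities(hw, false, true,
						      &phy_ab, NULL);
	}
	return status ? -ENOTSUP : 0;
#endif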
10297 
10298 static void
10299 i40e_configure_registers(struct i40e_hw *hw)
10300 {
10301 	static struct {
10302 		uint32_t addr;
10303 		uint64_t val;
10304 	} reg_table[] = {
10305 		{I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10306 		{I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10307 		{I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10308 	};
10309 	uint64_t reg;
10310 	uint32_t i;
10311 	int ret;
10312 
10313 	for (i = 0; i < RTE_DIM(reg_table); i++) {
10314 		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10315 			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10316 				reg_table[i].val =
10317 					I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10318 			else /* For X710/XL710/XXV710 */
10319 				if (hw->aq.fw_maj_ver < 6)
10320 					reg_table[i].val =
10321 					     I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10322 				else
10323 					reg_table[i].val =
10324 					     I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10325 		}
10326 
10327 		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10328 			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10329 				reg_table[i].val =
10330 					I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10331 			else /* For X710/XL710/XXV710 */
10332 				reg_table[i].val =
10333 					I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10334 		}
10335 
10336 		if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10337 			uint32_t cfg_val;
10338 
10339 			if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
10340 				PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
10341 					    "GL_SWR_PM_UP_THR value fixup",
10342 					    hw->device_id);
10343 				continue;
10344 			}
10345 
10346 			reg_table[i].val = cfg_val;
10347 		}
10348 
10349 		ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10350 							&reg, NULL);
10351 		if (ret < 0) {
10352 			PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10353 							reg_table[i].addr);
10354 			break;
10355 		}
10356 		PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10357 						reg_table[i].addr, reg);
10358 		if (reg == reg_table[i].val)
10359 			continue;
10360 
10361 		ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10362 						reg_table[i].val, NULL);
10363 		if (ret < 0) {
10364 			PMD_DRV_LOG(ERR,
10365 				"Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10366 				reg_table[i].val, reg_table[i].addr);
10367 			break;
10368 		}
10369 		PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10370 			"0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10371 	}
10372 }
10373 
10374 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10375 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10376 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10377 static int
10378 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10379 {
10380 	uint32_t reg;
10381 	int ret;
10382 
10383 	if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10384 		PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10385 		return -EINVAL;
10386 	}
10387 
10388 	/* Configure for double VLAN RX stripping */
10389 	reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10390 	if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10391 		reg |= I40E_VSI_TSR_QINQ_CONFIG;
10392 		ret = i40e_aq_debug_write_register(hw,
10393 						   I40E_VSI_TSR(vsi->vsi_id),
10394 						   reg, NULL);
10395 		if (ret < 0) {
10396 			PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10397 				    vsi->vsi_id);
10398 			return I40E_ERR_CONFIG;
10399 		}
10400 	}
10401 
10402 	/* Configure for double VLAN TX insertion */
10403 	reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10404 	if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10405 		reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10406 		ret = i40e_aq_debug_write_register(hw,
10407 						   I40E_VSI_L2TAGSTXVALID(
10408 						   vsi->vsi_id), reg, NULL);
10409 		if (ret < 0) {
10410 			PMD_DRV_LOG(ERR,
10411 				"Failed to update VSI_L2TAGSTXVALID[%d]",
10412 				vsi->vsi_id);
10413 			return I40E_ERR_CONFIG;
10414 		}
10415 	}
10416 
10417 	return 0;
10418 }
10419 
10420 static uint64_t
10421 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10422 {
10423 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10424 	uint64_t systim_cycles;
10425 
10426 	systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10427 	systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10428 			<< 32;
10429 
10430 	return systim_cycles;
10431 }
10432 
10433 static uint64_t
10434 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10435 {
10436 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10437 	uint64_t rx_tstamp;
10438 
10439 	rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10440 	rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10441 			<< 32;
10442 
10443 	return rx_tstamp;
10444 }
10445 
10446 static uint64_t
10447 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10448 {
10449 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10450 	uint64_t tx_tstamp;
10451 
10452 	tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10453 	tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10454 			<< 32;
10455 
10456 	return tx_tstamp;
10457 }
10458 
10459 static void
10460 i40e_start_timecounters(struct rte_eth_dev *dev)
10461 {
10462 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10463 	struct i40e_adapter *adapter = dev->data->dev_private;
10464 	struct rte_eth_link link;
10465 	uint32_t tsync_inc_l;
10466 	uint32_t tsync_inc_h;
10467 
10468 	/* Get current link speed. */
10469 	i40e_dev_link_update(dev, 1);
10470 	rte_eth_linkstatus_get(dev, &link);
10471 
10472 	switch (link.link_speed) {
10473 	case RTE_ETH_SPEED_NUM_40G:
10474 	case RTE_ETH_SPEED_NUM_25G:
10475 		tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10476 		tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10477 		break;
10478 	case RTE_ETH_SPEED_NUM_10G:
10479 		tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10480 		tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10481 		break;
10482 	case RTE_ETH_SPEED_NUM_1G:
10483 		tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10484 		tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10485 		break;
10486 	default:
10487 		tsync_inc_l = 0x0;
10488 		tsync_inc_h = 0x0;
10489 	}
10490 
10491 	/* Set the timesync increment value. */
10492 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10493 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10494 
10495 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10496 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10497 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10498 
10499 	adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10500 	adapter->systime_tc.cc_shift = 0;
10501 	adapter->systime_tc.nsec_mask = 0;
10502 
10503 	adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10504 	adapter->rx_tstamp_tc.cc_shift = 0;
10505 	adapter->rx_tstamp_tc.nsec_mask = 0;
10506 
10507 	adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10508 	adapter->tx_tstamp_tc.cc_shift = 0;
10509 	adapter->tx_tstamp_tc.nsec_mask = 0;
10510 }
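
/*
 * Editor's note: illustrative only. With cc_shift = 0 and nsec_mask = 0 as
 * set above, converting a raw cyclecounter read to nanoseconds is a direct
 * pass through rte_timecounter_update(), as the timesync read paths below do:
 */
#if 0
	uint64_t cycles = i40e_read_systime_cyclecounter(dev);
	uint64_t ns = rte_timecounter_update(&adapter->systime_tc, cycles);
	struct timespec ts = rte_ns_to_timespec(ns);
#endif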
10511 
10512 static int
10513 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10514 {
10515 	struct i40e_adapter *adapter = dev->data->dev_private;
10516 
10517 	adapter->systime_tc.nsec += delta;
10518 	adapter->rx_tstamp_tc.nsec += delta;
10519 	adapter->tx_tstamp_tc.nsec += delta;
10520 
10521 	return 0;
10522 }
10523 
10524 static int
10525 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10526 {
10527 	uint64_t ns;
10528 	struct i40e_adapter *adapter = dev->data->dev_private;
10529 
10530 	ns = rte_timespec_to_ns(ts);
10531 
10532 	/* Set the timecounters to a new value. */
10533 	adapter->systime_tc.nsec = ns;
10534 	adapter->rx_tstamp_tc.nsec = ns;
10535 	adapter->tx_tstamp_tc.nsec = ns;
10536 
10537 	return 0;
10538 }
10539 
10540 static int
10541 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10542 {
10543 	uint64_t ns, systime_cycles;
10544 	struct i40e_adapter *adapter = dev->data->dev_private;
10545 
10546 	systime_cycles = i40e_read_systime_cyclecounter(dev);
10547 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10548 	*ts = rte_ns_to_timespec(ns);
10549 
10550 	return 0;
10551 }
10552 
10553 static int
10554 i40e_timesync_enable(struct rte_eth_dev *dev)
10555 {
10556 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10557 	uint32_t tsync_ctl_l;
10558 	uint32_t tsync_ctl_h;
10559 	struct timespec ts;
10560 
10561 	memset(&ts, 0, sizeof(struct timespec));
10562 
10563 	/* Stop the timesync system time. */
10564 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10565 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10566 	/* Reset the timesync system time value. */
10567 	I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10568 	I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10569 
10570 	i40e_start_timecounters(dev);
10571 
10572 	/* Clear timesync registers. */
10573 	I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10574 	I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10575 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10576 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10577 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10578 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10579 
10580 	/* Enable timestamping of PTP packets. */
10581 	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10582 	tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10583 
10584 	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10585 	tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10586 	tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10587 
10588 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10589 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10590 
10591 	/* i40e uses zero-based timestamping so only adjust timecounter */
10592 	i40e_timesync_write_time(dev, &ts);
10593 
10594 	return 0;
10595 }
10596 
10597 static int
10598 i40e_timesync_disable(struct rte_eth_dev *dev)
10599 {
10600 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10601 	uint32_t tsync_ctl_l;
10602 	uint32_t tsync_ctl_h;
10603 
10604 	/* Disable timestamping of transmitted PTP packets. */
10605 	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10606 	tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10607 
10608 	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10609 	tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10610 
10611 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10612 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10613 
10614 	/* Reset the timesync increment value. */
10615 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10616 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10617 
10618 	return 0;
10619 }
10620 
10621 static int
10622 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10623 				struct timespec *timestamp, uint32_t flags)
10624 {
10625 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10626 	struct i40e_adapter *adapter = dev->data->dev_private;
10627 	uint32_t sync_status;
10628 	uint32_t index = flags & 0x03;
10629 	uint64_t rx_tstamp_cycles;
10630 	uint64_t ns;
10631 
10632 	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10633 	if ((sync_status & (1 << index)) == 0)
10634 		return -EINVAL;
10635 
10636 	rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10637 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10638 	*timestamp = rte_ns_to_timespec(ns);
10639 
10640 	return 0;
10641 }
10642 
10643 static int
10644 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10645 				struct timespec *timestamp)
10646 {
10647 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10648 	struct i40e_adapter *adapter = dev->data->dev_private;
10649 	uint32_t sync_status;
10650 	uint64_t tx_tstamp_cycles;
10651 	uint64_t ns;
10652 
10653 	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10654 	if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10655 		return -EINVAL;
10656 
10657 	tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10658 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10659 	*timestamp = rte_ns_to_timespec(ns);
10660 
10661 	return 0;
10662 }
10663 
10664 /*
10665  * i40e_parse_dcb_configure - parse the DCB configuration from the user
10666  * @dev: the device being configured
10667  * @dcb_cfg: pointer to the parsed result
10668  * @tc_map: bit map of enabled traffic classes
10669  *
10670  * Returns 0 on success, negative value on failure
10671  */
10672 static int
10673 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10674 			 struct i40e_dcbx_config *dcb_cfg,
10675 			 uint8_t *tc_map)
10676 {
10677 	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10678 	uint8_t i, tc_bw, bw_lf;
10679 
10680 	memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10681 
10682 	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10683 	if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10684 		PMD_INIT_LOG(ERR, "number of tc exceeds max.");
10685 		return -EINVAL;
10686 	}
10687 
10688 	/* assume each tc has the same bw */
10689 	tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10690 	for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10691 		dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10692 	/* to ensure the sum of tcbw is equal to 100 */
10693 	bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10694 	for (i = 0; i < bw_lf; i++)
10695 		dcb_cfg->etscfg.tcbwtable[i]++;
10696 
10697 	/* assume each tc has the same Transmission Selection Algorithm */
10698 	for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10699 		dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10700 
10701 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10702 		dcb_cfg->etscfg.prioritytable[i] =
10703 				dcb_rx_conf->dcb_tc[i];
10704 
10705 	/* FW needs one App to configure HW */
10706 	dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10707 	dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10708 	dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10709 	dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10710 
10711 	if (dcb_rx_conf->nb_tcs == 0)
10712 		*tc_map = 1; /* tc0 only */
10713 	else
10714 		*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10715 
10716 	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
10717 		dcb_cfg->pfc.willing = 0;
10718 		dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10719 		dcb_cfg->pfc.pfcenable = *tc_map;
10720 	}
10721 	return 0;
10722 }
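
/*
 * Editor's note: a worked example of the bandwidth split above. With
 * nb_tcs = 3: tc_bw = 100 / 3 = 33 and bw_lf = 100 % 3 = 1, so the first TC
 * absorbs the remainder and the table becomes {34, 33, 33}, summing to 100.
 */
#if 0
	uint8_t tcbw[3] = {33, 33, 33};
	int lf;

	for (lf = 0; lf < 100 % 3; lf++)
		tcbw[lf]++;	/* -> {34, 33, 33} */
#endif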
10723 
10724 
10725 static enum i40e_status_code
10726 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10727 			      struct i40e_aqc_vsi_properties_data *info,
10728 			      uint8_t enabled_tcmap)
10729 {
10730 	enum i40e_status_code ret;
10731 	int i, total_tc = 0;
10732 	uint16_t qpnum_per_tc, bsf, qp_idx;
10733 	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10734 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10735 	uint16_t used_queues;
10736 
10737 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10738 	if (ret != I40E_SUCCESS)
10739 		return ret;
10740 
10741 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10742 		if (enabled_tcmap & (1 << i))
10743 			total_tc++;
10744 	}
10745 	if (total_tc == 0)
10746 		total_tc = 1;
10747 	vsi->enabled_tc = enabled_tcmap;
10748 
10749 	/* different VSI types have different queues assigned */
10750 	if (vsi->type == I40E_VSI_MAIN)
10751 		used_queues = dev_data->nb_rx_queues -
10752 			pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10753 	else if (vsi->type == I40E_VSI_VMDQ2)
10754 		used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10755 	else {
10756 		PMD_INIT_LOG(ERR, "unsupported VSI type.");
10757 		return I40E_ERR_NO_AVAILABLE_VSI;
10758 	}
10759 
10760 	qpnum_per_tc = used_queues / total_tc;
10761 	/* Number of queues per enabled TC */
10762 	if (qpnum_per_tc == 0) {
10763 		PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
10764 		return I40E_ERR_INVALID_QP_ID;
10765 	}
10766 	qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10767 				I40E_MAX_Q_PER_TC);
10768 	bsf = rte_bsf32(qpnum_per_tc);
10769 
10770 	/**
10771 	 * Configure TC and queue mapping parameters. For an enabled TC,
10772 	 * allocate qpnum_per_tc queues to its traffic; a disabled TC is
10773 	 * served by the default queue.
10774 	 */
10775 	qp_idx = 0;
10776 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10777 		if (vsi->enabled_tc & (1 << i)) {
10778 			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10779 					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10780 				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10781 			qp_idx += qpnum_per_tc;
10782 		} else
10783 			info->tc_mapping[i] = 0;
10784 	}
10785 
10786 	/* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
10787 	if (vsi->type == I40E_VSI_SRIOV) {
10788 		info->mapping_flags |=
10789 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10790 		for (i = 0; i < vsi->nb_qps; i++)
10791 			info->queue_mapping[i] =
10792 				rte_cpu_to_le_16(vsi->base_queue + i);
10793 	} else {
10794 		info->mapping_flags |=
10795 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10796 		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10797 	}
10798 	info->valid_sections |=
10799 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10800 
10801 	return I40E_SUCCESS;
10802 }
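
/*
 * Editor's note: each tc_mapping[] word packs a queue offset plus a
 * power-of-two queue count (bsf). Decoding mirrors the encoding above and is
 * what i40e_dev_get_dcb_info() does later in this file; example values are
 * hypothetical.
 */
#if 0
	uint16_t m = rte_le_to_cpu_16(info->tc_mapping[i]);
	uint16_t base = (m & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
			I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
	uint16_t nb_queue = 1 << ((m & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
				  I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
	/* e.g. base = 8 with qpnum_per_tc = 4 (bsf = 2) -> queues 8-11 */
#endif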
10803 
10804 /*
10805  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10806  * @veb: VEB to be configured
10807  * @tc_map: enabled TC bitmap
10808  *
10809  * Returns 0 on success, negative value on failure
10810  */
10811 static enum i40e_status_code
10812 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10813 {
10814 	struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10815 	struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10816 	struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10817 	struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10818 	enum i40e_status_code ret = I40E_SUCCESS;
10819 	int i;
10820 	uint32_t bw_max;
10821 
10822 	/* Check if enabled_tc is same as existing or new TCs */
10823 	if (veb->enabled_tc == tc_map)
10824 		return ret;
10825 
10826 	/* configure tc bandwidth */
10827 	memset(&veb_bw, 0, sizeof(veb_bw));
10828 	veb_bw.tc_valid_bits = tc_map;
10829 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
10830 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10831 		if (tc_map & BIT_ULL(i))
10832 			veb_bw.tc_bw_share_credits[i] = 1;
10833 	}
10834 	ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10835 						   &veb_bw, NULL);
10836 	if (ret) {
10837 		PMD_INIT_LOG(ERR,
10838 			"AQ command Config switch_comp BW allocation per TC failed = %d",
10839 			hw->aq.asq_last_status);
10840 		return ret;
10841 	}
10842 
10843 	memset(&ets_query, 0, sizeof(ets_query));
10844 	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10845 						   &ets_query, NULL);
10846 	if (ret != I40E_SUCCESS) {
10847 		PMD_DRV_LOG(ERR,
10848 			"Failed to get switch_comp ETS configuration %u",
10849 			hw->aq.asq_last_status);
10850 		return ret;
10851 	}
10852 	memset(&bw_query, 0, sizeof(bw_query));
10853 	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10854 						  &bw_query, NULL);
10855 	if (ret != I40E_SUCCESS) {
10856 		PMD_DRV_LOG(ERR,
10857 			"Failed to get switch_comp bandwidth configuration %u",
10858 			hw->aq.asq_last_status);
10859 		return ret;
10860 	}
10861 
10862 	/* store and print out BW info */
10863 	veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10864 	veb->bw_info.bw_max = ets_query.tc_bw_max;
10865 	PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10866 	PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10867 	bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10868 		    (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10869 		     I40E_16_BIT_WIDTH);
10870 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10871 		veb->bw_info.bw_ets_share_credits[i] =
10872 				bw_query.tc_bw_share_credits[i];
10873 		veb->bw_info.bw_ets_credits[i] =
10874 				rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10875 		/* 4 bits per TC, 4th bit is reserved */
10876 		veb->bw_info.bw_ets_max[i] =
10877 			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10878 				  RTE_LEN2MASK(3, uint8_t));
10879 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10880 			    veb->bw_info.bw_ets_share_credits[i]);
10881 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10882 			    veb->bw_info.bw_ets_credits[i]);
10883 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10884 			    veb->bw_info.bw_ets_max[i]);
10885 	}
10886 
10887 	veb->enabled_tc = tc_map;
10888 
10889 	return ret;
10890 }
10891 
10892 
10893 /*
10894  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
10895  * @vsi: VSI to be configured
10896  * @tc_map: enabled TC bitmap
10897  *
10898  * Returns 0 on success, negative value on failure
10899  */
10900 static enum i40e_status_code
10901 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
10902 {
10903 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
10904 	struct i40e_vsi_context ctxt;
10905 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
10906 	enum i40e_status_code ret = I40E_SUCCESS;
10907 	int i;
10908 
10909 	/* Check if enabled_tc is same as existing or new TCs */
10910 	if (vsi->enabled_tc == tc_map)
10911 		return ret;
10912 
10913 	/* configure tc bandwidth */
10914 	memset(&bw_data, 0, sizeof(bw_data));
10915 	bw_data.tc_valid_bits = tc_map;
10916 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
10917 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10918 		if (tc_map & BIT_ULL(i))
10919 			bw_data.tc_bw_credits[i] = 1;
10920 	}
10921 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
10922 	if (ret) {
10923 		PMD_INIT_LOG(ERR,
10924 			"AQ command Config VSI BW allocation per TC failed = %d",
10925 			hw->aq.asq_last_status);
10926 		goto out;
10927 	}
10928 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
10929 		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
10930 
10931 	/* Update Queue Pairs Mapping for currently enabled UPs */
10932 	ctxt.seid = vsi->seid;
10933 	ctxt.pf_num = hw->pf_id;
10934 	ctxt.vf_num = 0;
10935 	ctxt.uplink_seid = vsi->uplink_seid;
10936 	ctxt.info = vsi->info;
10937 	i40e_get_cap(hw);
10938 	ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
10939 	if (ret)
10940 		goto out;
10941 
10942 	/* Update the VSI after updating the VSI queue-mapping information */
10943 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
10944 	if (ret) {
10945 		PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
10946 			hw->aq.asq_last_status);
10947 		goto out;
10948 	}
10949 	/* update the local VSI info with updated queue map */
10950 	rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
10951 					sizeof(vsi->info.tc_mapping));
10952 	rte_memcpy(&vsi->info.queue_mapping,
10953 			&ctxt.info.queue_mapping,
10954 		sizeof(vsi->info.queue_mapping));
10955 	vsi->info.mapping_flags = ctxt.info.mapping_flags;
10956 	vsi->info.valid_sections = 0;
10957 
10958 	/* query and update current VSI BW information */
10959 	ret = i40e_vsi_get_bw_config(vsi);
10960 	if (ret) {
10961 		PMD_INIT_LOG(ERR,
10962 			 "Failed updating vsi bw info, err %s aq_err %s",
10963 			 i40e_stat_str(hw, ret),
10964 			 i40e_aq_str(hw, hw->aq.asq_last_status));
10965 		goto out;
10966 	}
10967 
10968 	vsi->enabled_tc = tc_map;
10969 
10970 out:
10971 	return ret;
10972 }
10973 
10974 /*
10975  * i40e_dcb_hw_configure - program the DCB settings to HW
10976  * @pf: the PF on which the configuration is applied
10977  * @new_cfg: new configuration
10978  * @tc_map: enabled TC bitmap
10979  *
10980  * Returns 0 on success, negative value on failure
10981  */
10982 static enum i40e_status_code
10983 i40e_dcb_hw_configure(struct i40e_pf *pf,
10984 		      struct i40e_dcbx_config *new_cfg,
10985 		      uint8_t tc_map)
10986 {
10987 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10988 	struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
10989 	struct i40e_vsi *main_vsi = pf->main_vsi;
10990 	struct i40e_vsi_list *vsi_list;
10991 	enum i40e_status_code ret;
10992 	int i;
10993 	uint32_t val;
10994 
10995 	/* Use the FW API if FW >= v4.4 */
10996 	if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
10997 	      (hw->aq.fw_maj_ver >= 5))) {
10998 		PMD_INIT_LOG(ERR,
10999 			"FW < v4.4, can not use FW LLDP API to configure DCB");
11000 		return I40E_ERR_FIRMWARE_API_VERSION;
11001 	}
11002 
11003 	/* Check if reconfiguration is needed */
11004 	if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11005 		PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
11006 		return I40E_SUCCESS;
11007 	}
11008 
11009 	/* Copy the new config to the current config */
11010 	*old_cfg = *new_cfg;
11011 	old_cfg->etsrec = old_cfg->etscfg;
11012 	ret = i40e_set_dcb_config(hw);
11013 	if (ret) {
11014 		PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11015 			 i40e_stat_str(hw, ret),
11016 			 i40e_aq_str(hw, hw->aq.asq_last_status));
11017 		return ret;
11018 	}
11019 	/* set receive Arbiter to RR mode and ETS scheme by default */
11020 	for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11021 		val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11022 		val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
11023 			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11024 			 I40E_PRTDCB_RETSTCC_ETSTC_MASK);
11025 		val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11026 			I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11027 			 I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11028 		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11029 			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11030 		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11031 			 I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11032 		I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11033 	}
11034 	/* get local mib to check whether it is configured correctly */
11035 	/* IEEE mode */
11036 	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11037 	/* Get Local DCB Config */
11038 	i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11039 				     &hw->local_dcbx_config);
11040 
11041 	/* if a VEB is created, its TC needs to be updated first */
11042 	if (main_vsi->veb) {
11043 		ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11044 		if (ret)
11045 			PMD_INIT_LOG(WARNING,
11046 				 "Failed configuring TC for VEB seid=%d",
11047 				 main_vsi->veb->seid);
11048 	}
11049 	/* Update each VSI */
11050 	i40e_vsi_config_tc(main_vsi, tc_map);
11051 	if (main_vsi->veb) {
11052 		TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11053 			/* Besides the main VSI and VMDQ VSIs, only enable the
11054 			 * default TC for other VSIs
11055 			 */
11056 			if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11057 				ret = i40e_vsi_config_tc(vsi_list->vsi,
11058 							 tc_map);
11059 			else
11060 				ret = i40e_vsi_config_tc(vsi_list->vsi,
11061 							 I40E_DEFAULT_TCMAP);
11062 			if (ret)
11063 				PMD_INIT_LOG(WARNING,
11064 					"Failed configuring TC for VSI seid=%d",
11065 					vsi_list->vsi->seid);
11066 			/* continue */
11067 		}
11068 	}
11069 	return I40E_SUCCESS;
11070 }
11071 
11072 /*
11073  * i40e_dcb_init_configure - initial DCB configuration
11074  * @dev: device being configured
11075  * @sw_dcb: indicates whether DCB is SW configured or HW offloaded
11076  *
11077  * Returns 0 on success, negative value on failure
11078  */
11079 int
11080 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11081 {
11082 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11083 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11084 	int i, ret = 0;
11085 
11086 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
11087 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11088 		return -ENOTSUP;
11089 	}
11090 
11091 	/* DCB initialization:
11092 	 * Update DCB configuration from the Firmware and configure
11093 	 * LLDP MIB change event.
11094 	 */
11095 	if (sw_dcb == TRUE) {
11096 		/* Stopping LLDP is necessary for DPDK, but it causes DCB
11097 		 * init to fail. For i40e_init_dcb(), the prerequisite for
11098 		 * successful DCB initialization is that LLDP is enabled,
11099 		 * so LLDP is started before DCB init and stopped again
11100 		 * after initialization.
11101 		 */
11102 		ret = i40e_aq_start_lldp(hw, true, NULL);
11103 		if (ret != I40E_SUCCESS)
11104 			PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11105 
11106 		ret = i40e_init_dcb(hw, true);
11107 		/* If the LLDP agent is stopped, i40e_init_dcb() is expected
11108 		 * to fail with adminq status I40E_AQ_RC_EPERM. Otherwise,
11109 		 * it should return success.
11110 		 */
11111 		if (ret == I40E_SUCCESS ||
11112 		    hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
11113 			memset(&hw->local_dcbx_config, 0,
11114 				sizeof(struct i40e_dcbx_config));
11115 			/* set dcb default configuration */
11116 			hw->local_dcbx_config.etscfg.willing = 0;
11117 			hw->local_dcbx_config.etscfg.maxtcs = 0;
11118 			hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11119 			hw->local_dcbx_config.etscfg.tsatable[0] =
11120 						I40E_IEEE_TSA_ETS;
11121 			/* all UPs mapping to TC0 */
11122 			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11123 				hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11124 			hw->local_dcbx_config.etsrec =
11125 				hw->local_dcbx_config.etscfg;
11126 			hw->local_dcbx_config.pfc.willing = 0;
11127 			hw->local_dcbx_config.pfc.pfccap =
11128 						I40E_MAX_TRAFFIC_CLASS;
11129 			/* FW needs one App to configure HW */
11130 			hw->local_dcbx_config.numapps = 1;
11131 			hw->local_dcbx_config.app[0].selector =
11132 						I40E_APP_SEL_ETHTYPE;
11133 			hw->local_dcbx_config.app[0].priority = 3;
11134 			hw->local_dcbx_config.app[0].protocolid =
11135 						I40E_APP_PROTOID_FCOE;
11136 			ret = i40e_set_dcb_config(hw);
11137 			if (ret) {
11138 				PMD_INIT_LOG(ERR,
11139 					"default dcb config fails. err = %d, aq_err = %d.",
11140 					ret, hw->aq.asq_last_status);
11141 				return -ENOSYS;
11142 			}
11143 		} else {
11144 			PMD_INIT_LOG(ERR,
11145 				"DCB initialization in FW fails, err = %d, aq_err = %d.",
11146 				ret, hw->aq.asq_last_status);
11147 			return -ENOTSUP;
11148 		}
11149 
11150 		if (i40e_need_stop_lldp(dev)) {
11151 			ret = i40e_aq_stop_lldp(hw, true, true, NULL);
11152 			if (ret != I40E_SUCCESS)
11153 				PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
11154 		}
11155 	} else {
11156 		ret = i40e_aq_start_lldp(hw, true, NULL);
11157 		if (ret != I40E_SUCCESS)
11158 			PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11159 
11160 		ret = i40e_init_dcb(hw, true);
11161 		if (!ret) {
11162 			if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
11163 				PMD_INIT_LOG(ERR,
11164 					"HW doesn't support DCBX offload.");
11165 				return -ENOTSUP;
11166 			}
11167 		} else {
11168 			PMD_INIT_LOG(ERR,
11169 				"DCBX configuration failed, err = %d, aq_err = %d.",
11170 				ret, hw->aq.asq_last_status);
11171 			return -ENOTSUP;
11172 		}
11173 	}
11174 	return 0;
11175 }
11176 
11177 /*
11178  * i40e_dcb_setup - set up DCB-related configuration
11179  * @dev: device being configured
11180  *
11181  * Returns 0 on success, negative value on failure
11182  */
11183 static int
11184 i40e_dcb_setup(struct rte_eth_dev *dev)
11185 {
11186 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11187 	struct i40e_dcbx_config dcb_cfg;
11188 	uint8_t tc_map = 0;
11189 	int ret = 0;
11190 
11191 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
11192 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11193 		return -ENOTSUP;
11194 	}
11195 
11196 	if (pf->vf_num != 0)
11197 		PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDQ VSIs.");
11198 
11199 	ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11200 	if (ret) {
11201 		PMD_INIT_LOG(ERR, "invalid dcb config");
11202 		return -EINVAL;
11203 	}
11204 	ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11205 	if (ret) {
11206 		PMD_INIT_LOG(ERR, "dcb sw configure fails");
11207 		return -ENOSYS;
11208 	}
11209 
11210 	return 0;
11211 }
11212 
11213 static int
11214 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11215 		      struct rte_eth_dcb_info *dcb_info)
11216 {
11217 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11218 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11219 	struct i40e_vsi *vsi = pf->main_vsi;
11220 	struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11221 	uint16_t bsf, tc_mapping;
11222 	int i, j = 0;
11223 
11224 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
11225 		dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11226 	else
11227 		dcb_info->nb_tcs = 1;
11228 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11229 		dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11230 	for (i = 0; i < dcb_info->nb_tcs; i++)
11231 		dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11232 
11233 	/* get queue mapping if vmdq is disabled */
11234 	if (!pf->nb_cfg_vmdq_vsi) {
11235 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11236 			if (!(vsi->enabled_tc & (1 << i)))
11237 				continue;
11238 			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11239 			dcb_info->tc_queue.tc_rxq[j][i].base =
11240 				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11241 				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11242 			dcb_info->tc_queue.tc_txq[j][i].base =
11243 				dcb_info->tc_queue.tc_rxq[j][i].base;
11244 			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11245 				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11246 			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11247 			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11248 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11249 		}
11250 		return 0;
11251 	}
11252 
11253 	/* get queue mapping if vmdq is enabled */
11254 	do {
11255 		vsi = pf->vmdq[j].vsi;
11256 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11257 			if (!(vsi->enabled_tc & (1 << i)))
11258 				continue;
11259 			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11260 			dcb_info->tc_queue.tc_rxq[j][i].base =
11261 				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11262 				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11263 			dcb_info->tc_queue.tc_txq[j][i].base =
11264 				dcb_info->tc_queue.tc_rxq[j][i].base;
11265 			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11266 				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11267 			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11268 			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11269 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11270 		}
11271 		j++;
11272 	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, RTE_ETH_MAX_VMDQ_POOL));
11273 	return 0;
11274 }
11275 
11276 static int
11277 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11278 {
11279 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11280 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
11281 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11282 	uint16_t msix_intr;
11283 
11284 	msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
11285 	if (msix_intr == I40E_MISC_VEC_ID)
11286 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11287 			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
11288 			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11289 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11290 	else
11291 		I40E_WRITE_REG(hw,
11292 			       I40E_PFINT_DYN_CTLN(msix_intr -
11293 						   I40E_RX_VEC_START),
11294 			       I40E_PFINT_DYN_CTLN_INTENA_MASK |
11295 			       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11296 			       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11297 
11298 	I40E_WRITE_FLUSH(hw);
11299 	rte_intr_ack(pci_dev->intr_handle);
11300 
11301 	return 0;
11302 }
11303 
11304 static int
11305 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11306 {
11307 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11308 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
11309 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11310 	uint16_t msix_intr;
11311 
11312 	msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
11313 	if (msix_intr == I40E_MISC_VEC_ID)
11314 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11315 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11316 	else
11317 		I40E_WRITE_REG(hw,
11318 			       I40E_PFINT_DYN_CTLN(msix_intr -
11319 						   I40E_RX_VEC_START),
11320 			       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11321 	I40E_WRITE_FLUSH(hw);
11322 
11323 	return 0;
11324 }
11325 
11326 /**
11327  * This function is used to check if the register is valid.
11328  * Below are the register ranges that are valid for X722 only:
11329  * 0x2b800--0x2bb00
11330  * 0x38700--0x38a00
11331  * 0x3d800--0x3db00
11332  * 0x208e00--0x209000
11333  * 0x20be00--0x20c000
11334  * 0x263c00--0x264000
11335  * 0x265c00--0x266000
11336  */
11337 static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
11338 {
11339 	if ((type != I40E_MAC_X722) &&
11340 	    ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
11341 	     (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
11342 	     (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
11343 	     (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
11344 	     (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
11345 	     (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
11346 	     (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
11347 		return 0;
11348 	else
11349 		return 1;
11350 }
11351 
11352 static int i40e_get_regs(struct rte_eth_dev *dev,
11353 			 struct rte_dev_reg_info *regs)
11354 {
11355 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11356 	uint32_t *ptr_data = regs->data;
11357 	uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11358 	const struct i40e_reg_info *reg_info;
11359 
11360 	if (ptr_data == NULL) {
11361 		regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11362 		regs->width = sizeof(uint32_t);
11363 		return 0;
11364 	}
11365 
11366 	/* The first few registers have to be read using AQ operations */
11367 	reg_idx = 0;
11368 	while (i40e_regs_adminq[reg_idx].name) {
11369 		reg_info = &i40e_regs_adminq[reg_idx++];
11370 		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11371 			for (arr_idx2 = 0;
11372 					arr_idx2 <= reg_info->count2;
11373 					arr_idx2++) {
11374 				reg_offset = arr_idx * reg_info->stride1 +
11375 					arr_idx2 * reg_info->stride2;
11376 				reg_offset += reg_info->base_addr;
11377 				ptr_data[reg_offset >> 2] =
11378 					i40e_read_rx_ctl(hw, reg_offset);
11379 			}
11380 	}
11381 
11382 	/* The remaining registers can be read using primitives */
11383 	reg_idx = 0;
11384 	while (i40e_regs_others[reg_idx].name) {
11385 		reg_info = &i40e_regs_others[reg_idx++];
11386 		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11387 			for (arr_idx2 = 0;
11388 					arr_idx2 <= reg_info->count2;
11389 					arr_idx2++) {
11390 				reg_offset = arr_idx * reg_info->stride1 +
11391 					arr_idx2 * reg_info->stride2;
11392 				reg_offset += reg_info->base_addr;
11393 				if (!i40e_valid_regs(hw->mac.type, reg_offset))
11394 					ptr_data[reg_offset >> 2] = 0;
11395 				else
11396 					ptr_data[reg_offset >> 2] =
11397 						I40E_READ_REG(hw, reg_offset);
11398 			}
11399 	}
11400 
11401 	return 0;
11402 }
11403 
11404 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11405 {
11406 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11407 
11408 	/* Convert word count to byte count */
11409 	return hw->nvm.sr_size << 1;
11410 }
11411 
11412 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11413 			   struct rte_dev_eeprom_info *eeprom)
11414 {
11415 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11416 	uint16_t *data = eeprom->data;
11417 	uint16_t offset, length, cnt_words;
11418 	int ret_code;
11419 
11420 	offset = eeprom->offset >> 1;
11421 	length = eeprom->length >> 1;
11422 	cnt_words = length;
11423 
11424 	if (offset > hw->nvm.sr_size ||
11425 		offset + length > hw->nvm.sr_size) {
11426 		PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11427 		return -EINVAL;
11428 	}
11429 
11430 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11431 
11432 	ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11433 	if (ret_code != I40E_SUCCESS || cnt_words != length) {
11434 		PMD_DRV_LOG(ERR, "EEPROM read failed.");
11435 		return -EIO;
11436 	}
11437 
11438 	return 0;
11439 }
11440 
11441 static int i40e_get_module_info(struct rte_eth_dev *dev,
11442 				struct rte_eth_dev_module_info *modinfo)
11443 {
11444 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11445 	uint32_t sff8472_comp = 0;
11446 	uint32_t sff8472_swap = 0;
11447 	uint32_t sff8636_rev = 0;
11448 	i40e_status status;
11449 	uint32_t type = 0;
11450 
11451 	/* Check if firmware supports reading module EEPROM. */
11452 	if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
11453 		PMD_DRV_LOG(ERR,
11454 			    "Module EEPROM memory read not supported. "
11455 			    "Please update the NVM image.");
11456 		return -EINVAL;
11457 	}
11458 
11459 	status = i40e_update_link_info(hw);
11460 	if (status)
11461 		return -EIO;
11462 
11463 	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
11464 		PMD_DRV_LOG(ERR,
11465 			    "Cannot read module EEPROM memory. "
11466 			    "No module connected.");
11467 		return -EINVAL;
11468 	}
11469 
11470 	type = hw->phy.link_info.module_type[0];
11471 
11472 	switch (type) {
11473 	case I40E_MODULE_TYPE_SFP:
11474 		status = i40e_aq_get_phy_register(hw,
11475 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11476 				I40E_I2C_EEPROM_DEV_ADDR, 1,
11477 				I40E_MODULE_SFF_8472_COMP,
11478 				&sff8472_comp, NULL);
11479 		if (status)
11480 			return -EIO;
11481 
11482 		status = i40e_aq_get_phy_register(hw,
11483 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11484 				I40E_I2C_EEPROM_DEV_ADDR, 1,
11485 				I40E_MODULE_SFF_8472_SWAP,
11486 				&sff8472_swap, NULL);
11487 		if (status)
11488 			return -EIO;
11489 
11490 		/* Check if the module requires address swap to access
11491 		 * the other EEPROM memory page.
11492 		 */
11493 		if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
11494 			PMD_DRV_LOG(WARNING,
11495 				    "Module address swap to access "
11496 				    "page 0xA2 is not supported.");
11497 			modinfo->type = RTE_ETH_MODULE_SFF_8079;
11498 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11499 		} else if (sff8472_comp == 0x00) {
11500 			/* Module is not SFF-8472 compliant */
11501 			modinfo->type = RTE_ETH_MODULE_SFF_8079;
11502 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11503 		} else {
11504 			modinfo->type = RTE_ETH_MODULE_SFF_8472;
11505 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
11506 		}
11507 		break;
11508 	case I40E_MODULE_TYPE_QSFP_PLUS:
11509 		/* Read from memory page 0. */
11510 		status = i40e_aq_get_phy_register(hw,
11511 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11512 				0, 1,
11513 				I40E_MODULE_REVISION_ADDR,
11514 				&sff8636_rev, NULL);
11515 		if (status)
11516 			return -EIO;
11517 		/* Determine revision compliance byte */
11518 		if (sff8636_rev > 0x02) {
11519 			/* Module is SFF-8636 compliant */
11520 			modinfo->type = RTE_ETH_MODULE_SFF_8636;
11521 			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11522 		} else {
11523 			modinfo->type = RTE_ETH_MODULE_SFF_8436;
11524 			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11525 		}
11526 		break;
11527 	case I40E_MODULE_TYPE_QSFP28:
11528 		modinfo->type = RTE_ETH_MODULE_SFF_8636;
11529 		modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11530 		break;
11531 	default:
11532 		PMD_DRV_LOG(ERR, "Module type unrecognized");
11533 		return -EINVAL;
11534 	}
11535 	return 0;
11536 }
11537 
11538 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
11539 				  struct rte_dev_eeprom_info *info)
11540 {
11541 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11542 	bool is_sfp = false;
11543 	i40e_status status;
11544 	uint8_t *data;
11545 	uint32_t value = 0;
11546 	uint32_t i;
11547 
11548 	if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
11549 		is_sfp = true;
11550 
11551 	data = info->data;
11552 	for (i = 0; i < info->length; i++) {
11553 		uint32_t offset = i + info->offset;
11554 		uint32_t addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
11555 
11556 		/* Check if we need to access the other memory page */
11557 		if (is_sfp) {
11558 			if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
11559 				offset -= RTE_ETH_MODULE_SFF_8079_LEN;
11560 				addr = I40E_I2C_EEPROM_DEV_ADDR2;
11561 			}
11562 		} else {
11563 			while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
11564 				/* Compute memory page number and offset. */
11565 				offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
11566 				addr++;
11567 			}
11568 		}
11569 		status = i40e_aq_get_phy_register(hw,
11570 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11571 				addr, 1, offset, &value, NULL);
11572 		if (status)
11573 			return -EIO;
11574 		data[i] = (uint8_t)value;
11575 	}
11576 	return 0;
11577 }
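
/*
 * Usage sketch (illustrative only, not part of the driver): module type
 * and module EEPROM contents are retrieved through the generic ethdev
 * API (port_id is assumed to be a valid probed port):
 *
 *	struct rte_eth_dev_module_info modinfo = {0};
 *	struct rte_dev_eeprom_info info = {0};
 *	uint8_t buf[640];	/* large enough for SFF-8636 modules */
 *
 *	if (rte_eth_dev_get_module_info(port_id, &modinfo) == 0) {
 *		info.offset = 0;
 *		info.length = RTE_MIN(modinfo.eeprom_len, (uint32_t)sizeof(buf));
 *		info.data = buf;
 *		rte_eth_dev_get_module_eeprom(port_id, &info);
 *	}
 */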
11578 
11579 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11580 				     struct rte_ether_addr *mac_addr)
11581 {
11582 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11583 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11584 	struct i40e_vsi *vsi = pf->main_vsi;
11585 	struct i40e_mac_filter_info mac_filter;
11586 	struct i40e_mac_filter *f;
11587 	int ret;
11588 
11589 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
11590 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11591 		return -EINVAL;
11592 	}
11593 
11594 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
11595 		if (rte_is_same_ether_addr(&pf->dev_addr,
11596 						&f->mac_info.mac_addr))
11597 			break;
11598 	}
11599 
11600 	if (f == NULL) {
11601 		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11602 		return -EIO;
11603 	}
11604 
11605 	mac_filter = f->mac_info;
11606 	ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11607 	if (ret != I40E_SUCCESS) {
11608 		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11609 		return -EIO;
11610 	}
11611 	memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11612 	ret = i40e_vsi_add_mac(vsi, &mac_filter);
11613 	if (ret != I40E_SUCCESS) {
11614 		PMD_DRV_LOG(ERR, "Failed to add mac filter");
11615 		return -EIO;
11616 	}
11617 	memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11618 
11619 	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11620 					mac_addr->addr_bytes, NULL);
11621 	if (ret != I40E_SUCCESS) {
11622 		PMD_DRV_LOG(ERR, "Failed to change mac");
11623 		return -EIO;
11624 	}
11625 
11626 	return 0;
11627 }
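
/*
 * Usage sketch (illustrative only, not part of the driver): the callback
 * above backs rte_eth_dev_default_mac_addr_set(); the new address must
 * be a valid assigned unicast address (port_id is assumed valid):
 *
 *	struct rte_ether_addr addr = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	rte_eth_dev_default_mac_addr_set(port_id, &addr);
 */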
11628 
11629 static int
11630 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
11631 {
11632 	/* MTU setting is forbidden while the port is started */
11633 	if (dev->data->dev_started != 0) {
11634 		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11635 			    dev->data->port_id);
11636 		return -EBUSY;
11637 	}
11638 
11639 	return 0;
11640 }
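
/*
 * Usage sketch (illustrative only, not part of the driver): because the
 * callback above returns -EBUSY on a started port, callers stop the port
 * before changing the MTU:
 *
 *	rte_eth_dev_stop(port_id);
 *	rte_eth_dev_set_mtu(port_id, 1500);
 *	rte_eth_dev_start(port_id);
 */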
11641 
11642 /* Restore ethertype filter */
11643 static void
11644 i40e_ethertype_filter_restore(struct i40e_pf *pf)
11645 {
11646 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11647 	struct i40e_ethertype_filter_list
11648 		*ethertype_list = &pf->ethertype.ethertype_list;
11649 	struct i40e_ethertype_filter *f;
11650 	struct i40e_control_filter_stats stats = {0}; /* logged even if list is empty */
11651 	uint16_t flags;
11652 
11653 	TAILQ_FOREACH(f, ethertype_list, rules) {
11654 		flags = 0;
11655 		if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11656 			flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11657 		if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11658 			flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11659 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11660 
11661 		memset(&stats, 0, sizeof(stats));
11662 		i40e_aq_add_rem_control_packet_filter(hw,
11663 					    f->input.mac_addr.addr_bytes,
11664 					    f->input.ether_type,
11665 					    flags, pf->main_vsi->seid,
11666 					    f->queue, 1, &stats, NULL);
11667 	}
11668 	PMD_DRV_LOG(INFO, "Ethertype filter:"
11669 		    " mac_etype_used = %u, etype_used = %u,"
11670 		    " mac_etype_free = %u, etype_free = %u",
11671 		    stats.mac_etype_used, stats.etype_used,
11672 		    stats.mac_etype_free, stats.etype_free);
11673 }
11674 
11675 /* Restore tunnel filter */
11676 static void
11677 i40e_tunnel_filter_restore(struct i40e_pf *pf)
11678 {
11679 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11680 	struct i40e_vsi *vsi;
11681 	struct i40e_pf_vf *vf;
11682 	struct i40e_tunnel_filter_list
11683 		*tunnel_list = &pf->tunnel.tunnel_list;
11684 	struct i40e_tunnel_filter *f;
11685 	struct i40e_aqc_cloud_filters_element_bb cld_filter;
11686 	bool big_buffer = false;
11687 
11688 	TAILQ_FOREACH(f, tunnel_list, rules) {
11689 		if (!f->is_to_vf)
11690 			vsi = pf->main_vsi;
11691 		else {
11692 			vf = &pf->vfs[f->vf_id];
11693 			vsi = vf->vsi;
11694 		}
11695 		memset(&cld_filter, 0, sizeof(cld_filter));
11696 		rte_ether_addr_copy((struct rte_ether_addr *)
11697 				&f->input.outer_mac,
11698 			(struct rte_ether_addr *)&cld_filter.element.outer_mac);
11699 		rte_ether_addr_copy((struct rte_ether_addr *)
11700 				&f->input.inner_mac,
11701 			(struct rte_ether_addr *)&cld_filter.element.inner_mac);
11702 		cld_filter.element.inner_vlan = f->input.inner_vlan;
11703 		cld_filter.element.flags = f->input.flags;
11704 		cld_filter.element.tenant_id = f->input.tenant_id;
11705 		cld_filter.element.queue_number = f->queue;
11706 		rte_memcpy(cld_filter.general_fields,
11707 			   f->input.general_fields,
11708 			   sizeof(f->input.general_fields));
11709 
11710 		/* 0x10/0x11/0x12 filter types need the big-buffer command */
11711 		big_buffer = ((f->input.flags &
11712 		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
11713 		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
11714 		    ((f->input.flags &
11715 		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
11716 		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
11717 		    ((f->input.flags &
11718 		     I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
11719 		     I40E_AQC_ADD_CLOUD_FILTER_0X10);
11720 
11721 		if (big_buffer)
11722 			i40e_aq_add_cloud_filters_bb(hw,
11723 					vsi->seid, &cld_filter, 1);
11724 		else
11725 			i40e_aq_add_cloud_filters(hw, vsi->seid,
11726 						  &cld_filter.element, 1);
11727 	}
11728 }
11729 
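/* Replay all software-tracked filters, typically after device (re)start */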
11730 static void
11731 i40e_filter_restore(struct i40e_pf *pf)
11732 {
11733 	i40e_ethertype_filter_restore(pf);
11734 	i40e_tunnel_filter_restore(pf);
11735 	i40e_fdir_filter_restore(pf);
11736 	(void)i40e_hash_filter_restore(pf);
11737 }
11738 
11739 bool
11740 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11741 {
11742 	if (strcmp(dev->device->driver->name, drv->driver.name))
11743 		return false;
11744 
11745 	return true;
11746 }
11747 
11748 bool
11749 is_i40e_supported(struct rte_eth_dev *dev)
11750 {
11751 	return is_device_supported(dev, &rte_i40e_pmd);
11752 }
11753 
11754 struct i40e_customized_pctype*
11755 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11756 {
11757 	int i;
11758 
11759 	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11760 		if (pf->customized_pctype[i].index == index)
11761 			return &pf->customized_pctype[i];
11762 	}
11763 	return NULL;
11764 }
11765 
11766 static int
11767 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11768 			      uint32_t pkg_size, uint32_t proto_num,
11769 			      struct rte_pmd_i40e_proto_info *proto,
11770 			      enum rte_pmd_i40e_package_op op)
11771 {
11772 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11773 	uint32_t pctype_num;
11774 	struct rte_pmd_i40e_ptype_info *pctype;
11775 	uint32_t buff_size;
11776 	struct i40e_customized_pctype *new_pctype = NULL;
11777 	uint8_t proto_id;
11778 	uint8_t pctype_value;
11779 	char name[64];
11780 	uint32_t i, j, n;
11781 	int ret;
11782 
11783 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11784 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11785 		PMD_DRV_LOG(ERR, "Unsupported operation.");
11786 		return -1;
11787 	}
11788 
11789 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11790 				(uint8_t *)&pctype_num, sizeof(pctype_num),
11791 				RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11792 	if (ret) {
11793 		PMD_DRV_LOG(ERR, "Failed to get pctype number");
11794 		return -1;
11795 	}
11796 	if (!pctype_num) {
11797 		PMD_DRV_LOG(INFO, "No new pctype added");
11798 		return -1;
11799 	}
11800 
11801 	buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
11802 	pctype = rte_zmalloc("new_pctype", buff_size, 0);
11803 	if (!pctype) {
11804 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
11805 		return -1;
11806 	}
11807 	/* get information about new pctype list */
11808 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11809 					(uint8_t *)pctype, buff_size,
11810 					RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11811 	if (ret) {
11812 		PMD_DRV_LOG(ERR, "Failed to get pctype list");
11813 		rte_free(pctype);
11814 		return -1;
11815 	}
11816 
11817 	/* Update customized pctype. */
11818 	for (i = 0; i < pctype_num; i++) {
11819 		pctype_value = pctype[i].ptype_id;
11820 		memset(name, 0, sizeof(name));
11821 		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11822 			proto_id = pctype[i].protocols[j];
11823 			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11824 				continue;
11825 			for (n = 0; n < proto_num; n++) {
11826 				if (proto[n].proto_id != proto_id)
11827 					continue;
11828 				strlcat(name, proto[n].name, sizeof(name));
11829 				strlcat(name, "_", sizeof(name));
11830 				break;
11831 			}
11832 		}
11833 		if (strlen(name)) /* strip trailing '_'; name may be empty */
11834 			name[strlen(name) - 1] = '\0';
11835 		PMD_DRV_LOG(INFO, "name = %s", name);
11836 		if (!strcmp(name, "GTPC"))
11837 			new_pctype =
11838 				i40e_find_customized_pctype(pf, I40E_CUSTOMIZED_GTPC);
11839 		else if (!strcmp(name, "GTPU_IPV4"))
11840 			new_pctype =
11841 				i40e_find_customized_pctype(pf,
11842 						   I40E_CUSTOMIZED_GTPU_IPV4);
11843 		else if (!strcmp(name, "GTPU_IPV6"))
11844 			new_pctype =
11845 				i40e_find_customized_pctype(pf,
11846 						   I40E_CUSTOMIZED_GTPU_IPV6);
11847 		else if (!strcmp(name, "GTPU"))
11848 			new_pctype =
11849 				i40e_find_customized_pctype(pf,
11850 						      I40E_CUSTOMIZED_GTPU);
11851 		else if (!strcmp(name, "IPV4_L2TPV3"))
11852 			new_pctype =
11853 				i40e_find_customized_pctype(pf,
11854 						I40E_CUSTOMIZED_IPV4_L2TPV3);
11855 		else if (!strcmp(name, "IPV6_L2TPV3"))
11856 			new_pctype =
11857 				i40e_find_customized_pctype(pf,
11858 						I40E_CUSTOMIZED_IPV6_L2TPV3);
11859 		else if (!strcmp(name, "IPV4_ESP"))
11860 			new_pctype =
11861 				i40e_find_customized_pctype(pf,
11862 						I40E_CUSTOMIZED_ESP_IPV4);
11863 		else if (!strcmp(name, "IPV6_ESP"))
11864 			new_pctype =
11865 				i40e_find_customized_pctype(pf,
11866 						I40E_CUSTOMIZED_ESP_IPV6);
11867 		else if (!strcmp(name, "IPV4_UDP_ESP"))
11868 			new_pctype =
11869 				i40e_find_customized_pctype(pf,
11870 						I40E_CUSTOMIZED_ESP_IPV4_UDP);
11871 		else if (!strcmp(name, "IPV6_UDP_ESP"))
11872 			new_pctype =
11873 				i40e_find_customized_pctype(pf,
11874 						I40E_CUSTOMIZED_ESP_IPV6_UDP);
11875 		else if (!strcmp(name, "IPV4_AH"))
11876 			new_pctype =
11877 				i40e_find_customized_pctype(pf,
11878 						I40E_CUSTOMIZED_AH_IPV4);
11879 		else if (!strcmp(name, "IPV6_AH"))
11880 			new_pctype =
11881 				i40e_find_customized_pctype(pf,
11882 						I40E_CUSTOMIZED_AH_IPV6);
11883 		if (new_pctype) {
11884 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
11885 				new_pctype->pctype = pctype_value;
11886 				new_pctype->valid = true;
11887 			} else {
11888 				new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
11889 				new_pctype->valid = false;
11890 			}
11891 		}
11892 	}
11893 
11894 	rte_free(pctype);
11895 	return 0;
11896 }
11897 
11898 static int
11899 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
11900 			     uint32_t pkg_size, uint32_t proto_num,
11901 			     struct rte_pmd_i40e_proto_info *proto,
11902 			     enum rte_pmd_i40e_package_op op)
11903 {
11904 	struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
11905 	uint16_t port_id = dev->data->port_id;
11906 	uint32_t ptype_num;
11907 	struct rte_pmd_i40e_ptype_info *ptype;
11908 	uint32_t buff_size;
11909 	uint8_t proto_id;
11910 	char name[RTE_PMD_I40E_DDP_NAME_SIZE];
11911 	uint32_t i, j, n;
11912 	bool in_tunnel;
11913 	int ret;
11914 
11915 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11916 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11917 		PMD_DRV_LOG(ERR, "Unsupported operation.");
11918 		return -1;
11919 	}
11920 
11921 	if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
11922 		rte_pmd_i40e_ptype_mapping_reset(port_id);
11923 		return 0;
11924 	}
11925 
11926 	/* get information about new ptype num */
11927 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11928 				(uint8_t *)&ptype_num, sizeof(ptype_num),
11929 				RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
11930 	if (ret) {
11931 		PMD_DRV_LOG(ERR, "Failed to get ptype number");
11932 		return ret;
11933 	}
11934 	if (!ptype_num) {
11935 		PMD_DRV_LOG(INFO, "No new ptype added");
11936 		return -1;
11937 	}
11938 
11939 	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
11940 	ptype = rte_zmalloc("new_ptype", buff_size, 0);
11941 	if (!ptype) {
11942 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
11943 		return -1;
11944 	}
11945 
11946 	/* get information about new ptype list */
11947 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11948 					(uint8_t *)ptype, buff_size,
11949 					RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
11950 	if (ret) {
11951 		PMD_DRV_LOG(ERR, "Failed to get ptype list");
11952 		rte_free(ptype);
11953 		return ret;
11954 	}
11955 
11956 	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
11957 	ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
11958 	if (!ptype_mapping) {
11959 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
11960 		rte_free(ptype);
11961 		return -1;
11962 	}
11963 
11964 	/* Update ptype mapping table. */
11965 	for (i = 0; i < ptype_num; i++) {
11966 		ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
11967 		ptype_mapping[i].sw_ptype = 0;
11968 		in_tunnel = false;
11969 		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11970 			proto_id = ptype[i].protocols[j];
11971 			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11972 				continue;
11973 			for (n = 0; n < proto_num; n++) {
11974 				if (proto[n].proto_id != proto_id)
11975 					continue;
11976 				memset(name, 0, sizeof(name));
11977 				strlcpy(name, proto[n].name, sizeof(name));
11978 				PMD_DRV_LOG(INFO, "name = %s", name);
11979 				if (!strncasecmp(name, "PPPOE", 5))
11980 					ptype_mapping[i].sw_ptype |=
11981 						RTE_PTYPE_L2_ETHER_PPPOE;
11982 				else if (!strncasecmp(name, "IPV4FRAG", 8) &&
11983 					 !in_tunnel) {
11984 					ptype_mapping[i].sw_ptype |=
11985 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11986 					ptype_mapping[i].sw_ptype |=
11987 						RTE_PTYPE_L4_FRAG;
11988 				} else if (!strncasecmp(name, "IPV4FRAG", 8) &&
11989 					   in_tunnel) {
11990 					ptype_mapping[i].sw_ptype |=
11991 					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11992 					ptype_mapping[i].sw_ptype |=
11993 						RTE_PTYPE_INNER_L4_FRAG;
11994 				} else if (!strncasecmp(name, "OIPV4", 5)) {
11995 					ptype_mapping[i].sw_ptype |=
11996 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11997 					in_tunnel = true;
11998 				} else if (!strncasecmp(name, "IPV4", 4) &&
11999 					   !in_tunnel)
12000 					ptype_mapping[i].sw_ptype |=
12001 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12002 				else if (!strncasecmp(name, "IPV4", 4) &&
12003 					 in_tunnel)
12004 					ptype_mapping[i].sw_ptype |=
12005 					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12006 				else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12007 					 !in_tunnel) {
12008 					ptype_mapping[i].sw_ptype |=
12009 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12010 					ptype_mapping[i].sw_ptype |=
12011 						RTE_PTYPE_L4_FRAG;
12012 				} else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12013 					   in_tunnel) {
12014 					ptype_mapping[i].sw_ptype |=
12015 					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12016 					ptype_mapping[i].sw_ptype |=
12017 						RTE_PTYPE_INNER_L4_FRAG;
12018 				} else if (!strncasecmp(name, "OIPV6", 5)) {
12019 					ptype_mapping[i].sw_ptype |=
12020 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12021 					in_tunnel = true;
12022 				} else if (!strncasecmp(name, "IPV6", 4) &&
12023 					   !in_tunnel)
12024 					ptype_mapping[i].sw_ptype |=
12025 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12026 				else if (!strncasecmp(name, "IPV6", 4) &&
12027 					 in_tunnel)
12028 					ptype_mapping[i].sw_ptype |=
12029 					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12030 				else if (!strncasecmp(name, "UDP", 3) &&
12031 					 !in_tunnel)
12032 					ptype_mapping[i].sw_ptype |=
12033 						RTE_PTYPE_L4_UDP;
12034 				else if (!strncasecmp(name, "UDP", 3) &&
12035 					 in_tunnel)
12036 					ptype_mapping[i].sw_ptype |=
12037 						RTE_PTYPE_INNER_L4_UDP;
12038 				else if (!strncasecmp(name, "TCP", 3) &&
12039 					 !in_tunnel)
12040 					ptype_mapping[i].sw_ptype |=
12041 						RTE_PTYPE_L4_TCP;
12042 				else if (!strncasecmp(name, "TCP", 3) &&
12043 					 in_tunnel)
12044 					ptype_mapping[i].sw_ptype |=
12045 						RTE_PTYPE_INNER_L4_TCP;
12046 				else if (!strncasecmp(name, "SCTP", 4) &&
12047 					 !in_tunnel)
12048 					ptype_mapping[i].sw_ptype |=
12049 						RTE_PTYPE_L4_SCTP;
12050 				else if (!strncasecmp(name, "SCTP", 4) &&
12051 					 in_tunnel)
12052 					ptype_mapping[i].sw_ptype |=
12053 						RTE_PTYPE_INNER_L4_SCTP;
12054 				else if ((!strncasecmp(name, "ICMP", 4) ||
12055 					  !strncasecmp(name, "ICMPV6", 6)) &&
12056 					 !in_tunnel)
12057 					ptype_mapping[i].sw_ptype |=
12058 						RTE_PTYPE_L4_ICMP;
12059 				else if ((!strncasecmp(name, "ICMP", 4) ||
12060 					  !strncasecmp(name, "ICMPV6", 6)) &&
12061 					 in_tunnel)
12062 					ptype_mapping[i].sw_ptype |=
12063 						RTE_PTYPE_INNER_L4_ICMP;
12064 				else if (!strncasecmp(name, "GTPC", 4)) {
12065 					ptype_mapping[i].sw_ptype |=
12066 						RTE_PTYPE_TUNNEL_GTPC;
12067 					in_tunnel = true;
12068 				} else if (!strncasecmp(name, "GTPU", 4)) {
12069 					ptype_mapping[i].sw_ptype |=
12070 						RTE_PTYPE_TUNNEL_GTPU;
12071 					in_tunnel = true;
12072 				} else if (!strncasecmp(name, "ESP", 3)) {
12073 					ptype_mapping[i].sw_ptype |=
12074 						RTE_PTYPE_TUNNEL_ESP;
12075 					in_tunnel = true;
12076 				} else if (!strncasecmp(name, "GRENAT", 6)) {
12077 					ptype_mapping[i].sw_ptype |=
12078 						RTE_PTYPE_TUNNEL_GRENAT;
12079 					in_tunnel = true;
12080 				} else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
12081 					   !strncasecmp(name, "L2TPV2", 6) ||
12082 					   !strncasecmp(name, "L2TPV3", 6)) {
12083 					ptype_mapping[i].sw_ptype |=
12084 						RTE_PTYPE_TUNNEL_L2TP;
12085 					in_tunnel = true;
12086 				}
12087 
12088 				break;
12089 			}
12090 		}
12091 	}
12092 
12093 	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
12094 						ptype_num, 0);
12095 	if (ret)
12096 		PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
12097 
12098 	rte_free(ptype_mapping);
12099 	rte_free(ptype);
12100 	return ret;
12101 }
12102 
12103 void
12104 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
12105 			    uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
12106 {
12107 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12108 	uint32_t proto_num;
12109 	struct rte_pmd_i40e_proto_info *proto;
12110 	uint32_t buff_size;
12111 	uint32_t i;
12112 	int ret;
12113 
12114 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12115 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12116 		PMD_DRV_LOG(ERR, "Unsupported operation.");
12117 		return;
12118 	}
12119 
12120 	/* get information about protocol number */
12121 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12122 				       (uint8_t *)&proto_num, sizeof(proto_num),
12123 				       RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
12124 	if (ret) {
12125 		PMD_DRV_LOG(ERR, "Failed to get protocol number");
12126 		return;
12127 	}
12128 	if (!proto_num) {
12129 		PMD_DRV_LOG(INFO, "No new protocol added");
12130 		return;
12131 	}
12132 
12133 	buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
12134 	proto = rte_zmalloc("new_proto", buff_size, 0);
12135 	if (!proto) {
12136 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
12137 		return;
12138 	}
12139 
12140 	/* get information about protocol list */
12141 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12142 					(uint8_t *)proto, buff_size,
12143 					RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
12144 	if (ret) {
12145 		PMD_DRV_LOG(ERR, "Failed to get protocol list");
12146 		rte_free(proto);
12147 		return;
12148 	}
12149 
12150 	/* Check if GTP is supported. */
12151 	for (i = 0; i < proto_num; i++) {
12152 		if (!strncmp(proto[i].name, "GTP", 3)) {
12153 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12154 				pf->gtp_support = true;
12155 			else
12156 				pf->gtp_support = false;
12157 			break;
12158 		}
12159 	}
12160 
12161 	/* Check if ESP is supported. */
12162 	for (i = 0; i < proto_num; i++) {
12163 		if (!strncmp(proto[i].name, "ESP", 3)) {
12164 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12165 				pf->esp_support = true;
12166 			else
12167 				pf->esp_support = false;
12168 			break;
12169 		}
12170 	}
12171 
12172 	/* Update customized pctype info */
12173 	ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
12174 					    proto_num, proto, op);
12175 	if (ret)
12176 		PMD_DRV_LOG(INFO, "No pctype is updated.");
12177 
12178 	/* Update customized ptype info */
12179 	ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
12180 					   proto_num, proto, op);
12181 	if (ret)
12182 		PMD_DRV_LOG(INFO, "No ptype is updated.");
12183 
12184 	rte_free(proto);
12185 }
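
/*
 * Usage sketch (illustrative only, not part of the driver): the pctype
 * and ptype updates above run when an application writes or removes a
 * DDP profile via the i40e private API, with a profile image already
 * loaded into memory (pkg and pkg_size are assumed):
 *
 *	rte_pmd_i40e_process_ddp_package(port_id, pkg, pkg_size,
 *					 RTE_PMD_I40E_PKG_OP_WR_ADD);
 */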
12186 
12187 /* Create a QinQ cloud filter
12188  *
12189  * The Fortville NIC has limited resources for tunnel filters,
12190  * so we can only reuse existing filters.
12191  *
12192  * In step 1 we define which Field Vector fields can be used for
12193  * filter types.
12194  * As we do not have the inner tag defined as a field,
12195  * we have to define it first, by reusing one of the L1 entries.
12196  *
12197  * In step 2 we replace one of the existing filter types with
12198  * a new one for QinQ.
12199  * As we reuse L1 and replace L2, some of the default filter types
12200  * will disappear, depending on which L1 and L2 entries we reuse.
12201  *
12202  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
12203  *
12204  * 1.	Create L1 filter of outer vlan (12b) which will be in use
12205  *		later when we define the cloud filter.
12206  *	a.	Valid_flags.replace_cloud = 0
12207  *	b.	Old_filter = 10 (Stag_Inner_Vlan)
12208  *	c.	New_filter = 0x10
12209  *	d.	TR bit = 0xff (optional, not used here)
12210  *	e.	Buffer – 2 entries:
12211  *		i.	Byte 0 = 8 (outer vlan FV index).
12212  *			Byte 1 = 0 (rsv)
12213  *			Byte 2-3 = 0x0fff
12214  *		ii.	Byte 0 = 37 (inner vlan FV index).
12215  *			Byte 1 = 0 (rsv)
12216  *			Byte 2-3 = 0x0fff
12217  *
12218  * Step 2:
12219  * 2.	Create cloud filter using two L1 filters entries: stag and
12220  *		new filter (outer vlan + inner vlan)
12221  *	a.	Valid_flags.replace_cloud = 1
12222  *	b.	Old_filter = 1 (instead of outer IP)
12223  *	c.	New_filter = 0x10
12224  *	d.	Buffer – 2 entries:
12225  *		i.	Byte 0 = 0x80 | 7 (valid | Stag).
12226  *			Byte 1-3 = 0 (rsv)
12227  *		ii.	Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
12228  *			Byte 9-11 = 0 (rsv)
12229  */
12230 static int
12231 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
12232 {
12233 	int ret = -ENOTSUP;
12234 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
12235 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
12236 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12237 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
12238 
12239 	if (pf->support_multi_driver) {
12240 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
12241 		return ret;
12242 	}
12243 
12244 	/* Init */
12245 	memset(&filter_replace, 0,
12246 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12247 	memset(&filter_replace_buf, 0,
12248 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12249 
12250 	/* create L1 filter */
12251 	filter_replace.old_filter_type =
12252 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
12253 	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12254 	filter_replace.tr_bit = 0;
12255 
12256 	/* Prepare the buffer, 2 entries */
12257 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
12258 	filter_replace_buf.data[0] |=
12259 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12260 	/* Field Vector 12b mask */
12261 	filter_replace_buf.data[2] = 0xff;
12262 	filter_replace_buf.data[3] = 0x0f;
12263 	filter_replace_buf.data[4] =
12264 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
12265 	filter_replace_buf.data[4] |=
12266 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12267 	/* Field Vector 12b mask */
12268 	filter_replace_buf.data[6] = 0xff;
12269 	filter_replace_buf.data[7] = 0x0f;
12270 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12271 			&filter_replace_buf);
12272 	if (ret != I40E_SUCCESS)
12273 		return ret;
12274 
12275 	if (filter_replace.old_filter_type !=
12276 	    filter_replace.new_filter_type)
12277 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
12278 			    " original: 0x%x, new: 0x%x",
12279 			    dev->device->name,
12280 			    filter_replace.old_filter_type,
12281 			    filter_replace.new_filter_type);
12282 
12283 	/* Step 2: apply the L2 cloud filter */
12284 	memset(&filter_replace, 0,
12285 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12286 	memset(&filter_replace_buf, 0,
12287 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12288 
12289 	/* create L2 filter, input for L2 filter will be L1 filter  */
12290 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
12291 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
12292 	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12293 
12294 	/* Prepare the buffer, 2 entries */
12295 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
12296 	filter_replace_buf.data[0] |=
12297 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12298 	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12299 	filter_replace_buf.data[4] |=
12300 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12301 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12302 			&filter_replace_buf);
12303 	if (!ret && (filter_replace.old_filter_type !=
12304 		     filter_replace.new_filter_type))
12305 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
12306 			    " original: 0x%x, new: 0x%x",
12307 			    dev->device->name,
12308 			    filter_replace.old_filter_type,
12309 			    filter_replace.new_filter_type);
12310 
12311 	return ret;
12312 }
12313 
12314 static int
12315 i40e_fec_get_capability(struct rte_eth_dev *dev,
12316 	struct rte_eth_fec_capa *speed_fec_capa, __rte_unused unsigned int num)
12317 {
12318 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12319 
12320 	if (hw->mac.type == I40E_MAC_X722 &&
12321 	    !(hw->flags & I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE)) {
12322 		PMD_DRV_LOG(ERR, "Setting FEC encoding not supported by"
12323 			 " firmware. Please update the NVM image.");
12324 		return -ENOTSUP;
12325 	}
12326 
12327 	if (hw->device_id == I40E_DEV_ID_25G_SFP28 ||
12328 	    hw->device_id == I40E_DEV_ID_25G_B) {
12329 		if (speed_fec_capa) {
12330 			speed_fec_capa->speed = RTE_ETH_SPEED_NUM_25G;
12331 			speed_fec_capa->capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
12332 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
12333 					     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
12334 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
12335 		}
12336 
12337 		/* one capability entry, since this HW supports only 25G */
12338 		return 1;
12339 	} else if (hw->device_id == I40E_DEV_ID_KX_X722) {
12340 		if (speed_fec_capa) {
12341 			speed_fec_capa->speed = RTE_ETH_SPEED_NUM_25G;
12342 			speed_fec_capa->capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
12343 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
12344 		}
12345 		return 1;
12346 	}
12347 
12348 	return -ENOTSUP;
12349 }
12350 
12351 static int
12352 i40e_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
12353 {
12354 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12355 	struct i40e_aq_get_phy_abilities_resp abilities = {0};
12356 	struct i40e_link_status link_status = {0};
12357 	uint8_t current_fec_mode = 0, fec_config = 0;
12358 	bool link_up, enable_lse;
12359 	int ret = 0;
12360 
12361 	enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
12362 	/* Get link info */
12363 	ret = i40e_aq_get_link_info(hw, enable_lse, &link_status, NULL);
12364 	if (ret != I40E_SUCCESS) {
12365 		PMD_DRV_LOG(ERR, "Failed to get link information: %d",
12366 				ret);
12367 		return -ENOTSUP;
12368 	}
12369 
12370 	link_up = link_status.link_info & I40E_AQ_LINK_UP;
12371 
12372 	ret = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
12373 						  NULL);
12374 	if (ret) {
12375 		PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d",
12376 				ret);
12377 		return -ENOTSUP;
12378 	}
12379 
12380 	/**
12381 	 * If the link is down and AUTO is enabled, AUTO is returned;
12382 	 * otherwise, the configured FEC mode is returned.
12383 	 * If the link is up, the current FEC mode is returned.
12384 	 */
12385 	fec_config = abilities.fec_cfg_curr_mod_ext_info
12386 					& I40E_AQ_PHY_FEC_CONFIG_MASK;
12387 	current_fec_mode = link_status.fec_info;
12388 
12389 	if (link_up) {
12390 		switch (current_fec_mode) {
12391 		case I40E_AQ_CONFIG_FEC_KR_ENA:
12392 			*fec_capa = RTE_ETH_FEC_MODE_TO_CAPA(RTE_ETH_FEC_BASER);
12393 			break;
12394 		case I40E_AQ_CONFIG_FEC_RS_ENA:
12395 			*fec_capa = RTE_ETH_FEC_MODE_TO_CAPA(RTE_ETH_FEC_RS);
12396 			break;
12397 		case 0:
12398 			*fec_capa = RTE_ETH_FEC_MODE_TO_CAPA(RTE_ETH_FEC_NOFEC);
12399 			break;
12400 		default:
12401 			return -EINVAL;
12402 		}
12403 		return 0;
12404 	}
12405 
12406 	if (fec_config & I40E_AQ_ENABLE_FEC_AUTO) {
12407 		*fec_capa = RTE_ETH_FEC_MODE_TO_CAPA(RTE_ETH_FEC_AUTO);
12408 		return 0;
12409 	}
12410 
12411 	uint32_t temp_fec_capa = 0;
12412 	if (fec_config & I40E_AQ_ENABLE_FEC_KR)
12413 		temp_fec_capa |= RTE_ETH_FEC_MODE_TO_CAPA(RTE_ETH_FEC_BASER);
12414 	if (fec_config & I40E_AQ_ENABLE_FEC_RS)
12415 		temp_fec_capa |= RTE_ETH_FEC_MODE_TO_CAPA(RTE_ETH_FEC_RS);
12416 	if (temp_fec_capa == 0)
12417 		temp_fec_capa = RTE_ETH_FEC_MODE_TO_CAPA(RTE_ETH_FEC_NOFEC);
12418 
12419 	*fec_capa = temp_fec_capa;
12420 	return 0;
12421 }
12422 
12423 static int
12424 i40e_fec_set(struct rte_eth_dev *dev, uint32_t fec_capa)
12425 {
12426 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12427 	struct i40e_aq_get_phy_abilities_resp abilities = {0};
12428 	struct i40e_aq_set_phy_config config = {0};
12429 	enum i40e_status_code status;
12430 	uint8_t req_fec = 0, fec_auto = 0, fec_kr = 0, fec_rs = 0;
12431 
12432 	if (hw->device_id != I40E_DEV_ID_25G_SFP28 &&
12433 	    hw->device_id != I40E_DEV_ID_25G_B &&
12434 	    hw->device_id != I40E_DEV_ID_KX_X722) {
12435 		return -ENOTSUP;
12436 	}
12437 
12438 	if (hw->mac.type == I40E_MAC_X722 &&
12439 	    !(hw->flags & I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE)) {
12440 		PMD_DRV_LOG(ERR, "Setting FEC encoding not supported by"
12441 			 " firmware. Please update the NVM image.");
12442 		return -ENOTSUP;
12443 	}
12444 
12445 	/**
12446 	 * Validate the request, then copy the current user PHY configuration.
12447 	 * That configuration is initialized during probe from the PHY
12448 	 * capabilities (software mode) and updated on set PHY configuration.
12449 	 */
12450 	if (fec_capa & ~(RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
12451 		RTE_ETH_FEC_MODE_CAPA_MASK(BASER) | RTE_ETH_FEC_MODE_CAPA_MASK(RS)))
12452 		return -EINVAL;
12453 
12454 	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(AUTO))
12455 		fec_auto = 1;
12456 
12457 	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(BASER))
12458 		fec_kr = 1;
12459 
12460 	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
12461 		fec_rs = 1;
12462 
12463 	if (fec_auto) {
12464 		if (hw->mac.type == I40E_MAC_X722) {
12465 			PMD_DRV_LOG(ERR, "X722 Unsupported FEC mode: AUTO");
12466 			return -EINVAL;
12467 		}
12468 		if (fec_kr || fec_rs) {
12469 			if (fec_kr)
12470 				req_fec = I40E_AQ_SET_FEC_ABILITY_KR |
12471 							I40E_AQ_SET_FEC_REQUEST_KR;
12472 			if (fec_rs) {
12473 				if (hw->mac.type == I40E_MAC_X722) {
12474 					PMD_DRV_LOG(ERR, "X722 Unsupported FEC mode: RS");
12475 					return -EINVAL;
12476 				}
12477 				req_fec |= I40E_AQ_SET_FEC_ABILITY_RS |
12478 							I40E_AQ_SET_FEC_REQUEST_RS;
12479 			}
12480 		} else {
12481 			if (hw->mac.type == I40E_MAC_X722) {
12482 				req_fec = I40E_AQ_SET_FEC_ABILITY_KR |
12483 						  I40E_AQ_SET_FEC_REQUEST_KR;
12484 			} else {
12485 				req_fec = I40E_AQ_SET_FEC_ABILITY_KR |
12486 						  I40E_AQ_SET_FEC_REQUEST_KR |
12487 						  I40E_AQ_SET_FEC_ABILITY_RS |
12488 						  I40E_AQ_SET_FEC_REQUEST_RS;
12489 			}
12490 		}
12491 	} else {
12492 		if (fec_kr ^ fec_rs) {
12493 			if (fec_kr) {
12494 				req_fec = I40E_AQ_SET_FEC_ABILITY_KR |
12495 							I40E_AQ_SET_FEC_REQUEST_KR;
12496 			} else {
12497 				if (hw->mac.type == I40E_MAC_X722) {
12498 					PMD_DRV_LOG(ERR, "X722 Unsupported FEC mode: RS");
12499 					return -EINVAL;
12500 				}
12501 				req_fec = I40E_AQ_SET_FEC_ABILITY_RS |
12502 							I40E_AQ_SET_FEC_REQUEST_RS;
12503 			}
12504 		} else {
12505 			return -EINVAL;
12506 		}
12507 	}
12508 
12509 	/* Get the current phy config */
12510 	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
12511 					      NULL);
12512 	if (status) {
12513 		PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d",
12514 				status);
12515 		return -ENOTSUP;
12516 	}
12517 
12518 	if (abilities.fec_cfg_curr_mod_ext_info != req_fec) {
12519 		config.phy_type = abilities.phy_type;
12520 		config.abilities = abilities.abilities |
12521 				   I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
12522 		config.phy_type_ext = abilities.phy_type_ext;
12523 		config.link_speed = abilities.link_speed;
12524 		config.eee_capability = abilities.eee_capability;
12525 		config.eeer = abilities.eeer_val;
12526 		config.low_power_ctrl = abilities.d3_lpan;
12527 		config.fec_config = req_fec & I40E_AQ_PHY_FEC_CONFIG_MASK;
12528 		status = i40e_aq_set_phy_config(hw, &config, NULL);
12529 		if (status) {
12530 			PMD_DRV_LOG(ERR, "Failed to set PHY configuration: %d",
12531 				    status);
12532 			return -ENOTSUP;
12533 		}
12534 	}
12535 
12536 	status = i40e_update_link_info(hw);
12537 	if (status) {
12538 		PMD_DRV_LOG(ERR, "Failed to update link information: %d",
12539 			    status);
12540 		return -ENOTSUP;
12541 	}
12542 
12543 	return 0;
12544 }
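
/*
 * Usage sketch (illustrative only, not part of the driver): the FEC
 * callbacks above map to the generic ethdev FEC API. A caller typically
 * queries the per-speed capabilities first, then requests a supported
 * mode (port_id is assumed valid):
 *
 *	struct rte_eth_fec_capa capa[4];
 *	uint32_t mode;
 *	int n = rte_eth_fec_get_capability(port_id, capa, RTE_DIM(capa));
 *
 *	if (n > 0 && (capa[0].capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS)))
 *		rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(RS));
 *	rte_eth_fec_get(port_id, &mode);
 */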
12545 
12546 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
12547 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
12548 #ifdef RTE_ETHDEV_DEBUG_RX
12549 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_rx, rx, DEBUG);
12550 #endif
12551 #ifdef RTE_ETHDEV_DEBUG_TX
12552 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_tx, tx, DEBUG);
12553 #endif
12554 
12555 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
12556 			      ETH_I40E_FLOATING_VEB_ARG "=1"
12557 			      ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
12558 			      ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
12559 			      ETH_I40E_SUPPORT_MULTI_DRIVER "=1");
12560