1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8 
9 #include "qede_ethdev.h"
10 #include <rte_alarm.h>
11 #include <rte_version.h>
12 #include <rte_kvargs.h>
13 
14 /* Globals */
15 int qede_logtype_init;
16 int qede_logtype_driver;
17 
18 static const struct qed_eth_ops *qed_ops;
19 static int64_t timer_period = 1;
20 
21 /* VXLAN tunnel classification mapping */
22 const struct _qede_udp_tunn_types {
23 	uint16_t rte_filter_type;
24 	enum ecore_filter_ucast_type qede_type;
25 	enum ecore_tunn_clss qede_tunn_clss;
26 	const char *string;
27 } qede_tunn_types[] = {
28 	{
29 		ETH_TUNNEL_FILTER_OMAC,
30 		ECORE_FILTER_MAC,
31 		ECORE_TUNN_CLSS_MAC_VLAN,
32 		"outer-mac"
33 	},
34 	{
35 		ETH_TUNNEL_FILTER_TENID,
36 		ECORE_FILTER_VNI,
37 		ECORE_TUNN_CLSS_MAC_VNI,
38 		"vni"
39 	},
40 	{
41 		ETH_TUNNEL_FILTER_IMAC,
42 		ECORE_FILTER_INNER_MAC,
43 		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
44 		"inner-mac"
45 	},
46 	{
47 		ETH_TUNNEL_FILTER_IVLAN,
48 		ECORE_FILTER_INNER_VLAN,
49 		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
50 		"inner-vlan"
51 	},
52 	{
53 		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
54 		ECORE_FILTER_MAC_VNI_PAIR,
55 		ECORE_TUNN_CLSS_MAC_VNI,
56 		"outer-mac and vni"
57 	},
58 	{
59 		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
60 		ECORE_FILTER_UNUSED,
61 		MAX_ECORE_TUNN_CLSS,
62 		"outer-mac and inner-mac"
63 	},
64 	{
65 		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
66 		ECORE_FILTER_UNUSED,
67 		MAX_ECORE_TUNN_CLSS,
68 		"outer-mac and inner-vlan"
69 	},
70 	{
71 		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
72 		ECORE_FILTER_INNER_MAC_VNI_PAIR,
73 		ECORE_TUNN_CLSS_INNER_MAC_VNI,
74 		"vni and inner-mac",
75 	},
76 	{
77 		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
78 		ECORE_FILTER_UNUSED,
79 		MAX_ECORE_TUNN_CLSS,
80 		"vni and inner-vlan",
81 	},
82 	{
83 		ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
84 		ECORE_FILTER_INNER_PAIR,
85 		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
86 		"inner-mac and inner-vlan",
87 	},
88 	{
89 		ETH_TUNNEL_FILTER_OIP,
90 		ECORE_FILTER_UNUSED,
91 		MAX_ECORE_TUNN_CLSS,
92 		"outer-IP"
93 	},
94 	{
95 		ETH_TUNNEL_FILTER_IIP,
96 		ECORE_FILTER_UNUSED,
97 		MAX_ECORE_TUNN_CLSS,
98 		"inner-IP"
99 	},
100 	{
101 		RTE_TUNNEL_FILTER_IMAC_IVLAN,
102 		ECORE_FILTER_UNUSED,
103 		MAX_ECORE_TUNN_CLSS,
104 		"IMAC_IVLAN"
105 	},
106 	{
107 		RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
108 		ECORE_FILTER_UNUSED,
109 		MAX_ECORE_TUNN_CLSS,
110 		"IMAC_IVLAN_TENID"
111 	},
112 	{
113 		RTE_TUNNEL_FILTER_IMAC_TENID,
114 		ECORE_FILTER_UNUSED,
115 		MAX_ECORE_TUNN_CLSS,
116 		"IMAC_TENID"
117 	},
118 	{
119 		RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
120 		ECORE_FILTER_UNUSED,
121 		MAX_ECORE_TUNN_CLSS,
122 		"OMAC_TENID_IMAC"
123 	},
124 };
125 
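/* Illustrative lookup sketch (hypothetical helper, not driver code): a
 * consumer of the table above would scan it for an exact filter-type match:
 *
 *	static const struct _qede_udp_tunn_types *
 *	qede_tunn_clss_lookup(uint16_t filter_type)
 *	{
 *		uint32_t i;
 *
 *		for (i = 0; i < RTE_DIM(qede_tunn_types); i++)
 *			if (qede_tunn_types[i].rte_filter_type == filter_type)
 *				return &qede_tunn_types[i];
 *		return NULL;
 *	}
 *
 * Entries with ECORE_FILTER_UNUSED / MAX_ECORE_TUNN_CLSS mark filter-type
 * combinations the device cannot classify.
 */
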
126 struct rte_qede_xstats_name_off {
127 	char name[RTE_ETH_XSTATS_NAME_SIZE];
128 	uint64_t offset;
129 };
130 
131 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
132 	{"rx_unicast_bytes",
133 		offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
134 	{"rx_multicast_bytes",
135 		offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
136 	{"rx_broadcast_bytes",
137 		offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
138 	{"rx_unicast_packets",
139 		offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
140 	{"rx_multicast_packets",
141 		offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
142 	{"rx_broadcast_packets",
143 		offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
144 
145 	{"tx_unicast_bytes",
146 		offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
147 	{"tx_multicast_bytes",
148 		offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
149 	{"tx_broadcast_bytes",
150 		offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
151 	{"tx_unicast_packets",
152 		offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
153 	{"tx_multicast_packets",
154 		offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
155 	{"tx_broadcast_packets",
156 		offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},
157 
158 	{"rx_64_byte_packets",
159 		offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
160 	{"rx_65_to_127_byte_packets",
161 		offsetof(struct ecore_eth_stats_common,
162 			 rx_65_to_127_byte_packets)},
163 	{"rx_128_to_255_byte_packets",
164 		offsetof(struct ecore_eth_stats_common,
165 			 rx_128_to_255_byte_packets)},
166 	{"rx_256_to_511_byte_packets",
167 		offsetof(struct ecore_eth_stats_common,
168 			 rx_256_to_511_byte_packets)},
169 	{"rx_512_to_1023_byte_packets",
170 		offsetof(struct ecore_eth_stats_common,
171 			 rx_512_to_1023_byte_packets)},
172 	{"rx_1024_to_1518_byte_packets",
173 		offsetof(struct ecore_eth_stats_common,
174 			 rx_1024_to_1518_byte_packets)},
175 	{"tx_64_byte_packets",
176 		offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
177 	{"tx_65_to_127_byte_packets",
178 		offsetof(struct ecore_eth_stats_common,
179 			 tx_65_to_127_byte_packets)},
180 	{"tx_128_to_255_byte_packets",
181 		offsetof(struct ecore_eth_stats_common,
182 			 tx_128_to_255_byte_packets)},
183 	{"tx_256_to_511_byte_packets",
184 		offsetof(struct ecore_eth_stats_common,
185 			 tx_256_to_511_byte_packets)},
186 	{"tx_512_to_1023_byte_packets",
187 		offsetof(struct ecore_eth_stats_common,
188 			 tx_512_to_1023_byte_packets)},
189 	{"tx_1024_to_1518_byte_packets",
190 		offsetof(struct ecore_eth_stats_common,
191 			 tx_1024_to_1518_byte_packets)},
192 
193 	{"rx_mac_ctrl_frames", /* struct member keeps the "crtl" spelling */
194 		offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
195 	{"tx_mac_control_frames",
196 		offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
197 	{"rx_pause_frames",
198 		offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
199 	{"tx_pause_frames",
200 		offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
201 	{"rx_priority_flow_control_frames",
202 		offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
203 	{"tx_priority_flow_control_frames",
204 		offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},
205 
206 	{"rx_crc_errors",
207 		offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
208 	{"rx_align_errors",
209 		offsetof(struct ecore_eth_stats_common, rx_align_errors)},
210 	{"rx_carrier_errors",
211 		offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
212 	{"rx_oversize_packet_errors",
213 		offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
214 	{"rx_jabber_errors",
215 		offsetof(struct ecore_eth_stats_common, rx_jabbers)},
216 	{"rx_undersize_packet_errors",
217 		offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
218 	{"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
219 	{"rx_host_buffer_not_available",
220 		offsetof(struct ecore_eth_stats_common, no_buff_discards)},
221 	/* Number of packets discarded because they are bigger than MTU */
222 	{"rx_packet_too_big_discards",
223 		offsetof(struct ecore_eth_stats_common,
224 			 packet_too_big_discard)},
225 	{"rx_ttl_zero_discards",
226 		offsetof(struct ecore_eth_stats_common, ttl0_discard)},
227 	{"rx_multi_function_tag_filter_discards",
228 		offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
229 	{"rx_mac_filter_discards",
230 		offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
231 	{"rx_hw_buffer_truncates",
232 		offsetof(struct ecore_eth_stats_common, brb_truncates)},
233 	{"rx_hw_buffer_discards",
234 		offsetof(struct ecore_eth_stats_common, brb_discards)},
235 	{"tx_error_drop_packets",
236 		offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},
237 
238 	{"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
239 	{"rx_mac_unicast_packets",
240 		offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
241 	{"rx_mac_multicast_packets",
242 		offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
243 	{"rx_mac_broadcast_packets",
244 		offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
245 	{"rx_mac_frames_ok",
246 		offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
247 	{"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
248 	{"tx_mac_unicast_packets",
249 		offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
250 	{"tx_mac_multicast_packets",
251 		offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
252 	{"tx_mac_broadcast_packets",
253 		offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},
254 
255 	{"lro_coalesced_packets",
256 		offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
257 	{"lro_coalesced_events",
258 		offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
259 	{"lro_aborts_num",
260 		offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
261 	{"lro_not_coalesced_packets",
262 		offsetof(struct ecore_eth_stats_common,
263 			 tpa_not_coalesced_pkts)},
264 	{"lro_coalesced_bytes",
265 		offsetof(struct ecore_eth_stats_common,
266 			 tpa_coalesced_bytes)},
267 };
268 
269 static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
270 	{"rx_1519_to_1522_byte_packets",
271 		offsetof(struct ecore_eth_stats, bb) +
272 		offsetof(struct ecore_eth_stats_bb,
273 			 rx_1519_to_1522_byte_packets)},
274 	{"rx_1519_to_2047_byte_packets",
275 		offsetof(struct ecore_eth_stats, bb) +
276 		offsetof(struct ecore_eth_stats_bb,
277 			 rx_1519_to_2047_byte_packets)},
278 	{"rx_2048_to_4095_byte_packets",
279 		offsetof(struct ecore_eth_stats, bb) +
280 		offsetof(struct ecore_eth_stats_bb,
281 			 rx_2048_to_4095_byte_packets)},
282 	{"rx_4096_to_9216_byte_packets",
283 		offsetof(struct ecore_eth_stats, bb) +
284 		offsetof(struct ecore_eth_stats_bb,
285 			 rx_4096_to_9216_byte_packets)},
286 	{"rx_9217_to_16383_byte_packets",
287 		offsetof(struct ecore_eth_stats, bb) +
288 		offsetof(struct ecore_eth_stats_bb,
289 			 rx_9217_to_16383_byte_packets)},
290 
291 	{"tx_1519_to_2047_byte_packets",
292 		offsetof(struct ecore_eth_stats, bb) +
293 		offsetof(struct ecore_eth_stats_bb,
294 			 tx_1519_to_2047_byte_packets)},
295 	{"tx_2048_to_4095_byte_packets",
296 		offsetof(struct ecore_eth_stats, bb) +
297 		offsetof(struct ecore_eth_stats_bb,
298 			 tx_2048_to_4095_byte_packets)},
299 	{"tx_4096_to_9216_byte_packets",
300 		offsetof(struct ecore_eth_stats, bb) +
301 		offsetof(struct ecore_eth_stats_bb,
302 			 tx_4096_to_9216_byte_packets)},
303 	{"tx_9217_to_16383_byte_packets",
304 		offsetof(struct ecore_eth_stats, bb) +
305 		offsetof(struct ecore_eth_stats_bb,
306 			 tx_9217_to_16383_byte_packets)},
307 
308 	{"tx_lpi_entry_count",
309 		offsetof(struct ecore_eth_stats, bb) +
310 		offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
311 	{"tx_total_collisions",
312 		offsetof(struct ecore_eth_stats, bb) +
313 		offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
314 };
315 
316 static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
317 	{"rx_1519_to_max_byte_packets",
318 		offsetof(struct ecore_eth_stats, ah) +
319 		offsetof(struct ecore_eth_stats_ah,
320 			 rx_1519_to_max_byte_packets)},
321 	{"tx_1519_to_max_byte_packets",
322 		offsetof(struct ecore_eth_stats, ah) +
323 		offsetof(struct ecore_eth_stats_ah,
324 			 tx_1519_to_max_byte_packets)},
325 };
326 
327 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
328 	{"rx_q_segments",
329 		offsetof(struct qede_rx_queue, rx_segs)},
330 	{"rx_q_hw_errors",
331 		offsetof(struct qede_rx_queue, rx_hw_errors)},
332 	{"rx_q_allocation_errors",
333 		offsetof(struct qede_rx_queue, rx_alloc_errors)}
334 };
335 
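/* Illustrative sketch (hypothetical helper, not driver code): each entry in
 * the tables above pairs a display name with a byte offset into the
 * corresponding stats structure, so a counter is fetched by reading a
 * uint64_t at that offset:
 *
 *	static uint64_t
 *	qede_xstat_value(const void *stats_base,
 *			 const struct rte_qede_xstats_name_off *entry)
 *	{
 *		return *(const uint64_t *)((const char *)stats_base +
 *					   entry->offset);
 *	}
 *
 * The driver open-codes the same pointer arithmetic in its stats handlers,
 * e.g. in qede_get_stats() below.
 */
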
336 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
337 {
338 	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
339 }
340 
341 static void
342 qede_interrupt_handler(void *param)
343 {
344 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
345 	struct qede_dev *qdev = eth_dev->data->dev_private;
346 	struct ecore_dev *edev = &qdev->edev;
347 
348 	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
349 	if (rte_intr_enable(eth_dev->intr_handle))
350 		DP_ERR(edev, "rte_intr_enable failed\n");
351 }
352 
353 static void
354 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
355 {
356 	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
357 	qdev->ops = qed_ops;
358 }
359 
360 static void qede_print_adapter_info(struct qede_dev *qdev)
361 {
362 	struct ecore_dev *edev = &qdev->edev;
363 	struct qed_dev_info *info = &qdev->dev_info.common;
364 	static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
365 	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
366 
367 	DP_INFO(edev, "*********************************\n");
368 	DP_INFO(edev, " DPDK version:%s\n", rte_version());
369 	DP_INFO(edev, " Chip details : %s %c%d\n",
370 		  ECORE_IS_BB(edev) ? "BB" : "AH",
371 		  'A' + edev->chip_rev,
372 		  (int)edev->chip_metal);
373 	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
374 		 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
375 	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
376 		 ver_str, QEDE_PMD_VERSION);
377 	DP_INFO(edev, " Driver version : %s\n", drv_ver);
378 	DP_INFO(edev, " Firmware version : %s\n", ver_str);
379 
380 	snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
381 		 "%d.%d.%d.%d",
382 		(info->mfw_rev >> 24) & 0xff,
383 		(info->mfw_rev >> 16) & 0xff,
384 		(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
385 	DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
386 	DP_INFO(edev, " Firmware file : %s\n", fw_file);
387 	DP_INFO(edev, "*********************************\n");
388 }
389 
390 static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
391 {
392 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
393 	unsigned int i = 0, j = 0, qid;
394 	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
395 	struct qede_tx_queue *txq;
396 
397 	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
398 
399 	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
400 			       RTE_ETHDEV_QUEUE_STAT_CNTRS);
401 	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
402 			       RTE_ETHDEV_QUEUE_STAT_CNTRS);
403 
404 	for_each_rss(qid) {
405 		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
406 			     offsetof(struct qede_rx_queue, rcv_pkts), 0,
407 			    sizeof(uint64_t));
408 		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
409 			     offsetof(struct qede_rx_queue, rx_hw_errors), 0,
410 			    sizeof(uint64_t));
411 		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
412 			     offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
413 			    sizeof(uint64_t));
414 
415 		if (xstats)
416 			for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
417 				OSAL_MEMSET((((char *)
418 					      (qdev->fp_array[qid].rxq)) +
419 					     qede_rxq_xstats_strings[j].offset),
420 					    0,
421 					    sizeof(uint64_t));
422 
423 		i++;
424 		if (i == rxq_stat_cntrs)
425 			break;
426 	}
427 
428 	i = 0;
429 
430 	for_each_tss(qid) {
431 		txq = qdev->fp_array[qid].txq;
432 
433 		OSAL_MEMSET((char *)txq +
434 			    offsetof(struct qede_tx_queue, xmit_pkts), 0,
435 			    sizeof(uint64_t));
437 
438 		i++;
439 		if (i == txq_stat_cntrs)
440 			break;
441 	}
442 }
443 
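/* Note: the loops above clear only the first RTE_ETHDEV_QUEUE_STAT_CNTRS
 * RX/TX queues, matching the set of per-queue counters that can be
 * reported through rte_eth_stats.
 */
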
444 static int
445 qede_stop_vport(struct ecore_dev *edev)
446 {
447 	struct ecore_hwfn *p_hwfn;
448 	uint8_t vport_id;
449 	int rc;
450 	int i;
451 
452 	vport_id = 0;
453 	for_each_hwfn(edev, i) {
454 		p_hwfn = &edev->hwfns[i];
455 		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
456 					 vport_id);
457 		if (rc != ECORE_SUCCESS) {
458 			DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
459 			return rc;
460 		}
461 	}
462 
463 	DP_INFO(edev, "vport stopped\n");
464 
465 	return 0;
466 }
467 
468 static int
469 qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
470 {
471 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
472 	struct ecore_sp_vport_start_params params;
473 	struct ecore_hwfn *p_hwfn;
474 	int rc;
475 	int i;
476 
477 	if (qdev->vport_started)
478 		qede_stop_vport(edev);
479 
480 	memset(&params, 0, sizeof(params));
481 	params.vport_id = 0;
482 	params.mtu = mtu;
483 	/* @DPDK - Disable FW placement */
484 	params.zero_placement_offset = 1;
485 	for_each_hwfn(edev, i) {
486 		p_hwfn = &edev->hwfns[i];
487 		params.concrete_fid = p_hwfn->hw_info.concrete_fid;
488 		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
489 		rc = ecore_sp_vport_start(p_hwfn, &params);
490 		if (rc != ECORE_SUCCESS) {
491 			DP_ERR(edev, "Start V-PORT failed %d\n", rc);
492 			return rc;
493 		}
494 	}
495 	ecore_reset_vport_stats(edev);
496 	qdev->vport_started = true;
497 	DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
498 
499 	return 0;
500 }
501 
502 #define QEDE_NPAR_TX_SWITCHING		"npar_tx_switching"
503 #define QEDE_VF_TX_SWITCHING		"vf_tx_switching"
504 
505 /* Activate or deactivate vport via vport-update */
506 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
507 {
508 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
509 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
510 	struct ecore_sp_vport_update_params params;
511 	struct ecore_hwfn *p_hwfn;
512 	uint8_t i;
513 	int rc = -1;
514 
515 	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
516 	params.vport_id = 0;
517 	params.update_vport_active_rx_flg = 1;
518 	params.update_vport_active_tx_flg = 1;
519 	params.vport_active_rx_flg = flg;
520 	params.vport_active_tx_flg = flg;
521 	if (!qdev->enable_tx_switching) {
522 		params.update_tx_switching_flg = 1;
523 		params.tx_switching_flg = !flg;
524 		DP_INFO(edev, "%s tx-switching is disabled\n",
525 			IS_VF(edev) ? "VF" : "NPAR");
529 	}
530 	for_each_hwfn(edev, i) {
531 		p_hwfn = &edev->hwfns[i];
532 		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
533 		rc = ecore_sp_vport_update(p_hwfn, &params,
534 				ECORE_SPQ_MODE_EBLOCK, NULL);
535 		if (rc != ECORE_SUCCESS) {
536 			DP_ERR(edev, "Failed to update vport\n");
537 			break;
538 		}
539 	}
540 	DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");
541 
542 	return rc;
543 }
544 
545 static void
546 qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
547 			   uint16_t mtu, bool enable)
548 {
549 	/* Enable LRO in split mode */
550 	sge_tpa_params->tpa_ipv4_en_flg = enable;
551 	sge_tpa_params->tpa_ipv6_en_flg = enable;
552 	sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
553 	sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
554 	/* set if tpa enable changes */
555 	sge_tpa_params->update_tpa_en_flg = 1;
556 	/* set if tpa parameters should be handled */
557 	sge_tpa_params->update_tpa_param_flg = enable;
558 
559 	sge_tpa_params->max_buffers_per_cqe = 20;
560 	/* Enable TPA in split mode. In this mode each TPA segment
561 	 * starts on the new BD, so there is one BD per segment.
562 	 */
563 	sge_tpa_params->tpa_pkt_split_flg = 1;
564 	sge_tpa_params->tpa_hdr_data_split_flg = 0;
565 	sge_tpa_params->tpa_gro_consistent_flg = 0;
566 	sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
567 	sge_tpa_params->tpa_max_size = 0x7FFF;
568 	sge_tpa_params->tpa_min_size_to_start = mtu / 2;
569 	sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
570 }
571 
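/* Worked example for the sizing above (illustrative): with MTU 1500,
 * aggregation starts and continues only for payloads of at least
 * 1500 / 2 = 750 bytes, an aggregation is capped at 0x7FFF (32767) bytes
 * and at most 20 buffers are used per completion queue entry.
 */
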
572 /* Enable/disable LRO via vport-update */
573 int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
574 {
575 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
576 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
577 	struct ecore_sp_vport_update_params params;
578 	struct ecore_sge_tpa_params tpa_params;
579 	struct ecore_hwfn *p_hwfn;
580 	int rc;
581 	int i;
582 
583 	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
584 	memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
585 	qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
586 	params.vport_id = 0;
587 	params.sge_tpa_params = &tpa_params;
588 	for_each_hwfn(edev, i) {
589 		p_hwfn = &edev->hwfns[i];
590 		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
591 		rc = ecore_sp_vport_update(p_hwfn, &params,
592 				ECORE_SPQ_MODE_EBLOCK, NULL);
593 		if (rc != ECORE_SUCCESS) {
594 			DP_ERR(edev, "Failed to update LRO\n");
595 			return -1;
596 		}
597 	}
598 	qdev->enable_lro = flg;
599 	eth_dev->data->lro = flg;
600 
601 	DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
602 
603 	return 0;
604 }
605 
606 /* Update MTU via vport-update without doing port restart.
607  * The vport must be deactivated before calling this API.
608  */
609 int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
610 {
611 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
612 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
613 	struct ecore_sp_vport_update_params params;
614 	struct ecore_hwfn *p_hwfn;
615 	int rc;
616 	int i;
617 
618 	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
619 	params.vport_id = 0;
620 	params.mtu = mtu;
622 	for_each_hwfn(edev, i) {
623 		p_hwfn = &edev->hwfns[i];
624 		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
625 		rc = ecore_sp_vport_update(p_hwfn, &params,
626 				ECORE_SPQ_MODE_EBLOCK, NULL);
627 		if (rc != ECORE_SUCCESS) {
628 			DP_ERR(edev, "Failed to update MTU\n");
629 			return -1;
630 		}
631 	}
632 	DP_INFO(edev, "MTU updated to %u\n", mtu);
633 
634 	return 0;
635 }
636 
637 static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
638 {
639 	memset(ucast, 0, sizeof(struct ecore_filter_ucast));
640 	ucast->is_rx_filter = true;
641 	ucast->is_tx_filter = true;
642 	/* ucast->assert_on_error = true; - For debug */
643 }
644 
645 static int
646 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
647 			     enum qed_filter_rx_mode_type type)
648 {
649 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
650 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
651 	struct ecore_filter_accept_flags flags;
652 
653 	memset(&flags, 0, sizeof(flags));
654 
655 	flags.update_rx_mode_config = 1;
656 	flags.update_tx_mode_config = 1;
657 	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
658 		ECORE_ACCEPT_MCAST_MATCHED |
659 		ECORE_ACCEPT_BCAST;
660 
661 	flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
662 		ECORE_ACCEPT_MCAST_MATCHED |
663 		ECORE_ACCEPT_BCAST;
664 
665 	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
666 		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
667 		if (IS_VF(edev)) {
668 			flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
669 			DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
670 		}
671 	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
672 		flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
673 	} else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
674 				QED_FILTER_RX_MODE_TYPE_PROMISC)) {
675 		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
676 			ECORE_ACCEPT_MCAST_UNMATCHED;
677 	}
678 
679 	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
680 			ECORE_SPQ_MODE_CB, NULL);
681 }
682 
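/* Illustrative summary of the accept-flags composition above:
 *
 *	rx mode			extra RX accept flags
 *	regular			none (matched UC/MC plus BC only)
 *	promisc			UCAST_UNMATCHED (plus TX unmatched on a VF)
 *	multi-promisc		MCAST_UNMATCHED
 *	promisc+multi-promisc	UCAST_UNMATCHED and MCAST_UNMATCHED
 */
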
683 static int
684 qede_tunnel_update(struct qede_dev *qdev,
685 		   struct ecore_tunnel_info *tunn_info)
686 {
687 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
688 	enum _ecore_status_t rc = ECORE_INVAL;
689 	struct ecore_hwfn *p_hwfn;
690 	struct ecore_ptt *p_ptt;
691 	int i;
692 
693 	for_each_hwfn(edev, i) {
694 		p_hwfn = &edev->hwfns[i];
695 		if (IS_PF(edev)) {
696 			p_ptt = ecore_ptt_acquire(p_hwfn);
697 			if (!p_ptt) {
698 				DP_ERR(p_hwfn, "Can't acquire PTT\n");
699 				return -EAGAIN;
700 			}
701 		} else {
702 			p_ptt = NULL;
703 		}
704 
705 		rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
706 				tunn_info, ECORE_SPQ_MODE_CB, NULL);
707 		if (IS_PF(edev))
708 			ecore_ptt_release(p_hwfn, p_ptt);
709 
710 		if (rc != ECORE_SUCCESS)
711 			break;
712 	}
713 
714 	return rc;
715 }
716 
717 static int
718 qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
719 		  bool enable)
720 {
721 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
722 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
723 	enum _ecore_status_t rc = ECORE_INVAL;
724 	struct ecore_tunnel_info tunn;
725 
726 	if (qdev->vxlan.enable == enable)
727 		return ECORE_SUCCESS;
728 
729 	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
730 	tunn.vxlan.b_update_mode = true;
731 	tunn.vxlan.b_mode_enabled = enable;
732 	tunn.b_update_rx_cls = true;
733 	tunn.b_update_tx_cls = true;
734 	tunn.vxlan.tun_cls = clss;
735 
736 	tunn.vxlan_port.b_update_port = true;
737 	tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
738 
739 	rc = qede_tunnel_update(qdev, &tunn);
740 	if (rc == ECORE_SUCCESS) {
741 		qdev->vxlan.enable = enable;
742 		qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
743 		DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
744 			enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
745 	} else {
746 		DP_ERR(edev, "Failed to update tunn_clss %u\n",
747 		       tunn.vxlan.tun_cls);
748 	}
749 
750 	return rc;
751 }
752 
753 static int
754 qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
755 		  bool enable)
756 {
757 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
758 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
759 	enum _ecore_status_t rc = ECORE_INVAL;
760 	struct ecore_tunnel_info tunn;
761 
762 	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
763 	tunn.l2_geneve.b_update_mode = true;
764 	tunn.l2_geneve.b_mode_enabled = enable;
765 	tunn.ip_geneve.b_update_mode = true;
766 	tunn.ip_geneve.b_mode_enabled = enable;
767 	tunn.l2_geneve.tun_cls = clss;
768 	tunn.ip_geneve.tun_cls = clss;
769 	tunn.b_update_rx_cls = true;
770 	tunn.b_update_tx_cls = true;
771 
772 	tunn.geneve_port.b_update_port = true;
773 	tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
774 
775 	rc = qede_tunnel_update(qdev, &tunn);
776 	if (rc == ECORE_SUCCESS) {
777 		qdev->geneve.enable = enable;
778 		qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
779 		DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
780 			enable ? "enabled" : "disabled", qdev->geneve.udp_port);
781 	} else {
782 		DP_ERR(edev, "Failed to update tunn_clss %u\n",
783 		       clss);
784 	}
785 
786 	return rc;
787 }
788 
789 static int
790 qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
791 		  bool enable)
792 {
793 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
794 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
795 	enum _ecore_status_t rc = ECORE_INVAL;
796 	struct ecore_tunnel_info tunn;
797 
798 	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
799 	tunn.ip_gre.b_update_mode = true;
800 	tunn.ip_gre.b_mode_enabled = enable;
801 	tunn.ip_gre.tun_cls = clss;
803 	tunn.b_update_rx_cls = true;
804 	tunn.b_update_tx_cls = true;
805 
806 	rc = qede_tunnel_update(qdev, &tunn);
807 	if (rc == ECORE_SUCCESS) {
808 		qdev->ipgre.enable = enable;
809 		DP_INFO(edev, "IPGRE is %s\n",
810 			enable ? "enabled" : "disabled");
811 	} else {
812 		DP_ERR(edev, "Failed to update tunn_clss %u\n",
813 		       clss);
814 	}
815 
816 	return rc;
817 }
818 
819 static int
820 qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
821 		 enum rte_eth_tunnel_type tunn_type, bool enable)
822 {
823 	int rc = -EINVAL;
824 
825 	switch (tunn_type) {
826 	case RTE_TUNNEL_TYPE_VXLAN:
827 		rc = qede_vxlan_enable(eth_dev, clss, enable);
828 		break;
829 	case RTE_TUNNEL_TYPE_GENEVE:
830 		rc = qede_geneve_enable(eth_dev, clss, enable);
831 		break;
832 	case RTE_TUNNEL_TYPE_IP_IN_GRE:
833 		rc = qede_ipgre_enable(eth_dev, clss, enable);
834 		break;
835 	default:
836 		rc = -EINVAL;
837 		break;
838 	}
839 
840 	return rc;
841 }
842 
843 static int
844 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
845 		  bool add)
846 {
847 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
848 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
849 	struct qede_ucast_entry *tmp = NULL;
850 	struct qede_ucast_entry *u;
851 	struct ether_addr *mac_addr;
852 
853 	mac_addr  = (struct ether_addr *)ucast->mac;
854 	if (add) {
855 		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
856 			if ((memcmp(mac_addr, &tmp->mac,
857 				    ETHER_ADDR_LEN) == 0) &&
858 			     ucast->vni == tmp->vni &&
859 			     ucast->vlan == tmp->vlan) {
860 				DP_ERR(edev, "Unicast MAC is already added"
861 				       " with vlan = %u, vni = %u\n",
862 				       ucast->vlan, ucast->vni);
863 				return -EEXIST;
864 			}
865 		}
866 		u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
867 			       RTE_CACHE_LINE_SIZE);
868 		if (!u) {
869 			DP_ERR(edev, "Did not allocate memory for ucast\n");
870 			return -ENOMEM;
871 		}
872 		ether_addr_copy(mac_addr, &u->mac);
873 		u->vlan = ucast->vlan;
874 		u->vni = ucast->vni;
875 		SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
876 		qdev->num_uc_addr++;
877 	} else {
878 		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
879 			if ((memcmp(mac_addr, &tmp->mac,
880 				    ETHER_ADDR_LEN) == 0) &&
881 			    ucast->vlan == tmp->vlan &&
882 			    ucast->vni == tmp->vni)
883 				break;
884 		}
885 		if (tmp == NULL) {
886 			DP_INFO(edev, "Unicast MAC not found\n");
887 			return -EINVAL;
888 		}
889 		SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
890 		qdev->num_uc_addr--;
891 	}
892 
893 	return 0;
894 }
895 
896 static int
897 qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
898 		  bool add)
899 {
900 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
901 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
902 	struct ether_addr *mac_addr;
903 	struct qede_mcast_entry *tmp = NULL;
904 	struct qede_mcast_entry *m;
905 
906 	mac_addr  = (struct ether_addr *)mcast->mac;
907 	if (add) {
908 		SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
909 			if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
910 				DP_ERR(edev,
911 					"Multicast MAC is already added\n");
912 				return -EEXIST;
913 			}
914 		}
915 		m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
916 			RTE_CACHE_LINE_SIZE);
917 		if (!m) {
918 			DP_ERR(edev,
919 				"Did not allocate memory for mcast\n");
920 			return -ENOMEM;
921 		}
922 		ether_addr_copy(mac_addr, &m->mac);
923 		SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
924 		qdev->num_mc_addr++;
925 	} else {
926 		SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
927 			if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
928 				break;
929 		}
930 		if (tmp == NULL) {
931 			DP_INFO(edev, "Multicast MAC not found\n");
932 			return -EINVAL;
933 		}
934 		SLIST_REMOVE(&qdev->mc_list_head, tmp,
935 			     qede_mcast_entry, list);
936 		qdev->num_mc_addr--;
937 	}
938 
939 	return 0;
940 }
941 
942 static enum _ecore_status_t
943 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
944 		 bool add)
945 {
946 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
947 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
948 	enum _ecore_status_t rc;
949 	struct ecore_filter_mcast mcast;
950 	struct qede_mcast_entry *tmp;
951 	uint16_t j = 0;
952 
953 	/* Multicast */
954 	if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
955 		if (add) {
956 			if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
957 				DP_ERR(edev,
958 				       "Mcast filter table limit exceeded;"
959 				       " please enable mcast promisc mode\n");
960 				return -ECORE_INVAL;
961 			}
962 		}
963 		rc = qede_mcast_filter(eth_dev, ucast, add);
964 		if (rc == 0) {
965 			DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
966 			memset(&mcast, 0, sizeof(mcast));
967 			mcast.num_mc_addrs = qdev->num_mc_addr;
968 			mcast.opcode = ECORE_FILTER_ADD;
969 			SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
970 				ether_addr_copy(&tmp->mac,
971 					(struct ether_addr *)&mcast.mac[j]);
972 				j++;
973 			}
974 			rc = ecore_filter_mcast_cmd(edev, &mcast,
975 						    ECORE_SPQ_MODE_CB, NULL);
976 		}
977 		if (rc != ECORE_SUCCESS) {
978 			DP_ERR(edev, "Failed to add multicast filter"
979 			       " rc = %d, op = %d\n", rc, add);
980 		}
981 	} else { /* Unicast */
982 		if (add) {
983 			if (qdev->num_uc_addr >=
984 			    qdev->dev_info.num_mac_filters) {
985 				DP_ERR(edev,
986 				       "Ucast filter table limit exceeded;"
987 				       " please enable promisc mode\n");
988 				return -ECORE_INVAL;
989 			}
990 		}
991 		rc = qede_ucast_filter(eth_dev, ucast, add);
992 		if (rc == 0)
993 			rc = ecore_filter_ucast_cmd(edev, ucast,
994 						    ECORE_SPQ_MODE_CB, NULL);
995 		if (rc != ECORE_SUCCESS) {
996 			DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
997 			       rc, add);
998 		}
999 	}
1000 
1001 	return rc;
1002 }
1003 
1004 static int
1005 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
1006 		  __rte_unused uint32_t index, __rte_unused uint32_t pool)
1007 {
1008 	struct ecore_filter_ucast ucast;
1010 
1011 	qede_set_ucast_cmn_params(&ucast);
1012 	ucast.type = ECORE_FILTER_MAC;
1013 	ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
1014 	return qede_mac_int_ops(eth_dev, &ucast, true);
1016 }
1017 
1018 static void
1019 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
1020 {
1021 	struct qede_dev *qdev = eth_dev->data->dev_private;
1022 	struct ecore_dev *edev = &qdev->edev;
1023 	struct ecore_filter_ucast ucast;
1024 
1025 	PMD_INIT_FUNC_TRACE(edev);
1026 
1027 	if (index >= qdev->dev_info.num_mac_filters) {
1028 		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
1029 		       index, qdev->dev_info.num_mac_filters);
1030 		return;
1031 	}
1032 
1033 	qede_set_ucast_cmn_params(&ucast);
1034 	ucast.opcode = ECORE_FILTER_REMOVE;
1035 	ucast.type = ECORE_FILTER_MAC;
1036 
1037 	/* Use the index maintained by rte */
1038 	ether_addr_copy(&eth_dev->data->mac_addrs[index],
1039 			(struct ether_addr *)&ucast.mac);
1040 
1041 	qede_mac_int_ops(eth_dev, &ucast, false);
1042 }
1043 
1044 static int
1045 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
1046 {
1047 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1048 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1049 
1050 	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
1051 					       mac_addr->addr_bytes)) {
1052 		DP_ERR(edev, "Setting MAC address is not allowed\n");
1053 		return -EPERM;
1054 	}
1055 
1056 	qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
1057 	return 0;
1058 }
1059 
1060 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
1061 {
1062 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1063 	struct ecore_sp_vport_update_params params;
1064 	struct ecore_hwfn *p_hwfn;
1065 	uint8_t i;
1066 	int rc;
1067 
1068 	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
1069 	params.vport_id = 0;
1070 	params.update_accept_any_vlan_flg = 1;
1071 	params.accept_any_vlan = flg;
1072 	for_each_hwfn(edev, i) {
1073 		p_hwfn = &edev->hwfns[i];
1074 		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1075 		rc = ecore_sp_vport_update(p_hwfn, &params,
1076 				ECORE_SPQ_MODE_EBLOCK, NULL);
1077 		if (rc != ECORE_SUCCESS) {
1078 			DP_ERR(edev, "Failed to configure accept-any-vlan\n");
1079 			return;
1080 		}
1081 	}
1082 
1083 	DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
1084 }
1085 
1086 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
1087 {
1088 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1089 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1090 	struct ecore_sp_vport_update_params params;
1091 	struct ecore_hwfn *p_hwfn;
1092 	uint8_t i;
1093 	int rc;
1094 
1095 	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
1096 	params.vport_id = 0;
1097 	params.update_inner_vlan_removal_flg = 1;
1098 	params.inner_vlan_removal_flg = flg;
1099 	for_each_hwfn(edev, i) {
1100 		p_hwfn = &edev->hwfns[i];
1101 		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1102 		rc = ecore_sp_vport_update(p_hwfn, &params,
1103 				ECORE_SPQ_MODE_EBLOCK, NULL);
1104 		if (rc != ECORE_SUCCESS) {
1105 			DP_ERR(edev, "Failed to update vport\n");
1106 			return -1;
1107 		}
1108 	}
1109 
1110 	DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
1111 	return 0;
1112 }
1113 
1114 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
1115 				uint16_t vlan_id, int on)
1116 {
1117 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1118 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1119 	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
1120 	struct qede_vlan_entry *tmp = NULL;
1121 	struct qede_vlan_entry *vlan;
1122 	struct ecore_filter_ucast ucast;
1123 	int rc;
1124 
1125 	if (on) {
1126 		if (qdev->configured_vlans == dev_info->num_vlan_filters) {
1127 			DP_ERR(edev, "Reached max VLAN filter limit;"
1128 				      " enabling accept_any_vlan\n");
1129 			qede_config_accept_any_vlan(qdev, true);
1130 			return 0;
1131 		}
1132 
1133 		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
1134 			if (tmp->vid == vlan_id) {
1135 				DP_ERR(edev, "VLAN %u already configured\n",
1136 				       vlan_id);
1137 				return -EEXIST;
1138 			}
1139 		}
1140 
1141 		vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
1142 				  RTE_CACHE_LINE_SIZE);
1143 
1144 		if (!vlan) {
1145 			DP_ERR(edev, "Did not allocate memory for VLAN\n");
1146 			return -ENOMEM;
1147 		}
1148 
1149 		qede_set_ucast_cmn_params(&ucast);
1150 		ucast.opcode = ECORE_FILTER_ADD;
1151 		ucast.type = ECORE_FILTER_VLAN;
1152 		ucast.vlan = vlan_id;
1153 		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
1154 					    NULL);
1155 		if (rc != 0) {
1156 			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
1157 			       rc);
1158 			rte_free(vlan);
1159 		} else {
1160 			vlan->vid = vlan_id;
1161 			SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
1162 			qdev->configured_vlans++;
1163 			DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
1164 				vlan_id, qdev->configured_vlans);
1165 		}
1166 	} else {
1167 		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
1168 			if (tmp->vid == vlan_id)
1169 				break;
1170 		}
1171 
1172 		if (!tmp) {
1173 			if (qdev->configured_vlans == 0) {
1174 				DP_INFO(edev,
1175 					"No VLAN filters configured yet\n");
1176 				return 0;
1177 			}
1178 
1179 			DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
1180 			return -EINVAL;
1181 		}
1182 
1183 		SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
1184 
1185 		qede_set_ucast_cmn_params(&ucast);
1186 		ucast.opcode = ECORE_FILTER_REMOVE;
1187 		ucast.type = ECORE_FILTER_VLAN;
1188 		ucast.vlan = vlan_id;
1189 		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
1190 					    NULL);
1191 		if (rc != 0) {
1192 			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
1193 			       vlan_id, rc);
1194 		} else {
1195 			qdev->configured_vlans--;
1196 			DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
1197 				vlan_id, qdev->configured_vlans);
1198 		}
1199 	}
1200 
1201 	return rc;
1202 }
1203 
1204 static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
1205 {
1206 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1207 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1208 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
1209 
1210 	if (mask & ETH_VLAN_STRIP_MASK) {
1211 		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1212 			(void)qede_vlan_stripping(eth_dev, 1);
1213 		else
1214 			(void)qede_vlan_stripping(eth_dev, 0);
1215 	}
1216 
1217 	if (mask & ETH_VLAN_FILTER_MASK) {
1218 		/* VLAN filtering kicks in when a VLAN is added */
1219 		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
1220 			qede_vlan_filter_set(eth_dev, 0, 1);
1221 		} else {
1222 			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
1223 				DP_ERR(edev,
1224 				  "Please remove existing VLAN filters"
1225 				  " before disabling VLAN filtering\n");
1226 				/* Signal app that VLAN filtering is still
1227 				 * enabled
1228 				 */
1229 				eth_dev->data->dev_conf.rxmode.offloads |=
1230 						DEV_RX_OFFLOAD_VLAN_FILTER;
1231 			} else {
1232 				qede_vlan_filter_set(eth_dev, 0, 0);
1233 			}
1234 		}
1235 	}
1236 
1237 	if (mask & ETH_VLAN_EXTEND_MASK)
1238 		DP_ERR(edev, "Extend VLAN not supported\n");
1239 
1240 	qdev->vlan_offload_mask = mask;
1241 
1242 	DP_INFO(edev, "VLAN offload mask %d\n", mask);
1243 
1244 	return 0;
1245 }
1246 
1247 static void qede_prandom_bytes(uint32_t *buff)
1248 {
1249 	uint8_t i;
1250 
1251 	srand((unsigned int)time(NULL));
1252 	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
1253 		buff[i] = rand();
1254 }
1255 
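/* Note: rand() seeded from time() above is only used to derive a default
 * RSS key when the application does not supply one; it is not a
 * cryptographically secure source. Applications needing a deterministic
 * key should pass rss_key in rte_eth_rss_conf instead.
 */
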
1256 int qede_config_rss(struct rte_eth_dev *eth_dev)
1257 {
1258 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1259 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1260 	uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
1261 	struct rte_eth_rss_reta_entry64 reta_conf[2];
1262 	struct rte_eth_rss_conf rss_conf;
1263 	uint32_t i, id, pos, q;
1264 
1265 	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
1266 	if (!rss_conf.rss_key) {
1267 		DP_INFO(edev, "Applying driver default key\n");
1268 		rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
1269 		qede_prandom_bytes(&def_rss_key[0]);
1270 		rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
1271 	}
1272 
1273 	/* Configure RSS hash */
1274 	if (qede_rss_hash_update(eth_dev, &rss_conf))
1275 		return -EINVAL;
1276 
1277 	/* Configure default RETA */
1278 	memset(reta_conf, 0, sizeof(reta_conf));
1279 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
1280 		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
1281 
1282 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
1283 		id = i / RTE_RETA_GROUP_SIZE;
1284 		pos = i % RTE_RETA_GROUP_SIZE;
1285 		q = i % QEDE_RSS_COUNT(qdev);
1286 		reta_conf[id].reta[pos] = q;
1287 	}
1288 	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
1289 				 ECORE_RSS_IND_TABLE_SIZE))
1290 		return -EINVAL;
1291 
1292 	return 0;
1293 }
1294 
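/* Worked example for the default RETA built above (illustrative): with
 * 4 RX queues and the 128-entry indirection table (two 64-entry groups,
 * matching reta_conf[2]), entry i gets queue i % 4, i.e. the pattern
 * 0, 1, 2, 3, 0, 1, ... which spreads flows evenly across the queues.
 */
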
1295 static void qede_fastpath_start(struct ecore_dev *edev)
1296 {
1297 	struct ecore_hwfn *p_hwfn;
1298 	int i;
1299 
1300 	for_each_hwfn(edev, i) {
1301 		p_hwfn = &edev->hwfns[i];
1302 		ecore_hw_start_fastpath(p_hwfn);
1303 	}
1304 }
1305 
1306 static int qede_dev_start(struct rte_eth_dev *eth_dev)
1307 {
1308 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1309 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1310 	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1311 
1312 	PMD_INIT_FUNC_TRACE(edev);
1313 
1314 	/* Configure TPA parameters */
1315 	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1316 		if (qede_enable_tpa(eth_dev, true))
1317 			return -EINVAL;
1318 		/* Enable scatter mode for LRO */
1319 		if (!eth_dev->data->scattered_rx)
1320 			rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
1321 	}
1322 
1323 	/* Start queues */
1324 	if (qede_start_queues(eth_dev))
1325 		goto err;
1326 
1327 	if (IS_PF(edev))
1328 		qede_reset_queue_stats(qdev, true);
1329 
1330 	/* The newer SR-IOV PF driver expects RX/TX queues to be started
1331 	 * before enabling RSS, so RSS configuration is deferred up to this
1332 	 * point. The same ordering is retained for the PF case, so no
1333 	 * PF/VF specific check is done here.
1334 	 */
1335 	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
1336 		if (qede_config_rss(eth_dev))
1337 			goto err;
1338 
1339 	/* Enable vport*/
1340 	if (qede_activate_vport(eth_dev, true))
1341 		goto err;
1342 
1343 	/* Update link status */
1344 	qede_link_update(eth_dev, 0);
1345 
1346 	/* Start/resume traffic */
1347 	qede_fastpath_start(edev);
1348 
1349 	DP_INFO(edev, "Device started\n");
1350 
1351 	return 0;
1352 err:
1353 	DP_ERR(edev, "Device start failed\n");
1354 	return -1; /* common error code is < 0 */
1355 }
1356 
1357 static void qede_dev_stop(struct rte_eth_dev *eth_dev)
1358 {
1359 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1360 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1361 
1362 	PMD_INIT_FUNC_TRACE(edev);
1363 
1364 	/* Disable vport */
1365 	if (qede_activate_vport(eth_dev, false))
1366 		return;
1367 
1368 	if (qdev->enable_lro)
1369 		qede_enable_tpa(eth_dev, false);
1370 
1371 	/* Stop queues */
1372 	qede_stop_queues(eth_dev);
1373 
1374 	/* Disable traffic */
1375 	ecore_hw_stop_fastpath(edev); /* TBD - loop */
1376 
1377 	if (IS_PF(edev))
1378 		qede_mac_addr_remove(eth_dev, 0);
1379 
1380 	DP_INFO(edev, "Device is stopped\n");
1381 }
1382 
1383 const char *valid_args[] = {
1384 	QEDE_NPAR_TX_SWITCHING,
1385 	QEDE_VF_TX_SWITCHING,
1386 	NULL,
1387 };
1388 
1389 static int qede_args_check(const char *key, const char *val, void *opaque)
1390 {
1391 	unsigned long tmp;
1392 	int ret = 0;
1393 	struct rte_eth_dev *eth_dev = opaque;
1394 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1395 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1396 
1397 	errno = 0;
1398 	tmp = strtoul(val, NULL, 0);
1399 	if (errno) {
1400 		DP_INFO(edev, "%s: \"%s\" is not a valid integer\n", key, val);
1401 		return errno;
1402 	}
1403 
1404 	if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
1405 	    (strcmp(QEDE_VF_TX_SWITCHING, key) == 0))
1406 		qdev->enable_tx_switching = !!tmp;
1407 
1408 	return ret;
1409 }
1410 
1411 static int qede_args(struct rte_eth_dev *eth_dev)
1412 {
1413 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1414 	struct rte_kvargs *kvlist;
1415 	struct rte_devargs *devargs;
1416 	int ret;
1417 	int i;
1418 
1419 	devargs = pci_dev->device.devargs;
1420 	if (!devargs)
1421 		return 0; /* return success */
1422 
1423 	kvlist = rte_kvargs_parse(devargs->args, valid_args);
1424 	if (kvlist == NULL)
1425 		return -EINVAL;
1426 
1427 	 /* Process parameters. */
1428 	for (i = 0; (valid_args[i] != NULL); ++i) {
1429 		if (rte_kvargs_count(kvlist, valid_args[i])) {
1430 			ret = rte_kvargs_process(kvlist, valid_args[i],
1431 						 qede_args_check, eth_dev);
1432 			if (ret != 0) {
1433 				rte_kvargs_free(kvlist);
1434 				return ret;
1435 			}
1436 		}
1437 	}
1438 	rte_kvargs_free(kvlist);
1439 
1440 	return 0;
1441 }
1442 
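/* Illustrative usage (PCI addresses are assumptions): Tx switching can be
 * turned off from the application command line via devargs, e.g. with
 * testpmd:
 *
 *	testpmd -w 05:00.0,npar_tx_switching=0 -- -i
 *	testpmd -w 05:00.2,vf_tx_switching=0 -- -i
 *
 * Any value that strtoul() parses as 0 clears enable_tx_switching;
 * non-zero values keep it enabled (the default).
 */
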
1443 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
1444 {
1445 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1446 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1447 	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1448 	int ret;
1449 
1450 	PMD_INIT_FUNC_TRACE(edev);
1451 
1452 	/* Check requirements for 100G mode */
1453 	if (ECORE_IS_CMT(edev)) {
1454 		if (eth_dev->data->nb_rx_queues < 2 ||
1455 		    eth_dev->data->nb_tx_queues < 2) {
1456 			DP_ERR(edev, "100G mode needs at least 2 RX/TX queues\n");
1457 			return -EINVAL;
1458 		}
1459 
1460 		if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
1461 		    (eth_dev->data->nb_tx_queues % 2 != 0)) {
1462 			DP_ERR(edev,
1463 			       "100G mode needs an even number of RX/TX queues\n");
1464 			return -EINVAL;
1465 		}
1466 	}
1467 
1468 	/* At least one RX queue is required. rte_eth_dev_configure() does
1469 	 * no minimum check, so check it here.
1470 	 */
1471 	if (eth_dev->data->nb_rx_queues == 0) {
1472 		DP_ERR(edev, "Minimum one RX queue is required\n");
1473 		return -EINVAL;
1474 	}
1475 
1476 	/* Enable Tx switching by default */
1477 	qdev->enable_tx_switching = 1;
1478 
1479 	/* Parse devargs and fix up rxmode */
1480 	if (qede_args(eth_dev))
1481 		return -ENOTSUP;
1482 
1483 	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
1484 	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
1485 		DP_ERR(edev, "Unsupported multi-queue mode\n");
1486 		return -ENOTSUP;
1487 	}
1488 	/* Flow director mode check */
1489 	if (qede_check_fdir_support(eth_dev))
1490 		return -ENOTSUP;
1491 
1492 	qede_dealloc_fp_resc(eth_dev);
1493 	qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
1494 	qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
1495 	if (qede_alloc_fp_resc(qdev))
1496 		return -ENOMEM;
1497 
1498 	/* If jumbo enabled adjust MTU */
1499 	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
1500 		eth_dev->data->mtu =
1501 			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
1502 			ETHER_HDR_LEN - ETHER_CRC_LEN;
1503 
1504 	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
1505 		eth_dev->data->scattered_rx = 1;
1506 
1507 	if (qede_start_vport(qdev, eth_dev->data->mtu))
1508 		return -1;
1509 
1510 	qdev->mtu = eth_dev->data->mtu;
1511 
1512 	/* Enable VLAN offloads by default */
1513 	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
1514 					     ETH_VLAN_FILTER_MASK |
1515 					     ETH_VLAN_EXTEND_MASK);
1516 	if (ret)
1517 		return ret;
1518 
1519 	DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
1520 			QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
1521 
1522 	return 0;
1523 }
1524 
1525 /* Info about HW descriptor ring limitations */
1526 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
1527 	.nb_max = 0x8000, /* 32K */
1528 	.nb_min = 128,
1529 	.nb_align = 128 /* ring size must be a multiple of 128 */
1530 };
1531 
1532 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
1533 	.nb_max = 0x8000, /* 32K */
1534 	.nb_min = 256,
1535 	.nb_align = 256,
1536 	.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
1537 	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
1538 };
1539 
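/* Illustrative consequence of the limits above: a requested RX ring size
 * must be a multiple of 128 in [128, 32768] (e.g. 4096 is accepted, 1000
 * is not), and a TX ring size must be a multiple of 256 in [256, 32768].
 */
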
1540 static void
1541 qede_dev_info_get(struct rte_eth_dev *eth_dev,
1542 		  struct rte_eth_dev_info *dev_info)
1543 {
1544 	struct qede_dev *qdev = eth_dev->data->dev_private;
1545 	struct ecore_dev *edev = &qdev->edev;
1546 	struct qed_link_output link;
1547 	uint32_t speed_cap = 0;
1548 
1549 	PMD_INIT_FUNC_TRACE(edev);
1550 
1551 	dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
1552 	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
1553 	dev_info->rx_desc_lim = qede_rx_desc_lim;
1554 	dev_info->tx_desc_lim = qede_tx_desc_lim;
1555 
1556 	if (IS_PF(edev))
1557 		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1558 			QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
1559 	else
1560 		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1561 			QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
1562 	dev_info->max_tx_queues = dev_info->max_rx_queues;
1563 
1564 	dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
1565 	dev_info->max_vfs = 0;
1566 	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
1567 	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
1568 	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
1569 	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM	|
1570 				     DEV_RX_OFFLOAD_UDP_CKSUM	|
1571 				     DEV_RX_OFFLOAD_TCP_CKSUM	|
1572 				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1573 				     DEV_RX_OFFLOAD_TCP_LRO	|
1574 				     DEV_RX_OFFLOAD_CRC_STRIP	|
1575 				     DEV_RX_OFFLOAD_SCATTER	|
1576 				     DEV_RX_OFFLOAD_JUMBO_FRAME |
1577 				     DEV_RX_OFFLOAD_VLAN_FILTER |
1578 				     DEV_RX_OFFLOAD_VLAN_STRIP);
1579 	dev_info->rx_queue_offload_capa = 0;
1580 
1581 	/* TX offloads are on a per-packet basis, so it is applicable
1582 	 * to both at port and queue levels.
1583 	 */
1584 	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT	|
1585 				     DEV_TX_OFFLOAD_IPV4_CKSUM	|
1586 				     DEV_TX_OFFLOAD_UDP_CKSUM	|
1587 				     DEV_TX_OFFLOAD_TCP_CKSUM	|
1588 				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1589 				     DEV_TX_OFFLOAD_QINQ_INSERT |
1590 				     DEV_TX_OFFLOAD_MULTI_SEGS  |
1591 				     DEV_TX_OFFLOAD_TCP_TSO	|
1592 				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1593 				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
1594 	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
1595 
1596 	dev_info->default_txconf = (struct rte_eth_txconf) {
1597 		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
1598 	};
1599 
1600 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1601 		/* Packets are always dropped if no descriptors are available */
1602 		.rx_drop_en = 1,
1603 		/* The below RX offloads are always enabled */
1604 		.offloads = (DEV_RX_OFFLOAD_CRC_STRIP  |
1605 			     DEV_RX_OFFLOAD_IPV4_CKSUM |
1606 			     DEV_RX_OFFLOAD_TCP_CKSUM  |
1607 			     DEV_RX_OFFLOAD_UDP_CKSUM),
1608 	};
1609 
1610 	memset(&link, 0, sizeof(struct qed_link_output));
1611 	qdev->ops->common->get_link(edev, &link);
1612 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1613 		speed_cap |= ETH_LINK_SPEED_1G;
1614 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1615 		speed_cap |= ETH_LINK_SPEED_10G;
1616 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1617 		speed_cap |= ETH_LINK_SPEED_25G;
1618 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1619 		speed_cap |= ETH_LINK_SPEED_40G;
1620 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1621 		speed_cap |= ETH_LINK_SPEED_50G;
1622 	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1623 		speed_cap |= ETH_LINK_SPEED_100G;
1624 	dev_info->speed_capa = speed_cap;
1625 }
1626 
1627 /* return 0 means link status changed, -1 means not changed */
1628 int
1629 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
1630 {
1631 	struct qede_dev *qdev = eth_dev->data->dev_private;
1632 	struct ecore_dev *edev = &qdev->edev;
1633 	uint16_t link_duplex;
1634 	struct qed_link_output link;
1635 	struct rte_eth_link *curr = &eth_dev->data->dev_link;
1636 
1637 	memset(&link, 0, sizeof(struct qed_link_output));
1638 	qdev->ops->common->get_link(edev, &link);
1639 
1640 	/* Link Speed */
1641 	curr->link_speed = link.speed;
1642 
1643 	/* Link Mode */
1644 	switch (link.duplex) {
1645 	case QEDE_DUPLEX_HALF:
1646 		link_duplex = ETH_LINK_HALF_DUPLEX;
1647 		break;
1648 	case QEDE_DUPLEX_FULL:
1649 		link_duplex = ETH_LINK_FULL_DUPLEX;
1650 		break;
1651 	case QEDE_DUPLEX_UNKNOWN:
1652 	default:
1653 		link_duplex = -1;
1654 	}
1655 	curr->link_duplex = link_duplex;
1656 
1657 	/* Link Status */
1658 	curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
1659 
1660 	/* AN */
1661 	curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
1662 			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1663 
1664 	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
1665 		curr->link_speed, curr->link_duplex,
1666 		curr->link_autoneg, curr->link_status);
1667 
1668 	/* return 0 means link status changed, -1 means not changed */
1669 	return ((curr->link_status == link.link_up) ? -1 : 0);
1670 }
1671 
1672 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
1673 {
1674 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1675 	struct qede_dev *qdev = eth_dev->data->dev_private;
1676 	struct ecore_dev *edev = &qdev->edev;
1677 
1678 	PMD_INIT_FUNC_TRACE(edev);
1679 #endif
1680 
1681 	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
1682 
1683 	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1684 		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1685 
1686 	qed_configure_filter_rx_mode(eth_dev, type);
1687 }
1688 
1689 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
1690 {
1691 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1692 	struct qede_dev *qdev = eth_dev->data->dev_private;
1693 	struct ecore_dev *edev = &qdev->edev;
1694 
1695 	PMD_INIT_FUNC_TRACE(edev);
1696 #endif
1697 
1698 	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1699 		qed_configure_filter_rx_mode(eth_dev,
1700 				QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
1701 	else
1702 		qed_configure_filter_rx_mode(eth_dev,
1703 				QED_FILTER_RX_MODE_TYPE_REGULAR);
1704 }
1705 
1706 static void qede_poll_sp_sb_cb(void *param)
1707 {
1708 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1709 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1710 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1711 	int rc;
1712 
1713 	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
1714 	qede_interrupt_action(&edev->hwfns[1]);
1715 
1716 	rc = rte_eal_alarm_set(timer_period * US_PER_S,
1717 			       qede_poll_sp_sb_cb,
1718 			       (void *)eth_dev);
1719 	if (rc != 0) {
1720 		DP_ERR(edev, "Unable to start periodic"
1721 			     " timer rc %d\n", rc);
1722 		assert(false && "Unable to start periodic timer");
1723 	}
1724 }
1725 
1726 static void qede_dev_close(struct rte_eth_dev *eth_dev)
1727 {
1728 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1729 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1730 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1731 
1732 	PMD_INIT_FUNC_TRACE(edev);
1733 
1734 	/* dev_stop() cleans up fastpath resources in HW without releasing
1735 	 * DMA memories and SW structures, so that dev_start() can be called
1736 	 * by the app without reconfiguration. dev_close(), by contrast,
1737 	 * releases all resources so the device can be brought up anew.
1738 	 */
1739 	if (eth_dev->data->dev_started)
1740 		qede_dev_stop(eth_dev);
1741 
1742 	qede_stop_vport(edev);
1743 	qdev->vport_started = false;
1744 	qede_fdir_dealloc_resc(eth_dev);
1745 	qede_dealloc_fp_resc(eth_dev);
1746 
1747 	eth_dev->data->nb_rx_queues = 0;
1748 	eth_dev->data->nb_tx_queues = 0;
1749 
1750 	/* Bring the link down */
1751 	qede_dev_set_link_state(eth_dev, false);
1752 	qdev->ops->common->slowpath_stop(edev);
1753 	qdev->ops->common->remove(edev);
1754 	rte_intr_disable(&pci_dev->intr_handle);
1755 	rte_intr_callback_unregister(&pci_dev->intr_handle,
1756 				     qede_interrupt_handler, (void *)eth_dev);
1757 	if (ECORE_IS_CMT(edev))
1758 		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
1759 }
1760 
1761 static int
1762 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
1763 {
1764 	struct qede_dev *qdev = eth_dev->data->dev_private;
1765 	struct ecore_dev *edev = &qdev->edev;
1766 	struct ecore_eth_stats stats;
1767 	unsigned int i = 0, j = 0, qid;
1768 	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
1769 	struct qede_tx_queue *txq;
1770 
1771 	ecore_get_vport_stats(edev, &stats);
1772 
1773 	/* RX Stats */
1774 	eth_stats->ipackets = stats.common.rx_ucast_pkts +
1775 	    stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
1776 
1777 	eth_stats->ibytes = stats.common.rx_ucast_bytes +
1778 	    stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
1779 
1780 	eth_stats->ierrors = stats.common.rx_crc_errors +
1781 	    stats.common.rx_align_errors +
1782 	    stats.common.rx_carrier_errors +
1783 	    stats.common.rx_oversize_packets +
1784 	    stats.common.rx_jabbers + stats.common.rx_undersize_packets;
1785 
1786 	eth_stats->rx_nombuf = stats.common.no_buff_discards;
1787 
1788 	eth_stats->imissed = stats.common.mftag_filter_discards +
1789 	    stats.common.mac_filter_discards +
1790 	    stats.common.no_buff_discards +
1791 	    stats.common.brb_truncates + stats.common.brb_discards;
1792 
1793 	/* TX stats */
1794 	eth_stats->opackets = stats.common.tx_ucast_pkts +
1795 	    stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
1796 
1797 	eth_stats->obytes = stats.common.tx_ucast_bytes +
1798 	    stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
1799 
1800 	eth_stats->oerrors = stats.common.tx_err_drop_pkts;
1801 
1802 	/* Queue stats */
1803 	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1804 			       RTE_ETHDEV_QUEUE_STAT_CNTRS);
1805 	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
1806 			       RTE_ETHDEV_QUEUE_STAT_CNTRS);
1807 	if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
1808 	    (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
1809 		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
1810 		       "Not all the queue stats will be displayed. Set"
1811 		       " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
1812 		       " appropriately and retry.\n");
1813 
1814 	for_each_rss(qid) {
1815 		eth_stats->q_ipackets[i] =
1816 			*(uint64_t *)(
1817 				((char *)(qdev->fp_array[qid].rxq)) +
1818 				offsetof(struct qede_rx_queue,
1819 				rcv_pkts));
1820 		eth_stats->q_errors[i] =
1821 			*(uint64_t *)(
1822 				((char *)(qdev->fp_array[qid].rxq)) +
1823 				offsetof(struct qede_rx_queue,
1824 				rx_hw_errors)) +
1825 			*(uint64_t *)(
1826 				((char *)(qdev->fp_array[qid].rxq)) +
1827 				offsetof(struct qede_rx_queue,
1828 				rx_alloc_errors));
1829 		i++;
1830 		if (i == rxq_stat_cntrs)
1831 			break;
1832 	}
1833 
1834 	for_each_tss(qid) {
1835 		txq = qdev->fp_array[qid].txq;
		eth_stats->q_opackets[j] =
			*(uint64_t *)(((char *)txq) +
				      offsetof(struct qede_tx_queue,
					       xmit_pkts));
1841 		j++;
1842 		if (j == txq_stat_cntrs)
1843 			break;
1844 	}
1845 
1846 	return 0;
1847 }
1848 
static unsigned
qede_get_xstats_count(struct qede_dev *qdev)
{
1851 	if (ECORE_IS_BB(&qdev->edev))
1852 		return RTE_DIM(qede_xstats_strings) +
1853 		       RTE_DIM(qede_bb_xstats_strings) +
1854 		       (RTE_DIM(qede_rxq_xstats_strings) *
1855 			RTE_MIN(QEDE_RSS_COUNT(qdev),
1856 				RTE_ETHDEV_QUEUE_STAT_CNTRS));
1857 	else
1858 		return RTE_DIM(qede_xstats_strings) +
1859 		       RTE_DIM(qede_ah_xstats_strings) +
1860 		       (RTE_DIM(qede_rxq_xstats_strings) *
1861 			RTE_MIN(QEDE_RSS_COUNT(qdev),
1862 				RTE_ETHDEV_QUEUE_STAT_CNTRS));
1863 }
1864 
1865 static int
1866 qede_get_xstats_names(struct rte_eth_dev *dev,
1867 		      struct rte_eth_xstat_name *xstats_names,
1868 		      __rte_unused unsigned int limit)
1869 {
1870 	struct qede_dev *qdev = dev->data->dev_private;
1871 	struct ecore_dev *edev = &qdev->edev;
1872 	const unsigned int stat_cnt = qede_get_xstats_count(qdev);
1873 	unsigned int i, qid, stat_idx = 0;
1874 	unsigned int rxq_stat_cntrs;
1875 
1876 	if (xstats_names != NULL) {
1877 		for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1878 			snprintf(xstats_names[stat_idx].name,
1879 				sizeof(xstats_names[stat_idx].name),
1880 				"%s",
1881 				qede_xstats_strings[i].name);
1882 			stat_idx++;
1883 		}
1884 
1885 		if (ECORE_IS_BB(edev)) {
1886 			for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1887 				snprintf(xstats_names[stat_idx].name,
1888 					sizeof(xstats_names[stat_idx].name),
1889 					"%s",
1890 					qede_bb_xstats_strings[i].name);
1891 				stat_idx++;
1892 			}
1893 		} else {
1894 			for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1895 				snprintf(xstats_names[stat_idx].name,
1896 					sizeof(xstats_names[stat_idx].name),
1897 					"%s",
1898 					qede_ah_xstats_strings[i].name);
1899 				stat_idx++;
1900 			}
1901 		}
1902 
1903 		rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1904 					 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1905 		for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1906 			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1907 				snprintf(xstats_names[stat_idx].name,
1908 					sizeof(xstats_names[stat_idx].name),
1909 					"%.4s%d%s",
1910 					qede_rxq_xstats_strings[i].name, qid,
1911 					qede_rxq_xstats_strings[i].name + 4);
1912 				stat_idx++;
1913 			}
1914 		}
1915 	}
1916 
1917 	return stat_cnt;
1918 }
1919 
1920 static int
1921 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1922 		unsigned int n)
1923 {
1924 	struct qede_dev *qdev = dev->data->dev_private;
1925 	struct ecore_dev *edev = &qdev->edev;
1926 	struct ecore_eth_stats stats;
1927 	const unsigned int num = qede_get_xstats_count(qdev);
1928 	unsigned int i, qid, stat_idx = 0;
1929 	unsigned int rxq_stat_cntrs;
1930 
1931 	if (n < num)
1932 		return num;
1933 
1934 	ecore_get_vport_stats(edev, &stats);
1935 
1936 	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1937 		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
1938 					     qede_xstats_strings[i].offset);
1939 		xstats[stat_idx].id = stat_idx;
1940 		stat_idx++;
1941 	}
1942 
1943 	if (ECORE_IS_BB(edev)) {
1944 		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1945 			xstats[stat_idx].value =
1946 					*(uint64_t *)(((char *)&stats) +
1947 					qede_bb_xstats_strings[i].offset);
1948 			xstats[stat_idx].id = stat_idx;
1949 			stat_idx++;
1950 		}
1951 	} else {
1952 		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1953 			xstats[stat_idx].value =
1954 					*(uint64_t *)(((char *)&stats) +
1955 					qede_ah_xstats_strings[i].offset);
1956 			xstats[stat_idx].id = stat_idx;
1957 			stat_idx++;
1958 		}
1959 	}
1960 
1961 	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1962 				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (qid = 0; qid < rxq_stat_cntrs; qid++) {
		for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
			xstats[stat_idx].value = *(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				 qede_rxq_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}
1974 
1975 	return stat_idx;
1976 }
1977 
1978 static void
1979 qede_reset_xstats(struct rte_eth_dev *dev)
1980 {
1981 	struct qede_dev *qdev = dev->data->dev_private;
1982 	struct ecore_dev *edev = &qdev->edev;
1983 
1984 	ecore_reset_vport_stats(edev);
1985 	qede_reset_queue_stats(qdev, true);
1986 }
1987 
1988 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
1989 {
1990 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1991 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1992 	struct qed_link_params link_params;
1993 	int rc;
1994 
1995 	DP_INFO(edev, "setting link state %d\n", link_up);
1996 	memset(&link_params, 0, sizeof(link_params));
1997 	link_params.link_up = link_up;
1998 	rc = qdev->ops->common->set_link(edev, &link_params);
1999 	if (rc != ECORE_SUCCESS)
2000 		DP_ERR(edev, "Unable to set link state %d\n", link_up);
2001 
2002 	return rc;
2003 }
2004 
2005 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
2006 {
2007 	return qede_dev_set_link_state(eth_dev, true);
2008 }
2009 
2010 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
2011 {
2012 	return qede_dev_set_link_state(eth_dev, false);
2013 }
2014 
2015 static void qede_reset_stats(struct rte_eth_dev *eth_dev)
2016 {
2017 	struct qede_dev *qdev = eth_dev->data->dev_private;
2018 	struct ecore_dev *edev = &qdev->edev;
2019 
2020 	ecore_reset_vport_stats(edev);
2021 	qede_reset_queue_stats(qdev, false);
2022 }
2023 
2024 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
2025 {
2026 	enum qed_filter_rx_mode_type type =
2027 	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
2028 
2029 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
2030 		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
2031 
2032 	qed_configure_filter_rx_mode(eth_dev, type);
2033 }
2034 
2035 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
2036 {
2037 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
2038 		qed_configure_filter_rx_mode(eth_dev,
2039 				QED_FILTER_RX_MODE_TYPE_PROMISC);
2040 	else
2041 		qed_configure_filter_rx_mode(eth_dev,
2042 				QED_FILTER_RX_MODE_TYPE_REGULAR);
2043 }
2044 
2045 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
2046 			      struct rte_eth_fc_conf *fc_conf)
2047 {
2048 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2049 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2050 	struct qed_link_output current_link;
2051 	struct qed_link_params params;
2052 
2053 	memset(&current_link, 0, sizeof(current_link));
2054 	qdev->ops->common->get_link(edev, &current_link);
2055 
2056 	memset(&params, 0, sizeof(params));
2057 	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
2058 	if (fc_conf->autoneg) {
2059 		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
2060 			DP_ERR(edev, "Autoneg not supported\n");
2061 			return -EINVAL;
2062 		}
2063 		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
2064 	}
2065 
2066 	/* Pause is assumed to be supported (SUPPORTED_Pause) */
2067 	if (fc_conf->mode == RTE_FC_FULL)
2068 		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
2069 					QED_LINK_PAUSE_RX_ENABLE);
2070 	if (fc_conf->mode == RTE_FC_TX_PAUSE)
2071 		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
2072 	if (fc_conf->mode == RTE_FC_RX_PAUSE)
2073 		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
2074 
2075 	params.link_up = true;
2076 	(void)qdev->ops->common->set_link(edev, &params);
2077 
2078 	return 0;
2079 }
2080 
2081 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
2082 			      struct rte_eth_fc_conf *fc_conf)
2083 {
2084 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2085 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2086 	struct qed_link_output current_link;
2087 
2088 	memset(&current_link, 0, sizeof(current_link));
2089 	qdev->ops->common->get_link(edev, &current_link);
2090 
2091 	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
2092 		fc_conf->autoneg = true;
2093 
2094 	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
2095 					 QED_LINK_PAUSE_TX_ENABLE))
2096 		fc_conf->mode = RTE_FC_FULL;
2097 	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
2098 		fc_conf->mode = RTE_FC_RX_PAUSE;
2099 	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
2100 		fc_conf->mode = RTE_FC_TX_PAUSE;
2101 	else
2102 		fc_conf->mode = RTE_FC_NONE;
2103 
2104 	return 0;
2105 }
2106 
2107 static const uint32_t *
2108 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
2109 {
2110 	static const uint32_t ptypes[] = {
2111 		RTE_PTYPE_L2_ETHER,
2112 		RTE_PTYPE_L2_ETHER_VLAN,
2113 		RTE_PTYPE_L3_IPV4,
2114 		RTE_PTYPE_L3_IPV6,
2115 		RTE_PTYPE_L4_TCP,
2116 		RTE_PTYPE_L4_UDP,
2117 		RTE_PTYPE_TUNNEL_VXLAN,
2118 		RTE_PTYPE_L4_FRAG,
2119 		RTE_PTYPE_TUNNEL_GENEVE,
2120 		RTE_PTYPE_TUNNEL_GRE,
2121 		/* Inner */
2122 		RTE_PTYPE_INNER_L2_ETHER,
2123 		RTE_PTYPE_INNER_L2_ETHER_VLAN,
2124 		RTE_PTYPE_INNER_L3_IPV4,
2125 		RTE_PTYPE_INNER_L3_IPV6,
2126 		RTE_PTYPE_INNER_L4_TCP,
2127 		RTE_PTYPE_INNER_L4_UDP,
2128 		RTE_PTYPE_INNER_L4_FRAG,
2129 		RTE_PTYPE_UNKNOWN
2130 	};
2131 
2132 	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
2133 		return ptypes;
2134 
2135 	return NULL;
2136 }
2137 
2138 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
2139 {
2140 	*rss_caps = 0;
2141 	*rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
2142 	*rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
2143 	*rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
2144 	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
2145 	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
2146 	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
2147 	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
2148 	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
2149 }
2150 
2151 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
2152 			 struct rte_eth_rss_conf *rss_conf)
2153 {
2154 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2155 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2156 	struct ecore_sp_vport_update_params vport_update_params;
2157 	struct ecore_rss_params rss_params;
2158 	struct ecore_hwfn *p_hwfn;
2159 	uint32_t *key = (uint32_t *)rss_conf->rss_key;
2160 	uint64_t hf = rss_conf->rss_hf;
2161 	uint8_t len = rss_conf->rss_key_len;
2162 	uint8_t idx;
2163 	uint8_t i;
2164 	int rc;
2165 
2166 	memset(&vport_update_params, 0, sizeof(vport_update_params));
2167 	memset(&rss_params, 0, sizeof(rss_params));
2168 
2169 	DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
2170 		(unsigned long)hf, len, key);
2171 
2172 	if (hf != 0) {
2173 		/* Enabling RSS */
2174 		DP_INFO(edev, "Enabling rss\n");
2175 
2176 		/* RSS caps */
2177 		qede_init_rss_caps(&rss_params.rss_caps, hf);
2178 		rss_params.update_rss_capabilities = 1;
2179 
2180 		/* RSS hash key */
2181 		if (key) {
2182 			if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
2183 				DP_ERR(edev, "RSS key length exceeds limit\n");
2184 				return -EINVAL;
2185 			}
2186 			DP_INFO(edev, "Applying user supplied hash key\n");
2187 			rss_params.update_rss_key = 1;
2188 			memcpy(&rss_params.rss_key, key, len);
2189 		}
2190 		rss_params.rss_enable = 1;
2191 	}
2192 
2193 	rss_params.update_rss_config = 1;
	/* Indirection table size, log2; 7 => 128 entries, per device capability */
2195 	rss_params.rss_table_size_log = 7;
2196 	vport_update_params.vport_id = 0;
2197 	/* pass the L2 handles instead of qids */
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
2199 		idx = qdev->rss_ind_table[i];
2200 		rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
2201 	}
2202 	vport_update_params.rss_params = &rss_params;
2203 
2204 	for_each_hwfn(edev, i) {
2205 		p_hwfn = &edev->hwfns[i];
2206 		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2207 		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2208 					   ECORE_SPQ_MODE_EBLOCK, NULL);
2209 		if (rc) {
2210 			DP_ERR(edev, "vport-update for RSS failed\n");
2211 			return rc;
2212 		}
2213 	}
2214 	qdev->rss_enable = rss_params.rss_enable;
2215 
2216 	/* Update local structure for hash query */
2217 	qdev->rss_conf.rss_hf = hf;
2218 	qdev->rss_conf.rss_key_len = len;
2219 	if (qdev->rss_enable) {
		if (qdev->rss_conf.rss_key == NULL) {
2221 			qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
2222 			if (qdev->rss_conf.rss_key == NULL) {
2223 				DP_ERR(edev, "No memory to store RSS key\n");
2224 				return -ENOMEM;
2225 			}
2226 		}
2227 		if (key && len) {
2228 			DP_INFO(edev, "Storing RSS key\n");
2229 			memcpy(qdev->rss_conf.rss_key, key, len);
2230 		}
2231 	} else if (!qdev->rss_enable && len == 0) {
2232 		if (qdev->rss_conf.rss_key) {
2233 			free(qdev->rss_conf.rss_key);
2234 			qdev->rss_conf.rss_key = NULL;
2235 			DP_INFO(edev, "Free RSS key\n");
2236 		}
2237 	}
2238 
2239 	return 0;
2240 }
2241 
2242 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
2243 			   struct rte_eth_rss_conf *rss_conf)
2244 {
2245 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2246 
2247 	rss_conf->rss_hf = qdev->rss_conf.rss_hf;
2248 	rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
2249 
2250 	if (rss_conf->rss_key && qdev->rss_conf.rss_key)
2251 		memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
2252 		       rss_conf->rss_key_len);
2253 	return 0;
2254 }
2255 
2256 static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
2257 				    struct ecore_rss_params *rss)
2258 {
2259 	int i, fn;
	bool rss_mode = true; /* enable */
2261 	struct ecore_queue_cid *cid;
2262 	struct ecore_rss_params *t_rss;
2263 
	/* In a regular scenario, we'd simply take the input handlers.
	 * But in CMT, we have to split the handlers according to the
	 * engine they were configured on, and then determine whether
	 * RSS is really required, since two queues on a CMT device do
	 * not require RSS.
	 */
2270 
2271 	/* CMT should be round-robin */
2272 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
2273 		cid = rss->rss_ind_table[i];
2274 
2275 		if (cid->p_owner == ECORE_LEADING_HWFN(edev))
2276 			t_rss = &rss[0];
2277 		else
2278 			t_rss = &rss[1];
2279 
2280 		t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
2281 	}
2282 
2283 	t_rss = &rss[1];
2284 	t_rss->update_rss_ind_table = 1;
2285 	t_rss->rss_table_size_log = 7;
2286 	t_rss->update_rss_config = 1;
2287 
2288 	/* Make sure RSS is actually required */
2289 	for_each_hwfn(edev, fn) {
2290 		for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
2291 		     i++) {
2292 			if (rss[fn].rss_ind_table[i] !=
2293 			    rss[fn].rss_ind_table[0])
2294 				break;
2295 		}
2296 
2297 		if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
2298 			DP_INFO(edev,
2299 				"CMT - 1 queue per-hwfn; Disabling RSS\n");
2300 			rss_mode = 0;
2301 			goto out;
2302 		}
2303 	}
2304 
2305 out:
2306 	t_rss->rss_enable = rss_mode;
2307 
2308 	return rss_mode;
2309 }
2310 
2311 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
2312 			 struct rte_eth_rss_reta_entry64 *reta_conf,
2313 			 uint16_t reta_size)
2314 {
2315 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2316 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2317 	struct ecore_sp_vport_update_params vport_update_params;
2318 	struct ecore_rss_params *params;
2319 	struct ecore_hwfn *p_hwfn;
2320 	uint16_t i, idx, shift;
2321 	uint8_t entry;
2322 	int rc = 0;
2323 
2324 	if (reta_size > ETH_RSS_RETA_SIZE_128) {
2325 		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
2326 		       reta_size);
2327 		return -EINVAL;
2328 	}
2329 
2330 	memset(&vport_update_params, 0, sizeof(vport_update_params));
2331 	params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
2332 			     RTE_CACHE_LINE_SIZE);
2333 	if (params == NULL) {
2334 		DP_ERR(edev, "failed to allocate memory\n");
2335 		return -ENOMEM;
2336 	}
2337 
2338 	for (i = 0; i < reta_size; i++) {
2339 		idx = i / RTE_RETA_GROUP_SIZE;
2340 		shift = i % RTE_RETA_GROUP_SIZE;
2341 		if (reta_conf[idx].mask & (1ULL << shift)) {
2342 			entry = reta_conf[idx].reta[shift];
2343 			/* Pass rxq handles to ecore */
2344 			params->rss_ind_table[i] =
2345 					qdev->fp_array[entry].rxq->handle;
2346 			/* Update the local copy for RETA query command */
2347 			qdev->rss_ind_table[i] = entry;
2348 		}
2349 	}
2350 
2351 	params->update_rss_ind_table = 1;
2352 	params->rss_table_size_log = 7;
2353 	params->update_rss_config = 1;
2354 
2355 	/* Fix up RETA for CMT mode device */
2356 	if (ECORE_IS_CMT(edev))
2357 		qdev->rss_enable = qede_update_rss_parm_cmt(edev,
2358 							    params);
2359 	vport_update_params.vport_id = 0;
2360 	/* Use the current value of rss_enable */
2361 	params->rss_enable = qdev->rss_enable;
2362 	vport_update_params.rss_params = params;
2363 
2364 	for_each_hwfn(edev, i) {
2365 		p_hwfn = &edev->hwfns[i];
2366 		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2367 		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2368 					   ECORE_SPQ_MODE_EBLOCK, NULL);
2369 		if (rc) {
2370 			DP_ERR(edev, "vport-update for RSS failed\n");
2371 			goto out;
2372 		}
2373 	}
2374 
2375 out:
2376 	rte_free(params);
2377 	return rc;
2378 }
2379 
2380 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
2381 			       struct rte_eth_rss_reta_entry64 *reta_conf,
2382 			       uint16_t reta_size)
2383 {
2384 	struct qede_dev *qdev = eth_dev->data->dev_private;
2385 	struct ecore_dev *edev = &qdev->edev;
2386 	uint16_t i, idx, shift;
2387 	uint8_t entry;
2388 
2389 	if (reta_size > ETH_RSS_RETA_SIZE_128) {
2390 		DP_ERR(edev, "reta_size %d is not supported\n",
2391 		       reta_size);
2392 		return -EINVAL;
2393 	}
2394 
2395 	for (i = 0; i < reta_size; i++) {
2396 		idx = i / RTE_RETA_GROUP_SIZE;
2397 		shift = i % RTE_RETA_GROUP_SIZE;
2398 		if (reta_conf[idx].mask & (1ULL << shift)) {
2399 			entry = qdev->rss_ind_table[i];
2400 			reta_conf[idx].reta[shift] = entry;
2401 		}
2402 	}
2403 
2404 	return 0;
2405 }
2406 
2409 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
2410 {
2411 	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
2412 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2413 	struct rte_eth_dev_info dev_info = {0};
2414 	struct qede_fastpath *fp;
2415 	uint32_t max_rx_pkt_len;
2416 	uint32_t frame_size;
2417 	uint16_t rx_buf_size;
2418 	uint16_t bufsz;
2419 	bool restart = false;
2420 	int i;
2421 
2422 	PMD_INIT_FUNC_TRACE(edev);
2423 	qede_dev_info_get(dev, &dev_info);
2424 	max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2425 	frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
2426 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
2427 		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
2428 		       mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
2429 			ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
2430 		return -EINVAL;
2431 	}
2432 	if (!dev->data->scattered_rx &&
2433 	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
2434 		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
2435 			dev->data->min_rx_buf_size);
2436 		return -EINVAL;
2437 	}
	/* Temporarily replace I/O functions with dummy ones. They cannot
	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
	 */
2441 	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
2442 	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
2443 	if (dev->data->dev_started) {
2444 		dev->data->dev_started = 0;
2445 		qede_dev_stop(dev);
2446 		restart = true;
2447 	} else {
2448 		if (IS_PF(edev))
2449 			qede_mac_addr_remove(dev, 0);
2450 	}
	rte_delay_ms(1000); /* wait for in-flight traffic to drain (assumption) */
2452 	qede_start_vport(qdev, mtu); /* Recreate vport */
2453 	qdev->mtu = mtu;
2454 
2455 	/* Fix up RX buf size for all queues of the port */
2456 	for_each_rss(i) {
2457 		fp = &qdev->fp_array[i];
2458 		if (fp->rxq != NULL) {
2459 			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
2460 				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
2461 			if (dev->data->scattered_rx)
2462 				rx_buf_size = bufsz + ETHER_HDR_LEN +
2463 					      ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
2464 			else
2465 				rx_buf_size = frame_size;
2466 			rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
2467 			fp->rxq->rx_buf_size = rx_buf_size;
2468 			DP_INFO(edev, "RX buffer size %u\n", rx_buf_size);
2469 		}
2470 	}
2471 	if (max_rx_pkt_len > ETHER_MAX_LEN)
2472 		dev->data->dev_conf.rxmode.jumbo_frame = 1;
2473 	else
2474 		dev->data->dev_conf.rxmode.jumbo_frame = 0;
2475 
2476 	/* Restore config lost due to vport stop */
2477 	if (IS_PF(edev))
2478 		qede_mac_addr_set(dev, &qdev->primary_mac);
2479 
2480 	if (dev->data->promiscuous)
2481 		qede_promiscuous_enable(dev);
2482 	else
2483 		qede_promiscuous_disable(dev);
2484 
2485 	if (dev->data->all_multicast)
2486 		qede_allmulticast_enable(dev);
2487 	else
2488 		qede_allmulticast_disable(dev);
2489 
2490 	qede_vlan_offload_set(dev, qdev->vlan_offload_mask);
2491 
2492 	if (!dev->data->dev_started && restart) {
2493 		qede_dev_start(dev);
2494 		dev->data->dev_started = 1;
2495 	}
2496 
2497 	/* update max frame size */
2498 	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
2499 	/* Reassign back */
2500 	dev->rx_pkt_burst = qede_recv_pkts;
2501 	dev->tx_pkt_burst = qede_xmit_pkts;
2502 
2503 	return 0;
2504 }
2505 
2506 static int
2507 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
2508 		      struct rte_eth_udp_tunnel *tunnel_udp)
2509 {
2510 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2511 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2512 	struct ecore_tunnel_info tunn; /* @DPDK */
2513 	uint16_t udp_port;
2514 	int rc;
2515 
2516 	PMD_INIT_FUNC_TRACE(edev);
2517 
2518 	memset(&tunn, 0, sizeof(tunn));
2519 
2520 	switch (tunnel_udp->prot_type) {
2521 	case RTE_TUNNEL_TYPE_VXLAN:
2522 		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
2523 			DP_ERR(edev, "UDP port %u doesn't exist\n",
2524 				tunnel_udp->udp_port);
2525 			return ECORE_INVAL;
2526 		}
2527 		udp_port = 0;
2528 
2529 		tunn.vxlan_port.b_update_port = true;
2530 		tunn.vxlan_port.port = udp_port;
2531 
2532 		rc = qede_tunnel_update(qdev, &tunn);
2533 		if (rc != ECORE_SUCCESS) {
2534 			DP_ERR(edev, "Unable to config UDP port %u\n",
2535 			       tunn.vxlan_port.port);
2536 			return rc;
2537 		}
2538 
2539 		qdev->vxlan.udp_port = udp_port;
		/* If the request is to delete the UDP port and the number of
		 * VXLAN filters has reached 0, then VXLAN offload can be
		 * disabled.
		 */
2544 		if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
2545 			return qede_vxlan_enable(eth_dev,
2546 					ECORE_TUNN_CLSS_MAC_VLAN, false);
2547 
2548 		break;
2549 	case RTE_TUNNEL_TYPE_GENEVE:
2550 		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
2551 			DP_ERR(edev, "UDP port %u doesn't exist\n",
2552 				tunnel_udp->udp_port);
2553 			return ECORE_INVAL;
2554 		}
2555 
2556 		udp_port = 0;
2557 
2558 		tunn.geneve_port.b_update_port = true;
2559 		tunn.geneve_port.port = udp_port;
2560 
2561 		rc = qede_tunnel_update(qdev, &tunn);
2562 		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.geneve_port.port);
2565 			return rc;
2566 		}
2567 
		qdev->geneve.udp_port = udp_port;
		/* If the request is to delete the UDP port and the number of
		 * GENEVE filters has reached 0, then GENEVE offload can be
		 * disabled.
		 */
2573 		if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
2574 			return qede_geneve_enable(eth_dev,
2575 					ECORE_TUNN_CLSS_MAC_VLAN, false);
2576 
2577 		break;
2578 
2579 	default:
2580 		return ECORE_INVAL;
2581 	}
2582 
	return 0;
}

static int
2587 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
2588 		      struct rte_eth_udp_tunnel *tunnel_udp)
2589 {
2590 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2591 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2592 	struct ecore_tunnel_info tunn; /* @DPDK */
2593 	uint16_t udp_port;
2594 	int rc;
2595 
2596 	PMD_INIT_FUNC_TRACE(edev);
2597 
2598 	memset(&tunn, 0, sizeof(tunn));
2599 
2600 	switch (tunnel_udp->prot_type) {
2601 	case RTE_TUNNEL_TYPE_VXLAN:
2602 		if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
2603 			DP_INFO(edev,
2604 				"UDP port %u for VXLAN was already configured\n",
2605 				tunnel_udp->udp_port);
2606 			return ECORE_SUCCESS;
2607 		}
2608 
		/* Enable VXLAN tunnel with default MAC/VLAN classification if
		 * it was not enabled earlier, e.g. when a VXLAN filter was
		 * added before the UDP port update.
		 */
2613 		if (!qdev->vxlan.enable) {
2614 			rc = qede_vxlan_enable(eth_dev,
2615 				ECORE_TUNN_CLSS_MAC_VLAN, true);
2616 			if (rc != ECORE_SUCCESS) {
2617 				DP_ERR(edev, "Failed to enable VXLAN "
2618 					"prior to updating UDP port\n");
2619 				return rc;
2620 			}
2621 		}
2622 		udp_port = tunnel_udp->udp_port;
2623 
2624 		tunn.vxlan_port.b_update_port = true;
2625 		tunn.vxlan_port.port = udp_port;
2626 
2627 		rc = qede_tunnel_update(qdev, &tunn);
2628 		if (rc != ECORE_SUCCESS) {
2629 			DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
2630 			       udp_port);
2631 			return rc;
2632 		}
2633 
2634 		DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
2635 
2636 		qdev->vxlan.udp_port = udp_port;
2637 		break;
2638 	case RTE_TUNNEL_TYPE_GENEVE:
2639 		if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
2640 			DP_INFO(edev,
2641 				"UDP port %u for GENEVE was already configured\n",
2642 				tunnel_udp->udp_port);
2643 			return ECORE_SUCCESS;
2644 		}
2645 
		/* Enable GENEVE tunnel with default MAC/VLAN classification if
		 * it was not enabled earlier, e.g. when a GENEVE filter was
		 * added before the UDP port update.
		 */
2650 		if (!qdev->geneve.enable) {
2651 			rc = qede_geneve_enable(eth_dev,
2652 				ECORE_TUNN_CLSS_MAC_VLAN, true);
2653 			if (rc != ECORE_SUCCESS) {
2654 				DP_ERR(edev, "Failed to enable GENEVE "
2655 					"prior to updating UDP port\n");
2656 				return rc;
2657 			}
2658 		}
2659 		udp_port = tunnel_udp->udp_port;
2660 
2661 		tunn.geneve_port.b_update_port = true;
2662 		tunn.geneve_port.port = udp_port;
2663 
2664 		rc = qede_tunnel_update(qdev, &tunn);
2665 		if (rc != ECORE_SUCCESS) {
2666 			DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
2667 			       udp_port);
2668 			return rc;
2669 		}
2670 
2671 		DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
2672 
2673 		qdev->geneve.udp_port = udp_port;
2674 		break;
2675 	default:
2676 		return ECORE_INVAL;
2677 	}
2678 
2679 	return 0;
2680 }
2681 
2682 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
2683 				       uint32_t *clss, char *str)
2684 {
2685 	uint16_t j;
2686 	*clss = MAX_ECORE_TUNN_CLSS;
2687 
2688 	for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
2689 		if (filter == qede_tunn_types[j].rte_filter_type) {
2690 			*type = qede_tunn_types[j].qede_type;
2691 			*clss = qede_tunn_types[j].qede_tunn_clss;
2692 			strcpy(str, qede_tunn_types[j].string);
2693 			return;
2694 		}
2695 	}
2696 }
2697 
2698 static int
2699 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
2700 			      const struct rte_eth_tunnel_filter_conf *conf,
2701 			      uint32_t type)
2702 {
	/* Init common ucast params first */
2704 	qede_set_ucast_cmn_params(ucast);
2705 
2706 	/* Copy out the required fields based on classification type */
2707 	ucast->type = type;
2708 
2709 	switch (type) {
2710 	case ECORE_FILTER_VNI:
2711 		ucast->vni = conf->tenant_id;
2712 	break;
2713 	case ECORE_FILTER_INNER_VLAN:
2714 		ucast->vlan = conf->inner_vlan;
2715 	break;
2716 	case ECORE_FILTER_MAC:
2717 		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2718 		       ETHER_ADDR_LEN);
2719 	break;
2720 	case ECORE_FILTER_INNER_MAC:
2721 		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2722 		       ETHER_ADDR_LEN);
2723 	break;
2724 	case ECORE_FILTER_MAC_VNI_PAIR:
2725 		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2726 			ETHER_ADDR_LEN);
2727 		ucast->vni = conf->tenant_id;
2728 	break;
2729 	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
2730 		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2731 			ETHER_ADDR_LEN);
2732 		ucast->vni = conf->tenant_id;
2733 	break;
2734 	case ECORE_FILTER_INNER_PAIR:
2735 		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2736 			ETHER_ADDR_LEN);
2737 		ucast->vlan = conf->inner_vlan;
2738 	break;
2739 	default:
2740 		return -EINVAL;
2741 	}
2742 
2743 	return ECORE_SUCCESS;
2744 }
2745 
2746 static int
2747 _qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
2748 			 const struct rte_eth_tunnel_filter_conf *conf,
2749 			 __attribute__((unused)) enum rte_filter_op filter_op,
2750 			 enum ecore_tunn_clss *clss,
2751 			 bool add)
2752 {
2753 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2754 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2755 	struct ecore_filter_ucast ucast = {0};
2756 	enum ecore_filter_ucast_type type;
2757 	uint16_t filter_type = 0;
2758 	char str[80];
2759 	int rc;
2760 
2761 	filter_type = conf->filter_type;
2762 	/* Determine if the given filter classification is supported */
2763 	qede_get_ecore_tunn_params(filter_type, &type, clss, str);
2764 	if (*clss == MAX_ECORE_TUNN_CLSS) {
2765 		DP_ERR(edev, "Unsupported filter type\n");
2766 		return -EINVAL;
2767 	}
2768 	/* Init tunnel ucast params */
2769 	rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
2770 	if (rc != ECORE_SUCCESS) {
2771 		DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
2772 		conf->filter_type);
2773 		return rc;
2774 	}
2775 	DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
2776 		str, filter_op, ucast.type);
2777 
2778 	ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
2779 
2780 	/* Skip MAC/VLAN if filter is based on VNI */
2781 	if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
2782 		rc = qede_mac_int_ops(eth_dev, &ucast, add);
2783 		if ((rc == 0) && add) {
2784 			/* Enable accept anyvlan */
2785 			qede_config_accept_any_vlan(qdev, true);
2786 		}
2787 	} else {
2788 		rc = qede_ucast_filter(eth_dev, &ucast, add);
2789 		if (rc == 0)
2790 			rc = ecore_filter_ucast_cmd(edev, &ucast,
2791 					    ECORE_SPQ_MODE_CB, NULL);
2792 	}
2793 
2794 	return rc;
2795 }
2796 
2797 static int
2798 qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
2799 			enum rte_filter_op filter_op,
2800 			const struct rte_eth_tunnel_filter_conf *conf)
2801 {
2802 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2803 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2804 	enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
2805 	bool add;
2806 	int rc;
2807 
2808 	PMD_INIT_FUNC_TRACE(edev);
2809 
2810 	switch (filter_op) {
2811 	case RTE_ETH_FILTER_ADD:
2812 		add = true;
2813 		break;
2814 	case RTE_ETH_FILTER_DELETE:
2815 		add = false;
2816 		break;
2817 	default:
2818 		DP_ERR(edev, "Unsupported operation %d\n", filter_op);
2819 		return -EINVAL;
2820 	}
2821 
2822 	if (IS_VF(edev))
2823 		return qede_tunn_enable(eth_dev,
2824 					ECORE_TUNN_CLSS_MAC_VLAN,
2825 					conf->tunnel_type, add);
2826 
2827 	rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
2828 	if (rc != ECORE_SUCCESS)
2829 		return rc;
2830 
2831 	if (add) {
2832 		if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
2833 			qdev->vxlan.num_filters++;
2834 			qdev->vxlan.filter_type = conf->filter_type;
2835 		} else { /* GENEVE */
2836 			qdev->geneve.num_filters++;
2837 			qdev->geneve.filter_type = conf->filter_type;
2838 		}
2839 
2840 		if (!qdev->vxlan.enable || !qdev->geneve.enable ||
2841 		    !qdev->ipgre.enable)
2842 			return qede_tunn_enable(eth_dev, clss,
2843 						conf->tunnel_type,
2844 						true);
2845 	} else {
2846 		if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
2847 			qdev->vxlan.num_filters--;
		else /* GENEVE */
2849 			qdev->geneve.num_filters--;
2850 
		/* Disable the offload once no filters of the tunnel type remain */
2852 		if ((qdev->vxlan.num_filters == 0) ||
2853 		    (qdev->geneve.num_filters == 0))
2854 			return qede_tunn_enable(eth_dev, clss,
2855 						conf->tunnel_type,
2856 						false);
2857 	}
2858 
2859 	return 0;
2860 }
2861 
2862 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
2863 			 enum rte_filter_type filter_type,
2864 			 enum rte_filter_op filter_op,
2865 			 void *arg)
2866 {
2867 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2868 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2869 	struct rte_eth_tunnel_filter_conf *filter_conf =
2870 			(struct rte_eth_tunnel_filter_conf *)arg;
2871 
2872 	switch (filter_type) {
2873 	case RTE_ETH_FILTER_TUNNEL:
2874 		switch (filter_conf->tunnel_type) {
2875 		case RTE_TUNNEL_TYPE_VXLAN:
2876 		case RTE_TUNNEL_TYPE_GENEVE:
2877 		case RTE_TUNNEL_TYPE_IP_IN_GRE:
			DP_INFO(edev,
				"Packet steering to the specified Rx queue"
				" is not supported with UDP tunneling\n");
			return qede_tunn_filter_config(eth_dev, filter_op,
						       filter_conf);
2883 		case RTE_TUNNEL_TYPE_TEREDO:
2884 		case RTE_TUNNEL_TYPE_NVGRE:
2885 		case RTE_L2_TUNNEL_TYPE_E_TAG:
2886 			DP_ERR(edev, "Unsupported tunnel type %d\n",
2887 				filter_conf->tunnel_type);
2888 			return -EINVAL;
2889 		case RTE_TUNNEL_TYPE_NONE:
2890 		default:
2891 			return 0;
2892 		}
2893 		break;
2894 	case RTE_ETH_FILTER_FDIR:
2895 		return qede_fdir_filter_conf(eth_dev, filter_op, arg);
2896 	case RTE_ETH_FILTER_NTUPLE:
2897 		return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
2898 	case RTE_ETH_FILTER_MACVLAN:
2899 	case RTE_ETH_FILTER_ETHERTYPE:
2900 	case RTE_ETH_FILTER_FLEXIBLE:
2901 	case RTE_ETH_FILTER_SYN:
2902 	case RTE_ETH_FILTER_HASH:
2903 	case RTE_ETH_FILTER_L2_TUNNEL:
2904 	case RTE_ETH_FILTER_MAX:
2905 	default:
2906 		DP_ERR(edev, "Unsupported filter type %d\n",
2907 			filter_type);
2908 		return -EINVAL;
2909 	}
2910 
2911 	return 0;
2912 }
2913 
2914 static const struct eth_dev_ops qede_eth_dev_ops = {
2915 	.dev_configure = qede_dev_configure,
2916 	.dev_infos_get = qede_dev_info_get,
2917 	.rx_queue_setup = qede_rx_queue_setup,
2918 	.rx_queue_release = qede_rx_queue_release,
2919 	.tx_queue_setup = qede_tx_queue_setup,
2920 	.tx_queue_release = qede_tx_queue_release,
2921 	.dev_start = qede_dev_start,
2922 	.dev_set_link_up = qede_dev_set_link_up,
2923 	.dev_set_link_down = qede_dev_set_link_down,
2924 	.link_update = qede_link_update,
2925 	.promiscuous_enable = qede_promiscuous_enable,
2926 	.promiscuous_disable = qede_promiscuous_disable,
2927 	.allmulticast_enable = qede_allmulticast_enable,
2928 	.allmulticast_disable = qede_allmulticast_disable,
2929 	.dev_stop = qede_dev_stop,
2930 	.dev_close = qede_dev_close,
2931 	.stats_get = qede_get_stats,
2932 	.stats_reset = qede_reset_stats,
2933 	.xstats_get = qede_get_xstats,
2934 	.xstats_reset = qede_reset_xstats,
2935 	.xstats_get_names = qede_get_xstats_names,
2936 	.mac_addr_add = qede_mac_addr_add,
2937 	.mac_addr_remove = qede_mac_addr_remove,
2938 	.mac_addr_set = qede_mac_addr_set,
2939 	.vlan_offload_set = qede_vlan_offload_set,
2940 	.vlan_filter_set = qede_vlan_filter_set,
2941 	.flow_ctrl_set = qede_flow_ctrl_set,
2942 	.flow_ctrl_get = qede_flow_ctrl_get,
2943 	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2944 	.rss_hash_update = qede_rss_hash_update,
2945 	.rss_hash_conf_get = qede_rss_hash_conf_get,
2946 	.reta_update  = qede_rss_reta_update,
2947 	.reta_query  = qede_rss_reta_query,
2948 	.mtu_set = qede_set_mtu,
2949 	.filter_ctrl = qede_dev_filter_ctrl,
2950 	.udp_tunnel_port_add = qede_udp_dst_port_add,
2951 	.udp_tunnel_port_del = qede_udp_dst_port_del,
2952 };
2953 
2954 static const struct eth_dev_ops qede_eth_vf_dev_ops = {
2955 	.dev_configure = qede_dev_configure,
2956 	.dev_infos_get = qede_dev_info_get,
2957 	.rx_queue_setup = qede_rx_queue_setup,
2958 	.rx_queue_release = qede_rx_queue_release,
2959 	.tx_queue_setup = qede_tx_queue_setup,
2960 	.tx_queue_release = qede_tx_queue_release,
2961 	.dev_start = qede_dev_start,
2962 	.dev_set_link_up = qede_dev_set_link_up,
2963 	.dev_set_link_down = qede_dev_set_link_down,
2964 	.link_update = qede_link_update,
2965 	.promiscuous_enable = qede_promiscuous_enable,
2966 	.promiscuous_disable = qede_promiscuous_disable,
2967 	.allmulticast_enable = qede_allmulticast_enable,
2968 	.allmulticast_disable = qede_allmulticast_disable,
2969 	.dev_stop = qede_dev_stop,
2970 	.dev_close = qede_dev_close,
2971 	.stats_get = qede_get_stats,
2972 	.stats_reset = qede_reset_stats,
2973 	.xstats_get = qede_get_xstats,
2974 	.xstats_reset = qede_reset_xstats,
2975 	.xstats_get_names = qede_get_xstats_names,
2976 	.vlan_offload_set = qede_vlan_offload_set,
2977 	.vlan_filter_set = qede_vlan_filter_set,
2978 	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2979 	.rss_hash_update = qede_rss_hash_update,
2980 	.rss_hash_conf_get = qede_rss_hash_conf_get,
2981 	.reta_update  = qede_rss_reta_update,
2982 	.reta_query  = qede_rss_reta_query,
2983 	.mtu_set = qede_set_mtu,
2984 	.udp_tunnel_port_add = qede_udp_dst_port_add,
2985 	.udp_tunnel_port_del = qede_udp_dst_port_del,
2986 };
2987 
2988 static void qede_update_pf_params(struct ecore_dev *edev)
2989 {
2990 	struct ecore_pf_params pf_params;
2991 
2992 	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
2993 	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
2994 	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
2995 	qed_ops->common->update_pf_params(edev, &pf_params);
2996 }
2997 
2998 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
2999 {
3000 	struct rte_pci_device *pci_dev;
3001 	struct rte_pci_addr pci_addr;
3002 	struct qede_dev *adapter;
3003 	struct ecore_dev *edev;
3004 	struct qed_dev_eth_info dev_info;
3005 	struct qed_slowpath_params params;
3006 	static bool do_once = true;
3007 	uint8_t bulletin_change;
3008 	uint8_t vf_mac[ETHER_ADDR_LEN];
3009 	uint8_t is_mac_forced;
3010 	bool is_mac_exist;
3011 	/* Fix up ecore debug level */
3012 	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
3013 	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
3014 	int rc;
3015 
3016 	/* Extract key data structures */
3017 	adapter = eth_dev->data->dev_private;
3018 	adapter->ethdev = eth_dev;
3019 	edev = &adapter->edev;
3020 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3021 	pci_addr = pci_dev->addr;
3022 
3023 	PMD_INIT_FUNC_TRACE(edev);
3024 
3025 	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
3026 		 pci_addr.bus, pci_addr.devid, pci_addr.function,
3027 		 eth_dev->data->port_id);
3028 
3029 	eth_dev->rx_pkt_burst = qede_recv_pkts;
3030 	eth_dev->tx_pkt_burst = qede_xmit_pkts;
3031 	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
3032 
3033 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3034 		DP_ERR(edev, "Skipping device init from secondary process\n");
3035 		return 0;
3036 	}
3037 
3038 	rte_eth_copy_pci_info(eth_dev, pci_dev);
3039 
3040 	/* @DPDK */
3041 	edev->vendor_id = pci_dev->id.vendor_id;
3042 	edev->device_id = pci_dev->id.device_id;
3043 
3044 	qed_ops = qed_get_eth_ops();
3045 	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops\n");
3047 		return -EINVAL;
3048 	}
3049 
3050 	DP_INFO(edev, "Starting qede probe\n");
3051 	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
3052 				    dp_level, is_vf);
3053 	if (rc != 0) {
3054 		DP_ERR(edev, "qede probe failed rc %d\n", rc);
3055 		return -ENODEV;
3056 	}
3057 	qede_update_pf_params(edev);
3058 	rte_intr_callback_register(&pci_dev->intr_handle,
3059 				   qede_interrupt_handler, (void *)eth_dev);
3060 	if (rte_intr_enable(&pci_dev->intr_handle)) {
3061 		DP_ERR(edev, "rte_intr_enable() failed\n");
3062 		return -ENODEV;
3063 	}
3064 
3065 	/* Start the Slowpath-process */
3066 	memset(&params, 0, sizeof(struct qed_slowpath_params));
3067 	params.int_mode = ECORE_INT_MODE_MSIX;
3068 	params.drv_major = QEDE_PMD_VERSION_MAJOR;
3069 	params.drv_minor = QEDE_PMD_VERSION_MINOR;
3070 	params.drv_rev = QEDE_PMD_VERSION_REVISION;
3071 	params.drv_eng = QEDE_PMD_VERSION_PATCH;
3072 	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
3073 		QEDE_PMD_DRV_VER_STR_SIZE);
3074 
	/* For a CMT mode device, poll periodically for slowpath events.
	 * This is required since a uio device uses only one MSI-X
	 * interrupt vector, but we need one per engine.
	 */
3079 	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
3080 		rc = rte_eal_alarm_set(timer_period * US_PER_S,
3081 				       qede_poll_sp_sb_cb,
3082 				       (void *)eth_dev);
3083 		if (rc != 0) {
3084 			DP_ERR(edev, "Unable to start periodic"
3085 				     " timer rc %d\n", rc);
3086 			return -EINVAL;
3087 		}
3088 	}
3089 
3090 	rc = qed_ops->common->slowpath_start(edev, &params);
3091 	if (rc) {
3092 		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
3093 		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
3094 				     (void *)eth_dev);
3095 		return -ENODEV;
3096 	}
3097 
3098 	rc = qed_ops->fill_dev_info(edev, &dev_info);
3099 	if (rc) {
3100 		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
3101 		qed_ops->common->slowpath_stop(edev);
3102 		qed_ops->common->remove(edev);
3103 		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
3104 				     (void *)eth_dev);
3105 		return -ENODEV;
3106 	}
3107 
3108 	qede_alloc_etherdev(adapter, &dev_info);
3109 
3110 	adapter->ops->common->set_name(edev, edev->name);
3111 
3112 	if (!is_vf)
3113 		adapter->dev_info.num_mac_filters =
3114 			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
3115 					    ECORE_MAC);
3116 	else
3117 		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
3118 				(uint32_t *)&adapter->dev_info.num_mac_filters);
3119 
3120 	/* Allocate memory for storing MAC addr */
3121 	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
3122 					(ETHER_ADDR_LEN *
3123 					adapter->dev_info.num_mac_filters),
3124 					RTE_CACHE_LINE_SIZE);
3125 
3126 	if (eth_dev->data->mac_addrs == NULL) {
3127 		DP_ERR(edev, "Failed to allocate MAC address\n");
3128 		qed_ops->common->slowpath_stop(edev);
3129 		qed_ops->common->remove(edev);
3130 		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
3131 				     (void *)eth_dev);
3132 		return -ENOMEM;
3133 	}
3134 
3135 	if (!is_vf) {
3136 		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
3137 				hw_info.hw_mac_addr,
3138 				&eth_dev->data->mac_addrs[0]);
3139 		ether_addr_copy(&eth_dev->data->mac_addrs[0],
3140 				&adapter->primary_mac);
3141 	} else {
3142 		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
3143 				       &bulletin_change);
3144 		if (bulletin_change) {
3145 			is_mac_exist =
3146 			    ecore_vf_bulletin_get_forced_mac(
3147 						ECORE_LEADING_HWFN(edev),
3148 						vf_mac,
3149 						&is_mac_forced);
3150 			if (is_mac_exist && is_mac_forced) {
3151 				DP_INFO(edev, "VF macaddr received from PF\n");
3152 				ether_addr_copy((struct ether_addr *)&vf_mac,
3153 						&eth_dev->data->mac_addrs[0]);
3154 				ether_addr_copy(&eth_dev->data->mac_addrs[0],
3155 						&adapter->primary_mac);
3156 			} else {
3157 				DP_ERR(edev, "No VF macaddr assigned\n");
3158 			}
3159 		}
3160 	}
3161 
3162 	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
3163 
3164 	if (do_once) {
3165 		qede_print_adapter_info(adapter);
3166 		do_once = false;
3167 	}
3168 
3169 	/* Bring-up the link */
3170 	qede_dev_set_link_state(eth_dev, true);
3171 
3172 	adapter->num_tx_queues = 0;
3173 	adapter->num_rx_queues = 0;
3174 	SLIST_INIT(&adapter->fdir_info.fdir_list_head);
3175 	SLIST_INIT(&adapter->vlan_list_head);
3176 	SLIST_INIT(&adapter->uc_list_head);
3177 	adapter->mtu = ETHER_MTU;
3178 	adapter->vport_started = false;
3179 
	/* VF tunnel offloads are enabled by default in the PF driver */
3181 	adapter->vxlan.num_filters = 0;
3182 	adapter->geneve.num_filters = 0;
3183 	adapter->ipgre.num_filters = 0;
3184 	if (is_vf) {
3185 		adapter->vxlan.enable = true;
3186 		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
3187 					     ETH_TUNNEL_FILTER_IVLAN;
3188 		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
3189 		adapter->geneve.enable = true;
3190 		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
3191 					      ETH_TUNNEL_FILTER_IVLAN;
3192 		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
3193 		adapter->ipgre.enable = true;
3194 		adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
3195 					     ETH_TUNNEL_FILTER_IVLAN;
3196 	} else {
3197 		adapter->vxlan.enable = false;
3198 		adapter->geneve.enable = false;
3199 		adapter->ipgre.enable = false;
3200 	}
3201 
3202 	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
3203 		adapter->primary_mac.addr_bytes[0],
3204 		adapter->primary_mac.addr_bytes[1],
3205 		adapter->primary_mac.addr_bytes[2],
3206 		adapter->primary_mac.addr_bytes[3],
3207 		adapter->primary_mac.addr_bytes[4],
3208 		adapter->primary_mac.addr_bytes[5]);
3209 
3210 	DP_INFO(edev, "Device initialized\n");
3211 
3212 	return 0;
3213 }
3214 
3215 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
3216 {
3217 	return qede_common_dev_init(eth_dev, 1);
3218 }
3219 
3220 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
3221 {
3222 	return qede_common_dev_init(eth_dev, 0);
3223 }
3224 
3225 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
3226 {
3227 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
3228 	struct qede_dev *qdev = eth_dev->data->dev_private;
3229 	struct ecore_dev *edev = &qdev->edev;
3230 
3231 	PMD_INIT_FUNC_TRACE(edev);
3232 #endif
3233 
3234 	/* only uninitialize in the primary process */
3235 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3236 		return 0;
3237 
3238 	/* safe to close dev here */
3239 	qede_dev_close(eth_dev);
3240 
3241 	eth_dev->dev_ops = NULL;
3242 	eth_dev->rx_pkt_burst = NULL;
3243 	eth_dev->tx_pkt_burst = NULL;
3244 
3245 	if (eth_dev->data->mac_addrs)
3246 		rte_free(eth_dev->data->mac_addrs);
3247 
3248 	eth_dev->data->mac_addrs = NULL;
3249 
3250 	return 0;
3251 }
3252 
3253 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
3254 {
3255 	return qede_dev_common_uninit(eth_dev);
3256 }
3257 
3258 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
3259 {
3260 	return qede_dev_common_uninit(eth_dev);
3261 }
3262 
3263 static const struct rte_pci_id pci_id_qedevf_map[] = {
3264 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
3265 	{
3266 		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
3267 	},
3268 	{
3269 		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
3270 	},
3271 	{
3272 		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
3273 	},
3274 	{.vendor_id = 0,}
3275 };
3276 
3277 static const struct rte_pci_id pci_id_qede_map[] = {
3278 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
3279 	{
3280 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
3281 	},
3282 	{
3283 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
3284 	},
3285 	{
3286 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
3287 	},
3288 	{
3289 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
3290 	},
3291 	{
3292 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
3293 	},
3294 	{
3295 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
3296 	},
3297 	{
3298 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
3299 	},
3300 	{
3301 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
3302 	},
3303 	{
3304 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
3305 	},
3306 	{
3307 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
3308 	},
3309 	{.vendor_id = 0,}
3310 };
3311 
3312 static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3313 	struct rte_pci_device *pci_dev)
3314 {
3315 	return rte_eth_dev_pci_generic_probe(pci_dev,
3316 		sizeof(struct qede_dev), qedevf_eth_dev_init);
3317 }
3318 
3319 static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
3320 {
3321 	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
3322 }
3323 
3324 static struct rte_pci_driver rte_qedevf_pmd = {
3325 	.id_table = pci_id_qedevf_map,
3326 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3327 	.probe = qedevf_eth_dev_pci_probe,
3328 	.remove = qedevf_eth_dev_pci_remove,
3329 };
3330 
3331 static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3332 	struct rte_pci_device *pci_dev)
3333 {
3334 	return rte_eth_dev_pci_generic_probe(pci_dev,
3335 		sizeof(struct qede_dev), qede_eth_dev_init);
3336 }
3337 
3338 static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
3339 {
3340 	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
3341 }
3342 
3343 static struct rte_pci_driver rte_qede_pmd = {
3344 	.id_table = pci_id_qede_map,
3345 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3346 	.probe = qede_eth_dev_pci_probe,
3347 	.remove = qede_eth_dev_pci_remove,
3348 };
3349 
3350 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
3351 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
3352 RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
3353 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
3354 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
3355 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
3356 
3357 RTE_INIT(qede_init_log);
3358 static void
3359 qede_init_log(void)
3360 {
3361 	qede_logtype_init = rte_log_register("pmd.net.qede.init");
3362 	if (qede_logtype_init >= 0)
3363 		rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
3364 	qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
3365 	if (qede_logtype_driver >= 0)
3366 		rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
3367 }
3368